Writing Scrapy crawler data into Elasticsearch

    Install elasticsearch-dsl
- Method 1: pip install elasticsearch-dsl
- Method 2: download the elasticsearch-dsl source, then cd elasticsearch-dsl and run python setup.py install
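Note that elasticsearch-dsl major versions track the Elasticsearch server's major version, so pin the install to match your cluster, e.g. pip install "elasticsearch-dsl>=7.0.0,<8.0.0" for an Elasticsearch 7.x server.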
    Create the mappings entity class
# -*- coding: utf-8 -*-
from elasticsearch_dsl import Document, Date, Keyword, Text, Integer
from elasticsearch_dsl.connections import connections

# hosts takes a list, so multiple servers can be configured
connections.create_connection(hosts=["127.0.0.1"], timeout=60)

class ArticleType(Document):  # equivalent to the index's mappings
    # ik_max_word requires the elasticsearch-analysis-ik plugin on the server
    title = Text(analyzer="ik_max_word")
    create_date = Date()
    url = Keyword()
    praise_nums = Integer()
    comment_nums = Integer()
    fav_nums = Integer()
    tags = Text(analyzer="ik_max_word")
    content = Text(analyzer="ik_max_word")

    class Index:
        name = "article"  # the Elasticsearch index name

if __name__ == "__main__":
    ArticleType.init()  # init() creates the index and its mappings in Elasticsearch
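Run this file once so the index exists before any crawling starts. A quick sanity check, reusing the connection registered above (this snippet is illustrative, not from the original article):

from elasticsearch_dsl.connections import connections

es = connections.get_connection()          # the low-level client behind the default alias
print(es.indices.exists(index="article"))  # True once ArticleType.init() has run
print(es.indices.get_mapping(index="article"))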
    Write the item
from w3lib.html import remove_tags  # strips HTML tags from a string
# ArticleType is the Document class defined above; import it from the module where you saved it

    def save_to_es(self):
        # Map the item's fields onto an ArticleType document and save it to Elasticsearch
        article = ArticleType()
        article.title = self["title"]
        article.create_date = self["create_date"]
        article.content = remove_tags(self["content"])
        article.praise_nums = self["praise_nums"]
        article.fav_nums = self["fav_nums"]
        article.comment_nums = self["comment_nums"]
        article.url = self["url"]
        article.tags = self["tags"]
        article.save()
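For context, save_to_es is defined as a method on the Scrapy item itself, so the pipeline can call it directly. A minimal sketch of the surrounding class (the name ArticleItem is an assumption; the fields match the mapping above):

import scrapy

class ArticleItem(scrapy.Item):  # class name is an assumption, not from the original project
    title = scrapy.Field()
    create_date = scrapy.Field()
    url = scrapy.Field()
    tags = scrapy.Field()
    content = scrapy.Field()
    praise_nums = scrapy.Field()
    fav_nums = scrapy.Field()
    comment_nums = scrapy.Field()

    def save_to_es(self):
        ...  # body as shown above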
    Write the pipeline
class ElasticsearchPipeline(object):
    # Write scraped items into Elasticsearch
    def process_item(self, item, spider):
        # Convert the item into an ES document and save it
        item.save_to_es()
        return item
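Finally, the pipeline has to be enabled in settings.py before Scrapy will run it; the package name below (ArticleSpider) is a placeholder for your own project:

# settings.py — register the pipeline; "ArticleSpider" is a hypothetical project name
ITEM_PIPELINES = {
    "ArticleSpider.pipelines.ElasticsearchPipeline": 300,
}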