31. Website Data Monitoring - 1
Published: 2019-06-26


Website data monitoring. The idea: extract the monitored fields from the target page, MD5-hash the text, and store the hash in the database so that later runs can compare a fresh hash against the stored one to detect updates, then send an email notification. Four fields are stored per check: the content hash, the page URL, the site name, and a timestamp.
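The post does not show the table definition, but the pipeline's "data already exists" branch only works if the database rejects duplicate hashes. Below is a minimal sketch of the implied web_page_update table, created through pymysql; the UNIQUE index on content_md5 is an assumption that matches that behavior, and the column types are guesses.

# create_table.py -- hypothetical helper, not part of the original project
import pymysql

DDL = """
CREATE TABLE IF NOT EXISTS web_page_update (
    id INT AUTO_INCREMENT PRIMARY KEY,
    content_md5 CHAR(32) NOT NULL,      -- MD5 of the monitored text
    website_url VARCHAR(255),           -- URL of the monitored page
    website_name VARCHAR(100),          -- website name
    date_time VARCHAR(32),              -- timestamp of the check
    UNIQUE KEY uniq_md5 (content_md5)   -- duplicate hash => site unchanged
)
"""

client = pymysql.connect(host='172.16.0.55', user='root', password='concom603',
                         port=3306, db='web_page', charset='utf8')
with client.cursor() as cursor:
    cursor.execute(DDL)   # create the table the MysqlPipeline inserts into
client.commit()
client.close()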
Helper module: md5_tool.py

# -*- coding:utf-8 -*-
import hashlib


# MD5 encryption
def md5_encode(text):
    hash = hashlib.md5()
    hash.update(bytes(text, encoding='utf-8'))  # the string to be hashed goes here
    return hash.hexdigest()  # return the hex digest of the hash
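A quick sanity check of the helper: the same input always produces the same 32-character hex digest, which is what makes hash comparison a valid update check.

from WEB.conmon.md5_tool import md5_encode

print(md5_encode('hello'))                          # 5d41402abc4b2a76b9719d911017c592
print(md5_encode('hello') == md5_encode('hello'))   # True: same text, same digest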
wenzhou.py

# -*- coding: utf-8 -*-
import scrapy
import time
import re
from WEB.conmon.md5_tool import md5_encode
from WEB.items import WebItem


class CompanyInfoSpider(scrapy.Spider):
    name = 'wenzhou'
    allowed_domains = ['wzszjw.wenzhou.gov.cn']
    start_urls = ['http://wzszjw.wenzhou.gov.cn/col/col1357901/index.html']
    custom_settings = {
        "DOWNLOAD_DELAY": 0.5,
        "ITEM_PIPELINES": {
            'WEB.pipelines.MysqlPipeline': 320
        },
        "DOWNLOADER_MIDDLEWARES": {
            'WEB.middlewares.RandomUaseragentMiddleware': 500,
        },
    }

    def parse(self, response):
        # normalize the page encoding (the original comments mention gbk decoding)
        _response = response.text.encode('utf-8')
        # print(_response)
        _response = _response.decode('utf-8')
        # NOTE: the extraction regex was truncated in the source text
        # ('texts = re.findall(".*?·...'), so the pattern below is a placeholder,
        # and the rest of this method is reconstructed from the article's
        # description: hash the monitored text and yield the four fields.
        texts = re.findall(r'.*?·', _response)  # placeholder for the truncated pattern
        item = WebItem()
        item['content_md5'] = md5_encode(''.join(texts))  # hash of the monitored text
        item['website_url'] = response.url                # page being monitored
        item['website_name'] = self.name                  # site label (assumed)
        item['date_time'] = str(int(time.time()))         # current timestamp
        yield item
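Besides the usual scrapy crawl wenzhou, the spider can be driven from a script, which is convenient when the monitor runs from cron. A sketch using Scrapy's standard CrawlerProcess (it assumes the script is executed from the project root so get_project_settings finds the settings module):

# run_wenzhou.py -- optional runner script, not part of the original post
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())
process.crawl('wenzhou')  # spider name as defined in CompanyInfoSpider.name
process.start()           # blocks until the crawl finishes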
items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class WebItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    content_md5 = scrapy.Field()   # MD5 of the monitored text
    website_url = scrapy.Field()   # URL of the scraped page
    website_name = scrapy.Field()  # website name
    date_time = scrapy.Field()     # current timestamp
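The pipeline below converts the item to a plain dict to build its INSERT statement; a quick illustration of that round trip (the field values here are made up):

from WEB.items import WebItem

item = WebItem(content_md5='d41d8cd98f00b204e9800998ecf8427e',  # MD5 of the empty string
               website_url='http://wzszjw.wenzhou.gov.cn/col/col1357901/index.html',
               website_name='wenzhou',
               date_time='1537860240')
print(dict(item))                    # the pipeline builds its SQL from this dict
print(','.join(dict(item).keys()))   # column list used in the INSERT statement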
pipelines.py

# -*- coding: utf-8 -*-
from scrapy.conf import settings
import pymysql


class WebPipeline(object):
    def process_item(self, item, spider):
        return item


# Save data to MySQL
class MysqlPipeline(object):
    def open_spider(self, spider):
        self.host = settings.get('MYSQL_HOST')
        self.port = settings.get('MYSQL_PORT')
        self.user = settings.get('MYSQL_USER')
        self.password = settings.get('MYSQL_PASSWORD')
        self.db = settings.get('MYSQL_DB')
        self.table = settings.get('TABLE')
        self.client = pymysql.connect(host=self.host, user=self.user, password=self.password,
                                      port=self.port, db=self.db, charset='utf8')

    def process_item(self, item, spider):
        item_dict = dict(item)
        cursor = self.client.cursor()
        values = ','.join(['%s'] * len(item_dict))
        keys = ','.join(item_dict.keys())
        sql = 'INSERT INTO {table}({keys}) VALUES ({values})'.format(table=self.table, keys=keys, values=values)
        try:
            # first argument is the SQL statement, the second a tuple of values
            if cursor.execute(sql, tuple(item_dict.values())):
                print('Data saved successfully!')
                self.client.commit()
        except Exception as e:
            print(e)
            print('Data already exists; the site has not been updated!')
            self.client.rollback()
        return item

    def close_spider(self, spider):
        self.client.close()
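The intro mentions email notification as the final step, but this part of the post does not include that code. A minimal sketch with the standard-library smtplib, which could be called from the success branch of process_item; the addresses and SMTP host are placeholders, not values from the original project:

# notify.py -- hypothetical sketch; the original section does not show the mail code
import smtplib
from email.mime.text import MIMEText

def send_update_mail(website_name, website_url):
    msg = MIMEText('Site updated: %s (%s)' % (website_name, website_url), 'plain', 'utf-8')
    msg['Subject'] = 'Website update detected'
    msg['From'] = 'monitor@example.com'        # placeholder sender
    msg['To'] = 'admin@example.com'            # placeholder recipient
    server = smtplib.SMTP('smtp.example.com')  # placeholder SMTP host
    server.send_message(msg)
    server.quit()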
settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for WEB project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'WEB'

SPIDER_MODULES = ['WEB.spiders']
NEWSPIDER_MODULE = 'WEB.spiders'

# MySQL connection parameters
MYSQL_HOST = "172.16.0.55"
MYSQL_PORT = 3306
MYSQL_USER = "root"
MYSQL_PASSWORD = "concom603"
MYSQL_DB = 'web_page'
TABLE = "web_page_update"

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'WEB (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'WEB.middlewares.WebSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'WEB.middlewares.WebDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'WEB.pipelines.WebPipeline': 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
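One caveat: from scrapy.conf import settings, used in pipelines.py above, was deprecated and later removed from Scrapy. A sketch of the supported way to read the same MySQL settings, via the crawler's settings object (only the settings-loading part of the pipeline is shown):

# Modern alternative for reading the settings inside MysqlPipeline
class MysqlPipeline(object):
    @classmethod
    def from_crawler(cls, crawler):
        s = crawler.settings
        return cls(host=s.get('MYSQL_HOST'), port=s.getint('MYSQL_PORT'),
                   user=s.get('MYSQL_USER'), password=s.get('MYSQL_PASSWORD'),
                   db=s.get('MYSQL_DB'), table=s.get('TABLE'))

    def __init__(self, host, port, user, password, db, table):
        self.host, self.port = host, port
        self.user, self.password = user, password
        self.db, self.table = db, table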
middlewares.py

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

import random

from scrapy import signals


class WebSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class WebDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


# Randomly rotate the User-Agent
class RandomUaseragentMiddleware(object):
    def __init__(self):
        self.user_agent = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1",
            "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
            "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5",
            "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0 )",
            "HTC_Dream Mozilla/5.0 (Linux; U; Android 1.5; en-ca; Build/CUPCAKE) AppleWebKit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
            "Mozilla/5.0 (Linux; U; Android 2.3.6; en-us; Nexus S Build/GRK39F) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
            "Mozilla/5.0 (Linux; U; Android 2.1; en-us; HTC Legend Build/cupcake) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
            "Mozilla/5.0 (Linux; U; Android 2.0; en-us; Milestone Build/ SHOLS_U2_01.03.1) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; TheWorld)",
            "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; .NET4.0E; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C)",
            "Opera/9.80 (Windows NT 5.1; U; zh-cn) Presto/2.9.168 Version/11.50"
        ]

    def process_request(self, request, spider):
        # pick a random UA for every outgoing request
        request.headers["User-Agent"] = random.choice(self.user_agent)
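To confirm the User-Agent rotation actually works, a throwaway spider can hit an echo endpoint; httpbin.org/user-agent returns the header the server received. This checker is not part of the original project:

# ua_check.py -- hypothetical one-off spider to verify the rotation
import scrapy

class UaCheckSpider(scrapy.Spider):
    name = 'ua_check'
    start_urls = ['http://httpbin.org/user-agent']  # echoes the request's User-Agent

    custom_settings = {
        "DOWNLOADER_MIDDLEWARES": {'WEB.middlewares.RandomUaseragentMiddleware': 500},
    }

    def parse(self, response):
        self.logger.info(response.text)  # shows which UA was actually sent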

 


Reposted from: https://www.cnblogs.com/lvjing/p/9700172.html
