I always get an error on the last line.
It is a freshly created Scrapy project.
#stocks.py
# -*- coding: utf-8 -*-
import scrapy
import re


class StocksSpider(scrapy.Spider):
    name = 'stocks'
    start_urls = ['http://quote.eastmoney.com/stocklist.html']

    def parse(self, response):
        for href in response.css('a::attr(href)').extract():
            try:
                # Stock codes look like sh600000 / sz000001
                stock = re.findall(r"[s][hz]\d{6}", href)[0]
                # Note the slash after /stock/ in the detail-page URL
                url = 'https://gupiao.baidu.com/stock/' + stock + '.html'
                # Fixed typo: scrapy.Request (was scrapy.Resquest)
                yield scrapy.Request(url, callback=self.parse_stock)
            except IndexError:
                continue

    def parse_stock(self, response):
        infoDict = {}
        stockInfo = response.css('.stock-bets')
        name = stockInfo.css('.bets-name').extract()[0]
        keyList = stockInfo.css('dt').extract()
        # valueList was never defined in the original code, so the
        # re.findall(..., valueList[i]) line raised a NameError
        valueList = stockInfo.css('dd').extract()
        for i in range(len(keyList)):
            key = re.findall(r'>.*</dt>', keyList[i])[0][1:-5]
            try:
                val = re.findall(r'\d+\.?.*</dd>', valueList[i])[0][0:-5]
            except IndexError:
                val = '--'
            infoDict[key] = val
        infoDict.update(
            {'股票名称': re.findall(r'\s.*\(', name)[0].split()[0] +
                        re.findall(r'>.*<', name)[0][1:-1]})  # [1:-1], not [1:-i]
        yield infoDict
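Two of the pieces above can be checked without running the spider at all. A minimal sketch of the stock-code regex against some invented hrefs (the URLs below are made-up examples, not scraped data):

import re

# Invented sample hrefs in the shape the stock list page uses
hrefs = ['http://quote.eastmoney.com/sh600000.html',
         'http://quote.eastmoney.com/sz000001.html',
         'http://quote.eastmoney.com/center/sslist.html']

for href in hrefs:
    match = re.findall(r'[s][hz]\d{6}', href)
    print(href, '->', match[0] if match else 'no match')

Only the first two hrefs should match; anything without an sh/sz prefix plus six digits is skipped by the spider's except branch.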
#pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html


class BaidustocksPipeline(object):
    def process_item(self, item, spider):
        return item


class BaidustocksInfoPipeline(object):
    def open_spider(self, spider):
        # utf-8 avoids encoding errors when writing the Chinese keys
        self.f = open('BaidustockInfo.txt', 'w', encoding='utf-8')

    def close_spider(self, spider):
        self.f.close()

    def process_item(self, item, spider):
        try:
            line = str(dict(item)) + '\n'
            self.f.write(line)
        except Exception:
            pass
        return item
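The pipeline can also be exercised by hand before a full crawl; a minimal sketch, assuming the project package is named BaiduStocks as in settings.py (the item dict is invented sample data):

from BaiduStocks.pipelines import BaidustocksInfoPipeline

pipeline = BaidustocksInfoPipeline()
pipeline.open_spider(None)   # the spider argument is unused here
pipeline.process_item({'股票名称': '浦发银行', '最高': '12.30'}, None)
pipeline.close_spider(None)
# BaidustockInfo.txt should now contain one line with the dict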
#settings.py
# -*- coding: utf-8 -*-

# Scrapy settings for BaiduStocks project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'BaiduStocks'

SPIDER_MODULES = ['BaiduStocks.spiders']
NEWSPIDER_MODULE = 'BaiduStocks.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'BaiduStocks (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'BaiduStocks.middlewares.BaidustocksSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'BaiduStocks.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'BaiduStocks.pipelines.BaidustocksInfoPipeline': 300,
}  # the closing brace was commented out as #}, which is a SyntaxError

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
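With BaidustocksInfoPipeline registered in ITEM_PIPELINES, the crawl is started from the project root with `scrapy crawl stocks` (the name defined on the spider), and results accumulate in BaidustockInfo.txt.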