JD.com employs fairly strict anti-bot measures, so the spider needs counter-measures such as proxy rotation and randomized User-Agent headers. Below is one possible implementation:

```python
import itertools
import json
import re

import scrapy
import xlwt
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor


class JDPhoneSpider(CrawlSpider):
    name = 'jd_phone'
    allowed_domains = ['jd.com']
    start_urls = ['https://www.jd.com/']

    rules = (
        # Follow product-style URLs discovered while crawling; adjust the
        # pattern to the pages you actually target.
        Rule(LinkExtractor(allow=r'/\d+-\d+-\d+\.html'),
             callback='parse_phone', follow=True),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.brands = ['Apple', 'Samsung', 'HUAWEI']
        self.headers = {
            'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                           'AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/58.0.3029.110 Safari/537.3'),
            'Referer': 'https://www.jd.com/',
        }
        self.proxies = [
            'http://ip1:port1',
            'http://ip2:port2',
            # ...
        ]
        # cycle() keeps rotating forever instead of raising StopIteration
        # once the list is exhausted
        self.proxy_pool = itertools.cycle(self.proxies)

    def start_requests(self):
        for url in self.start_urls:
            # No explicit callback: CrawlSpider's default entry point applies
            # the rules and then hands the response to parse_start_url
            yield scrapy.Request(url, headers=self.headers, dont_filter=True,
                                 meta={'proxy': next(self.proxy_pool)})

    def parse_start_url(self, response):
        for brand in self.brands:
            url = f'https://search.jd.com/Search?keyword={brand}&enc=utf-8'
            yield scrapy.Request(url, headers=self.headers,
                                 callback=self.parse_brand)

    def parse_brand(self, response):
        for phone in response.css('#J_goodsList .gl-item'):
            name = phone.css('.p-name em::text').get()
            price = phone.css('.p-price i::text').get()
            comment_count = phone.css('.p-commit strong a::text').get()
            is_self_run = phone.css('.p-icons .goods-icons i::text').re_first('自营')
            if name and price and comment_count:
                name = name.strip()
                brand_match = re.search('|'.join(self.brands), name, re.IGNORECASE)
                item = {
                    'name': name,
                    'brand': brand_match.group() if brand_match else '',
                    'price': float(price),
                    # counts such as "2万+" carry suffixes that int() cannot parse
                    'comment_count': int(comment_count.replace('+', '')
                                                      .replace('万', '0000')),
                    'is_self_run': bool(is_self_run),
                    # hrefs are protocol-relative (//item.jd.com/...), so join them
                    'url': response.urljoin(phone.css('.p-name a::attr(href)').get()),
                }
                yield scrapy.Request(item['url'], headers=self.headers,
                                     callback=self.parse_phone,
                                     meta={'item': item})

        next_page = response.css('.pn-next::attr(href)').get()
        if next_page:
            yield scrapy.Request(response.urljoin(next_page),
                                 headers=self.headers, callback=self.parse_brand)

    def parse_phone(self, response):
        # Requests scheduled by the Rule carry no item, so fall back to {}
        item = response.meta.get('item', {})
        spec_url = response.css('#detail .p-parameter a::attr(href)').get()
        if spec_url:
            yield scrapy.Request(response.urljoin(spec_url),
                                 headers=self.headers,
                                 callback=self.parse_spec, meta={'item': item})
        else:
            yield item

    def parse_spec(self, response):
        item = response.meta['item']
        specs = {}
        # Each .Ptable-item block holds <dt> (parameter name) / <dd> (value) pairs
        for table in response.css('.Ptable-item'):
            keys = [k.strip() for k in table.css('dt::text').getall()]
            values = [v.strip() for v in table.css('dd::text').getall()]
            specs.update(zip(keys, values))
        item['specs'] = specs
        yield item
```

The spider works roughly as follows:

1. `parse_start_url` builds a search-results URL for each target brand and hands the responses to `parse_brand`, which extracts each phone's basic information (name, price, comment count, whether it is sold by JD itself).
2. For every phone, `parse_brand` also extracts the brand and the product-page URL, then requests that page so `parse_phone` can locate the link to the detailed-specifications page.
3. `parse_spec` parses the detailed parameters (screen size, camera resolution, and so on) and adds them to the item as a `specs` dict.
4. `start_requests` attaches the proxy and custom headers as anti-anti-crawling measures.
5. Proxies are switched dynamically in a `process_request` hook. Note that this hook is not part of the Spider API: Scrapy only calls it on a downloader middleware, which you must register yourself, as in the sketch below.
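The original draft attached `process_request` to the spider class, where Scrapy never invokes it. A minimal way to wire it up is a downloader middleware; the sketch below is illustrative (the class name `ProxyUAMiddleware`, the module path, and the proxy/UA lists are assumptions, not part of the original code), and it also adds the random User-Agent selection that the prose mentions but the spider code omits:

```python
# middlewares.py -- a minimal sketch; proxy addresses and UA strings are placeholders
import itertools
import random


class ProxyUAMiddleware:
    """Rotate proxies and pick a random User-Agent for every outgoing request."""

    user_agents = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        # add more UA strings here
    ]

    def __init__(self):
        self.proxy_pool = itertools.cycle([
            'http://ip1:port1',
            'http://ip2:port2',
        ])

    def process_request(self, request, spider):
        # Returning None lets Scrapy keep processing the (now modified) request
        request.meta['proxy'] = next(self.proxy_pool)
        request.headers['User-Agent'] = random.choice(self.user_agents)
        spider.logger.debug('Using proxy: %s', request.meta['proxy'])
```

Enable it in `settings.py` (the module path `myproject` is hypothetical; use your project's real name):

```python
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.ProxyUAMiddleware': 543,
}
```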
The scraped results can then be saved to an Excel file with code like the following:

```python
def save_to_excel(items):
    book = xlwt.Workbook(encoding='utf-8')
    sheet = book.add_sheet('Sheet1')
    headers = ['名称', '品牌', '价格', '评价数量', '是否自营', '详细参数', 'URL']
    for i, header in enumerate(headers):
        sheet.write(0, i, header)
    for i, item in enumerate(items):
        sheet.write(i + 1, 0, item['name'])
        sheet.write(i + 1, 1, item['brand'])
        sheet.write(i + 1, 2, item['price'])
        sheet.write(i + 1, 3, item['comment_count'])
        sheet.write(i + 1, 4, '是' if item['is_self_run'] else '否')
        # items that never reached a spec page carry no 'specs' key
        sheet.write(i + 1, 5, json.dumps(item.get('specs', {}), ensure_ascii=False))
        sheet.write(i + 1, 6, item['url'])
    book.save('phones.xls')


# On a spider, the shutdown hook is closed(self, reason); this assumes the
# scraped items have been collected into self.items along the way
def closed(self, reason):
    save_to_excel(self.items)
```

Besides the basic fields (name, price, and so on), the generated Excel file contains a 详细参数 (detailed parameters) column holding a JSON string with the phone's full specification; Excel's text-to-columns feature can split it into individual parameters.
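The `closed` hook above presupposes a `self.items` list that nothing in the spider actually populates. One way to close that gap, again as a sketch with illustrative names, is an item pipeline that accumulates items and triggers the export when the spider finishes:

```python
# pipelines.py -- a sketch; assumes save_to_excel() is importable here
class ExcelExportPipeline:
    def open_spider(self, spider):
        self.items = []

    def process_item(self, item, spider):
        # Collect every item the spider yields, then pass it along unchanged
        self.items.append(item)
        return item

    def close_spider(self, spider):
        save_to_excel(self.items)
```

Register it in `settings.py` via `ITEM_PIPELINES = {'myproject.pipelines.ExcelExportPipeline': 300}` (again substituting your project's real module path).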
