京东热卖手机信息抓取:Scrapy框架实现
由于京东网站需要登录才能获取商品信息,因此需要使用模拟登录的方式来抓取数据。以下是基于Scrapy框架实现的代码:

1. 创建Scrapy项目

在命令行中执行以下命令:

```
scrapy startproject jd_phone
```

2. 编写登录模块

在jd_phone/spiders目录下创建一个名为login.py的文件,用于模拟登录京东网站。

```python
import scrapy
from scrapy.http import FormRequest

class LoginSpider(scrapy.Spider):
    name = 'login'
    allowed_domains = ['passport.jd.com']
    start_urls = ['https://passport.jd.com/new/login.aspx']

    def parse(self, response):
        # 获取登录所需的参数
        uuid = response.css('input[name="uuid"]::attr(value)').get()
        sa_token = response.css('input[name="sa_token"]::attr(value)').get()
        _t = response.css('input[name="_t"]::attr(value)').get()
        login_url = 'https://passport.jd.com/uc/loginService'

        # 构造表单数据并提交登录请求
        yield FormRequest(
            login_url,
            formdata={
                'uuid': uuid,
                'sa_token': sa_token,
                '_t': _t,
                'loginname': 'your_username',
                'nloginpwd': 'your_password',
            },
            callback=self.after_login
        )

    def after_login(self, response):
        # 检查登录结果
        result = response.json()
        if result.get('success'):
            self.logger.info('Login successfully')
            yield scrapy.Request('https://home.jd.com/', callback=self.parse_homepage)
        else:
            self.logger.error('Login failed: %s' % result.get('emptyAuthcodeUrl'))

    def parse_homepage(self, response):
        # 检查是否登录成功
        if '退出' in response.text:
            self.logger.info('Successfully get homepage')
        else:
            self.logger.error('Failed to get homepage')
```

需要将代码中的your_username和your_password替换成自己的京东账号和密码。

3. 
编写商品抓取模块

在jd_phone/spiders目录下创建一个名为phone.py的文件,用于抓取热卖手机信息。

```python
import scrapy
import re
import json
from scrapy.selector import Selector
from scrapy.http import Request
from jd_phone.items import PhoneItem

class PhoneSpider(scrapy.Spider):
    name = 'phone'
    allowed_domains = ['jd.com']
    start_urls = ['https://www.jd.com/']

    def parse(self, response):
        # 获取三个品牌的手机列表页面
        brands = ['Apple', '华为', '小米']
        for brand in brands:
            url = 'https://search.jd.com/Search?keyword=%s手机&enc=utf-8&wq=%s手机' % (brand, brand)
            yield scrapy.Request(url, callback=self.parse_phone_list, meta={'brand': brand})

    def parse_phone_list(self, response):
        # 获取手机列表页面中所有符合要求的手机商品
        brand = response.meta['brand']
        phone_list = response.css('#J_goodsList .gl-item')
        for phone in phone_list:
            name = phone.css('.p-name em::text').get()
            price = phone.css('.p-price i::text').get()
            comment_count = phone.css('.p-commit strong a::text').get()
            is_self_run = '自营' in phone.css('.p-icons i::text').getall()
            phone_id = re.findall(r'item\.jd\.com/(\d+)\.html', phone.css('.p-img a::attr(href)').get())[0]

            # 构造手机详细信息页面的URL并继续抓取
            url = 'https://item.jd.com/%s.html' % phone_id
            yield scrapy.Request(url, callback=self.parse_phone_detail, meta={
                'brand': brand,
                'name': name,
                'price': price,
                'comment_count': comment_count,
                'is_self_run': is_self_run,
            })

        # 获取下一页的URL并继续抓取
        next_page_url = response.css('.pn-next::attr(href)').get()
        if next_page_url:
            yield scrapy.Request(response.urljoin(next_page_url), callback=self.parse_phone_list, meta={'brand': brand})

    def parse_phone_detail(self, response):
        # 获取手机的详细参数信息
        brand = response.meta['brand']
        name = response.meta['name']
        price = response.meta['price']
        comment_count = response.meta['comment_count']
        is_self_run = response.meta['is_self_run']

        param_url = 'https://c.3.cn/recommend/get?sku=%s&cat=9987%%2C653%%2C655&area=1_72_2799_0&callback=jQuery%s' % (re.findall(r'"skuId":(\d+),', response.text)[0], '%28function%28%29%7Bvar%20')
        yield scrapy.Request(param_url, callback=self.parse_phone_param, meta={
            'brand': brand,
            'name': name,
            'price': price,
            'comment_count': comment_count,
            'is_self_run': is_self_run,
        })

    def parse_phone_param(self, response):
        # 解析手机参数信息并保存为item
        brand = response.meta['brand']
        name = response.meta['name']
        price = response.meta['price']
        comment_count = response.meta['comment_count']
        is_self_run = response.meta['is_self_run']

        param_text = re.findall(r'jQuery\S+\((.*)\);', response.text)[0]
        param_json = json.loads(param_text)
        param_html = param_json['data']['venderComments']['comments'][0]['content']
        param = Selector(text=param_html).css('.parameter2 li::text').getall()
        param_dict = {}
        for p in param:
            k, v = p.strip().split(':')
            param_dict[k] = v

        item = PhoneItem()
        item['brand'] = brand
        item['name'] = name
        item['price'] = price
        item['comment_count'] = comment_count
        item['is_self_run'] = is_self_run
        item['param_dict'] = param_dict
        yield item
```

其中,PhoneItem定义在jd_phone/items.py中:

```python
import scrapy

class PhoneItem(scrapy.Item):
    brand = scrapy.Field()
    name = scrapy.Field()
    price = scrapy.Field()
    comment_count = scrapy.Field()
    is_self_run = scrapy.Field()
    param_dict = scrapy.Field()
```

4. 
数据去重和保存

修改jd_phone/pipelines.py文件,定义一个去重管道和一个保存为Excel文件的管道:

```python
import pandas as pd
from scrapy.exceptions import DropItem

class DuplicatesPipeline:
    def __init__(self):
        self.phone_set = set()

    def process_item(self, item, spider):
        # 根据品牌和型号去重
        key = '%s_%s' % (item['brand'], item['param_dict'].get('型号'))
        if key in self.phone_set:
            raise DropItem('Duplicate item found: %s' % key)
        else:
            self.phone_set.add(key)
            return item

class ExcelPipeline:
    def __init__(self):
        self.df = pd.DataFrame(columns=['品牌', '型号', '价格', '评价数量', '是否自营', '参数'])

    def process_item(self, item, spider):
        # 将数据保存为Excel文件
        data = {
            '品牌': item['brand'],
            '型号': item['param_dict'].get('型号'),
            '价格': item['price'],
            '评价数量': item['comment_count'],
            '是否自营': item['is_self_run'],
            '参数': item['param_dict'],
        }
        self.df = pd.concat([self.df, pd.DataFrame([data])], ignore_index=True)
        return item

    def close_spider(self, spider):
        self.df.to_excel('phones.xlsx', index=False)
```

在jd_phone/settings.py中启用这两个管道:

```python
ITEM_PIPELINES = {
    'jd_phone.pipelines.DuplicatesPipeline': 100,
    'jd_phone.pipelines.ExcelPipeline': 200,
}
```

5. 运行爬虫

在命令行中执行以下命令启动爬虫:

```
scrapy crawl login
scrapy crawl phone
```

这两个命令分别执行登录和抓取手机信息的操作。在执行完后,当前目录下会生成一个名为phones.xlsx的Excel文件,其中包含了抓取到的所有手机信息。
原文地址: https://www.cveoy.top/t/topic/oksh 著作权归作者所有。请勿转载和采集!