"""Scrape JD.com search results for AI books ("人工智能图书").

For each product card on the search page the script opens the product's
detail page in a new tab, collects publisher / publication-date / review
statistics, and finally writes everything to ``book_info.csv``.

Requires: selenium (Chrome driver on PATH), beautifulsoup4, pandas.
"""

import csv  # retained from the original file; not used below
import re
import time

import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By

# --- Browser setup ----------------------------------------------------------
# The original comment promised headless mode but never enabled it; do so here.
chrome_options = Options()
chrome_options.add_argument('--headless')

driver = webdriver.Chrome(options=chrome_options)

# JD.com search results for the URL-encoded keyword "人工智能图书".
url = ('https://search.jd.com/Search'
       '?keyword=%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD%E5%9B%BE%E4%B9%A6')
driver.get(url)
time.sleep(5)  # crude wait for the search page to finish loading

page_number = 1
max_pages = 1  # number of result pages to crawl
data = []      # one dict per scraped book


def _comment_count(li_index):
    """Return the count shown on one review-filter tab (e.g. '2万') or '未找到'.

    ``li_index`` selects the tab: 5 = 好评 (good), 6 = 中评 (medium),
    7 = 差评 (bad). The raw text looks like '(2万+)'; the surrounding
    parentheses and trailing '+' are stripped.
    """
    elems = driver.find_elements(
        By.XPATH,
        f"//*[@id='comment']/div[2]/div[2]/div[1]/ul/li[{li_index}]/a/em")
    return elems[0].text.strip('()+') if elems else '未找到'


while page_number <= max_pages:
    print('Crawling Page', page_number)

    soup = BeautifulSoup(driver.page_source, 'html.parser')

    # Each product card lives in a div.gl-i-wrap.
    for div in soup.find_all('div', class_='gl-i-wrap'):
        name = div.find('div', class_='p-name').get_text()
        price = div.find('div', class_='p-price').get_text()

        # Comment count arrives as e.g. '2万+条评价'; normalise to a number
        # when the '万' (ten-thousand) suffix is present.
        commit = div.find('div', class_='p-commit').get_text()
        commit = commit.replace('条评价', '').replace('+', '')
        if '万' in commit:
            commit = float(commit.replace('万', '')) * 10000

        # Open the product detail page in a new tab and switch to it.
        link = div.find('div', class_='p-name').find('a').get('href')
        if 'http' not in link:
            link = 'https:' + link  # protocol-relative href from JD
        driver.execute_script(f"window.open('{link}', '_blank');")
        windows = driver.window_handles
        driver.switch_to.window(windows[-1])
        time.sleep(5)  # wait for the detail page to load

        soup_new = BeautifulSoup(driver.page_source, 'html.parser')

        # Publisher and publication date; fall back to '未找到' (not found).
        publisher_li = soup_new.find(
            'li', title=True, clstag='shangpin|keycount|product|chubanshe_3')
        publisher = publisher_li['title'] if publisher_li is not None else '未找到'

        date_li = soup_new.find('li', string=re.compile(r'出版时间:'))
        publish_date = (date_li.get_text().replace('出版时间:', '')
                        if date_li is not None else '未找到')

        # Click the '商品评价' (reviews) tab to load the comment statistics.
        # NOTE: this must happen BEFORE the detail tab is closed — the
        # original closed the tab first and queried the wrong page.
        # '//*[@id=...]' also fixes the original invalid '//[@id=...]' XPath,
        # and find_elements(By.XPATH, ...) replaces the find_elements_by_*
        # API removed in Selenium 4.
        review_tabs = driver.find_elements(
            By.XPATH, "//*[@id='detail']/div[1]/ul/li[5]")
        if review_tabs:
            review_tabs[0].click()
            time.sleep(2)

        good_comment = _comment_count(5)    # 好评
        medium_comment = _comment_count(6)  # 中评
        bad_comment = _comment_count(7)     # 差评

        # Done with the detail tab; return to the search-results window.
        driver.close()
        driver.switch_to.window(windows[0])

        # Record this book (inside the loop — the original appended only once).
        data.append({
            '书名': name,
            '价格': price,
            '评论数': commit,
            '好评': good_comment,
            '中评': medium_comment,
            '差评': bad_comment,
            '出版社': publisher,
            '出版年份': publish_date,
        })

    # Advance to the next results page when one is expected and available.
    if page_number < max_pages:
        try:
            driver.find_element(By.CLASS_NAME, 'pn-next').click()
            time.sleep(3)  # wait for the next page to load
        except Exception:
            break  # no next-page button: stop crawling early
    page_number += 1

driver.quit()

# Persist the scraped records in a fixed column order.
df = pd.DataFrame(data)
df = df[['书名', '价格', '评论数', '好评', '中评', '差评', '出版社', '出版年份']]
filename = 'book_info.csv'
df.to_csv(filename, index=False, encoding='utf-8')

print('Data saved to', filename)


原文地址: https://www.cveoy.top/t/topic/pEvS 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录