"""People's Daily (人民日报) article scraper.

Downloads front-page articles whose title contains a given keyword, for every
day in a date range, saves each article body as a UTF-8 text file, and reports
the accuracy of a placeholder category predictor against the keyword.
"""
import datetime
import os

import bs4
import requests


def fetchUrl(url):
    """GET *url* and return the decoded page text.

    Raises requests.exceptions.HTTPError on a 4xx/5xx response.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36'
    }
    r = requests.get(url, headers=headers)
    r.raise_for_status()
    # The declared charset on this site is unreliable; use content sniffing.
    r.encoding = r.apparent_encoding
    return r.text


def NewsCategories():
    """Return the list of section names scraped from a sample front page.

    NOTE(review): the sample date is hard-coded; an empty list is returned if
    the 'page' div is missing (e.g. layout change or fetch of a different page).
    """
    url = 'http://paper.people.com.cn/rmrb/html/2023-08/19/nbs.D110000renmrb_01.htm'
    html = fetchUrl(url)
    bsobj = bs4.BeautifulSoup(html, 'html.parser')
    categories = []
    temp = bsobj.find('div', attrs={'id': 'page'})
    if temp:
        categories = [a.text for a in temp.ul.find_all('a')]
    return categories


def getPage(year, month, day, category):
    """Return hrefs of front-page items on year-month-day whose text contains *category*.

    year/month/day are zero-padded strings as used in the site's URL scheme.
    """
    url = f'http://paper.people.com.cn/rmrb/html/{year}-{month}/{day}/nbs.D110000renmrb_01.htm'
    html = fetchUrl(url)
    bsobj = bs4.BeautifulSoup(html, 'html.parser')
    pageList = []
    temp = bsobj.find('div', attrs={'id': 'titleList'})
    if temp:
        pageList = temp.ul.find_all('li')
    return [page.a.get('href') for page in pageList if category in page.text]


def getContent(pageUrl):
    """Fetch one article page (relative href) and return its stripped body text."""
    url = f'http://paper.people.com.cn/rmrb/html/{pageUrl}'
    html = fetchUrl(url)
    bsobj = bs4.BeautifulSoup(html, 'html.parser')
    content = bsobj.find('div', attrs={'class': 'text_c'}).get_text()
    return content.strip()


def saveFile(content, path, filename):
    """Write *content* to path/filename as UTF-8, creating *path* if needed."""
    save_path = os.path.join(path, filename)
    if not os.path.exists(path):
        os.makedirs(path)
    with open(save_path, 'w', encoding='utf-8') as f:
        f.write(content)


def calculateAccuracy(category, total_samples, category_count):
    """Return category_count / total_samples as a percentage; 0 when there are no samples.

    *category* is unused but kept for interface compatibility with callers.
    """
    if total_samples == 0:
        return 0
    accuracy = category_count / total_samples
    return accuracy * 100


def predictCategory(content):
    """Placeholder classifier: always returns the same fixed label."""
    predicted_category = 'Example Category'
    return predicted_category


def downloadArticles(beginDate_str, endDate_str, category, save_path):
    """Download all keyword-matching articles between two YYYYMMDD dates (inclusive).

    Prints a per-article status line, then total samples, correct predictions,
    and the prediction accuracy.
    """
    category_count = 0
    total_samples = 0
    correct_predictions = 0
    # BUG FIX: the original stepped mktime() by 86400-second increments and
    # converted back with localtime(), which skips or repeats a day across
    # DST transitions. Calendar-date arithmetic is exact.
    begin_date = datetime.datetime.strptime(beginDate_str, '%Y%m%d').date()
    end_date = datetime.datetime.strptime(endDate_str, '%Y%m%d').date()
    current = begin_date
    while current <= end_date:
        year = f'{current.year:04d}'
        month = f'{current.month:02d}'
        day = f'{current.day:02d}'
        try:
            pageList = getPage(year, month, day, category)
            for pageUrl in pageList:
                category_count += 1
                total_samples += 1
                try:
                    content = getContent(pageUrl)
                    prediction = predictCategory(content)
                    if prediction == category:
                        correct_predictions += 1
                    # BUG FIX: pageUrl is an href containing '/' separators;
                    # keep only its basename so the filename is valid.
                    filename = f'{year}{month}{day}_{os.path.basename(pageUrl)}.txt'
                    saveFile(content, save_path, filename)
                    # BUG FIX: original message was a broken "(unknown)"
                    # placeholder; report the file actually written.
                    print(f'Successfully downloaded {filename}')
                except Exception as e:
                    print(f'Error occurred while downloading: {str(e)}')
        except requests.exceptions.HTTPError as e:
            print(f'Requested page not found: {str(e)}')
        current += datetime.timedelta(days=1)
    accuracy = calculateAccuracy(category, total_samples, correct_predictions)
    print(f'Total samples: {total_samples}')
    print(f'Correct predictions: {correct_predictions}')
    print(f'Accuracy: {accuracy}%')


# BUG FIX: original guard was `if name == 'main':`, which raises NameError.
if __name__ == '__main__':
    beginDate_str = input('输入开始时间 (YYYYMMDD): ')
    endDate_str = input('输入结束时间 (YYYYMMDD): ')
    category = input('输入关键词: ')
    save_path = input('输入的路径: ')
    downloadArticles(beginDate_str, endDate_str, category, save_path)

人民日报网页爬取工具:批量下载指定日期范围内与关键词相关的文章

原文地址: https://www.cveoy.top/t/topic/qo17 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录