"""People's Daily (人民日报) article scraper.

Downloads every article published between two user-supplied dates,
classifies each article by keyword, and writes it to
``<destdir>/<category>/YYYYMMDD-<page>-<title>.txt``.
"""
import requests
import bs4
import os
import datetime
from datetime import date

# All issue/page/article URLs share this prefix.
BASE_URL = 'http://paper.people.com.cn/rmrb/html/'


def fetchUrl(url):
    """Download *url* and return its decoded HTML text.

    Raises requests.HTTPError on a non-2xx response.
    """
    headers = {
        # NOTE(review): the accept value was garbled in the extracted source
        # ("/;q=0.8"); restored to the standard browser wildcard "*/*;q=0.8".
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,'
                  'image/webp,image/apng,*/*;q=0.8',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
    }
    r = requests.get(url, headers=headers)
    r.raise_for_status()
    # Let requests sniff the real charset; the site does not always declare it.
    r.encoding = r.apparent_encoding
    return r.text


def getPageList(year, month, day):
    """Return the URL of every page of the paper for the given date.

    *year*, *month*, *day* are zero-padded strings (e.g. '2022', '07', '06').
    """
    url = BASE_URL + year + '-' + month + '/' + day + '/nbs.D110000renmrb_01.htm'
    html = fetchUrl(url)
    bsobj = bs4.BeautifulSoup(html, 'html.parser')
    temp = bsobj.find('div', attrs={'id': 'List'})
    if temp:
        # Older page layout: pages listed under div#List > ul.
        pageList = temp.ul.find_all('div', attrs={'class': 'right_title-name'})
    else:
        # Newer layout: pages are swiper slides.
        # NOTE(review): class was truncated to '-container' in the extracted
        # source; 'swiper-container' matches the site's markup — confirm.
        temp = bsobj.find('div', attrs={'class': 'swiper-container'})
        pageList = temp.find_all('div', attrs={'class': 'swiper-slide'})
    linkList = []
    for page in pageList:
        link = page.a['href']
        linkList.append(BASE_URL + year + '-' + month + '/' + day + '/' + link)
    return linkList


def getTitleList(year, month, day, pageUrl, category_count):
    """Return a list of ``{'url': ..., 'category': ...}`` dicts for every
    article linked from *pageUrl*.

    Side effect: increments *category_count* (dict category -> int) in place.
    """
    html = fetchUrl(pageUrl)
    bsobj = bs4.BeautifulSoup(html, 'html.parser')
    temp = bsobj.find('div', attrs={'id': 'titleList'})
    if temp:
        tempList = temp.ul.find_all('li')
    else:
        # BUG FIX: the original assigned this branch to a different name
        # (titleList), leaving tempList undefined for this layout.
        tempList = bsobj.find('ul', attrs={'class': 'news-list'}).find_all('li')
    linkList = []
    for title in tempList:
        link = title.a['href']
        # Only real article links contain this edition marker.
        if 'nw.D110000renmrb' not in link:
            continue
        url = BASE_URL + year + '-' + month + '/' + day + '/' + link
        content = fetchUrl(url)
        # Classify the article by keyword occurrence in the raw HTML.
        if '体育' in content:
            category = '体育'
        elif '环境' in content:
            category = '环境'
        else:
            category = '其他'
        category_count[category] += 1  # tally per-category totals
        # BUG FIX: the original appended each article twice (once as a tuple,
        # once as a dict); callers index with ['url']/['category'], so keep
        # only the dict form.
        linkList.append({'url': url, 'category': category})
    # BUG FIX: the original ended with the bare expression `returnList`
    # (a NameError), so the function returned nothing.
    return linkList


def getContent(html):
    """Extract 'subtitle\\ntitle\\nsubtitle2\\n' plus body paragraphs from an
    article page."""
    bsobj = bs4.BeautifulSoup(html, 'html.parser')
    title = bsobj.h3.text + '\n' + bsobj.h1.text + '\n' + bsobj.h2.text + '\n'
    # Article body lives in div#ozoom as a sequence of <p> elements.
    pList = bsobj.find('div', attrs={'id': 'ozoom'}).find_all('p')
    content = ''.join(p.text + '\n' for p in pList)
    return title + content


def saveFile(content, path, filename, category, category_count, category_ratio):
    """Write one article to *path*/*filename* with a small metadata header."""
    os.makedirs(path, exist_ok=True)
    with open(os.path.join(path, filename), 'w', encoding='utf-8') as f:
        f.write(f'分类:{category}\n')
        f.write(f'数量:{category_count[category]}\n')
        f.write(f'比:{category_ratio[category]*100:.2f}%\n')
        f.write(content)
    # BUG FIX: the original called os.join, which does not exist.
    print(f'文章已保存为:{os.path.join(path, filename)}')


def download_rmrb(year, month, day, destdir):
    """Crawl every article published on one day and save each under
    ``destdir/<category>/``."""
    pageList = getPageList(year, month, day)
    category_count = {'体育': 0, '环境': 0, '其他': 0}
    for page in pageList:
        titleList = getTitleList(year, month, day, page, category_count)
        for item in titleList:
            # BUG FIX: the original wrote `for url in item['url']`, which
            # iterated the URL one character at a time.
            url = item['url']
            category = item['category']
            html = fetchUrl(url)  # BUG FIX: was undefined name `fetch`
            content = getContent(html)
            # URLs end like ..._<pageNo>-<...>_<titleNo>.htm; derive the
            # output filename from the last two '_'-separated pieces.
            temp = url.split('_')
            pageNo = temp[-2]
            titleNo = temp[-1].split('.')[0]
            category_dir = os.path.join(destdir, category)
            filename = f'{year}{month}{day}-{pageNo}-{titleNo}.txt'
            total_samples = sum(category_count.values())
            # BUG FIX: the original read `v /_samples` (syntax error); also
            # guard against an empty tally.
            category_ratio = {
                k: (v / total_samples if total_samples else 0.0)
                for k, v in category_count.items()
            }
            saveFile(content, category_dir, filename, category,
                     category_count, category_ratio)


def gen_dates(b_date, days):
    """Yield *days* consecutive datetimes starting at *b_date* (inclusive)."""
    step = datetime.timedelta(days=1)
    for i in range(days):
        yield b_date + step * i


def get_date_list(beginDate, endDate):
    """Return every date from *beginDate* through *endDate*, inclusive.

    BUG FIX: the original signature was ``get_date_list(beginDate,)`` and
    read ``endDate`` as an undeclared global; it also excluded the end date.
    """
    start = datetime.datetime.strptime(beginDate.strftime('%Y%m%d'), '%Y%m%d')
    end = datetime.datetime.strptime(endDate.strftime('%Y%m%d'), '%Y%m%d')
    return list(gen_dates(start, (end - start).days + 1))


if __name__ == '__main__':  # BUG FIX: original tested `name == 'main'`
    print('---文章爬系统---')
    beginDate = input('请输入开始日期(格式如20220706):')
    endDate = input('请输入结束日期(格式如20220706):')
    beginDate = date(int(beginDate[:4]), int(beginDate[4:6]), int(beginDate[6:]))
    endDate = date(int(endDate[:4]), int(endDate[4:6]), int(endDate[6:]))
    data = get_date_list(beginDate, endDate)  # BUG FIX: endDate was not passed
    for d in data:
        year = str(d.year)
        month = str(d.month) if d.month >= 10 else '0' + str(d.month)
        day = str(d.day) if d.day >= 10 else '0' + str(d.day)
        destdir = './文章'
        download_rmrb(year, month, day, destdir)
        print(f'爬取文章时间为:{year}/{month}/{day}的文章成功写入文件夹中!')
    print('文章完成!')

人民日报文章爬取工具 - 获取指定日期范围内的新闻内容

原文地址: https://www.cveoy.top/t/topic/qo10 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录