import re
import requests
import time
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
import os
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from urllib.parse import urlparse, urljoin

RUNNING_MODE = 1  # run-mode flag kept from the original script (not read here)

# Search-results page to scrape (change keyword / page as needed).
page_url = 'https://search.bilibili.com/article?keyword=饭拍图&page=2'

# Directory the images are written into, and the Chrome browser binary path.
save_path = 'bilibili1'
webdriver_path = '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'

# Shared WebDriver instance; created in main() so importing this module
# does not launch a browser.
driver = None


def scrape_and_download_images(url, save_dir):
    """Scrape a Bilibili article-search results page and download images.

    Opens *url* in the shared WebDriver, collects every article link
    (``<a class="title">``), visits each article, and downloads every
    lazily-loaded image (``<img data-src=...>``) into *save_dir*.

    Args:
        url: URL of an article search-results page.
        save_dir: existing directory to save image files into.
    """
    driver.get(url)
    time.sleep(3)  # crude wait for the client-side rendering to finish

    soup = BeautifulSoup(driver.page_source, 'html.parser')
    article_links = soup.find_all('a', class_='title')
    # Resolve relative hrefs against the search page URL.
    article_urls = [urljoin(url, link['href']) for link in article_links]

    for art_idx, article_url in enumerate(article_urls):
        driver.get(article_url)
        time.sleep(3)  # wait for the article page to render

        soup = BeautifulSoup(driver.page_source, 'html.parser')
        image_elements = soup.find_all('img', {'data-src': True})

        for img_idx, img in enumerate(image_elements):
            image_url = img['data-src']

            # Protocol-relative URLs ("//i0.hdslb.com/...") need a scheme.
            if image_url.startswith('//'):
                image_url = 'https:' + image_url

            # Include the article index in the file name. The original
            # reused image_1.jpg, image_2.jpg, ... for every article, so
            # the skip-if-exists check below silently dropped all images
            # after the first article.
            filename = f'article_{art_idx + 1}_image_{img_idx + 1}.jpg'
            save_file_path = os.path.join(save_dir, filename)

            if os.path.exists(save_file_path):
                print(f'[{img_idx + 1}/{len(image_elements)}] 图像已存在,跳过下载。路径:{save_file_path}')
                continue

            # Fetch first, write second: the original opened the file
            # before issuing the request, leaving an empty file behind
            # whenever the download failed.
            try:
                response = requests.get(image_url, timeout=30)
                response.raise_for_status()
            except requests.RequestException as e:
                print(f'请求错误:{e}')
                print('等待十秒后重试...')
                time.sleep(10)
                continue

            with open(save_file_path, 'wb') as f:
                f.write(response.content)
            print(f'[{img_idx + 1}/{len(image_elements)}] 下载图像:{image_url}。保存路径:{save_file_path}')


def _next_page_url(url):
    """Return *url* with its ``page=N`` query parameter incremented by one."""
    page_number = int(re.search(r'page=(\d+)', url).group(1)) + 1
    return re.sub(r'page=\d+', f'page={page_number}', url)


def main():
    """Scrape the configured search page and the page after it."""
    global driver

    # Create the save directory if it does not exist.
    os.makedirs(save_path, exist_ok=True)

    chrome_options = Options()
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument('--disable-dev-shm-usage')
    chrome_options.add_argument('--disable-extensions')
    # The original assigned Options.executable_path, which Selenium's
    # Options object ignores; binary_location is the attribute that
    # points at the Chrome browser binary.
    chrome_options.binary_location = webdriver_path

    driver = webdriver.Chrome(options=chrome_options)
    try:
        url = page_url
        for _ in range(2):
            scrape_and_download_images(url, save_path)
            url = _next_page_url(url)
    finally:
        # Quit exactly once. The original quit the driver after the
        # first page and then reused the dead driver for the next one.
        driver.quit()


if __name__ == '__main__':
    main()

Bilibili 文章图片爬取工具 - 批量下载饭拍图

原文地址: https://www.cveoy.top/t/topic/hF7w 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录