import requests
from bs4 import BeautifulSoup
import time
import tkinter as tk
import webbrowser
import random
import os
import re

def get_random_user_agent():
    """Return one desktop-Chrome User-Agent string, picked at random.

    Rotating the UA makes successive requests look less like a bot.
    """
    return random.choice((
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36',
    ))

def crawl_baidu(keyword, page_limit):
    """Scrape Baidu search results for *keyword*.

    Args:
        keyword: the search term interpolated into the query URL.
        page_limit: number of result pages to fetch (Baidu shows 10
            results per page, paginated via ``pn=(page-1)*10``).

    Returns:
        A list of ``(title, url)`` tuples; may be empty when nothing
        matched or every request failed.
    """
    headers = {
        'User-Agent': get_random_user_agent()
    }

    results = []
    for page in range(1, page_limit + 1):
        url = f'https://www.baidu.com/s?wd={keyword}&pn={(page - 1) * 10}'

        # Random delay between requests to avoid tripping rate limits.
        delay = random.uniform(0.5, 1.0)
        time.sleep(delay)

        try:
            # Bug fix: the original call had no timeout, so a stalled
            # connection would hang the whole (single-threaded tkinter)
            # app indefinitely.
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
        except requests.RequestException:
            # Skip this page on network/HTTP errors instead of crashing.
            continue

        soup = BeautifulSoup(response.text, 'html.parser')

        for result in soup.find_all('div', class_='result'):
            # Bug fix: find() returns None when a result block lacks an
            # <h3> or <a>, which made the original raise AttributeError.
            title_tag = result.find('h3')
            link_tag = result.find('a')
            if title_tag is None or link_tag is None or not link_tag.has_attr('href'):
                continue
            results.append((title_tag.get_text(), link_tag['href']))

    return results

def open_url(url):
    """Launch *url* in the system's default web browser."""
    webbrowser.open(url)

def crawl_and_index():
    """Crawl Baidu for each keyword entered in the UI and save results.

    Reads the keyword list and page count from the tkinter entries,
    fetches results per keyword via :func:`crawl_baidu`, and writes them
    to ``webpages/<keyword>.html`` as numbered "title / url" line pairs
    (the format :func:`search_local` later scans).
    """
    # Bug fix: the UI label promises comma OR space separators
    # ("用逗号或空格隔开"), but the original split only on ','.  Also
    # accept the full-width comma and drop empty/whitespace-only tokens
    # so stray separators don't produce bogus filenames.
    keywords = [kw for kw in re.split(r'[,，\s]+', entry_keywords.get()) if kw]
    page_limit = int(entry_pages.get())  # 获取指定的爬取页数

    # Directory holding one result file per keyword; exist_ok avoids the
    # check-then-create race of the original exists()/makedirs() pair.
    os.makedirs('webpages', exist_ok=True)

    # Crawl each keyword and persist its results.
    for keyword in keywords:
        search_results = crawl_baidu(keyword, page_limit)
        if search_results:
            file_name = os.path.join('webpages', f'{keyword}.html')
            with open(file_name, 'w', encoding='utf-8') as file:
                for index, (title, url) in enumerate(search_results, start=1):
                    file.write(f'{index}. {title}\n')
                    file.write(f'{url}\n')
                file.write('\n')
        else:
            print(f'关键词 \'{keyword}\' 没有搜索结果')

def search_local():
    """Case-insensitively search the saved result files for a keyword.

    Scans every file under ``webpages/`` for lines containing the text
    from the search entry and shows matches in the result Text widget,
    grouped under a header per source file.
    """
    keyword = entry_search.get()
    result_text.delete('1.0', tk.END)

    # Bug fix: the original crashed with FileNotFoundError when the user
    # searched before ever crawling (no 'webpages' directory yet).
    if os.path.isdir('webpages'):
        # 遍历网页文件,搜索匹配的结果
        for file_name in os.listdir('webpages'):
            with open(os.path.join('webpages', file_name), 'r', encoding='utf-8') as file:
                found_results = [line for line in file if keyword.lower() in line.lower()]

            if found_results:
                # Header shows the originating keyword: the file name
                # minus its extension (robust replacement for the
                # original's hard-coded file_name[:-5]).
                display_name = os.path.splitext(file_name)[0]
                result_text.insert(tk.END, f'搜索结果 - {display_name}:\n\n', 'title')
                for line in found_results:
                    result_text.insert(tk.END, line)
                result_text.insert(tk.END, '\n')

    # An otherwise-empty Text widget still holds one trailing newline.
    if result_text.get('1.0', tk.END) == '\n':
        result_text.insert(tk.END, '没有搜索结果\n')

# ---- UI construction -------------------------------------------------
# Widget creation order matters: pack() stacks top-to-bottom in call order.
window = tk.Tk()
window.title('百度搜索')
window.geometry('800x600')

# Crawl controls: keyword list, page count, and the trigger button.
tk.Label(window, text='请输入关键词(用逗号或空格隔开):').pack()
entry_keywords = tk.Entry(window)
entry_keywords.pack()

tk.Label(window, text='请输入爬取页数:').pack()
entry_pages = tk.Entry(window)
entry_pages.pack()

tk.Button(window, text='爬取并索引', command=crawl_and_index).pack()

# Local-search controls over the previously saved result files.
tk.Label(window, text='请输入搜索关键词:').pack()
entry_search = tk.Entry(window)
entry_search.pack()

tk.Button(window, text='搜索', command=search_local).pack()

# Scrollable results area; scrollbar and text widget are cross-wired.
scrollbar = tk.Scrollbar(window)
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)

result_text = tk.Text(window, yscrollcommand=scrollbar.set)
result_text.pack(fill=tk.BOTH)

scrollbar.config(command=result_text.yview)

window.mainloop()
# Python 实现百度搜索结果爬取和本地索引
# 原文地址: https://www.cveoy.top/t/topic/t0N 著作权归作者所有。请勿转载和采集!