import os
import re
import tkinter as tk
from collections import Counter
from tkinter import ttk, filedialog

import docx
import matplotlib.pyplot as plt
import openpyxl
import pandas as pd
import PyPDF2
from bs4 import BeautifulSoup
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from pptx import Presentation
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

plt.rcParams['font.family'] = ['Microsoft YaHei'] # 使用微软雅黑字体

class SearchApp(tk.Tk): def init(self): super().init() self.title('文件检索系统') self.geometry('1600x900') self.resizable(False, False) self.create_widgets()

def create_widgets(self):
    # 选择文件夹控件
    folder_frame = tk.Frame(self)
    folder_frame.pack(side='top', pady=10)
    self.folder_label = tk.Label(folder_frame, text='选择一个文件夹:')
    self.folder_label.pack(side='left')
    self.folder_entry = tk.Entry(folder_frame, width=50)
    self.folder_entry.pack(side='left')
    self.folder_button = tk.Button(folder_frame, text='选择文件夹', command=self.select_folder)
    self.folder_button.pack(side='left', padx=10)

    # 输入关键词控件
    keywords_frame = tk.Frame(self)
    keywords_frame.pack(side='top', pady=10)
    self.keywords_label = tk.Label(keywords_frame, text='输入关键词:')
    self.keywords_label.pack(side='left')
    self.keywords_entry = tk.Entry(keywords_frame, width=50)
    self.keywords_entry.pack(side='left')

    # 选择算法控件
    algorithm_frame = tk.Frame(self)
    algorithm_frame.pack(side='top', pady=10)
    self.algorithm_label = tk.Label(algorithm_frame, text='选择算法:')
    self.algorithm_label.pack(side='left')
    self.algorithm_dropdown = ttk.Combobox(algorithm_frame, width=48)
    self.algorithm_dropdown['values'] = ['词频', 'TF-IDF', '空间向量模型']
    self.algorithm_dropdown.current(0)
    self.algorithm_dropdown.pack(side='left', padx=10)

    # 检索按钮控件
    search_button_frame = tk.Frame(self)
    search_button_frame.pack(side='top', pady=10)
    self.search_button = tk.Button(search_button_frame, text='检索', command=self.search_documents)
    self.search_button.pack()

    # 检索结果控件
    results_frame = tk.Frame(self)
    results_frame.pack(side='top', pady=10)

    self.results_text_label = tk.Label(results_frame, text='检索结果文本框:')
    self.results_text_label.pack(side='top')

    self.results_text = tk.Text(results_frame, wrap='word', height=20)
    self.results_text.pack(side='left')

    self.results_figure_label = tk.Label(results_frame, text='检索结果图表:')
    self.results_figure_label.pack(side='top', pady=(0, 10))

    self.results_figure = plt.Figure(figsize=(6, 5), dpi=100)
    self.results_plot = self.results_figure.add_subplot(111)
    self.results_canvas = FigureCanvasTkAgg(self.results_figure, results_frame)
    self.results_canvas.get_tk_widget().pack(side='left', padx=(50, 10))

    self.folder_label.pack(pady=10)
    self.folder_entry.pack(pady=5)
    self.folder_button.pack(pady=5)
    self.keywords_label.pack(pady=5)
    self.keywords_entry.pack(pady=5)
    self.algorithm_label.pack(pady=5)
    self.algorithm_dropdown.pack(pady=5)
    self.search_button.pack(pady=10)
    self.results_text.pack(pady=10)
    self.toolbar = NavigationToolbar2Tk(self.results_figure.canvas, self)
    self.toolbar.update()
    self.results_canvas = tk.Canvas(self, width=600, height=300)
    self.results_canvas.pack(padx=20, pady=(10, 20))
    self.results_figure_canvas = FigureCanvasTkAgg(self.results_figure, self.results_canvas)
    self.results_figure_canvas.draw()
    self.results_figure_canvas.get_tk_widget().pack()

def select_folder(self):
    folder_path = filedialog.askdirectory()
    self.folder_entry.delete(0, tk.END)
    self.folder_entry.insert(0, folder_path)

def search_documents(self):
    self.results_text.delete(1.0, tk.END)
    self.results_plot.clear()
    keywords = '+' + self.keywords_entry.get()
    keywords = keywords.split()
    algorithm = self.algorithm_dropdown.get()
    search_path = self.folder_entry.get()

    # 将关键词分类
    and_keywords, or_keywords, not_keywords = [], [], []
    for kw in keywords:
        if kw.startswith('+'):
            or_keywords.append(kw[1:])
        elif kw.startswith('-'):
            not_keywords.append(kw[1:])
        else:
            and_keywords.append(kw)

    vectorizer = TfidfVectorizer()
    results = []

    for foldername, subfolders, filenames in os.walk(search_path):
        for filename in filenames:
            file_path = os.path.join(foldername, filename)
            ext = filename.split('.')[-1]
            if ext == 'docx':
                doc = docx.Document(file_path)
                text = ''
                for p in doc.paragraphs:
                    text += p.text + '\n'
                results.append((file_path, text))
            elif ext == 'xlsx':
                # 读取整个 Excel 文件
                excel_content = ''
                wb = openpyxl.load_workbook(file_path, read_only=True)
                for sheet in wb.worksheets:
                    for row in sheet.iter_rows(min_row=1, max_col=sheet.max_column, max_row=sheet.max_row):
                        for cell in row:
                            if cell.value is not None:
                                excel_content = excel_content + str(cell.value) + ' '
                results.append((file_path, excel_content))
            elif ext == 'pptx':
                prs = Presentation(file_path)
                for slide in prs.slides:
                    text = ''
                    for shape in slide.shapes:
                        if hasattr(shape, 'text'):
                            text += shape.text + '\n'
                    results.append((file_path, text))
            elif ext == 'pdf':
                with open(file_path, 'rb') as f:
                    pdf = PyPDF2.PdfFileReader(f, strict=False)
                    for page in pdf.pages:
                        text = page.extract_text()
                        results.append((file_path, text))
            elif ext == 'html':
                with open(file_path, 'r') as f:
                    soup = BeautifulSoup(f.read(), 'html.parser')
                    text = soup.get_text()
                    results.append((file_path, text))

            elif ext == 'txt':
                with open(file_path, 'r') as f:
                    text = f.read()
                    results.append((file_path, text))

    if algorithm == '词频':
        # 词频检索
        for i, file in enumerate(results):
            file_path, text = file
            match = True

            for query in not_keywords:
                if re.search(r'\b{}\b'.format(query), text, re.I):
                    match = False
                    break

            if match:
                file_score = 0
                for query in and_keywords:
                    query_score = 0
                    for word in query.split():
                        query_score += text.count(word)
                    file_score += query_score
                for query in or_keywords:
                    query_score = 0
                    for word in query.split():
                        query_score += text.count(word)
                    if query_score > 0:
                        file_score += query_score
                results[i] = (file_path, text, file_score)
            else:
                results[i] = (file_path, text, 0)

    elif algorithm == 'TF-IDF':
        # TF-IDF 检索
        # 构建文档-词项矩阵
        corpus = [result[1] for result in results]
        doc_term_matrix = vectorizer.fit_transform(corpus)

        # 基于词频统计的查询处理
        tf = [Counter(text.split()) for file_path, text in results]
        tf_query = [Counter(keyword.split()) for keyword in and_keywords + or_keywords]
        tfidf_query = []
        for query in tf_query:
             tfidf_query.append([(k, v * vectorizer.idf_[vectorizer.vocabulary_[k]]) for k, v in query.items()])
        tfidf_weighted_query = [dict(w) for w in tfidf_query]

        for i, file in enumerate(results):
            file_path, text = file
            match = True

            for query in not_keywords:
                if re.search(r'\b{}\b'.format(query), text, re.I):
                    match = False
                    break

            if match:
                file_score = 0
                for query in and_keywords:
                    query_score = 0
                    for word in query.split():
                        query_score += text.count(word)
                    file_score += query_score

                query_doc_term_matrix = vectorizer.transform([query for query in or_keywords])
                query_score_matrix = cosine_similarity(query_doc_term_matrix, doc_term_matrix[i])
                for score in query_score_matrix[0]:
                    if score > 0:
                        file_score += score
                tfidf_query_strings = [' '.join([t[0] for t in tfidf_query[i]]) for i in range(len(tfidf_query))]
                tfidf_weighted_query_doc_term_matrix = vectorizer.transform(tfidf_query_strings)
                tfidf_weighted_query_score_matrix = cosine_similarity(tfidf_weighted_query_doc_term_matrix,
                                                                       doc_term_matrix[i])
                for score in tfidf_weighted_query_score_matrix[0]:
                    if score > 0:
                        file_score += score

                results[i] = (file_path, text, file_score)
            else:
                results[i] = (file_path, text, 0)

    elif algorithm == '空间向量模型':
        # 使用向量空间模型检索
        tfidf = vectorizer.fit_transform([result[1] for result in results])
        query_tfidf = vectorizer.transform(and_keywords + or_keywords)
        if not_keywords:
            query_not_tfidf = vectorizer.transform(not_keywords)
        score_matrix = cosine_similarity(tfidf, query_tfidf)
        for i, score in enumerate(score_matrix):
            if not_keywords:
                not_score = cosine_similarity(tfidf[i], query_not_tfidf)
                results[i] = (results[i][0], results[i][1], score[0] - not_score[0][0])
            else:
                results[i] = (results[i][0], results[i][1], score[0])

    # 综合排序并输出结果
    results.sort(key=lambda x: x[2], reverse=True)
    graph_results = []
    for file_path, text, score in results:
        self.results_text.insert(tk.END, f'{file_path}: {score}\n')
        graph_results.append((os.path.basename(file_path), score))
    df = pd.DataFrame(graph_results, columns=['File', 'Score'])
    df.plot.bar(x='File', y='Score', ax=self.results_plot, rot=0)
    self.results_plot.set_title('Search Results')
    self.results_figure_canvas.draw()

if name == 'main': app = SearchApp() app.mainloop()

# 文件检索系统:使用词频、TF-IDF 和空间向量模型进行高效搜索
# (File search system: efficient search using term frequency, TF-IDF and the
# vector-space model.)
#
# 原文地址: https://www.cveoy.top/t/topic/oSRu 著作权归作者所有。请勿转载和采集!
# 免费AI点我,无需注册和登录