# -*- coding: utf-8 -*-
"""Keyword-extraction demo: hand-rolled TF-IDF vs. jieba's built-in TextRank.

Loads a corpus (one document per line), trains IDF statistics over it, then
extracts the top keywords from a target text with both models, printing the
results and writing them to a file.
"""
import math
import numpy as np
import jieba
import jieba.posseg as psg
from gensim import corpora, models
from jieba import analyse
import functools


# Step 2: stopword-table loading.
def get_stopword_list():
    """Load the stopword list (one word per line, UTF-8) and return it as a list."""
    stop_word_path = 'd:/Users/Administrator/Desktop/data/stop_words.utf8'
    # Use a context manager so the handle is closed deterministically
    # (the original left the file open until garbage collection).
    with open(stop_word_path, encoding='utf-8') as f:
        return [sw.replace('\n', '') for sw in f.readlines()]


# Step 3: segmentation, delegating to the jieba API.
def seg_to_list(sentence, pos=False):
    """Segment *sentence* with jieba.

    Returns an iterator of plain string tokens when ``pos`` is False, or of
    (word, flag) pair objects when ``pos`` is True.
    """
    if not pos:
        # Segmentation without POS tagging.
        seg_list = jieba.cut(sentence)
    else:
        # Segmentation with POS tagging.
        seg_list = psg.cut(sentence)
    return seg_list


def word_filter(seg_list, pos=False):
    """Remove noise words from a segmented token stream.

    Drops stopwords, single-character words, and — when ``pos`` is True —
    every token whose POS flag does not start with 'n' (nouns).
    Returns the surviving words as a list, in order.
    """
    # Build a set once: O(1) membership tests instead of scanning a list
    # for every token.
    stopword_set = set(get_stopword_list())
    filter_list = []
    for seg in seg_list:
        if not pos:
            # No POS info available: tag everything 'n' so nothing is
            # filtered on part of speech.
            word = seg
            flag = 'n'
        else:
            word = seg.word
            flag = seg.flag
        if not flag.startswith('n'):
            continue
        # Drop stopwords and words shorter than two characters.
        if word not in stopword_set and len(word) > 1:
            filter_list.append(word)
    return filter_list


def load_data(pos=False, corpus_path='d:/Users/Administrator/Desktop/data/corpus.txt'):
    """Read the corpus (one document per line) and return a list of filtered
    token lists, one entry per document."""
    doc_list = []
    with open(corpus_path, 'r', encoding='utf-8') as f:
        for line in f:
            content = line.strip()
            seg_list = seg_to_list(content, pos)
            filter_list = word_filter(seg_list, pos)
            doc_list.append(filter_list)
    return doc_list


def train_idf(doc_list):
    """Compute IDF statistics over *doc_list*.

    Returns ``(idf_dic, default_idf)`` where ``idf_dic`` maps word -> IDF and
    ``default_idf`` is the fallback for words unseen in the corpus.
    """
    idf_dic = {}
    # Total number of documents.
    tt_count = len(doc_list)
    # Document frequency: count each word at most once per document.
    for doc in doc_list:
        for word in set(doc):
            idf_dic[word] = idf_dic.get(word, 0.0) + 1.0
    # Convert counts to IDF, with +1 smoothing in the denominator.
    for k, v in idf_dic.items():
        idf_dic[k] = math.log(tt_count / (1.0 + v))
    # Fallback IDF for out-of-vocabulary words.
    # NOTE(review): the original comment says "assume the word occurs in
    # exactly one document", which with the smoothing above would give
    # log(tt_count / 2.0); the original code computes log(tt_count / 1.0)
    # and that behavior is preserved here — confirm which was intended.
    default_idf = math.log(tt_count / (1.0))
    return idf_dic, default_idf


def cmp(e1, e2):
    """Comparator for (word, score) pairs used to rank top-K keywords.

    Primary key is the score; ties are broken by comparing the two possible
    word concatenations, matching the original ordering exactly.
    """
    res = np.sign(e1[1] - e2[1])
    if res != 0:
        return res
    a = e1[0] + e2[0]
    b = e2[0] + e1[0]
    if a > b:
        return 1
    elif a == b:
        return 0
    else:
        return -1


class TfIdf(object):
    """Minimal TF-IDF keyword extractor over one pre-tokenised text."""

    # BUG FIX: the original named this method `init`, so `TfIdf(...)` raised
    # TypeError (object() takes no arguments) and the constructor body never
    # ran. Renamed to the real constructor `__init__`.
    def __init__(self, idf_dic, default_idf, word_list, keyword_num):
        """idf_dic / default_idf: trained IDF statistics; word_list: filtered
        tokens of the target text; keyword_num: number of keywords to return."""
        self.word_list = word_list
        self.idf_dic, self.default_idf = idf_dic, default_idf
        self.tf_dic = self.get_tf_dic()
        self.keyword_num = keyword_num

    def get_tf_dic(self):
        """Return {word: term frequency} for ``self.word_list``."""
        tf_dic = {}
        for word in self.word_list:
            tf_dic[word] = tf_dic.get(word, 0.0) + 1.0
        tt_count = len(self.word_list)
        # Guard against an empty word list (would otherwise divide by zero;
        # the dict is empty in that case, so the result is unchanged).
        if tt_count:
            for k, v in tf_dic.items():
                tf_dic[k] = float(v) / tt_count
        return tf_dic

    def get_tfidf(self):
        """Return the top ``keyword_num`` words ranked by TF-IDF, descending."""
        tfidf_dic = {}
        for word in self.word_list:
            idf = self.idf_dic.get(word, self.default_idf)
            tf = self.tf_dic.get(word, 0)
            tfidf_dic[word] = tf * idf
        # (Removed a no-op `tfidf_dic.items()` statement from the original.)
        # Sort by TF-IDF (ties broken by cmp) and keep the top keyword_num.
        keywords = []
        ranked = sorted(tfidf_dic.items(),
                        key=functools.cmp_to_key(cmp),
                        reverse=True)
        for k, v in ranked[:self.keyword_num]:
            keywords.append(k)
        return keywords


def tfidf_extract(word_list, pos=False, keyword_num=10):
    """Extract ``keyword_num`` keywords from *word_list* with TF-IDF trained
    on the default corpus file."""
    doc_list = load_data(pos)
    idf_dic, default_idf = train_idf(doc_list)
    tfidf_model = TfIdf(idf_dic, default_idf, word_list, keyword_num)
    keywords = tfidf_model.get_tfidf()
    return keywords


def textrank_extract(text, pos=False, keyword_num=10):
    """Extract ``keyword_num`` keywords from raw *text* with jieba's TextRank.

    NOTE(review): the ``pos`` argument is accepted but ignored, matching the
    original behavior.
    """
    textrank = analyse.textrank
    keywords = textrank(text, keyword_num)
    return keywords


# Sample text. NOTE(review): it is immediately overwritten by the corpus
# file read below; kept verbatim from the original script.
text = '''记者从国家文物局获悉,截至3月15日,19个省(区、市)180多家博物馆在做好疫情
防控工作的前提下恢复对外开放,其中19家为一级博物馆。
另外,沈阳故宫博物院、新四军江南指挥部纪念馆、金沙遗址博物馆等将于3月17日陆续恢复
开放。随着疫情防控形势好转,各地博物馆、纪念馆等陆续恢复开放。记者从各恢复开放博物馆发布
的公告获悉,各恢复开放博物馆对疫情防控期间参观观众在提前预约、测量体温等提出了明确要求,
并提醒观众做好个人防护。
2月27日,国家文物局发布《关于新冠肺炎疫情防控期间有序推进文博单位恢复开放和复工
的指导意见》强调,有序恢复开放文物、博物馆单位,各文物、博物馆开放单位可采取网上实名预
约、总量控制、分时分流、语音讲解、数字导览等措施,减少人员聚集。'''
pos = True

with open("d:/Users/Administrator/Desktop/data/corpus.txt", "r", encoding='utf-8') as f:  # open the text
    text = f.read()  # read the whole text

seg_list = seg_to_list(text, pos)
filter_list = word_filter(seg_list, pos)

print('TF-IDF模型结果:')
keywords_tfidf = tfidf_extract(filter_list, True, 10)
print('/'.join(keywords_tfidf))

print('TextRank模型结果: ')
keywords_textrank = textrank_extract(text, True, 10)
print('/'.join(keywords_textrank))

# Persist both result sets to disk.
with open("d:/Users/Administrator/Desktop/data/zrwycl.txt", "w", encoding='utf-8') as f:
    f.write('TF-IDF模型结果:\n')
    keywords_tfidf = tfidf_extract(filter_list, True, 10)
    f.write('/'.join(keywords_tfidf) + '\n')
    f.write('\nTextRank模型结果:\n')
    keywords_textrank = textrank_extract(text, True, 10)
    f.write('/'.join(keywords_textrank))


# 原文地址: https://www.cveoy.top/t/topic/nr34 著作权归作者所有。请勿转载和采集!