Naive Bayes Classifier: Training and Evaluation with Train, Validation, and Test Set Splits
{ "title": "朴素贝叶斯分类器:使用划分数据集、验证集和测试集进行训练和评估", "description": "本代码实现了一个朴素贝叶斯分类器,并使用划分的数据集、验证集和测试集进行训练和评估。代码包含对数据集的划分、模型训练、超参数选择和模型评估等步骤。", "keywords": "朴素贝叶斯, 分类器, 数据集划分, 验证集, 测试集, 超参数选择, 模型评估", "content": "import numpy as np\nfrom collections import Counter\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\n\nclass NaiveBayes:\n def init(self, lamb=1):\n self.lamb = lamb # 贝叶斯估计的参数\n self.prior = dict() # 存储先验概率\n self.conditional = dict() # 存储条件概率\n\n def training(self, train_features, train_target):\n ""\n 根据朴素贝叶斯算法原理,使用 贝叶斯估计 计算先验概率和条件概率\n 特征集集为离散型数据,预测类别为多元. 数据集格式为np.array\n :param train_features: 训练特征集mn,m为样本数,n为特征数\n :param train_target: 训练标签集m1\n :return: 不返回任何值,更新成员变量\n ""\n train_features = np.array(train_features)\n train_target = np.array(train_target).reshape(train_features.shape[0], 1)\n m, n = train_features.shape\n labels = Counter(train_target.flatten().tolist()) # 计算各类别的样本个数\n k = len(labels.keys()) # 类别数\n for label, amount in labels.items():\n self.prior[label] = (amount + self.lamb) / (m + k * self.lamb) # 计算平滑处理后的先验概率\n for feature in range(n): # 遍历每个特征\n self.conditional[feature] = {}\n values = np.unique(train_features[:, feature])\n for value in values: # 遍历每个特征值\n self.conditional[feature][value] = {}\n for label, amount in labels.items(): # 遍历每种类别\n feature_label = train_features[train_target[:, 0] == label, :] # 截取该类别的数据集\n c_label = Counter(feature_label[:, feature].flatten().tolist()) # 计算该类别下各特征值出现的次数\n self.conditional[feature][value][label] = (c_label.get(value, 0) + self.lamb) / \n (amount + len(values) * self.lamb) # 计算平滑处理后的条件概率\n return\n\n def predict(self, features):\n ""\n 预测单个样本\n ""\n best_poster, best_label = -np.inf, -1\n for label in self.prior:\n poster = np.log(self.prior[label]) # 初始化后验概率为先验概率,同时把连乘换成取对数相加,防止下溢(即太多小于1的数相乘,结果会变成0)\n for feature in range(features.shape[0]):\n poster += np.log(self.conditional[feature][features[feature]][label])\n if poster > best_poster: # 获取后验概率最大的类别\n best_poster = poster\n best_label = label\n return best_label\n\n\ndef test():\n dataset = datasets.load_iris() # 鸢尾花数据集\n dataset = np.concatenate((dataset['data'], dataset['target'].reshape(-1, 1)), axis=1) # 组合数据\n np.random.shuffle(dataset) # 打乱数据\n features = dataset[:, :-1]\n target = dataset[:, -1:]\n\n # 划分数据集\n train_features, test_features, train_target, test_target = train_test_split(features, target, test_size=0.2, random_state=42)\n train_features, val_features, train_target, val_target = train_test_split(train_features, train_target, test_size=0.2, random_state=42)\n\n # 使用GridSearchCV选择最佳的超参数\n param_grid = {'lamb': [0.1, 1, 10]}\n nb = NaiveBayes()\n grid_search = GridSearchCV(nb, param_grid, cv=5)\n grid_search.fit(train_features, train_target)\n best_lamb = grid_search.best_params_['lamb']\n\n # 使用最佳的超参数重新训练模型\n nb = NaiveBayes(lamb=best_lamb)\n nb.training(train_features, train_target)\n\n # 在验证集上评估模型\n prediction = []\n for features in val_features:\n prediction.append(nb.predict(features))\n correct = [1 if a == b else 0 for a, b in zip(prediction, val_target)]\n accuracy = correct.count(1) / len(correct)\n print(f'Validation accuracy: {accuracy}')\n\n # 在测试集上评估模型\n prediction = []\n for features in test_features:\n prediction.append(nb.predict(features))\n correct = [1 if a == b else 0 for a, b in zip(prediction, test_target)]\n accuracy = correct.count(1) / len(correct)\n print(f'Test accuracy: {accuracy}')\n\ntest()