import os

import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle

# Read the image paths and build a (path, label) pair for every image.

def load_dataset(data_dir):
    """Collect every image under *data_dir* together with its one-hot label.

    Parameters
    ----------
    data_dir : str
        Root directory; each immediate sub-directory is named after one of
        the five Pokemon classes and contains that class's images.

    Returns
    -------
    (list[str], numpy.ndarray)
        Image file paths and the matching one-hot labels, shape (N, 5).

    Raises
    ------
    KeyError
        If a sub-directory name is not one of the five known classes.
    """
    # Fixed class -> one-hot mapping (Bulbasaur, Charmander, Squirtle,
    # Pikachu, Mewtwo).  Keys are the Chinese class-folder names and must
    # not be translated.
    labels_dict = {
        '妙蛙种子': [1, 0, 0, 0, 0],
        '小火龙': [0, 1, 0, 0, 0],
        '杰尼龟': [0, 0, 1, 0, 0],
        '皮卡丘': [0, 0, 0, 1, 0],
        '超梦': [0, 0, 0, 0, 1],
    }
    images = []
    labels = []
    for class_name in os.listdir(data_dir):
        class_dir = os.path.join(data_dir, class_name)
        # Skip stray files (e.g. .DS_Store) — the original crashed with
        # NotADirectoryError when data_dir contained anything but folders.
        if not os.path.isdir(class_dir):
            continue
        for img_name in os.listdir(class_dir):
            images.append(os.path.join(class_dir, img_name))
            labels.append(class_name)
    labels = np.array([labels_dict[label] for label in labels])
    return images, labels

# Preprocess the images.

def preprocess(img_path):
    """Load one image file and convert it to a normalized float tensor.

    Parameters
    ----------
    img_path : scalar string tensor
        Path of the image on disk (any format decode_image supports).

    Returns
    -------
    tf.Tensor
        Shape (224, 224, 3), float values scaled to [0, 1].
    """
    img = tf.io.read_file(img_path)
    # expand_animations=False guarantees a rank-3 tensor; plain
    # decode_image may return a rank-4 animated-GIF tensor with unknown
    # static rank, which breaks tf.image.resize inside a tf.data map.
    img = tf.image.decode_image(img, channels=3, expand_animations=False)
    img = tf.image.resize(img, [224, 224])
    img = img / 255.0  # scale uint8 [0, 255] to float [0, 1]
    return img

# Build the convolutional neural network.

def build_model():
    """Build a small 3-stage CNN for 5-way Pokemon classification.

    Returns
    -------
    tf.keras.Sequential
        Uncompiled model expecting (224, 224, 3) inputs and producing a
        5-way softmax.
    """
    model = tf.keras.Sequential()
    # Three conv/pool stages with doubling filter counts (32 -> 64 -> 128).
    model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu',
                                     input_shape=(224, 224, 3)))
    model.add(tf.keras.layers.MaxPooling2D((2, 2)))
    model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(tf.keras.layers.MaxPooling2D((2, 2)))
    model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(tf.keras.layers.MaxPooling2D((2, 2)))
    # Classifier head.
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(128, activation='relu'))
    model.add(tf.keras.layers.Dense(5, activation='softmax'))
    return model

# Visualize training with TensorBoard.

def visualize(log_dir, model, train_ds=None, val_ds=None, epochs=10):
    """Train with a TensorBoard callback so metrics can be inspected.

    The original version read ``train_ds``/``val_ds`` from globals that
    are never defined at module scope (they are locals of train_model),
    so calling it always raised NameError.  The datasets are now explicit,
    optional parameters; the two-argument call form still works.

    Parameters
    ----------
    log_dir : str
        Directory where TensorBoard event files are written.
    model : tf.keras.Model
        Compiled model to fit.
    train_ds, val_ds : tf.data.Dataset, optional
        Training / validation pipelines; when train_ds is None no fit
        is performed.
    epochs : int
        Number of training epochs (default 10, matching the original).
    """
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=log_dir, histogram_freq=1)
    if train_ds is None:
        # No dataset supplied — nothing to fit (legacy call signature).
        return
    model.fit(train_ds, epochs=epochs, validation_data=val_ds,
              callbacks=[tensorboard_callback])

# Split the data into training and test sets.

def split_dataset(images, labels):
    """Split into an 80/20 train/test partition, stratified by class.

    Parameters
    ----------
    images : list[str]
        Image file paths.
    labels : numpy.ndarray
        One-hot labels aligned with *images*, shape (N, 5).

    Returns
    -------
    tuple
        (X_train, X_test, y_train, y_test).
    """
    X_train, X_test, y_train, y_test = train_test_split(
        images, labels, test_size=0.2, random_state=42, stratify=labels)
    # The split above is seeded, but the original extra shuffle was not —
    # pass random_state so the whole function is reproducible.
    X_train, y_train = shuffle(X_train, y_train, random_state=42)
    return X_train, X_test, y_train, y_test

# Data augmentation.

def data_augmentation(img):
    """Apply light random augmentation: horizontal flip + brightness jitter.

    Parameters
    ----------
    img : tf.Tensor
        A single image tensor.

    Returns
    -------
    tf.Tensor
        Augmented image of the same shape.
    """
    flipped = tf.image.random_flip_left_right(img)
    return tf.image.random_brightness(flipped, max_delta=0.1)

# Train the model.

def train_model(model, X_train, X_test, y_train, y_test):
    """Build tf.data pipelines, compile the model, and fit for 10 epochs.

    Parameters
    ----------
    model : tf.keras.Model
        Uncompiled model (compiled here with Adam + categorical CE).
    X_train, X_test : list[str]
        Image file paths for the training / validation splits.
    y_train, y_test : numpy.ndarray
        One-hot labels aligned with the paths.
    """
    train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train))
    # Decode/resize in parallel, then augment only the training images.
    train_ds = train_ds.map(lambda x, y: (preprocess(x), y),
                            num_parallel_calls=tf.data.experimental.AUTOTUNE)
    train_ds = train_ds.map(lambda x, y: (data_augmentation(x), y))
    train_ds = train_ds.batch(32)
    train_ds = train_ds.prefetch(tf.data.experimental.AUTOTUNE)

    # Validation pipeline: no augmentation, same preprocessing.
    val_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test))
    val_ds = val_ds.map(lambda x, y: (preprocess(x), y),
                        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    val_ds = val_ds.batch(32)
    val_ds = val_ds.prefetch(tf.data.experimental.AUTOTUNE)

    # `learning_rate` replaces the deprecated `lr` keyword, which has been
    # removed from recent Keras optimizer constructors.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(train_ds, epochs=10, validation_data=val_ds)

# Save the model.

def save_model(model, save_path):
    """Persist the trained model to *save_path* (e.g. an .h5 file)."""
    model.save(save_path)

if __name__ == '__main__':
    # The original guard was `if name == 'main'`, which raises NameError;
    # `__name__ == '__main__'` is the correct script-entry idiom.
    data_dir = 'path/to/data'
    log_dir = 'path/to/log'
    save_path = 'path/to/model.h5'

    # End-to-end pipeline: load -> split -> build -> train -> save.
    images, labels = load_dataset(data_dir)
    X_train, X_test, y_train, y_test = split_dataset(images, labels)
    model = build_model()
    train_model(model, X_train, X_test, y_train, y_test)
    save_model(model, save_path)
    # The original last line was truncated ('visualize(log_dir, model') —
    # the missing closing parenthesis is restored here.
    visualize(log_dir, model)
# --- Scraped task description (truncated mid-sentence on the original page) ---
# You are a researcher at the Pallet Town Pokemon lab. Professor Oak has
# given you a Pokemon dataset and asked you to train a model that
# distinguishes Bulbasaur, Charmander, Squirtle, Pikachu and Mewtwo.
# Using TensorFlow, batch-read the image paths and generate a (path, label)
# pair for every image, shuffling the data with NumPy's random functions.
# Build a simple CNN with convolution, pooling and fully connected layers;
# several convolution layers with different filter counts and sizes can
# further improve performance. Use Te... [original text cut off here]

# Source: https://www.cveoy.top/t/topic/eChJ — copyright belongs to the
# original author; do not repost or scrape.

# (Page footer advertisement: "Free AI — click me, no registration or login required.")