import numpy as np
import mindspore.dataset as ds
import os
import cv2
import mindspore
import mindspore.nn as nn
from mindspore import Tensor, load_checkpoint, load_param_into_net
from mindspore.common.initializer import Normal
from mindspore import context
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from mindspore.ops.operations import TensorAdd
from scipy.integrate._ivp.radau import P
from mindspore import Model  # 承载网络结构
from mindspore.nn.metrics import Accuracy  # 测试模型用

# Fixed seed so any NumPy-based randomness (e.g. augmentation/shuffling) is reproducible.
np.random.seed(58)


class BasicBlock(nn.Cell):
    """Two-layer residual block: 3x3 conv -> BN -> ReLU -> 3x3 conv -> BN, plus skip connection.

    Args:
        in_channels: channels of the incoming feature map.
        out_channels: channels produced by both convolutions.
        stride: stride of the first convolution (spatial downsampling).
        downsample: optional Cell that projects the identity branch so it
            matches the main branch's shape; None when shapes already agree.
    """

    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First conv may downsample spatially via `stride`.
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, pad_mode='pad', has_bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()
        # Second conv keeps the spatial resolution.
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1, pad_mode='pad', has_bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.downsample = downsample
        self.add = TensorAdd()

    def construct(self, x):
        # Project the skip branch first if a downsample Cell was supplied.
        shortcut = self.downsample(x) if self.downsample is not None else x

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))

        # Residual addition followed by the final activation.
        return self.relu(self.add(y, shortcut))

class ResNet(nn.Cell):
    """ResNet backbone built from BasicBlock (ResNet-18 layout for layers=[2, 2, 2, 2]).

    Fix: the original `nn.AvgPool2d(kernel_size=1, stride=1)` was a no-op, so
    `flatten` produced 512*H*W features while `fc = nn.Dense(512, ...)` expects
    exactly 512 — a runtime shape mismatch for 224x224 inputs. Global average
    pooling (ReduceMean over the H and W axes) yields (N, 512) for ANY input
    resolution, which also lets training (224x224) and webcam crops of other
    sizes share the same head.

    Args:
        block: residual block class (e.g. BasicBlock).
        layers: number of blocks per stage, e.g. [2, 2, 2, 2].
        num_classes: size of the final classification layer.
    """

    def __init__(self, block, layers, num_classes=10):
        super(ResNet, self).__init__()
        self.in_channels = 64

        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad', has_bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
        # Four stages; stages 2-4 halve the spatial size via stride=2.
        self.layer1 = self.make_layer(block, 64, layers[0])
        self.layer2 = self.make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self.make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self.make_layer(block, 512, layers[3], stride=2)
        # Global average pooling over the spatial axes (see class docstring).
        self.avgpool = mindspore.ops.ReduceMean(keep_dims=False)
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(512, num_classes)

    def make_layer(self, block, out_channels, blocks, stride=1):
        """Build one stage of `blocks` residual blocks; the first may downsample."""
        downsample = None
        # A 1x1 projection is needed whenever the skip branch's shape differs.
        if stride != 1 or self.in_channels != out_channels:
            downsample = nn.SequentialCell([
                nn.Conv2d(self.in_channels, out_channels, kernel_size=1, stride=stride, has_bias=False),
                nn.BatchNorm2d(out_channels)
            ])

        layers = []
        layers.append(block(self.in_channels, out_channels, stride, downsample))
        self.in_channels = out_channels
        for _ in range(1, blocks):
            layers.append(block(out_channels, out_channels))

        return nn.SequentialCell(layers)

    def construct(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        # (N, 512, H, W) -> (N, 512): mean over axes 2 (H) and 3 (W).
        x = self.avgpool(x, (2, 3))
        x = self.flatten(x)
        x = self.fc(x)

        return x

class TrainDatasetGenerator:
    """Random-access dataset over a directory of images named '<label>_*.<ext>'.

    Yields (image, label) pairs where the image is a float32 CHW RGB array
    scaled to [0, 1] and the label is the integer prefix of the filename.

    Fix: the original used `data.transpose()`, which reverses ALL axes —
    (H, W, C) -> (C, W, H) — feeding the network a spatially transposed image.
    The correct HWC -> CHW permutation is `transpose(2, 0, 1)`.
    """

    def __init__(self, file_path):
        self.file_path = file_path
        # assumes every entry in the directory is a readable image file — TODO confirm
        self.img_names = os.listdir(file_path)

    def __getitem__(self, index):
        name = self.img_names[index]
        # Label is the integer before the first underscore, e.g. '3_001.jpg' -> 3.
        label = int(name.split('_')[0])
        data = cv2.imread(os.path.join(self.file_path, name))
        data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR
        data = cv2.resize(data, (224, 224))
        # HWC -> CHW, then scale pixel values into [0, 1].
        data = data.transpose(2, 0, 1).astype(np.float32) / 255.
        return data, label

    def __len__(self):
        return len(self.img_names)

def load_model_from_ckpt():
    """Build a ResNet (BasicBlock x [2,2,2,2], ResNet-18 layout) and load weights
    from a hard-coded local checkpoint.

    NOTE(review): this function is never called in this file; the checkpoint
    filename suggests a ResNet-34 checkpoint being loaded into a ResNet-18-style
    network — verify the shapes actually match before relying on it.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
    # Create the ResNet model.
    network = ResNet(BasicBlock,[2,2,2,2])
    # Load parameters from the checkpoint file (hard-coded Windows path).
    param_dict = load_checkpoint('D:/pythonProject7/ckpt/checkpoint_resnet_34-8_25.ckpt')
    # NOTE(review): slicing checkpoint tensors like this silently truncates
    # weights; presumably intended to force shape compatibility — confirm the
    # original checkpoint shapes actually require it.
    param_dict['layer4.1.conv2.weight'] = param_dict['layer4.1.conv2.weight'][:, :, :, :512]
    param_dict['fc.weight'] = param_dict['fc.weight'][:512, :]
    # Load the (modified) parameters into the network.
    load_param_into_net(network, param_dict)
    # NOTE(review): replacing `fc` AFTER load_param_into_net discards whatever
    # fc weights were just loaded and re-initialises the classifier randomly —
    # if the loaded fc weights should be kept, this line defeats that.
    network.fc = nn.Dense(512, 10)
    # Return the model.
    return network


def train_resnet():
    """Train the ResNet face classifier, then run live webcam recognition.

    Fixes over the original:
      * `model.train(...)` is actually called — the original built `Model`
        but never trained it, so inference ran on randomly initialised weights.
      * `num_classes=10` to match the 10 real labels in `subjects` (was 100).
      * `detectMultiScale` is fed the grayscale image that was computed for it
        (the original computed `gray` and then passed the color frame).
      * Face crop uses `img[y:y+h, x:x+w]` — rows are indexed by y/h, columns
        by x/w; the original swapped w and h.
      * Crop is resized to 224x224 to match the training pipeline (was 100).
      * HWC -> CHW via `transpose(2, 0, 1)` instead of reversing all axes.
      * Logits are passed through softmax so `threshold` acts as a
        probability cut-off rather than a meaningless raw-logit comparison.
      * The capture device is always released on exit.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')

    # ---- data pipeline ----
    train_dataset_generator = TrainDatasetGenerator('D:/pythonProject7/train1')
    ds_train = ds.GeneratorDataset(train_dataset_generator, ['data', 'label'], shuffle=True)
    ds_train = ds_train.shuffle(buffer_size=10)
    ds_train = ds_train.batch(batch_size=2, drop_remainder=True)

    # ---- model / optimiser / loss ----
    network = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=10)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.001, momentum=0.9)
    model = Model(network, net_loss, net_opt, metrics={'Accuracy': Accuracy()})

    # Actually train before doing inference.
    model.train(10, ds_train, callbacks=[LossMonitor(), TimeMonitor()], dataset_sink_mode=False)
    network.set_train(False)  # switch BatchNorm/Dropout to inference mode

    # ---- live webcam recognition ----
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')  # Haar face detector
    threshold = 0.95  # minimum softmax probability to accept a prediction
    subjects = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'unknown']

    cap = cv2.VideoCapture(0)
    try:
        stop = False
        while not stop:
            success, img = cap.read()
            if not success:  # camera unplugged / no frame available
                break
            img1 = img.copy()  # keep the original frame untouched for cropping
            # The Haar cascade expects a grayscale image.
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Returns a list of (x, y, width, height) face rectangles.
            rects = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5,
                                                  minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
            if len(rects) == 0:
                cv2.putText(img1, 'no face!', (10, 20), cv2.FONT_HERSHEY_COMPLEX, 1, (128, 128, 0), 2)
            for (x, y, w, h) in rects:
                # Rows are indexed by y/h, columns by x/w.
                face = img[y:y + h, x:x + w].astype(np.float32)
                face = cv2.resize(face, (224, 224))  # match the training input size
                face = face.transpose(2, 0, 1) / 255.  # HWC -> CHW, scale to [0, 1]
                face = np.expand_dims(face, axis=0)  # (1, C, H, W) batch of one
                output = network(Tensor(face.astype(np.float32))).asnumpy()[0]
                # Numerically stable softmax so the threshold is a probability.
                probs = np.exp(output - output.max())
                probs /= probs.sum()
                best = int(np.argmax(probs))
                label = subjects[best] if probs[best] >= threshold else 'unknown'
                cv2.rectangle(img1, (x, y), (x + w, y + h), (0, 255, 0), 2)
                cv2.putText(img1, label, (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (128, 128, 0), 2)
            cv2.imshow('img', img1)
            if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
                stop = True
    finally:
        cap.release()
        cv2.destroyAllWindows()


# Entry point: train the model and start the webcam recognition loop.
if __name__ == '__main__':
    train_resnet()

代码说明:

  1. 模型定义:
    • 定义了ResNet模型,包含BasicBlock模块,使用MindSpore框架实现。
    • 模型结构可根据需求进行调整。
  2. 数据加载:
    • 定义了TrainDatasetGenerator类,用于加载训练数据。
    • 可以根据实际数据集路径进行修改。
  3. 模型训练:
    • 使用MindSpore框架配置了优化器、损失函数并构造了Model对象;注意原代码中并未调用model.train(),需要补充训练调用才能真正完成模型训练。
  4. 模型加载:
    • 定义了load_model_from_ckpt函数,用于从ckpt文件加载训练好的模型参数。
  5. 人脸识别:
    • 使用OpenCV库进行人脸检测,并调用训练好的模型进行人脸识别。
    • 实时识别结果显示在摄像头画面中。

使用说明:

  1. 确保安装了MindSpore、OpenCV、NumPy等库。
  2. 将数据集路径、ckpt文件路径和识别类别信息修改为实际路径。
  3. 运行代码,即可启动摄像头进行人脸识别。

注意:

  • 本代码仅供参考,需要根据实际需求进行修改和完善。
  • 为了提高模型泛化能力,可以添加数据增强操作。
  • 可以尝试使用不同的模型结构和训练参数,以获得更好的识别效果。
  • 可以根据实际需求修改识别类别信息。
  • 代码中可能存在一些错误或优化空间,欢迎反馈。
基于MindSpore的ResNet人脸识别模型训练与实现

原文地址: https://www.cveoy.top/t/topic/jrvx 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录