人脸识别代码:利用训练出的参数进行识别

本代码示例展示了如何利用训练好的ResNet模型参数,进行人脸识别。代码中使用MindSpore框架,并使用摄像头进行实时人脸捕捉,将识别结果显示在图像上。

from mindspore.train.serialization import load_checkpoint, load_param_into_net
import numpy as np
import mindspore.dataset as ds
import cv2
import mindspore.nn as nn
import os
from mindspore import context, ops, Tensor
np.random.seed(58)


class ResidualBlock(nn.Cell):
    """Basic two-convolution residual block (ResNet "BasicBlock").

    Runs the input through conv->BN->ReLU->conv->BN, adds the (optionally
    projected) shortcut, and applies a final ReLU. Stacking these blocks
    deepens the network while keeping gradients well-behaved.
    """
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, pad_mode='same')
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, pad_mode='same')
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.downsample = downsample
        self.stride = stride

    def construct(self, x):
        # Shortcut branch: project x when the main branch changes the
        # spatial size or channel count, otherwise use the identity.
        shortcut = x if self.downsample is None else self.downsample(x)

        # Main branch: two 3x3 conv + BN stages with a ReLU in between.
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))

        # Residual addition followed by the final activation.
        return self.relu(y + shortcut)

class ResNet(nn.Cell):
    """ResNet-style image classifier over 3-channel inputs.

    A 7x7 stem convolution and max-pool feed four residual stages built
    from `block` (e.g. ResidualBlock); an average pool, flatten, and dense
    layer produce the class logits (34 classes by default).
    """

    def __init__(self, block, layers, num_classes=34):
        super(ResNet, self).__init__()
        self.in_channels = 64

        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, pad_mode='valid')
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='valid')

        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self.make_layer(block, 64, layers[0])
        self.layer2 = self.make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self.make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self.make_layer(block, 512, layers[3], stride=2)

        # Head: 3x3 average pool, flatten, dense classifier.
        self.avgpool = nn.AvgPool2d(kernel_size=3, stride=1, pad_mode='valid')
        self.fc = nn.Dense(512 * block.expansion, num_classes)

    def make_layer(self, block, out_channels, blocks, stride=1):
        """Build one stage of `blocks` residual blocks; only the first may downsample."""
        # A 1x1 projection shortcut is needed whenever identity mapping is
        # impossible (spatial stride or channel-count change).
        projection = None
        if stride != 1 or self.in_channels != out_channels * block.expansion:
            projection = nn.SequentialCell([
                nn.Conv2d(self.in_channels, out_channels * block.expansion, kernel_size=1, stride=stride),
                nn.BatchNorm2d(out_channels * block.expansion),
            ])

        stage = [block(self.in_channels, out_channels, stride, projection)]
        self.in_channels = out_channels * block.expansion
        stage.extend(block(self.in_channels, out_channels) for _ in range(blocks - 1))
        return nn.SequentialCell(stage)

    def construct(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        # Flatten to (batch, features) before the dense head.
        x = ops.Reshape()(x, (ops.Shape()(x)[0], -1))
        return self.fc(x)


class TrainDatasetGenerator:
    """Dataset generator reading labelled face images from one directory.

    File names are expected to begin with the integer class label followed
    by an underscore, e.g. ``3_0001.jpg``.
    """

    def __init__(self, file_path):
        self.file_path = file_path
        self.img_names = os.listdir(file_path)

    def __getitem__(self, index=0):
        name = self.img_names[index]
        image = cv2.imread(os.path.join(self.file_path, name))
        # Label is the leading integer of the file name.
        label = int(name.split('_')[0])
        image = cv2.resize(image, (100, 100))
        # transpose() with no axes reverses HWC -> (C, W, H); scale to [0, 1].
        # NOTE(review): axis order differs from the usual CHW, but it matches
        # the inference-time preprocessing used elsewhere in this file.
        image = image.transpose().astype(np.float32) / 255.
        return image, label

    def __len__(self):
        return len(self.img_names)

def load_model_from_ckpt(ckpt_path='E:/program/miniconda/envs/MindSpore/ckpt1/checkpoint_resnet-10_126.ckpt'):
    """Build the ResNet network and load trained weights from a checkpoint.

    Args:
        ckpt_path: Path to the MindSpore ``.ckpt`` file. Defaults to the
            previously hard-coded location so existing callers are unaffected.

    Returns:
        The ResNet network with the checkpoint parameters loaded.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
    # [2, 2, 2, 2] basic blocks per stage -> ResNet-18 layout.
    network = ResNet(ResidualBlock, [2, 2, 2, 2])
    # Load the parameter dict from disk and copy it into the network.
    param_dict = load_checkpoint(ckpt_path)
    load_param_into_net(network, param_dict)
    return network

def train_resnet():
    """Run real-time face recognition from the default camera.

    Despite the historical name, this performs inference only: it loads the
    trained network, detects faces with a Haar cascade, classifies each face
    crop, and overlays the predicted label. Press 'q' to quit.

    Fixes over the previous version: the face crop now slices rows by height
    and columns by width (was swapped), the detector is fed the grayscale
    frame that was already computed, failed camera reads are skipped, the
    unused training-dataset pipeline was removed, and the camera is released
    on exit.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
    network = load_model_from_ckpt()

    # Haar cascade face detector (file expected in the working directory).
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
    # Class names: the model predicts indices 0..33. Hoisted out of the loop.
    subjects = [str(i) for i in range(34)]

    cap = cv2.VideoCapture(0)
    try:
        stop = False
        while not stop:
            success, img = cap.read()
            if not success:
                # No frame available from the camera; try again.
                continue
            # Annotate a copy so the original frame stays untouched.
            annotated = img.copy()
            # The cascade detector operates on grayscale images.
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Returns a list of face rectangles as (x, y, width, height).
            faces = face_cascade.detectMultiScale(
                gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE)
            if len(faces) == 0:
                cv2.putText(annotated, 'unrecognized', (10, 20), cv2.FONT_HERSHEY_COMPLEX, 1, (128, 128, 0), 2)
            for (x, y, w, h) in faces:
                # Rows are indexed by y/height, columns by x/width.
                face = img[y:y + h, x:x + w].astype(np.float32)
                face = cv2.resize(face, (100, 100))
                # Same preprocessing as training: reverse axes, scale to [0, 1].
                face = face.transpose().astype(np.float32) / 255.
                face = np.expand_dims(face, axis=0)  # add batch dimension
                output = network(Tensor(face))
                predicted_class = np.argmax(output.asnumpy(), axis=1)
                cv2.rectangle(annotated, (x, y), (x + w, y + h), (0, 255, 0), 2)
                label = subjects[predicted_class[0]]
                cv2.putText(annotated, label, (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (128, 128, 0), 2)
            cv2.imshow('img', annotated)

            if cv2.waitKey(1) & 0xFF == ord('q'):  # 'q' ends the program
                stop = True
                cv2.destroyAllWindows()  # close the display window
    finally:
        cap.release()  # always release the camera device

流程图内容:

暂无法提供流程图。

人脸识别代码:利用训练出的参数进行识别

原文地址: https://www.cveoy.top/t/topic/jC1l 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录