import cv2
import mediapipe as mp
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier

# Initialize MediaPipe's human pose model
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

# Open the input video file
cap = cv2.VideoCapture('9.mp4')

# Get the input video's frame rate and resolution
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
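# Guard (added): cap.get(cv2.CAP_PROP_FPS) can return 0 for some containers,
# which would break the VideoWriter below; fall back to a common default.
if fps <= 0:
    fps = 30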

# Create the output video file
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('9_1.mp4', fourcc, fps, (width, height))

# Load the action datasets
warmup_df = pd.read_csv('dataset_warm-up.csv')
combat_df = pd.read_csv('dataset_SHIZHAN POSE.csv')
attack_df = pd.read_csv('dataset_hit.csv')
respect_df = pd.read_csv('dataset_respect.csv')
gongbu_df = pd.read_csv('dataset_gongbu.csv')

# Merge the datasets into one combined dataset
data = pd.concat([warmup_df, combat_df, attack_df, respect_df, gongbu_df], ignore_index=True)

# Train the KNN classifier
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(data.iloc[:, :-1], data['label'])
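# NOTE (assumption): each CSV is expected to contain 33 * 3 = 99 feature
# columns (x, y, z for each of MediaPipe's 33 pose landmarks) followed by a
# 'label' column, so that the per-frame feature vector built below lines up
# with the training features column for column.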

# Process each frame of the video file
with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
    while cap.isOpened():
        # Read one frame
        ret, frame = cap.read()
        if not ret:
            break

        # Convert the frame to RGB format
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Run pose detection
        results = pose.process(image)

        # Check whether a body was detected
        if results.pose_landmarks:
            # Draw the pose landmarks
            mp_drawing.draw_landmarks(
                frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)

            # Collect the pose landmark coordinates (x, y, z per landmark)
            pose_data = []
            for landmark in results.pose_landmarks.landmark:
                pose_data.append(landmark.x)
                pose_data.append(landmark.y)
                pose_data.append(landmark.z if landmark.z is not None else 0)

            # Feed the pose features to the KNN classifier for prediction
            label = knn.predict([pose_data])

            # Overlay the predicted action type on the output frame
            cv2.putText(frame, str(label[0]), (5, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.1, (255, 100, 100), 2)

        else:
            # No body detected: annotate the frame instead
            cv2.putText(frame, 'No body detected', (50, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

        # Write the frame to the output video file
        out.write(frame)

        # Show the current frame's result
        cv2.imshow('MediaPipe Pose Detection press q exit', frame)

        # Exit if the q key is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

# Release resources
cap.release()
out.release()
cv2.destroyAllWindows()
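The script assumes the five dataset_*.csv files already exist. A minimal sketch of how one could be produced, assuming one labelled clip per action class; the clip name warmup_clip.mp4 and the collect_rows helper are hypothetical, not part of the original post:

import csv

import cv2
import mediapipe as mp

mp_pose = mp.solutions.pose

def collect_rows(video_path, label, writer):
    """Append one row (99 landmark coordinates + label) per detected pose."""
    cap = cv2.VideoCapture(video_path)
    with mp_pose.Pose(min_detection_confidence=0.5) as pose:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            results = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            if results.pose_landmarks:
                row = []
                for lm in results.pose_landmarks.landmark:
                    row.extend([lm.x, lm.y, lm.z])
                writer.writerow(row + [label])
    cap.release()

# Header row: x0, y0, z0, ..., x32, y32, z32, label (matches the 99-feature
# vector the main script builds per frame).
with open('dataset_warm-up.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow([f'{axis}{i}' for i in range(33) for axis in 'xyz'] + ['label'])
    collect_rows('warmup_clip.mp4', 'warm-up', writer)  # hypothetical clip name

Note that the classifier operates on raw landmark coordinates, so predictions are sensitive to where the person stands in the frame; normalizing landmarks relative to the hips or torso before training and prediction would make the labels more position-invariant.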

Human action recognition with the MediaPipe pose-estimation model and a KNN classifier

Original source: https://www.cveoy.top/t/topic/gM9v. Copyright belongs to the author. Please do not repost or scrape!
