import cv2
import mediapipe as mp
import math
import pandas as pd

# Initialize MediaPipe's pose-estimation solution (drawing helpers + model).
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

# Open the input video file (hard-coded path).
cap = cv2.VideoCapture('6.mp4')

# Read the input video's frame rate and resolution so the output matches.
# NOTE(review): if the file failed to open, cap.get() returns 0 and the
# writer below is created with an invalid size/fps — no check is done here.
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Create the output video writer using the MPEG-4 ('mp4v') codec.
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('9_1.mp4', fourcc, fps, (width, height))

# Load the reference pose datasets, one CSV per action class.
# Presumably each row is a flattened landmark vector — TODO confirm schema.
warm_up_df = pd.read_csv('warm_up.csv')
combat_pose_df = pd.read_csv('combat_pose.csv')
attack_df = pd.read_csv('attack.csv')
respect_df = pd.read_csv('respect.csv')

def _classify_pose(angles, references, threshold=0.5):
    """Return the label of the reference pose closest to *angles*.

    Parameters
    ----------
    angles : list[float]
        Flattened (x, y, z) landmark coordinates for one frame.
    references : list[tuple[str, pandas.DataFrame]]
        (label, dataset) pairs; each dataset row is one reference vector.
    threshold : float
        Maximum accepted squared Euclidean distance; beyond it return ''.
        Tunable — 0.5 is a conservative default.

    Returns
    -------
    str
        The best-matching label, or '' when nothing is close enough.
    """
    query = pd.Series(angles, dtype=float).to_numpy()
    best_label, best_dist = '', float('inf')
    for label, ref_df in references:
        if ref_df.shape[1] != query.shape[0]:
            continue  # reference has a different landmark layout; skip it
        # Squared Euclidean distance from the query to every reference row.
        dists = ((ref_df.to_numpy(dtype=float) - query) ** 2).sum(axis=1)
        d = float(dists.min())
        if d < best_dist:
            best_label, best_dist = label, d
    return best_label if best_dist <= threshold else ''


# Process every frame of the input video through the pose model.
with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
    while cap.isOpened():
        # Grab the next frame; stop at end of stream.
        ret, frame = cap.read()
        if not ret:
            break

        # MediaPipe expects RGB input; OpenCV delivers BGR.
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Run pose detection on this frame.
        results = pose.process(image)

        if results.pose_landmarks:
            # Draw the detected skeleton onto the output frame.
            mp_drawing.draw_landmarks(
                frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)

            # Flatten the landmarks into a single (x, y, z, x, y, z, ...) vector.
            angles = []
            for landmark in results.pose_landmarks.landmark:
                angles.extend((landmark.x, landmark.y, landmark.z))

            # BUG FIX: the original used angles_df.equals(warm_up_df) etc.
            # DataFrame.equals() requires identical shape, column labels and
            # dtypes, so a freshly built one-row frame (integer columns
            # 0..N, float data) could never equal the CSV-loaded datasets —
            # the label was always ''.  Use nearest-neighbour matching
            # against the reference rows instead.
            poses = _classify_pose(angles, [
                ('warm-up', warm_up_df),
                ('combat pose', combat_pose_df),
                ('attack', attack_df),
                ('respect', respect_df),
            ])

            # Overlay the recognized action name (two positions/styles).
            cv2.putText(frame, poses, (600, 450),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (100, 225, 225), 2)

            cv2.putText(frame, poses, (5, 20),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (225, 100, 225), 2)

        else:
            # No person detected in this frame — annotate and move on.
            cv2.putText(frame, 'No body detected', (50, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

        # Write the annotated frame to the output file.
        out.write(frame)

        # Live preview of the result.
        cv2.imshow('MediaPipe Pose Detection press q exit', frame)

        # Allow early exit with the 'q' key.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

# Tear down: release the capture and writer handles, then close any
# OpenCV preview windows that are still open.
for handle in (cap, out):
    handle.release()
cv2.destroyAllWindows()
# 基于MediaPipe和OpenCV的动作识别系统
# (Action-recognition system based on MediaPipe and OpenCV)
#
# 原文地址: https://www.cveoy.top/t/topic/gt1b 著作权归作者所有。请勿转载和采集!