Human Pose Recognition: Action Classification with MediaPipe and KNN
import cv2
import mediapipe as mp
import math
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
Initialize the MediaPipe pose model
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
Open the input video file
cap = cv2.VideoCapture('6.mp4')
Get the frame rate and resolution of the input video
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
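# Some containers report 0 for CAP_PROP_FPS, which would make the video
# writer below fail; the 30 FPS fallback here is an assumption, not from
# the source:
if fps <= 0:
    fps = 30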
Create the output video file
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('9_1.mp4', fourcc, fps, (width, height))
Load the action datasets
warmup_df = pd.read_csv('dataset_warm-up.csv')
combat_df = pd.read_csv('dataset_SHIZHAN POSE.csv')
attack_df = pd.read_csv('dataset_hit.csv')
respect_df = pd.read_csv('dataset_respect.csv')
Merge the datasets into one combined dataset
data = pd.concat([warmup_df, combat_df, attack_df, respect_df], ignore_index=True)
Train the KNN classifier
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(data[['angle1', 'angle2', 'angle3', 'angle4', 'angle5', 'angle6',
              'angle7', 'angle8', 'angle9', 'angle10', 'angle11']],
        data['label'])
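The classifier expects eleven joint-angle features per frame. The source does not show how these features are extracted; below is a minimal sketch of a typical helper, assuming each feature is the 2D angle at a middle landmark (the name calc_angle and its use are hypothetical, not part of the source):

def calc_angle(a, b, c):
    # Angle at landmark b, in degrees, between segments b->a and b->c,
    # normalized to [0, 360).
    ang = math.degrees(math.atan2(c.y - b.y, c.x - b.x) -
                       math.atan2(a.y - b.y, a.x - b.x))
    return ang + 360 if ang < 0 else ang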
Process each frame of the video file
with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
    while cap.isOpened():
        # Read one frame
        ret, frame = cap.read()
        if not ret:
            break
        # Convert the frame to RGB
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Run pose detection
        results = pose.process(image)
        # Check whether a body was detected
        if results.pose_landmarks:
            # Draw the skeleton
            mp_drawing.draw_landmarks(
                frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
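            # The drawing style can be customized with DrawingSpec if desired
            # (optional; the colors below are illustrative, not from the source):
            # mp_drawing.draw_landmarks(
            #     frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
            #     landmark_drawing_spec=mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2),
            #     connection_drawing_spec=mp_drawing.DrawingSpec(color=(255, 0, 0), thickness=2))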
            # Get the nose, right shoulder, and right wrist landmarks
            nose = results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE]
            right_shoulder = results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_SHOULDER]
            right_wrist = results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_WRIST]

            # Get the right elbow landmark
            right_elbow = results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_ELBOW]
            # Length of the elbow-to-wrist segment in normalized coordinates
            # (not used further in this excerpt)
            length = math.sqrt((right_wrist.x - right_elbow.x)**2 +
                               (right_wrist.y - right_elbow.y)**2)

            # End points of the lines extending the elbow-wrist segment.
            # Landmark coordinates are normalized to [0, 1], so they must be
            # scaled by the frame size before drawing.
            x1 = int((right_wrist.x + 2 * (right_wrist.x - right_elbow.x)) * width)
            y1 = int((right_wrist.y + 2 * (right_wrist.y - right_elbow.y)) * height)
            x2 = int((right_wrist.x - 2 * (right_wrist.x - right_elbow.x)) * width)
            y2 = int((right_wrist.y - 2 * (right_wrist.y - right_elbow.y)) * height)

            # Draw green lines on the output frame
            cv2.line(frame, (int(right_elbow.x * width), int(right_elbow.y * height)),
                     (x1, y1), (0, 255, 0), 2)
            cv2.line(frame, (int(right_wrist.x * width), int(right_wrist.y * height)),
                     (x2, y2), (0, 255, 0), 2)
            # Angle at the nose between the right wrist and the right shoulder
            angle_nwr = math.degrees(math.atan2(right_wrist.y - nose.y, right_wrist.x - nose.x) -
                                     math.atan2(right_shoulder.y - nose.y, right_shoulder.x - nose.x))
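            # The atan2 difference can be negative; if the training CSVs store
            # non-negative angles, normalizing keeps the feature consistent
            # (an assumption, since the dataset convention is not shown):
            if angle_nwr < 0:
                angle_nwr += 360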
            # Feed the eleven angle features to the KNN classifier.
            # NOTE: only angle_nwr is computed in this excerpt; the other ten
            # features (angle, angle1, angle_dl, angle_dr, angle_tr, angle_tl,
            # angle_lka, angle_hls, angle_rka, angle_hrs) must be computed from
            # their landmark triplets beforehand, matching the training columns.
            label = knn.predict([[angle, angle1, angle_dl, angle_dr, angle_tr, angle_tl,
                                  angle_lka, angle_hls, angle_rka, angle_hrs, angle_nwr]])
            # Display the angle value and the predicted action on the frame
            cv2.putText(frame, 'Angle: {:.2f}'.format(angle), (5, 43),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            cv2.putText(frame, label[0], (600, 450),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (100, 225, 225), 2)
            cv2.putText(frame, label[0], (5, 20),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (225, 100, 225), 2)
        else:
            # No body detected; annotate the frame and skip pose processing
            cv2.putText(frame, 'No body detected', (50, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        # Write the frame to the output video file
        out.write(frame)

        # Show the current frame
        cv2.imshow('MediaPipe Pose Detection press q exit', frame)

        # Exit if the q key is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Release resources
cap.release()
out.release()
cv2.destroyAllWindows()
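The four CSV files loaded above have to be produced beforehand. Below is a minimal sketch of how such a dataset could be collected, assuming the same eleven-angle schema used in training; the collection loop and the 'warm-up' label are illustrative, not from the source:

feature_names = ['angle1', 'angle2', 'angle3', 'angle4', 'angle5', 'angle6',
                 'angle7', 'angle8', 'angle9', 'angle10', 'angle11']
rows = []
# Inside a capture loop like the one above, after computing the 11 angles:
#     rows.append(dict(zip(feature_names, angles), label='warm-up'))
pd.DataFrame(rows, columns=feature_names + ['label']).to_csv('dataset_warm-up.csv', index=False)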