Human Action Recognition with MediaPipe and SVM - Python Code Example
import cv2
import mediapipe as mp
import pandas as pd
from sklearn.svm import SVC
# Initialize MediaPipe's pose estimation model
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
# Open the input video file
cap = cv2.VideoCapture('9.mp4')
# Get the frame rate and resolution of the input video
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Create the output video file
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('9_1.mp4', fourcc, fps, (width, height))
# Load the action datasets
warmup_df = pd.read_csv('dataset_1.csv')
combat_df = pd.read_csv('dataset_3.csv')
attack_df = pd.read_csv('dataset_2.csv')
respect_df = pd.read_csv('dataset_4.csv')
gongbu_df = pd.read_csv('dataset_5.csv')
# Merge the datasets into one large dataset
data = pd.concat([warmup_df, combat_df, attack_df, respect_df, gongbu_df], ignore_index=True)
# Train the SVM classifier (all columns except the last are features, 'label' is the target)
svm = SVC(kernel='linear', C=1, random_state=42)
svm.fit(data.iloc[:, :-1], data['label'])
# Process each frame of the video file
with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
    while cap.isOpened():
        # Read one frame
        ret, frame = cap.read()
        if not ret:
            break
        # Convert the frame to RGB format
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Run pose detection
        results = pose.process(image)
        # Check whether a body was detected
        if results.pose_landmarks:
            # Draw the detected pose
            mp_drawing.draw_landmarks(
                frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
            # Collect the pose keypoint coordinates
            pose_data = []
            keypoints = [0, 11, 12, 13, 14, 15, 16, 23, 24, 25, 26, 27, 28]  # indices of the required keypoints
            for i in keypoints:
                landmark = results.pose_landmarks.landmark[i]
                if landmark.visibility < 0.5:  # skip keypoints with visibility below 0.5
                    # Note: skipping shortens the feature vector; the SVM expects the same
                    # number of features (13 keypoints x 2 = 26) that it was trained on
                    continue
                pose_data.append(landmark.x)
                pose_data.append(landmark.y)
                # pose_data.append(landmark.z if landmark.z is not None else 0)
            # Feed the pose features to the SVM classifier for prediction
            label = svm.predict([pose_data])
            # Draw the predicted action type on the output frame
            cv2.putText(frame, str(label[0]), (5, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.1, (255, 100, 100), 2)
        else:
            # If no body is detected, skip classification for this frame
            cv2.putText(frame, 'No body detected', (50, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        # Write the frame to the output video file
        out.write(frame)
        # Show the result for the current frame
        cv2.imshow('MediaPipe Pose Detection press q exit', frame)
        # Exit when the q key is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
# Release resources
cap.release()
out.release()
cv2.destroyAllWindows()
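The training CSVs (dataset_1.csv through dataset_5.csv) are not shown above. Judging from how the classifier is fitted and queried, each row presumably stores the x and y coordinates of the same 13 keypoints (26 feature columns) followed by a final label column. A minimal sketch of collecting one such row from a single image is given below; the file names sample_pose.jpg and pose_samples.csv and the label value 1 are placeholders, not part of the original project.
import cv2
import mediapipe as mp
import pandas as pd

mp_pose = mp.solutions.pose
KEYPOINTS = [0, 11, 12, 13, 14, 15, 16, 23, 24, 25, 26, 27, 28]

def frame_to_row(frame, pose, label):
    """Return 26 coordinate features plus a label, or None if no body is found."""
    results = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    if not results.pose_landmarks:
        return None
    row = {}
    for i in KEYPOINTS:
        lm = results.pose_landmarks.landmark[i]
        row[f'x{i}'] = lm.x
        row[f'y{i}'] = lm.y
    row['label'] = label
    return row

with mp_pose.Pose(static_image_mode=True) as pose:
    frame = cv2.imread('sample_pose.jpg')          # placeholder image path
    if frame is not None:
        row = frame_to_row(frame, pose, label=1)   # placeholder label value
        if row is not None:
            pd.DataFrame([row]).to_csv('pose_samples.csv', index=False)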
Fix for the error TypeError: Can't convert object of type 'numpy.int64' to 'str' for 'text':
This error occurs because cv2.putText expects its text argument to be a string, but a value of type numpy.int64 was passed in. The fix is to convert label[0] to a string with the str() function, as shown below:
cv2.putText(frame, str(label[0]), (5, 60),
cv2.FONT_HERSHEY_SIMPLEX, 1.1, (255, 100, 100), 2)
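As a quick check of where the numpy.int64 comes from: SVC.predict returns a NumPy array whose element type follows the label column of the training data, so integer labels produce numpy.int64 values. A standalone illustration with made-up data:
import numpy as np
from sklearn.svm import SVC

X = np.array([[0.0, 0.0], [1.0, 1.0]])
y = np.array([0, 1])                      # integer labels -> predictions come back as numpy.int64
pred = SVC(kernel='linear').fit(X, y).predict([[0.9, 0.9]])
print(type(pred[0]))                      # <class 'numpy.int64'>
print(str(pred[0]))                       # '1', a plain string that cv2.putText accepts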