# Action recognition based on OpenPose and KNN
import cv2
import math
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
# Number of neighbors for the KNN classifier
neighbors = 2

# The 12 joint-angle feature columns shared by every dataset
FEATURE_COLUMNS = ['angle1', 'angle2', 'angle3', 'angle4', 'angle5', 'angle5_1',
                   'angle6', 'angle7', 'angle8', 'angle9', 'angle10', 'angle11']

# Load the per-action datasets
# (label convention: 0=warm-up, 1=SHIZHAN POSE, 2=hit, 3=respect, 4=gongbu)
warmup_df = pd.read_csv('dataset_warm-up.csv')
combat_df = pd.read_csv('dataset_SHIZHAN POSE.csv')
attack_df = pd.read_csv('dataset_hit.csv')
respect_df = pd.read_csv('dataset_respect.csv')
gongbu_df = pd.read_csv('dataset_gongbu.csv')

# Data cleaning: drop any sample that deviates from the column mean by more
# than 2 standard deviations in at least one feature column.
# BUGFIX: the original computed std() over a column list that omitted
# 'angle5_1'; pandas aligned the mismatched frames by column, so that
# column's comparison was NaN -> False and its outliers were never removed.
print('数据清洗')
for df in [warmup_df, combat_df, attack_df, respect_df, gongbu_df]:
    features = df[FEATURE_COLUMNS]
    outliers = (np.abs(features - features.mean()) > 2 * features.std()).any(axis=1)
    df.drop(df[outliers].index, inplace=True)

# Merge the cleaned per-action datasets into one training set
data = pd.concat([warmup_df, combat_df, attack_df, respect_df, gongbu_df],
                 ignore_index=True)

# Train the KNN classifier.
# BUGFIX: the original re-created an *untrained* KNeighborsClassifier right
# after fitting, discarding the trained model; that redundant reassignment
# has been removed.
print('开始训练knn')
knn = KNeighborsClassifier(n_neighbors=neighbors, weights='distance', metric='manhattan')
knn.fit(data[FEATURE_COLUMNS], data['label'])
# --- KNN inference and visualization on a test video ---
# Open the test clip for frame-by-frame inference
cap = cv2.VideoCapture('test.mp4')
# Angle-calculation helper
def calculate_angle(a, b, c):
    """Return the angle ABC (at vertex b) in degrees, folded into [0, 180]."""
    ax, ay = a[0], a[1]
    bx, by = b[0], b[1]
    cx, cy = c[0], c[1]
    # Signed angle between vectors b->c and b->a
    radians = math.atan2(cy - by, cx - bx) - math.atan2(ay - by, ax - bx)
    degrees = abs(radians * 180.0 / math.pi)
    # Fold reflex angles back into [0, 180]
    return degrees if degrees <= 180.0 else 360.0 - degrees
# Angle-detection helper
def detect_angles(frame, points):
    """Compute the six limb angles from pose keypoints and draw each value
    onto *frame* at the corresponding middle joint.

    points: sequence of (x, y) keypoints indexed per the OpenPose skeleton
            (assumed — TODO confirm against the keypoint extractor).
    Returns the list of six angles.
    """
    # (outer, middle, inner) keypoint index triples:
    # shoulder/elbow/wrist and hip/knee/ankle chains
    triples = [[6, 5, 4], [5, 4, 3], [4, 3, 2], [12, 11, 10], [11, 10, 9], [10, 9, 8]]
    angles = []
    for outer, middle, inner in triples:
        value = calculate_angle(points[outer], points[middle], points[inner])
        angles.append(value)
        # Overlay the rounded angle at the middle joint of the triple
        cv2.putText(frame, str(int(value)), tuple(points[middle]),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    return angles
# Prediction helper: label id -> action name
LABEL_NAMES = {0: 'warm-up', 1: 'SHIZHAN POSE', 2: 'hit', 3: 'respect', 4: 'gongbu'}

def predict(action):
    """Classify one feature vector of joint angles with the trained KNN.

    action: flat sequence of angles, or any array reshapeable to (1, n).
    Returns the action name, or None for an unknown label (matching the
    original if/elif chain, which fell through to None).
    """
    # BUGFIX: the original wrapped *action* in another list before calling
    # knn.predict; when the caller passed an already-2D reshaped array this
    # produced a 3-D input and a ValueError. Reshape explicitly instead, so
    # both a flat list and a (1, n) array work.
    features = np.asarray(action, dtype=float).reshape(1, -1)
    label = knn.predict(features)[0]
    return LABEL_NAMES.get(label)
# Read video frames and classify the pose in each one
while True:
    ret, frame = cap.read()
    if not ret:
        break  # end of stream or read failure
    # Extract body keypoints via OpenPose (omitted in the original source).
    # NOTE(review): `points` is never assigned, so this loop raises NameError
    # on the first iteration until the OpenPose integration is restored here.
    # points = ...
    # Measure the six limb angles (also draws them onto the frame)
    angles = detect_angles(frame, points)
    # BUGFIX: the original reshaped the angles to a (1, -1) array and then
    # predict() wrapped that array in another list, handing the KNN a 3-D
    # input (ValueError). Pass the flat angle list instead.
    result = predict(angles)
    # Overlay the predicted action name
    cv2.putText(frame, result, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    cv2.imshow('frame', frame)
    # Quit on 'q'
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
# Source: https://www.cveoy.top/t/topic/gQ1c — copyright belongs to the author; do not repost or scrape.