import cv2
import math
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier

neighbors = 2  # number of nearest neighbours used by the KNN classifier

# The 12 joint-angle features shared by cleaning, training and prediction.
# Keeping the list in one place avoids the original bug where the std()
# column list omitted 'angle5_1', so outliers in that feature were silently
# never removed (NaN comparison is always False under pandas alignment).
ANGLE_COLS = ['angle1', 'angle2', 'angle3', 'angle4', 'angle5', 'angle5_1',
              'angle6', 'angle7', 'angle8', 'angle9', 'angle10', 'angle11']

# Load the per-action training datasets.
warmup_df = pd.read_csv('dataset_warm-up.csv')
combat_df = pd.read_csv('dataset_SHIZHAN POSE.csv')
attack_df = pd.read_csv('dataset_hit.csv')
respect_df = pd.read_csv('dataset_respect.csv')
gongbu_df = pd.read_csv('dataset_gongbu.csv')

# Data cleaning: within each action dataset, drop every sample where any
# feature deviates from that dataset's mean by more than 2 standard deviations.
print('数据清洗')
for df in [warmup_df, combat_df, attack_df, respect_df, gongbu_df]:
    feats = df[ANGLE_COLS]
    outlier_mask = (np.abs(feats - feats.mean()) > 2 * feats.std()).any(axis=1)
    df.drop(df[outlier_mask].index, inplace=True)

# Merge the cleaned per-action datasets into one training set.
data = pd.concat([warmup_df, combat_df, attack_df, respect_df, gongbu_df], ignore_index=True)

# Train the KNN classifier on the angle features.
print('开始训练knn')
knn = KNeighborsClassifier(n_neighbors=neighbors, weights='distance', metric='manhattan')
knn.fit(data[ANGLE_COLS], data['label'])

# Open the test video.
cap = cv2.VideoCapture('test_video.mp4')

def calculate_angle(a, b, c):
    """Return the angle at vertex *b* (in degrees, within [0, 180]).

    a, b, c: indexable (x, y) points; the angle is formed by the rays
    b->a and b->c.
    """
    ax, ay = a[0], a[1]
    bx, by = b[0], b[1]
    cx, cy = c[0], c[1]
    # Signed difference of the two ray directions, then folded to [0, 180].
    rad = math.atan2(cy - by, cx - bx) - math.atan2(ay - by, ax - bx)
    deg = np.abs(rad * 180.0 / np.pi)
    return 360 - deg if deg > 180.0 else deg

# Angle-extraction function
def detect_angles(frame):
    """Compute the 12 pose-angle features from one OpenPose keypoint row.

    frame: array-like whose first row holds 18 (x, y) joint coordinates
    flattened as [x0, y0, x1, y1, ...] in the joint order listed below.
    Returns a dict mapping feature names ('angle1'..'angle11' plus
    'angle5_1') to angles in degrees.
    """
    joint_names = (
        'right_shoulder', 'right_elbow', 'right_wrist',
        'left_shoulder', 'left_elbow', 'left_wrist',
        'right_hip', 'right_knee', 'right_ankle',
        'left_hip', 'left_knee', 'left_ankle',
        'neck', 'nose',
        'right_eye', 'left_eye', 'right_ear', 'left_ear',
    )
    row = frame[0]
    points = {name: (row[2 * i], row[2 * i + 1])
              for i, name in enumerate(joint_names)}

    # Each feature is the angle at the middle joint of its (a, b, c) triple.
    triples = {
        'angle1': ('right_shoulder', 'right_elbow', 'right_wrist'),
        'angle2': ('left_shoulder', 'left_elbow', 'left_wrist'),
        'angle3': ('right_shoulder', 'neck', 'left_shoulder'),
        'angle4': ('right_hip', 'right_knee', 'right_ankle'),
        'angle5': ('left_hip', 'left_knee', 'left_ankle'),
        'angle5_1': ('right_hip', 'left_hip', 'neck'),
        'angle6': ('right_shoulder', 'right_hip', 'right_knee'),
        'angle7': ('left_shoulder', 'left_hip', 'left_knee'),
        'angle8': ('neck', 'nose', 'right_eye'),
        'angle9': ('neck', 'nose', 'left_eye'),
        'angle10': ('right_shoulder', 'right_ear', 'right_eye'),
        'angle11': ('left_shoulder', 'left_ear', 'left_eye'),
    }
    return {name: calculate_angle(points[a], points[b], points[c])
            for name, (a, b, c) in triples.items()}

# Font used for on-frame annotations.
font = cv2.FONT_HERSHEY_SIMPLEX

# The keypoints come from a static .npy dump; load it once instead of
# re-reading the file on every frame (the original loaded it per frame).
# NOTE(review): a real pipeline would extract fresh keypoints per frame here.
key_points = np.load('key_points.npy', allow_pickle=True)

# Process video frames.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:  # end of stream or read failure
        break

    # Normalize the frame size for display.
    frame = cv2.resize(frame, (640, 480))

    # Compute the 12 angle features from the OpenPose keypoints.
    angles = detect_angles(key_points)

    # Predict the action. A single-row DataFrame keeps the feature names and
    # order consistent with the columns used to fit the classifier; the
    # original `knn.predict([angles])` passed a list containing a dict,
    # which is not a valid sklearn sample.
    sample = pd.DataFrame([angles])
    label = knn.predict(sample)[0]

    # Draw the predicted action and every angle (with its real feature name,
    # so 'angle5_1' and later keys are no longer mislabeled) on the frame.
    cv2.putText(frame, 'Action: ' + str(label), (10, 30), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
    for i, (name, angle) in enumerate(angles.items()):
        cv2.putText(frame, name + ': ' + str(int(angle)), (10, 60 + 30 * i), font, 1, (0, 255, 0), 2, cv2.LINE_AA)

    # Show the annotated frame.
    cv2.imshow('frame', frame)

    # Press 'q' to quit.
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

# Release resources.
cap.release()
cv2.destroyAllWindows()
# Action recognition system based on OpenPose and KNN.