import mediapipe as mp
import cv2
import os
import pandas as pd


# Save the pose estimation results for one action as a CSV file
def save_pose_results(pose_results, action_name, prefix):
    filename = f'{prefix}_{action_name}.csv'
    # One row per image: the class label plus x/y for each of 13 keypoints
    df = pd.DataFrame(pose_results, columns=['label', 'nose_x', 'nose_y', 'left_shoulder_x', 'left_shoulder_y',
                                              'right_shoulder_x', 'right_shoulder_y', 'left_elbow_x', 'left_elbow_y',
                                              'right_elbow_x', 'right_elbow_y', 'left_wrist_x', 'left_wrist_y',
                                              'right_wrist_x', 'right_wrist_y', 'left_hip_x', 'left_hip_y',
                                              'right_hip_x', 'right_hip_y', 'left_knee_x', 'left_knee_y',
                                              'right_knee_x', 'right_knee_y', 'left_ankle_x', 'left_ankle_y',
                                              'right_ankle_x', 'right_ankle_y'])
    df.to_csv(filename, index=False)
    print(f'{filename} saved successfully')

# Initialize MediaPipe drawing utilities and the Pose solution
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

# The 13 keypoints extracted per image; each contributes an x and a y value,
# in the same order as the column names in save_pose_results (13 * 2 = 26 features)
KEYPOINTS = [
    mp_pose.PoseLandmark.NOSE,
    mp_pose.PoseLandmark.LEFT_SHOULDER, mp_pose.PoseLandmark.RIGHT_SHOULDER,
    mp_pose.PoseLandmark.LEFT_ELBOW, mp_pose.PoseLandmark.RIGHT_ELBOW,
    mp_pose.PoseLandmark.LEFT_WRIST, mp_pose.PoseLandmark.RIGHT_WRIST,
    mp_pose.PoseLandmark.LEFT_HIP, mp_pose.PoseLandmark.RIGHT_HIP,
    mp_pose.PoseLandmark.LEFT_KNEE, mp_pose.PoseLandmark.RIGHT_KNEE,
    mp_pose.PoseLandmark.LEFT_ANKLE, mp_pose.PoseLandmark.RIGHT_ANKLE,
]

# Make sure the output directory for annotated images exists
os.makedirs('annotated_images', exist_ok=True)

# Create the pose estimator once, outside the loops; static_image_mode=True runs
# full detection on every image instead of treating the inputs as a video stream
with mp_pose.Pose(static_image_mode=True, min_detection_confidence=0.5) as pose:
    # Iterate over the action folders, one per class
    for folder_name in os.listdir('actions'):
        # Collect the pose estimation results for this action
        pose_results = []
        # Iterate over all images in the folder
        for filename in os.listdir(f'actions/{folder_name}'):
            # Read the image, skipping files OpenCV cannot decode
            image = cv2.imread(f'actions/{folder_name}/{filename}')
            if image is None:
                continue
            # Convert from BGR (OpenCV) to RGB (MediaPipe)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            # Run pose estimation
            results = pose.process(image)
            # Skip images in which no pose was detected
            if results.pose_landmarks is None:
                continue
            # Append one row: the class label followed by x and y of each keypoint
            row = [folder_name]
            for keypoint in KEYPOINTS:
                landmark = results.pose_landmarks.landmark[keypoint]
                row.extend([landmark.x, landmark.y])
            pose_results.append(row)
            # Draw the detected landmarks on a copy of the image
            annotated_image = image.copy()
            mp_drawing.draw_landmarks(annotated_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
            # Save the annotated image, prefixed with the folder name so that
            # identically named files from different actions do not overwrite each other
            cv2.imwrite(f'annotated_images/{folder_name}_{filename}',
                        cv2.cvtColor(annotated_image, cv2.COLOR_RGB2BGR))
        # Save this action's results as a CSV file
        save_pose_results(pose_results, folder_name, 'dataset')
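For completeness, here is a minimal sketch of how the resulting CSV files might be used to train the KNeighborsClassifier discussed below, assuming scikit-learn is installed; the glob pattern, the 80/20 split, and n_neighbors=5 are illustrative assumptions, not part of the original code:

import glob

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

# Concatenate the per-action CSV files written by save_pose_results
df = pd.concat((pd.read_csv(f) for f in glob.glob('dataset_*.csv')), ignore_index=True)
X = df.drop(columns=['label']).values  # 26 features: 13 keypoints * (x, y)
y = df['label'].values

# Hold out 20% of the rows for a quick sanity check
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
print('test accuracy:', knn.score(X_test, y_test))

Training on exactly these 26 columns is what makes the classifier expect 26 features at prediction time.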

Fixing the error 'X has 99 features, but KNeighborsClassifier is expecting 26 features as input.': this error means the number of features in the input passed to the trained KNeighborsClassifier does not match the number of features it was trained on. The code above extracts 13 keypoints with an x and a y coordinate each, i.e. 13 × 2 = 26 features per image. An input with 99 features most likely comes from flattening all 33 MediaPipe pose landmarks with their x, y, and z coordinates (33 × 3 = 99) instead of rebuilding the 26-feature vector used for training.

To fix it, make sure the feature extraction used at prediction time is identical to the one used at training time. Either re-train the model on the feature set you actually produce at inference, or trim/pad the extracted features so that their count matches what the model expects, as shown in the sketch below.
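As a concrete illustration, here is a minimal sketch of an inference-side feature extractor that reuses the KEYPOINTS list defined in the script above to rebuild exactly the 26-feature vector the model was trained on; the helper name extract_features is an assumption, not part of the original code:

import numpy as np

def extract_features(results):
    # Rebuild the training-time feature vector: x and y of the 13 keypoints
    # in KEYPOINTS, in the same order as the CSV columns.
    # Flattening all 33 MediaPipe landmarks with their x, y, and z coordinates
    # would instead yield 33 * 3 = 99 features and trigger the error above.
    row = []
    for keypoint in KEYPOINTS:
        landmark = results.pose_landmarks.landmark[keypoint]
        row.extend([landmark.x, landmark.y])
    return np.array(row).reshape(1, -1)  # shape (1, 26), ready for knn.predict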
MediaPipe Pose Estimation Result Extraction and Saving: Converting Action Images into a CSV Dataset
