Question: Using MediaPipe, process images stored in different folders, where each folder represents a different action, and generate body skeleton angle data corresponding to each action.
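The script below works on a single video file rather than on folders of images. As a rough sketch of the folder-based variant the question describes, assuming a hypothetical layout such as actions/<action_name>/*.jpg and writing one CSV row of joint angles per image, something like the following could be used (folder names, file names, and the choice of joints are illustrative assumptions, not part of the original script):

import csv
import math
import os

import cv2
import mediapipe as mp

mp_pose = mp.solutions.pose


def joint_angle(a, b, c):
    # Angle at point b formed by points a-b-c, in degrees (signed atan2 difference).
    return math.degrees(math.atan2(c.y - b.y, c.x - b.x) -
                        math.atan2(a.y - b.y, a.x - b.x))


# Hypothetical layout: each sub-folder of 'actions' is one action label.
root = 'actions'
with mp_pose.Pose(static_image_mode=True, min_detection_confidence=0.5) as pose, \
        open('angles.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['action', 'image', 'right_elbow_angle', 'right_knee_angle'])
    for action in os.listdir(root):
        folder = os.path.join(root, action)
        if not os.path.isdir(folder):
            continue
        for name in os.listdir(folder):
            image = cv2.imread(os.path.join(folder, name))
            if image is None:
                continue
            results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            if not results.pose_landmarks:
                continue
            lm = results.pose_landmarks.landmark
            elbow = joint_angle(lm[mp_pose.PoseLandmark.RIGHT_SHOULDER],
                                lm[mp_pose.PoseLandmark.RIGHT_ELBOW],
                                lm[mp_pose.PoseLandmark.RIGHT_WRIST])
            knee = joint_angle(lm[mp_pose.PoseLandmark.RIGHT_HIP],
                               lm[mp_pose.PoseLandmark.RIGHT_KNEE],
                               lm[mp_pose.PoseLandmark.RIGHT_ANKLE])
            writer.writerow([action, name, elbow, knee])

The angle helper uses the same atan2-difference formula as the video-based script, which follows in full below.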
import cv2
import mediapipe as mp
import math

# Initialize the MediaPipe pose model
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

# Open the input video file
cap = cv2.VideoCapture('6.mp4')

# Get the frame rate and resolution of the input video
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Create the output video file
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('9_1.mp4', fourcc, fps, (width, height))

i = 0       # counter of consecutive frames without a recognized pose
poses = ""  # current pose label
a = 1       # fitting precision (scales the angle thresholds)

# Process every frame of the video
with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
    while cap.isOpened():
        # Read one frame
        ret, frame = cap.read()
        if not ret:
            break
        # Convert the frame to RGB
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Run pose detection
        results = pose.process(image)

        # Check whether a body was detected
        if results.pose_landmarks:
            # Draw the body skeleton
            mp_drawing.draw_landmarks(
                frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                landmark_drawing_spec=mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
                connection_drawing_spec=mp_drawing.DrawingSpec(color=(255, 0, 255), thickness=2, circle_radius=2))
            # Get the left shoulder, elbow, and wrist landmarks
            left_shoulder = results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_SHOULDER]
            left_elbow = results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_ELBOW]
            left_wrist = results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_WRIST]
            # Get the right shoulder, elbow, and wrist landmarks
            right_shoulder = results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_SHOULDER]
            right_elbow = results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_ELBOW]
            right_wrist = results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_WRIST]
            # Get the left hip, knee, and ankle landmarks
            left_hip = results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_HIP]
            left_knee = results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_KNEE]
            left_ankle = results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_ANKLE]
            # Get the right hip, knee, and ankle landmarks
            right_hip = results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_HIP]
            right_knee = results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_KNEE]
            right_ankle = results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_ANKLE]
            # Angle at the right ankle between the right wrist and the right knee
            # (signed atan2 difference, so values can fall outside -180..180 degrees)
            angle = math.degrees(math.atan2(right_wrist.y - right_ankle.y, right_wrist.x - right_ankle.x) -
                                 math.atan2(right_knee.y - right_ankle.y, right_knee.x - right_ankle.x))
            # Angle at the left elbow between the left wrist and the left shoulder (computed but not used below)
            angle1 = math.degrees(math.atan2(left_wrist.y - left_elbow.y, left_wrist.x - left_elbow.x) -
                                  math.atan2(left_shoulder.y - left_elbow.y, left_shoulder.x - left_elbow.x))
            # Angle at the left knee between the left ankle and the left hip
            angle_dl = math.degrees(math.atan2(left_ankle.y - left_knee.y, left_ankle.x - left_knee.x) -
                                    math.atan2(left_hip.y - left_knee.y, left_hip.x - left_knee.x))
            # Angle at the right knee between the right ankle and the right hip
            angle_dr = math.degrees(math.atan2(right_ankle.y - right_knee.y, right_ankle.x - right_knee.x) -
                                    math.atan2(right_hip.y - right_knee.y, right_hip.x - right_knee.x))
            # Angle at the right elbow between the right wrist and the right shoulder
            angle_tr = math.degrees(math.atan2(right_wrist.y - right_elbow.y, right_wrist.x - right_elbow.x) -
                                    math.atan2(right_shoulder.y - right_elbow.y, right_shoulder.x - right_elbow.x))
            # Map the angles to a pose label; a is the fitting-precision factor
            if (angle >= -16.5*a and angle <= -8/a) and (angle_dr > 179/a and angle_dr <= 186*a or angle_dl > 182/a and angle_dl <= 191*a):
                poses = "warm-up"
            elif (angle > -20*a and angle <= 5*a) and ((angle_dr > 130.5/a and angle_dr <= 155*a) or (angle_dr > 205/a and angle_dr <= 212*a)) and ((angle_tr > 239/a and angle_tr <= 260*a) or (angle_tr > 147/a and angle_tr <= 170*a)):
                poses = "combat pose"
            elif (angle > -20*a and angle <= 5*a) and ((angle_tr > -175*a and angle_tr <= -165/a) or (angle_tr > 115/a and angle_tr <= 135*a)):
                poses = "attack"
            elif (angle > -20*a and angle <= 5*a) and (angle_tr > -43*a and angle_tr <= -31/a or angle_tr > -5*a and angle_tr <= -1/a or angle_tr > 54/a and angle_tr <= 56*a):
                poses = "respect"
                i = -30  # hold the "respect" label longer before it is cleared
            else:
                # No pose matched: count the miss and clear the label after enough misses
                i = i + 1
                if i >= 60:
                    poses = ""
                    i = 0
                else:
                    i = i + 1
            # Show the angle values on the output frame
            cv2.putText(frame, "Angle: {:.2f}".format(angle), (5, 43),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            cv2.putText(frame, "Angle_tr: {:.2f}".format(angle_tr), (5, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (100, 15, 255), 2)
            cv2.putText(frame, "Angle_dr: {:.2f}".format(angle_dr), (5, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 100, 255), 2)
            cv2.putText(frame, poses, (600, 450),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (100, 225, 225), 2)
            cv2.putText(frame, poses, (5, 20),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (225, 100, 225), 2)

            # Draw the head (nose) keypoint
            nose = results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE]
            x, y = int(nose.x * width), int(nose.y * height)
            cv2.circle(frame, (x, y), 5, (255, 0, 0), -1)
        else:
            # No body detected in this frame
            cv2.putText(frame, "No body detected", (50, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

        # Write the frame to the output video
        out.write(frame)

        # Show the current frame
        cv2.imshow('MediaPipe Pose Detection press q exit', frame)
        # cv2.imshow('MediaPipe Pose Estimation', frame)

        # Press q to exit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

# Release resources
cap.release()
out.release()
cv2.destroyAllWindows()
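Note that the raw atan2 difference used above is a signed value that can fall anywhere in (-360, 360) degrees, which is why thresholds such as 186 or 239 appear in the classification. If a fixed 0-180 interior joint angle is preferred, a small helper like the one below could be used; it is a hypothetical addition, not part of the script above, and the thresholds would have to be re-derived if it were adopted:

def interior_angle(angle):
    # Map a signed atan2 difference (in degrees) to the interior joint angle in [0, 180].
    ang = abs(angle) % 360.0
    return 360.0 - ang if ang > 180.0 else ang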