# Import necessary libraries
import cv2
import mediapipe as mp
import numpy as np
# Function to calculate the angle between three points
def calculate_angle(a, b, c):
    """Return the angle at vertex *b* formed by rays b->a and b->c, in degrees.

    Each argument is a 2-element sequence of [x, y] coordinates. The
    result is normalized to the range [0, 180].
    """
    a = np.array(a)
    b = np.array(b)
    c = np.array(c)

    # Signed angle between the two rays, converted from radians to degrees.
    radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
    angle = np.abs(radians * 180.0 / np.pi)

    # Fold reflex angles back into [0, 180].
    if angle > 180.0:
        angle = 360 - angle

    return angle

# Setup mediapipe pose-estimation instance
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)

# Open video capture (path kept verbatim from the original script)
cap = cv2.VideoCapture('B:/人工智能实验/1击剑入门/7.转移进攻.mp4')

# Lunge counter state: reps counted so far and the current stage label
counter = 0
stage = None

while cap.isOpened():
    # Read frame from video capture
    ret, frame = cap.read()
    if not ret:
        break

    # Recolor image to RGB (MediaPipe expects RGB input)
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Make detection
    results = pose.process(image)

    # Recolor back to BGR for OpenCV rendering
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    # Extract landmarks; results.pose_landmarks is None when no pose is found,
    # so the attribute access below raises AttributeError in that case.
    try:
        landmarks = results.pose_landmarks.landmark

        # Coordinates of the right hip / knee / ankle, normalized to [0, 1].
        # (The original code called these shoulder/elbow/wrist, but the
        # landmarks actually used are the leg joints.)
        hip = [landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].x,
               landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].y]
        knee = [landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value].x,
                landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value].y]
        ankle = [landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].x,
                 landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].y]

        # Knee angle of the right leg
        angle = calculate_angle(hip, knee, ankle)

        # Visualize the angle next to the knee.
        # NOTE(review): the [640, 480] scale assumes that frame size —
        # use frame.shape[1], frame.shape[0] for arbitrary resolutions.
        cv2.putText(image, str(angle),
                    tuple(np.multiply(knee, [640, 480]).astype(int)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)

        # Lunge counter logic: a nearly straight leg (170-180 deg) arms the
        # "PRE" stage; bending into (100, 170) deg counts one lunge, once,
        # until the stage changes again.
        if 170 < angle < 180:
            stage = "PRE"

        if 100 < angle < 170:
            if stage != "Lunge":
                counter += 1
            stage = "Lunge"

    except AttributeError:
        # No pose detected in this frame — skip the measurement.
        pass

    # Render curl counter
    # Setup status box
    cv2.rectangle(image, (0, 0), (225, 73), (245, 117, 16), -1)

    # Rep data
    cv2.putText(image, 'REPS', (15, 12),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
    cv2.putText(image, str(counter), (10, 60),
                cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA)

    # Stage data
    cv2.putText(image, 'STAGE', (65, 12),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
    cv2.putText(image, stage, (60, 60),
                cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA)

    # Render detections (draw_landmarks is a no-op when pose_landmarks is None)
    mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                              mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=2),
                              mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2))

    cv2.imshow('Mediapipe Feed', image)

    # Quit on 'q'
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

# Release the capture and close all OpenCV windows.
# (Original line fused both statements and dropped the closing parenthesis.)
cap.release()
cv2.destroyAllWindows()

# (Removed: garbled single-line duplicate of the script above, residue from a web copy-paste.)

# Source attribution (translated from Chinese): original at https://www.cveoy.top/t/topic/eCfg — copyright belongs to the author; do not repost or scrape.