Python MediaPipe: replace the pose landmark line drawing with custom image drawing

Question

I am streaming video from a webcam and would like to use MediaPipe to estimate facial landmarks and then replace/blur the user's face (with a custom yellow image), the user's eyes (with a heart (❤) emoji), and the user's lips (with a drawing of a smile).

My current code below only draws the landmark lines.

Could someone help me change the code below to achieve the described result?

Thanks in advance!

Current image result:

Desired image result:

Current code:


import cv2
import mediapipe as mp

# initialize the pose estimator
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)

cap = cv2.VideoCapture(0)
while cap.isOpened():
    # read frame
    _, frame = cap.read()
    try:
        # resize the frame for portrait video
        # frame = cv2.resize(frame, (350, 600))
        # convert to RGB
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # process the frame for pose detection
        pose_results = pose.process(frame_rgb)
        # print(pose_results.pose_landmarks)

        # draw skeleton on the frame
        mp_drawing.draw_landmarks(frame, pose_results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
        # display the frame
        cv2.imshow('Output', frame)
    except:
        break

    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

python opencv mediapipe
1 Answer

A heart can be drawn with the parametric equations found here: https://www.quora.com/How-can-I-draw-a-heart-using-Python
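For reference, the curve that draw_heart below implements is, for t running from 0 to 2π:

x(t) = 16 * sin(t)^3
y(t) = 13 * cos(t) - 5 * cos(2t) - 2 * cos(3t) - cos(4t)

The size argument just scales both coordinates, and the curve is shifted so that its origin sits on the eye landmark (with y flipped, since image rows grow downward).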

import cv2
import mediapipe as mp
import numpy as np


def draw_heart(image, center, size, color):
    # parametric heart curve; `size` scales it in pixels
    t = np.arange(0, 2 * np.pi, 0.1)
    x = size * (16 * np.sin(t) ** 3)
    y = size * (13 * np.cos(t) - 5 * np.cos(2 * t) - 2 * np.cos(3 * t) - np.cos(4 * t))
    # shift to the requested center; flip y because image rows grow downward
    x = (x + center[0]).astype(int)
    y = (center[1] - y).astype(int)
    # connect consecutive points of the curve with line segments
    for i in range(len(x) - 1):
        cv2.line(image, (x[i], y[i]), (x[i + 1], y[i + 1]), color, 2)


def landmark_to_image_coords(landmark, frame):
    # convert normalized landmark coordinates to pixel coordinates
    return (int(landmark.x * frame.shape[1]), int(landmark.y * frame.shape[0]))


mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)

cap = cv2.VideoCapture(0)
while cap.isOpened():
    _, frame = cap.read()
    try:
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        pose_results = pose.process(frame_rgb)
        # mp_drawing.draw_landmarks(frame, pose_results.pose_landmarks, mp_pose.POSE_CONNECTIONS)

        # MediaPipe Pose landmark indices: 0 = nose, 1 = left eye (inner),
        # 5 = right eye, 9 = mouth left, 10 = mouth right
        nose = pose_results.pose_landmarks.landmark[0]
        left_eye = pose_results.pose_landmarks.landmark[1]
        right_eye = pose_results.pose_landmarks.landmark[5]
        mouth_left = pose_results.pose_landmarks.landmark[9]
        mouth_right = pose_results.pose_landmarks.landmark[10]

        # cover the face with a filled yellow circle sized from the nose-to-eye distance
        euclidean_distance_times_2 = 2 * ((nose.x - left_eye.x) ** 2 + (nose.y - left_eye.y) ** 2) ** 0.5
        cv2.circle(frame, (int(nose.x * frame.shape[1]), int(nose.y * frame.shape[0])),
                   int(euclidean_distance_times_2 * frame.shape[1]), (0, 255, 255), -1)

        red = (0, 0, 255)
        heart_size = 1

        # draw a red heart over each eye
        left_eye_coords = landmark_to_image_coords(left_eye, frame)
        right_eye_coords = landmark_to_image_coords(right_eye, frame)
        draw_heart(frame, left_eye_coords, heart_size, red)
        draw_heart(frame, right_eye_coords, heart_size, red)

        # draw a simple smile: a line between the mouth corners
        # plus a short diagonal segment at each corner
        mouth_left_c = landmark_to_image_coords(mouth_left, frame)
        mouth_right_c = landmark_to_image_coords(mouth_right, frame)
        cv2.line(frame, mouth_left_c, mouth_right_c, red, 2)
        cv2.line(frame, mouth_left_c, (mouth_left_c[0] - 20, mouth_left_c[1] + 20), (0, 255, 0), 2)
        cv2.line(frame, mouth_right_c, (mouth_right_c[0] + 20, mouth_right_c[1] + 20), (0, 255, 0), 2)

        cv2.imshow('Output', frame)
    except:
        break

    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
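
The snippet above draws the replacements with OpenCV primitives. If you would rather paste an actual picture (for example the custom yellow image, or a heart emoji exported as a PNG with transparency), one option is to alpha-blend that image onto the frame at the landmark position. Below is a minimal sketch of that idea; the helper overlay_image and the file name yellow_face.png are hypothetical, not part of MediaPipe, and the PNG is assumed to have an alpha channel:

import cv2
import numpy as np

def overlay_image(frame, overlay_bgra, center, target_width):
    # resize the overlay so its width matches target_width, keeping the aspect ratio
    scale = target_width / overlay_bgra.shape[1]
    overlay = cv2.resize(overlay_bgra, None, fx=scale, fy=scale)
    h, w = overlay.shape[:2]
    # clamp the paste region to the frame borders
    x0 = max(center[0] - w // 2, 0)
    y0 = max(center[1] - h // 2, 0)
    x1 = min(x0 + w, frame.shape[1])
    y1 = min(y0 + h, frame.shape[0])
    roi = frame[y0:y1, x0:x1]
    overlay = overlay[: y1 - y0, : x1 - x0]
    # use the alpha channel as a per-pixel blend weight
    alpha = overlay[:, :, 3:4].astype(float) / 255.0
    roi[:] = (alpha * overlay[:, :, :3] + (1 - alpha) * roi).astype(np.uint8)

# hypothetical usage inside the loop, after the nose landmark is available:
# face_img = cv2.imread('yellow_face.png', cv2.IMREAD_UNCHANGED)  # must keep its alpha channel
# overlay_image(frame, face_img, landmark_to_image_coords(nose, frame),
#               int(2 * euclidean_distance_times_2 * frame.shape[1]))

Loading the overlay once before the capture loop (rather than per frame) keeps the stream responsive.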

print("aa")
print("ee")