【转载】用 python 在 blender 中画出 mediapipe 探测到的 3D 手指信息

384 阅读1分钟

原文链接

用 python 在 blender 中画出 mediapipe 探测到的 3D 手指信息

简介

来源:WX-GZH :python 编程小乐园

利用 mediapipe 获取手指中各点的位置信息,将位置信息转化为 blender 中的坐标,用 python 脚本将其三维信息在 blender 中画出

源码

import bpy
import cv2
import mediapipe as mp
import numpy as np

# Remove every existing mesh object so the scene contains only the
# landmark spheres created below.
# NOTE: iterate over a snapshot (list) — removing objects while
# iterating bpy.data.objects directly mutates the collection and can
# skip entries.
for obj in list(bpy.data.objects):
    if obj.type == 'MESH':
        bpy.data.objects.remove(obj)

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
# MediaPipe landmark coordinates are normalized to [0, 1]; scale them
# up tenfold so the points are spread out enough to see in Blender.
factor = 10

# static_image_mode=True: run full detection on a single still image
# (no video-stream tracking).
with mp_hands.Hands(
    static_image_mode=True,
    max_num_hands=1,
    min_detection_confidence=0.6) as hands:

    image = cv2.imread('hand.jpg')
    # Mirror horizontally so the drawn hand matches the viewer's view.
    image = cv2.flip(image, 1)
    # OpenCV loads BGR; MediaPipe expects RGB.
    results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    if results.multi_hand_landmarks:
        annotated_image = image.copy()
        for hand_landmarks in results.multi_hand_landmarks:
            # One small UV sphere per landmark, named 'p0', 'p1', ...
            for i, lm in enumerate(hand_landmarks.landmark):
                bpy.ops.mesh.primitive_uv_sphere_add(location=(0, 0, 0))
                bpy.context.active_object.name = 'p' + str(i)
                bpy.ops.transform.resize(value=(0.15, 0.15, 0.15))
                # Move the sphere to the scaled landmark position.
                bpy.ops.transform.translate(value=(lm.x*factor, lm.y*factor, lm.z*factor))
                print(i, lm.z)

# Highlight landmark p6 (index-finger PIP joint in MediaPipe's hand
# model — TODO confirm) by selecting it, making it the active object,
# and enlarging it.
ob = bpy.data.objects.get('p6')
if ob:
    bpy.ops.object.select_all(action='DESELECT')
    ob.select_set(True)
    bpy.context.view_layer.objects.active = ob
    ob.scale = (0.4, 0.4, 0.4)

解析

import bpy
import cv2
import mediapipe as mp
import numpy as np

导入 blender 依赖包 bpy

# Remove every existing mesh object so the scene starts empty.
# Iterate over a snapshot: removing from bpy.data.objects while
# iterating it directly mutates the collection and can skip entries.
for obj in list(bpy.data.objects):
    if obj.type == 'MESH':
        bpy.data.objects.remove(obj)

删除画布中原有的物体

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
# Scale factor: MediaPipe landmarks are normalized to [0, 1]; multiply
# by 10 so the points are visible in Blender. (Fixed: this excerpt read
# -10, but the full listing and the explanation both use +10, i.e.
# magnify tenfold.)
factor = 10

实例化 mediapipe 对象。factor = 10 将 mediapipe 给出的位置信息放大十倍,有利于在 blender 中显示

# Run hand detection once on a still image: static_image_mode=True,
# at most one hand, detections below 0.6 confidence discarded.
with mp_hands.Hands(
    static_image_mode=True,
    max_num_hands=1,
    min_detection_confidence=0.6) as hands:

    image = cv2.imread('hand.jpg')
    # Mirror horizontally so the drawn hand matches the viewer's view.
    image = cv2.flip(image, 1)
    # OpenCV loads BGR; MediaPipe expects RGB.
    results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

mediapipe 识别手指信息

# If at least one hand was detected, draw one sphere per landmark.
if results.multi_hand_landmarks:
        annotated_image = image.copy()
        for hand_landmarks in results.multi_hand_landmarks:
            # One small UV sphere per landmark, named 'p' + index.
            for i,lm in enumerate(hand_landmarks.landmark):
                bpy.ops.mesh.primitive_uv_sphere_add(location=(0,0,0))
                bpy.context.active_object.name = 'p'+str(i)
                bpy.ops.transform.resize(value=(0.15, 0.15, 0.15))
                # Move the sphere to the scaled landmark position.
                bpy.ops.transform.translate(value=(lm.x*factor, lm.y*factor, lm.z*factor))
                print(i,lm.z)

利用识别到的位置信息在 blender 中作图。bpy.context.active_object.name = 'p'+str(i) 为每个点设置名称。

bpy.ops.transform.resize(value=(0.15, 0.15, 0.15)) 物体大小设置

bpy.ops.transform.translate(value=(lm.x*factor, lm.y*factor, lm.z*factor)) 物体坐标设置

# Look up the sphere created for landmark 6 by name; None if absent.
ob = bpy.data.objects.get('p6')
if ob:
#    ob.location = (5,2,3)
    # Deselect everything, then make p6 the sole selected and active
    # object before enlarging it.
    bpy.ops.object.select_all(action='DESELECT')
    ob.select_set(True)
    bpy.context.view_layer.objects.active = ob
    ob.scale=(0.4,0.4,0.4)

获取名为 p6 的点对象,并对其进行坐标、大小设置

效果