Face Recognition with MediaPipe in Python
Introduction to MediaPipe
MediaPipe is an open-source framework developed by Google Research for deploying multimedia machine-learning pipelines. At Google, a number of major products, including Google Lens, ARCore, and Google Home, among others, have deeply integrated MediaPipe.
Installing MediaPipe
I am using Ubuntu 20.04 with Python 3.8; both packages can be installed directly from the command line with pip.
pip install opencv-contrib-python
pip install mediapipe
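As a quick sanity check (a minimal sketch; the version attributes are assumed to exist in recent releases of both packages), you can confirm the installation imports cleanly:

import cv2
import mediapipe as mp

# Print the installed versions to verify both packages import without errors
print(cv2.__version__)
print(mp.__version__)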
MediaPipe Face Recognition Python Code
The test video I used is the local MP4 clip referenced in the code below; you can also try videos in other formats.
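If you do not have a test video handy, a webcam can be used as the source instead (a small variation, assuming a camera is available at index 0):

cap = cv2.VideoCapture(0)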
import cv2
import mediapipe as mp
import time
cap = cv2.VideoCapture('./618c1d1b9d4a407096f5a287_lookV5-transcode.mp4')  # open the local test video
pTime = 0
mpDraw = mp.solutions.drawing_utils
mpFaceMesh = mp.solutions.face_mesh
faceMesh = mpFaceMesh.FaceMesh(max_num_faces=2)
drawSpec = mpDraw.DrawingSpec(thickness=1, circle_radius=0)
w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
fps = cap.get(cv2.CAP_PROP_FPS)
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('./618c1d1b9d4a407096f5a287_lookV5-transcode-save.mp4', fourcc, fps, (int(w), int(h)), True)
c = 0
i = 0
timeF = 3
while cap.isOpened():
    success, img = cap.read()
    if not success:  # stop when the video ends or a frame cannot be read
        break
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB input
    results = faceMesh.process(imgRGB)
    if results.multi_face_landmarks:
        for faceLms in results.multi_face_landmarks:
            mpDraw.draw_landmarks(img, faceLms, mpFaceMesh.FACEMESH_TESSELATION, drawSpec, drawSpec)
    cTime = time.time()
    fps = 1 / (cTime - pTime)  # instantaneous processing FPS
    pTime = cTime
    cv2.putText(img, 'ZRT face puppet, in realtime Unreal Engine 4 (4k @ 60fps)', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (52, 124, 90), 1)
    cv2.putText(img, f'FPS: {int(fps)}', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (52, 124, 90), 1)
    # out.write(img)
    c = c + 1
    if c % timeF == 0:  # save every timeF-th frame as a half-size PNG
        imgSmall = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
        cv2.imwrite(str(i) + '.png', imgSmall)
        i = i + 1
    cv2.imshow('Face Tracking', img)
    if (cv2.waitKey(5) & 0xFF) == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
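For reference, each entry in results.multi_face_landmarks exposes landmark coordinates normalized to [0, 1], which can be converted to pixel positions. The sketch below is a minimal example (the helper name and the landmark index are chosen only for illustration) of how you might do this inside the detection loop:

def landmark_pixels(faceLms, frame_w, frame_h):
    # Each landmark stores x/y normalized to [0, 1]; scale by the frame size
    # to get pixel coordinates. The z value is a relative depth estimate.
    return [(int(lm.x * frame_w), int(lm.y * frame_h)) for lm in faceLms.landmark]

# Example use inside the for-loop over results.multi_face_landmarks:
# points = landmark_pixels(faceLms, img.shape[1], img.shape[0])
# cv2.circle(img, points[1], 2, (0, 255, 0), -1)  # index 1 is near the nose tip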