Commit 0f30dfa

Merge pull request csubhasundar#183 from liuHongJie1217/main

Create AI_face_landmark Demo

2 parents ea21ab5 + a60a478

6 files changed: +474 -0 lines changed
Lines changed: 281 additions & 0 deletions
@@ -0,0 +1,281 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "21925825",
   "metadata": {},
   "outputs": [],
   "source": [
    "import mediapipe as mp\n",
    "import cv2\n",
    "import time\n",
    "from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "9072f485",
   "metadata": {},
   "outputs": [],
   "source": [
    "# import the pose solution\n",
    "mp_pose = mp.solutions.pose\n",
    "\n",
    "# import the drawing utilities\n",
    "mp_drawing = mp.solutions.drawing_utils\n",
    "\n",
    "# load the model\n",
    "pose = mp_pose.Pose(static_image_mode=False,       # still images or continuous video frames\n",
    "                    model_complexity=2,            # keypoint model: 0 is fast but less accurate, 2 is accurate but slow, 1 is in between\n",
    "                    smooth_landmarks=True,         # whether to smooth keypoints across frames\n",
    "                    min_detection_confidence=0.5,  # detection confidence threshold\n",
    "                    min_tracking_confidence=0.5)   # tracking confidence threshold"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "edf77017",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO: Created TensorFlow Lite XNNPACK delegate for CPU.\n"
     ]
    }
   ],
   "source": [
    "# Custom drawing of the 33 body keypoints\n",
    "def process_frame(img):\n",
    "    start_time = time.time()\n",
    "    \n",
    "    h, w = img.shape[0], img.shape[1]\n",
    "    \n",
    "    # MediaPipe expects RGB input, while OpenCV reads BGR\n",
    "    img_RGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
    "    \n",
    "    results = pose.process(img_RGB)\n",
    "    \n",
    "    if results.pose_landmarks:\n",
    "        mp_drawing.draw_landmarks(img, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)\n",
    "        \n",
    "        for i in range(33):  # traverse the 33 keypoints\n",
    "            # convert normalized coordinates to pixel coordinates\n",
    "            cx = int(results.pose_landmarks.landmark[i].x * w)\n",
    "            cy = int(results.pose_landmarks.landmark[i].y * h)\n",
    "            cz = results.pose_landmarks.landmark[i].z\n",
    "\n",
    "            radius = 5\n",
    "            if i == 0:  # tip of the nose\n",
    "                img = cv2.circle(img, (cx, cy), radius, (0, 0, 255), -1)\n",
    "            elif i in [11, 12]:  # shoulders\n",
    "                img = cv2.circle(img, (cx, cy), radius, (223, 155, 6), -1)\n",
    "            elif i in [23, 24]:  # hip joints\n",
    "                img = cv2.circle(img, (cx, cy), radius, (1, 240, 255), -1)\n",
    "            elif i in [13, 14]:  # elbows\n",
    "                img = cv2.circle(img, (cx, cy), radius, (140, 47, 240), -1)\n",
    "            elif i in [25, 26]:  # knees\n",
    "                img = cv2.circle(img, (cx, cy), radius, (0, 0, 255), -1)\n",
    "            elif i in [15, 16, 27, 28]:  # wrists and ankles (note: 27/28 match here, before the foot branches below)\n",
    "                img = cv2.circle(img, (cx, cy), radius, (223, 155, 60), -1)\n",
    "            elif i in [17, 19, 21]:  # left hand\n",
    "                img = cv2.circle(img, (cx, cy), radius, (94, 218, 121), -1)\n",
    "            elif i in [18, 20, 22]:  # right hand\n",
    "                img = cv2.circle(img, (cx, cy), radius, (16, 144, 247), -1)\n",
    "            elif i in [27, 29, 31]:  # left foot\n",
    "                img = cv2.circle(img, (cx, cy), radius, (29, 123, 243), -1)\n",
    "            elif i in [28, 30, 32]:  # right foot\n",
    "                img = cv2.circle(img, (cx, cy), radius, (193, 182, 255), -1)\n",
    "            elif i in [9, 10]:  # mouth\n",
    "                img = cv2.circle(img, (cx, cy), radius, (205, 235, 255), -1)\n",
    "            elif i in [1, 2, 3, 4, 5, 6, 7, 8]:  # eyes and cheeks\n",
    "                img = cv2.circle(img, (cx, cy), radius, (94, 218, 121), -1)\n",
    "            else:  # other keypoints\n",
    "                img = cv2.circle(img, (cx, cy), radius, (0, 255, 0), -1)\n",
    "\n",
    "    else:\n",
    "        scaler = 1\n",
    "        failure_str = 'No Person'\n",
    "        img = cv2.putText(img, failure_str, (25 * scaler, 100 * scaler), cv2.FONT_HERSHEY_SIMPLEX, 1.25 * scaler, (255, 0, 0))\n",
    "    \n",
    "    end_time = time.time()\n",
    "    FPS = 1/(end_time - start_time)\n",
    "    \n",
    "    scaler = 1\n",
    "    img = cv2.putText(img, 'FPS ' + str(int(FPS)), (25 * scaler, 50 * scaler), cv2.FONT_HERSHEY_SIMPLEX, 1.25 * scaler, (223, 155, 6))\n",
    "    \n",
    "    return img"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "8c1743d8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Template for processing a video frame by frame\n",
    "def generate_video(input_path):\n",
    "    filehead = input_path.split('/')[-1]\n",
    "    output_path = \"out-\" + filehead\n",
    "    \n",
    "    print('Video starts processing', input_path)\n",
    "    \n",
    "    # Count the total number of frames in the video\n",
    "    cap = cv2.VideoCapture(input_path)\n",
    "    frame_count = 0\n",
    "    while cap.isOpened():\n",
    "        success, frame = cap.read()\n",
    "        frame_count += 1  # also counts the final failed read, hence the -1 in the tqdm total below\n",
    "        if not success:\n",
    "            break\n",
    "    cap.release()\n",
    "    print('The total number of video frames is', frame_count)\n",
    "    \n",
    "    cap = cv2.VideoCapture(input_path)\n",
    "    frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n",
    "\n",
    "    # fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))\n",
    "    # fourcc = cv2.VideoWriter_fourcc(*'XVID')\n",
    "    fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n",
    "    fps = cap.get(cv2.CAP_PROP_FPS)\n",
    "\n",
    "    out = cv2.VideoWriter(output_path, fourcc, fps, (int(frame_size[0]), int(frame_size[1])))\n",
    "    \n",
    "    # Bind the progress bar to the total number of frames\n",
    "    with tqdm(total=frame_count-1) as pbar:\n",
    "        try:\n",
    "            while cap.isOpened():\n",
    "                success, frame = cap.read()\n",
    "                if not success:\n",
    "                    break\n",
    "\n",
    "                # process the frame\n",
    "                try:\n",
    "                    frame = process_frame(frame)\n",
    "                except Exception as e:\n",
    "                    print('error:', e)\n",
    "\n",
    "                # cv2.imshow('Video Processing', frame)\n",
    "                out.write(frame)\n",
    "\n",
    "                # the progress bar advances one frame\n",
    "                pbar.update(1)\n",
    "\n",
    "                # if cv2.waitKey(1) & 0xFF == ord('q'):\n",
    "                #     break\n",
    "        except:\n",
    "            print('Interrupted')\n",
    "\n",
    "    cv2.destroyAllWindows()\n",
    "    out.release()\n",
    "    cap.release()\n",
    "    print('Video saved', output_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "d779d98d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Video starts processing material/material.mp4\n",
      "The total number of video frames is 527\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 526/526 [00:46<00:00, 11.29it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Video saved out-material.mp4\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "generate_video(input_path=\"material/material.mp4\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  },
  "vscode": {
   "interpreter": {
    "hash": "f7d0801bc5f7331f7f6fbbaeb6dd0f557ef8e98c90abd4bdf7e3dceee9e384d0"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
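
The notebook above only processes a saved video file. As a complement, here is a minimal sketch of reusing its process_frame function on a live webcam feed; the camera index 0 and the window name are assumptions, not part of the original demo:

```python
# Sketch: run the notebook's process_frame() on a live webcam stream.
# Assumes the notebook cells above (imports, pose model, process_frame)
# have already been executed; camera index 0 may differ on your machine.
import cv2

cap = cv2.VideoCapture(0)  # open the default camera (assumption)
while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break
    frame = process_frame(frame)           # draw landmarks and FPS overlay
    cv2.imshow('Pose Demo', frame)         # hypothetical window name
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break

cap.release()
cv2.destroyAllWindows()
```
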
Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
# Machine Learning
https://en.wikipedia.org/wiki/Machine_learning

# MediaPipe
https://google.github.io/mediapipe/

# Development environment
- Python version: 3.7.6
- conda version: 4.10.3
- Install Jupyter Notebook with Anaconda: conda install jupyter notebook
- Dependency library versions
  - cv2 (OpenCV): 4.5.4-dev
  - mediapipe: pip install mediapipe
  - tqdm: pip install tqdm
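
To confirm that your installed versions match the ones above, a quick check (a minimal sketch; it assumes each library exposes a `__version__` attribute):

```python
# Sketch: print the versions of this demo's dependencies.
import sys
import cv2
import mediapipe
import tqdm

print('python   :', sys.version.split()[0])
print('cv2      :', cv2.__version__)
print('mediapipe:', mediapipe.__version__)
print('tqdm     :', tqdm.__version__)
```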

# Overview
-> This example uses MediaPipe, Google's open-source machine learning framework, to experiment with human pose landmark detection.

-> The material folder provides the test clip (material.mp4) and the result of running it through the model (out-material.mp4). You can process more interesting scenes by changing the material path in the code.

-> The material folder also provides a reference for the keypoints of the human body. You can modify the keypoint indices in the process_frame function to highlight the body parts you want to focus on, as in the sketch below.
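
For illustration, a minimal sketch of drawing only a chosen subset of keypoints; the indices picked here (shoulders and hips) are just an example, and draw_selected is a hypothetical helper, not part of the notebook:

```python
# Sketch: highlight only selected pose keypoints instead of all 33.
import cv2

KEYPOINTS_OF_INTEREST = {11, 12, 23, 24}  # shoulders (11, 12) and hips (23, 24)

def draw_selected(img, pose_landmarks):
    h, w = img.shape[0], img.shape[1]
    for i, lm in enumerate(pose_landmarks.landmark):
        if i in KEYPOINTS_OF_INTEREST:
            cx, cy = int(lm.x * w), int(lm.y * h)  # normalized -> pixel coordinates
            img = cv2.circle(img, (cx, cy), 5, (0, 255, 0), -1)
    return img
```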

-> The project is provided as both .ipynb and .py files; it is recommended to open the .ipynb with Jupyter Notebook.

-> When running the .py file, change the material path to an absolute path. On macOS, security and privacy restrictions may prevent reading the material file directly from the current directory, so put the file on the desktop and use its absolute path in the code (see the example after this list).

-> You can edit the project with Jupyter Notebook or VS Code.
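
For example (a sketch; the path below is a placeholder to replace with your own):

```python
# Sketch: calling the demo from the .py file with an absolute path,
# as recommended for macOS. The path below is a placeholder.
generate_video(input_path='/Users/yourname/Desktop/material.mp4')
```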
