Skip to content

Commit 73cfeba

Browse files
authored
Merge pull request #125 from mciwing/fixed_facerecog
changed package from face_recognition to deepface
2 parents 21ac378 + be2d4d2 commit 73cfeba

File tree

5 files changed

+42
-79
lines changed

5 files changed

+42
-79
lines changed

docs/assets/yolo/obama.jpg

273 KB
Loading

docs/assets/yolo/obama_out.jpg

295 KB
Loading

docs/assets/yolo/trump2.jpg

-66.4 KB
Binary file not shown.

docs/assets/yolo/trump_out.jpg

-90.9 KB
Binary file not shown.

docs/yolo/index.md

+42-79
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
To follow along, we recommend setting up a new project folder with a Jupyter notebook. Additionally, create a new [virtual environment ](../python/packages.md#virtual-environments) and **activate** it. [Install the required packages](../python/packages.md#installing-packages):
99

1010
```bash
11-
pip install ultralytics opencv-python pytesseract face-recognition
11+
pip install ultralytics opencv-python pytesseract deepface tf-keras
1212
```
1313

1414
Your project structure should look like this:
@@ -306,7 +306,8 @@ Recognizing and extracting printed or handwritten text from images, enabling mac
306306
</div>
307307

308308
??? code "Code"
309-
309+
???+ warning "Warning"
310+
To run the code, you need to install tesseract on your PC. This can be a tricky process, especially on macOS. Therefore, it is okay to skip this example if you want.
310311
``` py
311312
# Need to install tesseract on your PC https://www.nutrient.io/blog/how-to-use-tesseract-ocr-in-python/
312313
from PIL import Image
@@ -315,7 +316,7 @@ Recognizing and extracting printed or handwritten text from images, enabling mac
315316
print(pytesseract.image_to_string(Image.open('scan.png')))
316317
```
317318

318-
#### Facial Recognition
319+
#### Facial Recognition and Analysis
319320
Identifying individuals based on their facial features and recognizing various facial expressions.
320321

321322
???+ example "Example: Facial Recognition"
@@ -326,92 +327,54 @@ Identifying individuals based on their facial features and recognizing various f
326327

327328
---
328329

329-
<figure markdown="span"> ![Input](../assets/yolo/trump2.jpg){width=100% } </figure>
330+
<figure markdown="span"> ![Input](../assets/yolo/obama.jpg){width=100% } </figure>
330331

331332
- __Output__
332333

333334
---
334335

335-
<figure markdown="span"> ![Input](../assets/yolo/trump_out.jpg){width=100% } </figure>
336+
<figure markdown="span"> ![Input](../assets/yolo/obama_out.jpg){width=100% } </figure>
336337

337338
</div>
338-
(Source: <a href="https://unsplash.com/de/@libraryofcongress?utm_content=creditCopyText&utm_medium=referral&utm_source=unsplash">Library of Congress</a> on <a href="https://unsplash.com/de/fotos/prasident-donald-trump-jPN_oglAjOU?utm_content=creditCopyText&utm_medium=referral&utm_source=unsplash">Unsplash</a>)
339+
(Source: <a>Wikipedia</a>)
339340

340341
??? code "Code"
341342

342343
``` py
343-
# You need to install cmake on your PC first
344-
# https://github.com/ageitgey/face_recognition?tab=readme-ov-file
345-
346-
import face_recognition
344+
# Load Packages
347345
import cv2
348-
import numpy as np
349-
350-
# Load a sample picture and learn how to recognize it.
351-
obama_image = face_recognition.load_image_file("obama.jpg")
352-
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
353-
354-
# Load a sample picture and learn how to recognize it.
355-
trump_image = face_recognition.load_image_file("trump.jpg")
356-
trump_face_encoding = face_recognition.face_encodings(trump_image)[0]
357-
358-
# Load a second sample picture and learn how to recognize it.
359-
biden_image = face_recognition.load_image_file("biden.jpg")
360-
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
361-
362-
# Create arrays of known face encodings and their names
363-
known_face_encodings = [
364-
obama_face_encoding,
365-
trump_face_encoding,
366-
biden_face_encoding
367-
]
368-
known_face_names = [
369-
"Barack Obama",
370-
"Donald Trump",
371-
"Joe Biden"
372-
]
373-
374-
# Initialize some variables
375-
face_locations = []
376-
face_encodings = []
377-
face_names = []
378-
process_this_frame = True
379-
380-
rgb_small_frame = face_recognition.load_image_file("trump2.jpg")
381-
382-
face_locations = face_recognition.face_locations(rgb_small_frame)
383-
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
384-
385-
face_names = []
386-
for face_encoding in face_encodings:
387-
# See if the face is a match for the known face(s)
388-
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
389-
name = "Unknown"
390-
391-
# Or instead, use the known face with the smallest distance to the new face
392-
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
393-
best_match_index = np.argmin(face_distances)
394-
if matches[best_match_index]:
395-
name = known_face_names[best_match_index]
396-
397-
face_names.append(name)
398-
399-
# Display the results
400-
for (top, right, bottom, left), name in zip(face_locations, face_names):
401-
402-
# Draw a box around the face
403-
cv2.rectangle(rgb_small_frame, (left, top), (right, bottom), (0, 0, 255), 2)
404-
405-
# Draw a label with a name below the face
406-
cv2.rectangle(rgb_small_frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
407-
font = cv2.FONT_HERSHEY_DUPLEX
408-
cv2.putText(rgb_small_frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
409-
410-
# Display the resulting image
411-
cv2.imshow('Video', cv2.cvtColor(rgb_small_frame, cv2.COLOR_BGR2RGB))
412-
cv2.waitKey(0)
413-
cv2.destroyAllWindows()
414-
cv2.imwrite('trump_out.jpg', cv2.cvtColor(rgb_small_frame, cv2.COLOR_BGR2RGB))
346+
import matplotlib.pyplot as plt
347+
from deepface.modules import streaming # Corrected import path
348+
from deepface import DeepFace
349+
350+
# Load Image
351+
img_path = "pic_cv_approaches/obama.jpg"
352+
img = cv2.imread(img_path)
353+
raw_img = img.copy()
354+
355+
# Analyze Image
356+
demographies = DeepFace.analyze(img_path=img_path, actions=("age", "gender", "emotion"))
357+
demography = demographies[0]
358+
359+
# Get Region of Interest
360+
x = demography["region"]["x"]
361+
y = demography["region"]["y"]
362+
w = demography["region"]["w"]
363+
h = demography["region"]["h"]
364+
365+
# Overlay Emotion
366+
img = streaming.overlay_emotion(img=img, emotion_probas=demography["emotion"], x=x, y=y, w=w, h=h)
367+
368+
# Overlay Age and Gender
369+
img = streaming.overlay_age_gender(img=img, apparent_age=demography["age"], gender=demography["dominant_gender"][0:1], x=x, y=y, w=w, h=h)
370+
371+
# Display Image
372+
plt.imshow(img[:, :, ::-1])
373+
plt.axis('off')
374+
plt.show()
375+
376+
# Save the image with overlays
377+
cv2.imwrite("obama_out.jpg", img)
415378
```
416379

417380
#### Pose Estimation
@@ -514,9 +477,9 @@ Computer vision has a wide range of applications across various industries.
514477
When we look at the world, our eyes receive light reflected from objects. Similarly, cameras capture light to create images.
515478

516479
<figure markdown="span">
517-
<img src="https://www.neg.co.jp/en/assets/img/rd/topics/cover-glass_03.png" style="width: 100%;">
480+
<img src="https://miro.medium.com/v2/resize:fit:640/format:webp/1*ZjzTZ5UpYMCu3qAejEi55g.png" style="width: 100%;">
518481
<figcaption style="text-align: center;">
519-
Camera sensor prinicpal (Source: <a href="https://www.neg.co.jp/en/rd/topics/product-cover-glass/">Neg</a>)
482+
Camera sensor principal (Source: <a href="https://medium.com/@kekreaditya/most-important-9-factors-while-choosing-fpv-camera-10a5e2dc2382">Medium</a>)
520483
</figcaption>
521484
</figure>
522485

0 commit comments

Comments
 (0)