Justin Grammens committed · commit b0fba4a · parent 5dae289
updated to add the race, gender and image segmentation
app.py CHANGED
@@ -62,7 +62,7 @@ def classify_hair_color(image):
     # Run the pipeline on the uploaded image
     output = pipe(image)
 
-    print("Pipeline output for
+    print("Pipeline output for hair color:", output)
     # Format the output to be compatible with gr.outputs.Label
     formatted_output = {item['label']: item['score'] for item in output}
 
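For reference, a transformers image-classification pipeline returns a list of {'label', 'score'} dicts, while gr.Label expects a label-to-confidence mapping; the dict comprehension above does that reshaping. A minimal sketch (the checkpoint name here is illustrative, not the app's):

from transformers import pipeline

# Any image-classification checkpoint works; this one is illustrative.
pipe = pipeline("image-classification", model="microsoft/resnet-50")
output = pipe("face.jpg")
# e.g. [{'label': 'tabby', 'score': 0.91}, {'label': 'tiger cat', 'score': 0.05}, ...]

# Reshape into the {label: confidence} dict that gr.Label expects.
formatted_output = {item["label"]: item["score"] for item in output}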
@@ -96,13 +96,39 @@ def classify_eye_color(image):
 
     return formatted_output
 
-
 def process_gradio_image(pil_image):
     # Convert PIL image to NumPy array
     image = np.array(pil_image)
     image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # Convert RGB (from PIL) to BGR (OpenCV default)
     return image
+
+def classify_race(image):
+    '''
+    "0": "East Asian",
+    "1": "Indian",
+    "2": "Black",
+    "3": "White",
+    "4": "Middle Eastern",
+    "5": "Latino_Hispanic",
+    "6": "Southeast Asian"
+    '''
+    pipe = pipeline("image-classification", model="crangana/trained-race")
+    # Run the pipeline on the uploaded image
+    output = pipe("face_region.jpg")
+
+    # Format the output to be compatible with gr.outputs.Label
+    formatted_output = {item['label']: item['score'] for item in output}
 
+    return formatted_output
+
+def classify_gender(image):
+    pipe = pipeline("image-classification", model="rizvandwiki/gender-classification")
+    output = pipe("face_region.jpg")
+    # Format the output to be compatible with gr.outputs.Label
+    formatted_output = {item['label']: item['score'] for item in output}
+    return formatted_output
+
+
 def classify_image_with_multiple_models(image):
     create_eye_region(image)
     face_shape_result = classify_face_shape(image)
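Both new classifiers re-read the face crop from face_region.jpg (written by create_eye_region) and ignore their image argument, and each call rebuilds its pipeline from scratch. A minimal sketch of the same step, assuming one wanted to pass the crop in memory and load each model once (classify_face_crop is hypothetical, not in the app):

from PIL import Image
from transformers import pipeline

# Load each model once at import time rather than on every request.
race_pipe = pipeline("image-classification", model="crangana/trained-race")
gender_pipe = pipeline("image-classification", model="rizvandwiki/gender-classification")

def classify_face_crop(pipe, face_crop: Image.Image) -> dict:
    # The pipeline accepts a PIL image directly, so no disk round-trip is needed.
    output = pipe(face_crop)
    return {item["label"]: item["score"] for item in output}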
@@ -112,9 +138,10 @@ def classify_image_with_multiple_models(image):
     hair_color_results = classify_hair_color(image)
     eye_shape = classify_eye_shape(image)
     eye_color = classify_eye_color(image)
+    race = classify_race(image)
+    gender = classify_gender(image)
 
-    return face_shape_result, age_result, skin_type_result, acne_results, hair_color_results, eye_shape, eye_color
-
+    return face_shape_result, age_result, skin_type_result, acne_results, hair_color_results, eye_shape, eye_color, race, gender, Image.open("segmented_face.jpg")
 
 def create_eye_region(image):
     # Load the pre-trained face detector
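Gradio matches a function's returned tuple to the outputs list by position, so the ten values returned here must line up one-to-one with the ten output components declared later in gr.Interface. A stripped-down sketch of that contract:

import gradio as gr

def classify(image):
    # Two return values -> two output components, matched by position.
    return {"oval": 0.8, "round": 0.2}, {"female": 0.9, "male": 0.1}

demo = gr.Interface(
    fn=classify,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Label(label="Face Shape"), gr.Label(label="Gender")],
)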
@@ -132,12 +159,22 @@ def create_eye_region(image):
         # Draw a rectangle around the face
         cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
 
+        # Extract the face region
+        face_roi = image[y:y + h, x:x + w]
+        cv2.imwrite('face_region.jpg', face_roi)
+
         # Region of Interest (ROI) for the face
         roi_gray = gray[y:y + h, x:x + w]
         roi_color = image[y:y + h, x:x + w]
 
         # Detect eyes in the face ROI
-        eyes = eye_cascade.detectMultiScale(roi_gray)
+        eyes = eye_cascade.detectMultiScale(roi_gray, scaleFactor=1.1, minNeighbors=10, minSize=(20, 20))
+
+        eye_positions = []
+        for (ex, ey, ew, eh) in eyes:
+            # Ensure eyes are within the upper half of the face region
+            if ey + eh < h // 2:
+                eye_positions.append((ex, ey, ew, eh))
 
         for (ex, ey, ew, eh) in eyes:
             # Draw a rectangle around the eyes
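Raising minNeighbors to 10 and adding a minSize floor makes the Haar eye detector much stricter than its defaults, and the new filter keeps only boxes in the upper half of the face (eye coordinates are relative to the face ROI). Note that the drawing loop that follows still iterates eyes rather than the filtered eye_positions, which looks like an oversight. A standalone sketch of the detection step, using OpenCV's bundled cascades:

import cv2

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_eye.xml")

image = cv2.imread("face.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

for (x, y, w, h) in face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5):
    roi_gray = gray[y:y + h, x:x + w]
    eyes = eye_cascade.detectMultiScale(roi_gray, scaleFactor=1.1, minNeighbors=10, minSize=(20, 20))
    # Eye boxes are relative to the face ROI, so compare against the face height h.
    eye_positions = [(ex, ey, ew, eh) for (ex, ey, ew, eh) in eyes if ey + eh < h // 2]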
@@ -151,15 +188,15 @@ def create_eye_region(image):
             avg_color = np.mean(eye_roi, axis=(0, 1))
 
             # Classify eye color based on average color
-            if avg_color[0] > avg_color[1] and avg_color[0] > avg_color[2]:
-                color = "Brown"
-            elif avg_color[1] > avg_color[0] and avg_color[1] > avg_color[2]:
-                color = "Green"
-            else:
-                color = "Blue"
+            #if avg_color[0] > avg_color[1] and avg_color[0] > avg_color[2]:
+            #    color = "Brown"
+            #elif avg_color[1] > avg_color[0] and avg_color[1] > avg_color[2]:
+            #    color = "Green"
+            #else:
+            #    color = "Blue"
 
             # Display the eye color
-            cv2.putText(image, color, (ex, ey - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
+            #cv2.putText(image, color, (ex, ey - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
 
             cv2.imwrite('segmented_face.jpg', image)
 
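The disabled heuristic indexes avg_color in OpenCV's BGR order, so avg_color[0] is the blue channel and a blue-dominant iris would have been labeled "Brown"; that mix-up may be part of why it gave way to the dedicated eye-color model. A sketch with the channels named explicitly, assuming brown was meant to track the red channel:

import numpy as np

def naive_eye_color(eye_roi_bgr: np.ndarray) -> str:
    # OpenCV arrays are B, G, R; unpack so the comparisons read correctly.
    b, g, r = np.mean(eye_roi_bgr, axis=(0, 1))
    if r > g and r > b:
        return "Brown"
    if g > r and g > b:
        return "Green"
    return "Blue"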
@@ -176,9 +213,12 @@ demo = gr.Interface(
         gr.Label(num_top_classes=5, label="Hair Color"),
         gr.Label(num_top_classes=4, label="Eye Shape"),
         gr.Label(num_top_classes=5, label="Eye Color"),
+        gr.Label(num_top_classes=7, label="Race"),
+        gr.Label(num_top_classes=2, label="Gender"),
+        gr.Image(type="pil", label="Segmented Face", value="segmented_face.jpg") # Provide the path to the image
     ],
     title="Multiple Model Classification",
-    description="Upload an image to classify the face using
+    description="Upload an image to classify the face using multiple classification models"
 )
 
 #demo.launch(auth=("admin", "pass1234"))
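On an output component, value= only sets the placeholder shown before the first run; once classify_image_with_multiple_models returns, its tenth value (the PIL image opened from segmented_face.jpg) replaces it. So passing value="segmented_face.jpg" here appears harmless but redundant, as this sketch of the same wiring suggests:

import gradio as gr
from PIL import Image

def segment(image):
    # ... detection runs here and writes segmented_face.jpg ...
    return Image.open("segmented_face.jpg")

demo = gr.Interface(
    fn=segment,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil", label="Segmented Face"),  # no value= needed
)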