Spaces: Running
Julien Ajdenbaum committed · Commit 5389132 · 1 Parent(s): 5c3419c
first commit
- app.py +98 -0
- examples/160006.jpg +0 -0
- examples/160021.jpg +0 -0
- examples/160026.jpg +0 -0
- examples/160030.jpg +0 -0
- examples/160031.jpg +0 -0
- examples/160059.jpg +0 -0
- examples/160072.jpg +0 -0
- examples/160075.jpg +0 -0
- examples/160095.jpg +0 -0
- examples/160103.jpg +0 -0
- examples/160167.jpg +0 -0
- examples/160288.jpg +0 -0
- examples/160375.jpg +0 -0
- examples/160376.jpg +0 -0
- examples/160384.jpg +0 -0
- examples/160457.jpg +0 -0
- examples/160458.jpg +0 -0
- examples/161500.jpg +0 -0
- examples/163040.jpg +0 -0
- examples/163165.jpg +0 -0
- examples/163394.jpg +0 -0
- examples/166098.jpg +0 -0
- examples/169083.jpg +0 -0
- examples/172030.jpg +0 -0
- examples/177686.jpg +0 -0
- examples/178864.jpg +0 -0
- examples/download.jpeg +0 -0
- examples/sos_calvitie_4708.jpeg +0 -0
- hair_segmenter.tflite +3 -0
- requirements.txt +19 -0
app.py
ADDED
@@ -0,0 +1,98 @@
+import gradio as gr
+import cv2
+import math
+import numpy as np
+import os
+import mediapipe as mp
+
+import sys
+# Local project that provides the baldness classifier (not part of this commit).
+sys.path.append('/home/julien/Documents/bald_classification')
+
+import predict
+
+from mediapipe.tasks import python
+from mediapipe.tasks.python import vision
+
+# Height and width that will be used by the model
+DESIRED_HEIGHT = 480
+DESIRED_WIDTH = 480
+
+# Resize to the desired size and show the image in an OpenCV window
+# (local debugging helper; not called by the Gradio app).
+def resize_and_show(image):
+    h, w = image.shape[:2]
+    if h < w:
+        img = cv2.resize(image, (DESIRED_WIDTH, math.floor(h / (w / DESIRED_WIDTH))))
+    else:
+        img = cv2.resize(image, (math.floor(w / (h / DESIRED_HEIGHT)), DESIRED_HEIGHT))
+    cv2.imshow('color', img)
+    cv2.waitKey(1000)
+    cv2.destroyAllWindows()
+
+def segmentate(filepath):
+    BG_COLOR = (192, 192, 192)  # gray
+    MASK_COLOR = (255, 255, 255)  # white
+
+    # Create the options that will be used for ImageSegmenter
+    base_options = python.BaseOptions(model_asset_path='./hair_segmenter.tflite')
+    options = vision.ImageSegmenterOptions(base_options=base_options,
+                                           output_category_mask=True)
+
+    # Create the image segmenter
+    with vision.ImageSegmenter.create_from_options(options) as segmenter:
+        # Create the MediaPipe image file that will be segmented
+        print(filepath)
+        image = mp.Image.create_from_file(filepath)
+
+        # Retrieve the masks for the segmented image
+        segmentation_result = segmenter.segment(image)
+        category_mask = segmentation_result.category_mask
+
+        # Generate solid color images for showing the output segmentation mask.
+        image_data = image.numpy_view()
+        fg_image = np.zeros(image_data.shape, dtype=np.uint8)
+        fg_image[:] = MASK_COLOR
+        bg_image = np.zeros(image_data.shape, dtype=np.uint8)
+        bg_image[:] = BG_COLOR
+
+        condition = np.stack((category_mask.numpy_view(),) * 3, axis=-1) > 0.2
+        output_image = np.where(condition, fg_image, bg_image)
+
+        # print(f'Segmentation mask of {name}:')
+        # resize_and_show(output_image)
+        prediction = predict.predict(filepath)[0][0]
+        print(prediction)
+        # Map the classifier score onto Norwood stages 1-7: the index of the
+        # first threshold the score falls below, plus one, is the stage.
+        limits = [0.002, 0.1, 0.4, 0.95, 0.97, 0.991, 1]
+        print(np.where(prediction < limits)[0][0])
+        """
+        img = cv2.cvtColor(output_image, cv2.COLOR_BGR2GRAY)
+        # print(np.unique(img))
+        _, thresh = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY_INV)
+        # plt.imshow(thresh, cmap='gray')
+        contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+        im = cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
+        """
+        return np.where(prediction < limits)[0][0] + 1, output_image
+
+# GUI
+title = 'Hair loss prediction'
+description = 'Automatic prediction of the Norwood scale stage'
+# One value per input component: the interface has a single image input.
+examples = [[f'examples/{name}'] for name in sorted(os.listdir('examples'))]
+
+iface = gr.Interface(
+    fn=segmentate,
+    inputs=[
+        gr.Image(type='filepath', label='Input Image')
+    ],
+    outputs=[
+        gr.Number(label='Norwood Scale'), gr.Image(label='Hair Segmentation')
+    ],
+    examples=examples,
+    allow_flagging='never',
+    cache_examples=False,
+    title=title,
+    description=description
+)
+iface.launch()
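app.py imports predict from a hard-coded local path (/home/julien/Documents/bald_classification) that is not part of this commit, so the import cannot resolve on the Space as committed. A minimal hypothetical stub, written only to show the interface app.py assumes (predict.predict(filepath) returning a nested array whose [0][0] entry is a score in [0, 1)), could look like:

# predict.py (hypothetical stub, not part of this commit).
# app.py only relies on predict.predict(filepath)[0][0] being a scalar
# score in [0, 1) that is compared against the `limits` thresholds.
import numpy as np

def predict(filepath):
    # A real implementation would load the bald_classification model
    # and run inference on the image at `filepath`; this placeholder
    # returns a fixed score purely to illustrate the return shape.
    return np.array([[0.5]])

With limits = [0.002, 0.1, 0.4, 0.95, 0.97, 0.991, 1], a score of 0.5 first satisfies prediction < limits at index 3, so np.where(prediction < limits)[0][0] + 1 reports Norwood stage 4.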
examples/160006.jpg ADDED (image)
examples/160021.jpg ADDED (image)
examples/160026.jpg ADDED (image)
examples/160030.jpg ADDED (image)
examples/160031.jpg ADDED (image)
examples/160059.jpg ADDED (image)
examples/160072.jpg ADDED (image)
examples/160075.jpg ADDED (image)
examples/160095.jpg ADDED (image)
examples/160103.jpg ADDED (image)
examples/160167.jpg ADDED (image)
examples/160288.jpg ADDED (image)
examples/160375.jpg ADDED (image)
examples/160376.jpg ADDED (image)
examples/160384.jpg ADDED (image)
examples/160457.jpg ADDED (image)
examples/160458.jpg ADDED (image)
examples/161500.jpg ADDED (image)
examples/163040.jpg ADDED (image)
examples/163165.jpg ADDED (image)
examples/163394.jpg ADDED (image)
examples/166098.jpg ADDED (image)
examples/169083.jpg ADDED (image)
examples/172030.jpg ADDED (image)
examples/177686.jpg ADDED (image)
examples/178864.jpg ADDED (image)
examples/download.jpeg ADDED (image)
examples/sos_calvitie_4708.jpeg ADDED (image)
hair_segmenter.tflite
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2628cf3ce5f695f604cbea2841e00befcaa3624bf80caf3664bef2656d59bf84
+size 781618
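Because hair_segmenter.tflite is tracked with Git LFS, a checkout without git-lfs yields only the three-line pointer above instead of the 781618-byte model. A sketch for fetching the model directly, assuming it is the hair segmentation model published with MediaPipe (the download URL is an assumption, not something this commit confirms), and verifying it against the oid recorded in the pointer:

# fetch_model.py (sketch): download the model and compare its sha256
# against the oid recorded in the LFS pointer of this commit.
import hashlib
import urllib.request

# Assumed location of MediaPipe's published hair segmentation model.
URL = ('https://storage.googleapis.com/mediapipe-models/image_segmenter/'
       'hair_segmenter/float32/latest/hair_segmenter.tflite')
EXPECTED = '2628cf3ce5f695f604cbea2841e00befcaa3624bf80caf3664bef2656d59bf84'

urllib.request.urlretrieve(URL, 'hair_segmenter.tflite')
with open('hair_segmenter.tflite', 'rb') as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print('matches LFS pointer:', digest == EXPECTED)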
requirements.txt
ADDED
@@ -0,0 +1,19 @@
+gradio==3.41.0
+absl-py
+attrs
+cffi
+contourpy
+cycler
+flatbuffers
+fonttools
+kiwisolver
+matplotlib
+mediapipe
+numpy
+opencv-contrib-python
+opencv-python
+pillow
+protobuf
+pycparser
+pyparsing
+sounddevice
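Of these, only gradio, mediapipe, numpy, and the OpenCV packages are imported by app.py directly; the remaining entries (absl-py through sounddevice) appear to be transitive dependencies of mediapipe listed explicitly. A quick environment sanity check, under the assumption that the vision task API app.py uses (ImageSegmenterOptions with output_category_mask) requires a mediapipe 0.10-series release:

# check_env.py (sketch): confirm the installed packages expose the
# APIs app.py relies on.
import gradio
import mediapipe
from mediapipe.tasks.python import vision

print('gradio', gradio.__version__)        # pinned above to 3.41.0
print('mediapipe', mediapipe.__version__)  # assumption: 0.10+ is needed
# Older mediapipe releases lack the tasks API used in app.py.
assert hasattr(vision, 'ImageSegmenter')
assert hasattr(vision, 'ImageSegmenterOptions')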