drsaikirant88 committed
Commit: e19d3ae
Parent(s): d38fa5d
Update app.py
app.py CHANGED
@@ -246,7 +246,7 @@ def predict(img):
 
     if len(faces) == 0:
 
-        img = img.resize(
+        img = img.resize(12, 12)
 
         pred_emotion, pred_emotion_idx, probs_emotion = learn_emotion.predict(array(grayscale(img)))
 
@@ -255,12 +255,12 @@ def predict(img):
         emotions = {learn_emotion_labels[i]: float(probs_emotion[i]) for i in range(len(learn_emotion_labels))}
         sentiments = {learn_sentiment_labels[i]: float(probs_sentiment[i]) for i in range(len(learn_sentiment_labels))}
 
-        output = [img.resize((
+        output = [img.resize((12, 12)), emotions, sentiments, None, None, None, None, None, None]
 
     else: # Max 3 for now
         for face in faces[:3]:
 
-            img = face.resize((
+            img = face.resize((12, 12))
 
             pred_emotion, pred_emotion_idx, probs_emotion = learn_emotion.predict(array(grayscale(img)))
 
@@ -338,13 +338,13 @@ examples = ['happy1.jpg', 'happy2.jpg', 'angry1.png', 'angry2.jpg', 'neutral1.jp
 
 gr.Interface(fn = predict,
              inputs = gr.Image(),
-             outputs = [gr.Image(shape=(
+             outputs = [gr.Image(shape=(12, 12), label='Person 1'),
                         gr.Label(label='Emotion - Person 1'),
                         gr.Label(label='Sentiment - Person 1'),
-                        gr.Image(shape=(
+                        gr.Image(shape=(12, 12), label='Person 2'),
                         gr.Label(label='Emotion - Person 2'),
                         gr.Label(label='Sentiment - Person 2'),
-                        gr.Image(shape=(
+                        gr.Image(shape=(12, 12), label='Person 3'),
                         gr.Label(label='Emotion - Person 3'),
                         gr.Label(label='Sentiment - Person 3'),], #gr.Label(),
              title = title,
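
A detail worth noting in the added lines: line 249 is written as img.resize(12, 12), while lines 258 and 263 use the tuple form resize((12, 12)). Pillow's Image.resize takes the target size as a single (width, height) tuple and reads a second positional argument as the resampling filter, so the line-249 form raises at runtime. A minimal sketch of the difference, assuming img is a PIL image as the surrounding grayscale/resize calls in app.py suggest (the 100x100 stand-in image is illustrative only):

    from PIL import Image

    img = Image.new('L', (100, 100))   # stand-in for a grayscale face crop, illustrative only

    small = img.resize((12, 12))       # tuple form, as on lines 258 and 263
    print(small.size)                  # (12, 12)

    # img.resize(12, 12)               # form on line 249: the second 12 is read as the
                                       # resampling filter, so Pillow raises a
                                       # TypeError/ValueError depending on the version

The nine-element output list on line 258 lines up one-to-one with the nine components in the outputs= list of gr.Interface (an image, an emotion label, and a sentiment label for each of up to three faces); the trailing None values leave the unused person slots blank when no face is detected.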