import gradio as gr
import numpy as np
import cv2
from insightface.app import FaceAnalysis
# Load your trained model (requires `import tensorflow as tf`; the path below is a placeholder)
# model = tf.keras.models.load_model('path_to_your_model.h5')
model = None  # stays None until a model is loaded above

def predict_gender(image):
    if model is None:
        raise gr.Error("No gender model loaded; see the commented-out load_model call above.")
    # Convert the RGB input to BGR (OpenCV convention) and preprocess
    img = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    img = cv2.resize(img, (224, 224))  # example input size
    img = img / 255.0                  # normalize to [0, 1]
    img = np.expand_dims(img, axis=0)  # add a batch dimension
    prediction = model.predict(img)
    # Binary classification with a single sigmoid output neuron
    return "Male" if prediction[0][0] < 0.5 else "Female"
def predict(image_in_video, image_in_img):
    if image_in_video is None and image_in_img is None:
        raise gr.Error("Please upload an image.")
    image = image_in_video or image_in_img
    return image
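# A sketch of surfacing the prediction in the UI: annotate the frame with the
# predicted label before returning it. predict_annotated is illustrative and
# not wired to the Run button; inputs arrive as file paths (type="filepath").
def predict_annotated(image_in_video, image_in_img):
    path = image_in_video or image_in_img
    img = cv2.imread(path)  # loads as BGR
    label = predict_gender_insightface(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    cv2.putText(img, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # Gradio displays RGB arrays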
def toggle(choice):
    # Show the webcam input and hide the upload input, or vice versa
    if choice == "webcam":
        return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
    else:
        return gr.update(visible=False, value=None), gr.update(visible=True, value=None)
with gr.Blocks() as blocks:
    gr.Markdown("### WebCam or Upload?")
    with gr.Row():
        with gr.Column():
            image_or_file_opt = gr.Radio(["webcam", "file"], value="webcam",
                                         label="How would you like to upload your image?")
            # Gradio 3.x API; Gradio 4.x replaces source= with sources=["webcam"] / sources=["upload"]
            image_in_video = gr.Image(source="webcam", type="filepath")
            image_in_img = gr.Image(source="upload", visible=False, type="filepath")
            image_or_file_opt.change(fn=toggle, inputs=[image_or_file_opt],
                                     outputs=[image_in_video, image_in_img],
                                     queue=False, show_progress=False)
        with gr.Column():
            image_out = gr.Image()
    run_btn = gr.Button("Run")
    # Input order matches predict's signature: (image_in_video, image_in_img)
    run_btn.click(fn=predict, inputs=[image_in_video, image_in_img], outputs=[image_out])
    gr.Examples(fn=predict, examples=[],  # populate with example image paths
                inputs=[image_in_video, image_in_img], outputs=[image_out])

blocks.queue()
blocks.launch()