# swap_face / app.py — face-swapping Gradio demo
# (Hugging Face Space by sandrocalzada, revision b39090b)
import numpy as np
import gradio as gr
import glob
import cv2
import matplotlib.pyplot as plt
import insightface
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image
def predict(image_in_video, image_in_img):
    """Run the face swap on whichever input image was provided.

    Parameters
    ----------
    image_in_video : str | None
        Filepath of the webcam capture (Gradio ``type="filepath"``), or None.
    image_in_img : str | None
        Filepath of the uploaded image, or None.

    Returns
    -------
    The result of ``swapi`` on the chosen image.

    Raises
    ------
    gr.Error
        If neither input was supplied.
    """
    # Identity comparison for None (PEP 8); `== None` can be fooled by
    # objects that override __eq__.
    if image_in_video is None and image_in_img is None:
        raise gr.Error("Please capture an image using the webcam or upload an image.")
    # Whichever input is non-empty wins; webcam capture takes precedence.
    image = image_in_video or image_in_img
    return swapi(image)
# Face-analysis pipeline (detection + embedding) from the 'buffalo_l'
# model pack; insightface downloads the weights on first use.
app = FaceAnalysis(name='buffalo_l')
# ctx_id=0 selects the first GPU (insightface falls back to CPU when no
# GPU is available); det_size is the detector's input resolution.
app.prepare(ctx_id=0, det_size=(640, 640))
# Pre-trained 128x128 face-swapping model; expects 'inswapper_128.onnx'
# to be resolvable by insightface's model zoo (e.g. present locally).
swapper = insightface.model_zoo.get_model('inswapper_128.onnx')
def swapi(imagen):
    """Swap every detected face in *imagen* with the first detected face.

    Parameters
    ----------
    imagen : str | numpy.ndarray
        Either a filepath to an image on disk, or an RGB image array
        (the format Gradio delivers).

    Returns
    -------
    numpy.ndarray
        The processed image in RGB channel order (suitable for display
        by ``gr.Image``), or the image unchanged when no face is found.

    Raises
    ------
    ValueError
        If the image cannot be read.
    """
    # Normalize the input to a BGR array, which is what OpenCV and the
    # insightface models work with internally.
    if isinstance(imagen, str):
        # cv2.imread already returns BGR; no channel conversion needed.
        # (The original code converted again here, so the models actually
        # ran on RGB data — harmless for detection, but inconsistent.)
        img = cv2.imread(imagen)
        if img is None:
            raise ValueError("Failed to read the image.")
    else:
        img = np.array(imagen)
        if img is None:
            raise ValueError("Failed to read the image.")
        # Gradio supplies RGB; only 3-channel color images need the swap.
        # The ndim check guards 2-D grayscale arrays, whose shape[-1] is
        # the image width, not a channel count.
        if img.ndim == 3 and img.shape[-1] == 3:
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

    faces = app.get(img)
    if not faces:
        # No faces: return the original, converted back to RGB for display.
        if img.ndim == 3 and img.shape[-1] == 3:
            return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img

    source_face = faces[0]  # first detection is the swap source
    res = img.copy()
    for face in faces:
        res = swapper.get(res, face, source_face, paste_back=True)
    # Convert the BGR working copy to RGB once, at the boundary, so the
    # Gradio output component displays correct colors.
    return cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
# --- Gradio UI: radio chooses webcam vs. file upload; Run triggers predict ---
with gr.Blocks() as blocks:
    gr.Markdown("### Capture Image Using WebCam or Upload")
    with gr.Row():
        with gr.Column():
            image_or_file_opt = gr.Radio(["webcam", "file"], value="webcam",
                                         label="How would you like to upload your image?")
            # Both inputs use type="filepath", so predict/swapi receive paths.
            image_in_video = gr.Image(source="webcam", type="filepath")
            image_in_img = gr.Image(source="upload", visible=False, type="filepath")

            def toggle(choice):
                """Show only the widget matching the selected source, clearing
                both values so a stale image can never be submitted."""
                if choice == "webcam":
                    return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
                return gr.update(visible=False, value=None), gr.update(visible=True, value=None)

            image_or_file_opt.change(fn=toggle, inputs=[image_or_file_opt],
                                     outputs=[image_in_video, image_in_img],
                                     queue=False, show_progress=False)
        with gr.Column():
            image_out = gr.Image()
    run_btn = gr.Button("Run")
    # Input order now matches predict(image_in_video, image_in_img); the
    # original passed them swapped, which only worked because predict treats
    # the two arguments interchangeably.
    run_btn.click(fn=predict, inputs=[image_in_video, image_in_img], outputs=[image_out])
    gr.Examples(fn=predict, examples=[], inputs=[image_in_video, image_in_img], outputs=[image_out])

blocks.queue()
blocks.launch(debug=True)