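"""Gradio face-swap demo: InsightFace detects every face in a webcam capture or an
uploaded image, and the inswapper_128 model pastes the first detected face onto all
detected faces."""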
import numpy as np
import gradio as gr
import cv2
import insightface
from insightface.app import FaceAnalysis

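# Gradio callback: exactly one of the two image inputs is populated (the other is None),
# depending on whether the webcam or the file uploader is active; both arrive as file paths.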
def predict(image_in_video, image_in_img):
    if image_in_video is None and image_in_img is None:
        raise gr.Error("Please capture an image using the webcam or upload an image.")
    image = image_in_video or image_in_img
    return swapi(image)

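# Load the InsightFace detection/recognition pipeline and the face-swapping model.
# ctx_id=0 selects the first GPU (use -1 to force CPU); det_size is the detector's input resolution.
# The 'inswapper_128.onnx' weights are not downloaded automatically and must be available
# locally (or in the InsightFace model directory) before launching the app.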
app = FaceAnalysis(name='buffalo_l')
app.prepare(ctx_id=0, det_size=(640, 640))
swapper = insightface.model_zoo.get_model('inswapper_128.onnx')


def swapi(imagen):
    # Accept either a file path (str) or an image array (numpy.ndarray).
    if isinstance(imagen, str):
        img = cv2.imread(imagen)  # cv2.imread already returns BGR
        if img is None:
            raise ValueError("Failed to read the image.")
    else:
        img = np.array(imagen)
        if img.ndim == 3 and img.shape[-1] == 3:
            # Gradio delivers RGB arrays; OpenCV and InsightFace work in BGR.
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

    faces = app.get(img)

    if not faces:
        # No faces detected: return the input unchanged (as RGB for display).
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # Use the first detected face as the source and paste it over every detected face.
    source_face = faces[0]
    res = img.copy()
    for face in faces:
        res = swapper.get(res, face, source_face, paste_back=True)

    # Convert back to RGB so the Gradio output component shows correct colors.
    return cv2.cvtColor(res, cv2.COLOR_BGR2RGB)


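# UI: a radio toggle switches between the webcam capture widget and the file-upload widget;
# the swapped result is shown in the second column.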
with gr.Blocks() as blocks:
    gr.Markdown("### Capture Image Using WebCam or Upload") 

    with gr.Row():
        with gr.Column():
            image_or_file_opt = gr.Radio(["webcam", "file"], value="webcam",
                                         label="How would you like to upload your image?")
            image_in_video = gr.Image(source="webcam", type="filepath")
            image_in_img = gr.Image(source="upload", visible=False, type="filepath")

            # Update visibility based on selection
            def toggle(choice):
                if choice == "webcam":
                    return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
                else:
                    return gr.update(visible=False, value=None), gr.update(visible=True, value=None)

            image_or_file_opt.change(fn=toggle, inputs=[image_or_file_opt],
                                     outputs=[image_in_video, image_in_img], queue=False, show_progress=False)
        with gr.Column():
            image_out = gr.Image()

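    # Run button: both inputs are passed; predict() uses whichever one is set.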
    run_btn = gr.Button("Run")
    run_btn.click(fn=predict, inputs=[image_in_video, image_in_img], outputs=[image_out])
    gr.Examples(fn=predict, examples=[], inputs=[image_in_video, image_in_img], outputs=[image_out])

blocks.queue()
blocks.launch(debug=True)