Runtime error
Update app.py
app.py
CHANGED
@@ -1,18 +1,68 @@
-import gradio as gr
-from transformers import pipeline
-
-pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
-
-def predict(input_img):
-    predictions = pipeline(input_img)
-    return input_img, {p["label"]: p["score"] for p in predictions}
-
-gradio_app = gr.Interface(
-    predict,
-    inputs=gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
-    outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
-    title="Hot Dog? Or Not?",
-)
-
-if __name__ == "__main__":
-    gradio_app.launch()
+
+import gradio as gr
+import numpy as np
+from diffusers import UNet2DModel, DDPMPipeline, DDPMScheduler, DiffusionPipeline
+import torch
+import torch.nn.functional as F
+from matplotlib import pyplot as plt
+from PIL import Image
+
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+pipeline = DiffusionPipeline.from_pretrained("gjbooth2/Unconditional_A4C_1").to(device)
+
+
+
+# Generate a batch of 16 samples and tile them into a single grayscale grid.
+def image_gen(click, rows=4, cols=4):
+    images = pipeline(batch_size=16).images
+    w, h = images[0].size
+    grid = Image.new('L', size=(cols*w, rows*h))
+    for i, image in enumerate(images):
+        grid.paste(image, box=(i%cols*w, i//cols*h))
+    return grid
+    #return 'button clicked'
+
+
+def image_gen_modified(rows=4, cols=4):
+    pic_hold = []
+    model_output = pipeline(batch_size=16).images
+    count = 0
+    for i in range(len(model_output)):
+        pic = np.array(model_output[i].convert('L'))
+        max_val = max([element for row in pic for element in row])
+        min_val = min([element for row in pic for element in row])
+
+        if min_val > 55:  # for washed-out images, set the tile to all black
+            normalized_pic = np.ones((128, 128))
+            pic_hold.append(Image.fromarray(np.uint8(normalized_pic)))
+
+        if min_val < 56:
+            def normalize_images(x, min_val, max_val):  # normalize pixels for a more homogeneous grayscale appearance
+                return 200*((x-min_val)/(max_val-min_val))
+            vectorized_normalizer = np.vectorize(normalize_images)
+            normalized_pic = vectorized_normalizer(pic, min_val, max_val)
+            pic_hold.append(Image.fromarray(np.uint8(normalized_pic)))
+        count += 1
+
+    w, h = model_output[0].size
+    grid = Image.new('L', size=(cols*w, rows*h))
+    for i, image in enumerate(pic_hold):
+        grid.paste(image, box=(i%cols*w, i//cols*h))
+
+    return grid
+
+
+with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    gr.Markdown('CS 614 Greg Booth Vision Assignment')
+    gr.Markdown('This Gradio app can be used to generate realistic cardiac ultrasound images.')
+    gr.HTML("<a href='https://pocus.sg/topic/subcostal-4-chamber/' target='_blank'>Example anatomy</a>")
+
+    with gr.Tab('Generate a cardiac ultrasound image'):
+        playground_btn = gr.Button(value='Push me some images! (may take a couple of minutes depending on hardware)')
+        playground_out = gr.Image()
+        playground_btn.click(image_gen_modified, outputs=playground_out)
+
+
+demo.launch(auth=('CS614', 'CS614'))
+
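
For quick local testing outside the Space, the same checkpoint can be exercised directly with diffusers. The sketch below is illustrative rather than part of the committed app.py: it assumes diffusers, torch, numpy, and Pillow are installed, uses a smaller batch than the app to keep generation fast, and replaces the per-pixel np.vectorize normalization with an equivalent whole-array NumPy expression.

import numpy as np
import torch
from diffusers import DiffusionPipeline
from PIL import Image

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
pipe = DiffusionPipeline.from_pretrained("gjbooth2/Unconditional_A4C_1").to(device)

# Generate a small batch of samples and tile them into a 2x2 grayscale grid.
images = pipe(batch_size=4).images
rows, cols = 2, 2
w, h = images[0].size
grid = Image.new("L", size=(cols * w, rows * h))

for i, img in enumerate(images):
    pic = np.array(img.convert("L"), dtype=np.float32)
    lo, hi = float(pic.min()), float(pic.max())
    if lo > 55:
        # Washed-out sample: use an (almost) black tile, as in image_gen_modified.
        tile = np.ones((h, w), dtype=np.uint8)
    else:
        # Rescale pixel values to the 0-200 range in one vectorized step.
        tile = np.uint8(200 * (pic - lo) / max(hi - lo, 1.0))
    grid.paste(Image.fromarray(tile), box=(i % cols * w, i // cols * h))

grid.save("a4c_grid.png")

The array expression performs the same rescaling as normalize_images but avoids calling a Python function once per pixel, which adds up over a 16-image batch of 128x128 samples.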