mkrzyzan commited on
Commit
9be4786
·
1 Parent(s): 1a5fc65

another example

Browse files
Files changed (1) hide show
  1. app.py +22 -12
app.py CHANGED
@@ -1,18 +1,28 @@
1
  import gradio as gr
 
2
 
3
- from diffusers import AutoPipelineForText2Image
4
- import torch
5
 
6
- pipeline = AutoPipelineForText2Image.from_pretrained(
7
- "runwayml/stable-diffusion-v1-5"
8
- )
9
- prompt = "peasant and dragon combat, wood cutting style, viking era, bevel with rune"
10
 
11
- image = pipeline(prompt, num_inference_steps=25).images[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
- def greet(name):
14
- return "Hello " + name + "!!"
15
-
16
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
17
- iface.launch()
18
 
 
 
1
  import gradio as gr
2
+ import numpy as np
3
 
4
def flip_text(text):
    """Return the input string reversed, e.g. "abc" -> "cba"."""
    reversed_text = text[::-1]
    return reversed_text
6
 
7
def flip_img(img):
    """Flip an image vertically: the top row becomes the bottom row."""
    flipped = np.flipud(img)
    return flipped
 
 
9
 
10
# Two-tab Gradio demo: one tab reverses a string, the other flips an
# image upside down.  Fixes vs. original: dropped the un-Pythonic
# trailing semicolons and renamed the misspelled local `image_outpt`
# to `image_output` for consistency with `image_input`.
#
# NOTE(review): the diff view strips indentation, so the nesting below
# (Accordion at Blocks level, click bindings inside the Blocks context)
# follows the standard Gradio Blocks tutorial layout — confirm against
# the deployed file.
with gr.Blocks() as demo:
    gr.Markdown("flip the text or image using this demo")

    with gr.Tab("Flip Text"):
        text_input = gr.Textbox()
        text_output = gr.Textbox()
        text_btn = gr.Button("Flip")

    with gr.Tab("Flip Img"):
        with gr.Row():
            # NOTE(review): `source=` was replaced by `sources=[...]`
            # in Gradio 4.x — confirm the installed Gradio version.
            image_input = gr.Image(source="webcam")
            image_output = gr.Image()
        image_btn = gr.Button("Flip")

    with gr.Accordion("Open for more"):
        gr.Markdown("Look at me")

    # Wire each button to its handler; outputs land in the matching
    # component on the same tab.
    text_btn.click(flip_text, inputs=text_input, outputs=text_output)
    image_btn.click(flip_img, inputs=image_input, outputs=image_output)

demo.launch()