Hev832 committed on
Commit
d8a6468
·
verified ·
1 Parent(s): 274e2b7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -8
app.py CHANGED
@@ -1,19 +1,32 @@
1
  import gradio as gr
2
  from diffusers import DiffusionPipeline
 
3
 
4
- # Load the pipeline
5
- pipeline = DiffusionPipeline.from_pretrained("John6666/t-ponynai3-v6-sdxl")
 
 
 
 
6
 
7
- def generate_image(prompt, negative_prompt,progress=gr.Progress()):
8
- progress(0, desc="Starting Pipeline, Please Wait...")
9
- # Generate image using the pipeline with both prompt and negative prompt
10
- image = pipeline(prompt, negative_prompt=negative_prompt).images[0]
 
 
 
 
 
 
 
 
11
  return image
12
 
13
  # Create Gradio interface
14
  with gr.Blocks() as demo:
15
- gr.Markdown("# ponynai3 v6 sdxl")
16
-
17
  with gr.Row():
18
  with gr.Column():
19
  prompt = gr.Textbox(label="Enter your prompt", placeholder="Describe the image you want to generate")
@@ -23,6 +36,7 @@ with gr.Blocks() as demo:
23
  with gr.Column():
24
  output_image = gr.Image(label="Generated Image")
25
 
 
26
  generate_button.click(fn=generate_image, inputs=[prompt, negative_prompt], outputs=output_image)
27
 
28
  # Launch the Gradio app
 
import gradio as gr
from diffusers import DiffusionPipeline
import torch

# Load the pipeline for CPU inference.
# NOTE(review): the previous revision used torch_dtype=torch.float16 together
# with .to("cpu"). Half precision is poorly supported on CPU in PyTorch (many
# ops raise "expected scalar type Half"), so FP32 is the safe choice here.
# The old safety_checker=None kwarg was a no-op for SDXL checkpoints (they
# ship no safety checker component) and has been dropped.
pipeline = DiffusionPipeline.from_pretrained(
    "John6666/t-ponynai3-v6-sdxl",
    torch_dtype=torch.float32,  # FP32: reliable on CPU, unlike FP16
).to("cpu")

# Attention slicing trades a little speed for a large cut in peak memory —
# important when running an SDXL-sized model without a GPU.
pipeline.enable_attention_slicing()
def generate_image(prompt, negative_prompt, progress=gr.Progress()):
    """Generate one image from ``prompt`` / ``negative_prompt``.

    Args:
        prompt: Positive text prompt describing the desired image.
        negative_prompt: Text describing what to avoid in the image.
        progress: Gradio progress tracker (injected by Gradio at call time).

    Returns:
        The first generated image (PIL.Image) from the pipeline output.
    """
    num_inference_steps = 20  # fixed step count: speed/quality trade-off on CPU

    # NOTE(review): the previous revision ran a bare for-loop that pushed the
    # progress bar to ~100% before any denoising happened. Instead, let
    # diffusers invoke this callback after every scheduler step so the bar
    # tracks the actual work.
    def _report_step(pipe, step, timestep, callback_kwargs):
        progress((step + 1) / num_inference_steps, desc="Generating...")
        return callback_kwargs

    image = pipeline(
        prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        callback_on_step_end=_report_step,
    ).images[0]
    return image
25
 
26
  # Create Gradio interface
27
  with gr.Blocks() as demo:
28
+ gr.Markdown("# Text-to-Image Generator with John6666/t-ponynai3-v6-sdxl models")
29
+
30
  with gr.Row():
31
  with gr.Column():
32
  prompt = gr.Textbox(label="Enter your prompt", placeholder="Describe the image you want to generate")
 
36
  with gr.Column():
37
  output_image = gr.Image(label="Generated Image")
38
 
39
+ # Add the progress bar component and connect it with the generate_image function
40
  generate_button.click(fn=generate_image, inputs=[prompt, negative_prompt], outputs=output_image)
41
 
42
  # Launch the Gradio app