Arkm20 committed on
Commit
03d5c24
1 Parent(s): 3e2de5c

Add application file

Files changed (4)
  1. README.md +4 -3
  2. app.py +147 -0
  3. header.md +15 -0
  4. requirements.txt +2 -0
README.md CHANGED
@@ -1,12 +1,13 @@
  ---
- title: Flux FAST With API
+ title: Fast FLUX.1 Dev
  emoji: 🔥
- colorFrom: gray
+ colorFrom: yellow
  colorTo: yellow
  sdk: gradio
- sdk_version: 4.44.0
+ sdk_version: 4.42.0
  app_file: app.py
  pinned: false
+ license: mit
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,147 @@
+ import gradio as gr
+ import numpy as np
+ import os
+ import random
+ import requests
+ from PIL import Image
+ from io import BytesIO
+
+ MAX_SEED = np.iinfo(np.int32).max
+ MAX_IMAGE_SIZE = 2048
+
+ class APIClient:
+     def __init__(self, api_key=os.getenv("API_KEY"), base_url="inference.prodia.com"):
+         self.headers = {
+             "Content-Type": "application/json",
+             "Accept": "image/jpeg",
+             "Authorization": f"Bearer {api_key}"
+         }
+         self.base_url = f"https://{base_url}"
+
+     def _post(self, url, json=None):
+         r = requests.post(url, headers=self.headers, json=json)
+         r.raise_for_status()
+
+         return Image.open(BytesIO(r.content)).convert("RGB")
+
+     def job(self, config):
+         body = {"type": "inference.flux.dev.txt2img.v1", "config": config}
+         return self._post(f"{self.base_url}/v2/job", json=body)
+
+
+ def infer(prompt, seed=42, randomize_seed=False, resolution="1024x1024", guidance_scale=5.0, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+
+     width, height = resolution.split("x")
+
+     image = generative_api.job({
+         "prompt": prompt,
+         "width": int(width),
+         "height": int(height),
+         "seed": seed,
+         "steps": num_inference_steps,
+         "guidance_scale": guidance_scale
+     })
+     return image, seed
+
+ generative_api = APIClient()
+
+ with open("header.md", "r") as file:
+     header = file.read()
+
+ examples = [
+     "a tiny astronaut hatching from an egg on the moon",
+     "a cat holding a sign that says hello world",
+     "an anime illustration of a wiener schnitzel",
+ ]
+
+ css="""
+ #col-container {
+     margin: 0 auto;
+     max-width: 520px;
+ }
+ .image-container img {
+     max-width: 512px;
+     max-height: 512px;
+     margin: 0 auto;
+     border-radius: 0px;
+ }
+ """
+
+ with gr.Blocks(css=css) as demo:
+
+     with gr.Column(elem_id="col-container"):
+         gr.Markdown(header)
+         with gr.Row():
+
+             prompt = gr.Text(
+                 label="Prompt",
+                 show_label=False,
+                 max_lines=1,
+                 placeholder="Enter your prompt"
+             )
+
+             run_button = gr.Button("Run", scale=0)
+
+         result = gr.Image(label="Result", show_label=False, format="jpeg")
+
+         with gr.Accordion("Advanced Settings", open=False):
+
+             seed = gr.Slider(
+                 label="Seed",
+                 minimum=0,
+                 maximum=MAX_SEED,
+                 step=1,
+                 value=0,
+             )
+
+             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+             with gr.Row():
+
+                 resolution = gr.Dropdown(
+                     label="Resolution",
+                     value="1024x1024",
+                     choices=[
+                         "1024x1024",
+                         "1024x576",
+                         "576x1024"
+                     ]
+                 )
+
+             with gr.Row():
+
+                 guidance_scale = gr.Slider(
+                     label="Guidance Scale",
+                     minimum=1,
+                     maximum=15,
+                     step=0.1,
+                     value=3.5,
+                 )
+
+                 num_inference_steps = gr.Slider(
+                     label="Number of inference steps",
+                     minimum=1,
+                     maximum=50,
+                     step=1,
+                     value=28,
+                 )
+
+         gr.Examples(
+             examples = examples,
+             fn = infer,
+             inputs = [prompt],
+             outputs = [result, seed],
+             cache_examples="lazy"
+         )
+
+     gr.on(
+         triggers=[run_button.click, prompt.submit],
+         fn = infer,
+         inputs = [prompt, seed, randomize_seed, resolution, guidance_scale, num_inference_steps],
+         outputs = [result, seed]
+     )
+
+
+ demo.queue(default_concurrency_limit=12, max_size=14, api_open=True).launch(max_threads=256, show_api=True)
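
For reference, the request that app.py issues can also be reproduced outside Gradio. The following is a minimal standalone sketch that mirrors APIClient.job (same endpoint, headers, job type, and config keys as the code above); the API_KEY environment variable and the output filename are assumptions for illustration, not part of the commit.

import os
from io import BytesIO

import requests
from PIL import Image

# Same headers and job body that APIClient builds in app.py.
headers = {
    "Content-Type": "application/json",
    "Accept": "image/jpeg",
    "Authorization": f"Bearer {os.getenv('API_KEY')}",  # assumes API_KEY is set
}
body = {
    "type": "inference.flux.dev.txt2img.v1",
    "config": {
        "prompt": "a tiny astronaut hatching from an egg on the moon",
        "width": 1024,
        "height": 1024,
        "seed": 42,
        "steps": 28,
        "guidance_scale": 3.5,
    },
}

r = requests.post("https://inference.prodia.com/v2/job", headers=headers, json=body)
r.raise_for_status()

# The endpoint is expected to return JPEG bytes directly, as in APIClient._post.
Image.open(BytesIO(r.content)).convert("RGB").save("astronaut.jpg")  # hypothetical filename
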
header.md ADDED
@@ -0,0 +1,15 @@
+ # Fast FLUX.1 [dev]
+ 12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/)
+ <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
+ <div>
+ <h1>If the space is too busy and slow, head over to the Prodia Explorer</h1>
+ <br>
+ <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
+ <a href="https://blackforestlabs.ai/announcing-black-forest-labs/?utm_source=prodia"><img src="https://img.shields.io/badge/Blog-Black_Forest_Labs-blue"></a>
+ &nbsp;
+ <a href="https://prodia.com/?utm_source=huggingface"><img src="https://img.shields.io/badge/Home_Page-Prodia-green"></a>
+ &nbsp;
+ <a href="https://app.prodia.com/explorer?utm_source=huggingface"><img src="https://img.shields.io/badge/Prodia-Explorer-yellow"></a>
+ </div>
+ </div>
+ </div>
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ requests~=2.32.3
+ numpy~=1.26.4
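
Note that app.py also imports gradio and Pillow, which are not pinned here; on the Space they come from the Gradio SDK runtime (gradio per the sdk_version in README.md), while a local run would need them installed separately. A quick sanity-check sketch, assuming the standard distribution names:

# Print the installed versions of everything app.py relies on at runtime.
# "gradio" and "pillow" are assumed to come from the Space runtime or a local install,
# not from this requirements.txt.
from importlib.metadata import version

for dist in ("requests", "numpy", "gradio", "pillow"):
    print(f"{dist}: {version(dist)}")
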