Update app.py
app.py CHANGED
@@ -11,7 +11,7 @@ dataset = load_dataset("Gustavosta/Stable-Diffusion-Prompts")
 prompt_df = dataset["train"].to_pandas()
 
 def get_samples():
-    prompt_list = prompt_df.sample(n =
+    prompt_list = prompt_df.sample(n = 10)["Prompt"].map(lambda x: x).values.tolist()
     return prompt_list
 
 def get_params(request: gr.Request):
@@ -27,6 +27,7 @@ def get_params(request: gr.Request):
 client = InferenceClient()
 models = client.list_deployed_models()
 list_models = models["text-to-image"]
+list_prompts = get_samples()
 
 '''
 list_models = [
@@ -227,10 +228,14 @@ with gr.Blocks(css=css) as demo:
     current_model = gr.Dropdown(label="Current Model", choices=list_models, value="stabilityai/stable-diffusion-2-1")
 
     with gr.Row(elem_id="prompt-container"):
-
-
-
-
+        with gr.Row():
+            text_prompt = gr.Textbox(label="Prompt", placeholder="a cute dog",
+                value = "1girl, aqua eyes, baseball cap, blonde hair, closed mouth, earrings, green background, hat, hoop earrings, jewelry, looking at viewer, shirt, short hair, simple background, solo, upper body, yellow shirt",
+                lines=2, elem_id="prompt-text-input")
+            text_button = gr.Button("Manualy input Generate", variant='primary', elem_id="gen-button")
+        with gr.Row():
+            select_prompt = gr.Dropdown(label="prompt selected", choices=list_prompts, value=list_prompts[1])
+            select_button = gr.Button("Select Prompt Generate", variant='primary', elem_id="gen-button")
 
     with gr.Row():
         image_output = gr.Image(type="pil", label="Output Image", elem_id="gallery")
@@ -239,6 +244,7 @@ with gr.Blocks(css=css) as demo:
     negative_prompt = gr.Textbox(label="Negative Prompt", value="text, blurry, fuzziness", lines=1, elem_id="negative-prompt-text-input")
     image_style = gr.Dropdown(label="Style", choices=["None style", "Cinematic", "Digital Art", "Portrait"], value="Portrait", allow_custom_value=False)
 
+    '''
     with gr.Row():
         with gr.Column():
             exps = gr.Examples(
@@ -247,18 +253,11 @@ with gr.Blocks(css=css) as demo:
                 label = "Prompt Examples",
                 elem_id = "Examples"
             )
-
-            exps_l = get_samples()
-            exps = gr.Dataset(components=[text_prompt],
-                              label="Prompt Examples",
-                              samples=exps_l,
-                              )
-            #exp_refresh = gr.Button(value="Click Refresh to get newly prompt examples")
-            #exp_refresh.click(get_samples, None, exps)
-            exps.select(lambda x: x[0], exps, text_prompt)
-    '''
+    '''
 
     text_button.click(generate_txt2img, inputs=[current_model, text_prompt, negative_prompt, image_style], outputs=image_output)
+    select_button.click(generate_txt2img, inputs=[current_model, select_prompt, negative_prompt, image_style], outputs=image_output)
+
    demo.load(get_params, None, current_model)
 
demo.launch(show_api=False)
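
Taken together, the change adds a second generation path: get_samples() now returns ten random prompts from the dataset, list_prompts = get_samples() makes them available to the UI, the previous gr.Examples / gr.Dataset block is commented out, and a new dropdown plus button send the selected prompt to generate_txt2img. The following is a minimal, self-contained sketch of that pattern, not the Space's actual code: it assumes the dataset's train split has a "Prompt" column (as the diff suggests) and substitutes a hypothetical generate() stub for generate_txt2img and the InferenceClient, so it runs without an inference backend.

    # Minimal sketch of the prompt-sampling + dropdown-selection pattern in this commit.
    # Assumptions: the "Gustavosta/Stable-Diffusion-Prompts" train split has a "Prompt" column,
    # and generate() stands in for the Space's generate_txt2img / InferenceClient call.
    import gradio as gr
    from datasets import load_dataset

    dataset = load_dataset("Gustavosta/Stable-Diffusion-Prompts")
    prompt_df = dataset["train"].to_pandas()

    def get_samples():
        # Draw 10 random example prompts from the dataset, as get_samples() does in app.py.
        return prompt_df.sample(n=10)["Prompt"].tolist()

    def generate(prompt):
        # Placeholder for generate_txt2img(model, prompt, negative_prompt, style).
        return f"would generate an image for: {prompt}"

    list_prompts = get_samples()

    with gr.Blocks() as demo:
        with gr.Row():
            text_prompt = gr.Textbox(label="Prompt", lines=2)
            text_button = gr.Button("Generate from typed prompt", variant="primary")
        with gr.Row():
            select_prompt = gr.Dropdown(label="Example prompt", choices=list_prompts, value=list_prompts[0])
            select_button = gr.Button("Generate from selected prompt", variant="primary")
        output = gr.Textbox(label="Result")

        # Both buttons reuse the same handler; only the prompt source differs.
        text_button.click(generate, inputs=text_prompt, outputs=output)
        select_button.click(generate, inputs=select_prompt, outputs=output)

    demo.launch()

Both buttons reuse one handler, which mirrors the design choice the commit makes with generate_txt2img: adding another prompt source later only requires one more input component and one more .click wiring.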