dn6 HF staff committed on
Commit
01903c6
1 Parent(s): 1eb6952

initial commit

Browse files
Files changed (3) hide show
  1. README.md +5 -5
  2. app.py +119 -0
  3. requirements.txt +5 -0
README.md CHANGED
@@ -1,13 +1,13 @@
1
  ---
2
  title: FLUX GIFs
3
- emoji: 🔥
4
- colorFrom: green
5
- colorTo: gray
6
  sdk: gradio
7
- sdk_version: 4.42.0
8
  app_file: app.py
9
  pinned: false
10
  license: mit
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: FLUX GIFs
3
+ emoji: 🖥️
4
+ colorFrom: blue
5
+ colorTo: pink
6
  sdk: gradio
7
+ sdk_version: 4.40.0
8
  app_file: app.py
9
  pinned: false
10
  license: mit
11
  ---
12
 
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+
3
+ import gradio as gr
4
+ import numpy as np
5
+ import torch
6
+ import spaces
7
+ from diffusers import FluxPipeline
8
+ from PIL import Image
9
+ from diffusers.utils import export_to_gif
10
+
11
+ HEIGHT = 256
12
+ WIDTH = 1024
13
+ MAX_SEED = np.iinfo(np.int32).max
14
+
15
+ device = "cuda" if torch.cuda.is_available() else "cpu"
16
+ pipe = FluxPipeline.from_pretrained(
17
+ "black-forest-labs/FLUX.1-dev",
18
+ torch_dtype=torch.bfloat16
19
+ ).to("device")
20
+
21
def split_image(input_image, num_splits=4):
    """Slice a wide horizontal strip into `num_splits` consecutive 256x256 frames.

    Frame i is the crop covering x in [i*256, (i+1)*256) at the full 256px
    height. Frames are returned left to right as a list of cropped images.
    """
    frame_size = 256
    return [
        input_image.crop((i * frame_size, 0, (i + 1) * frame_size, frame_size))
        for i in range(num_splits)
    ]
33
+
34
@spaces.GPU(duration=190)
def predict(prompt, seed=42, randomize_seed=False, guidance_scale=5.0, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
    """Generate a 4-frame looping GIF for `prompt`.

    Returns a (gif_path, seed) tuple so the UI can display the GIF and echo
    the seed that was actually used.
    """
    # Wrap the user prompt so FLUX renders four consecutive stills in one strip.
    prompt_template = f"""
    A side by side 4 frame image showing consecutive stills from a looped gif moving from left to right.
    The gif is of {prompt}.
    """

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Seed on CPU so the result is reproducible regardless of the compute device.
    generator = torch.Generator("cpu").manual_seed(seed)

    result = pipe(
        prompt=prompt_template,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=1,
        generator=generator,
        height=HEIGHT,
        width=WIDTH,
    )
    strip = result.images[0]

    # Cut the wide strip into frames and assemble them into an animated GIF.
    frames = split_image(strip, 4)
    gif_path = export_to_gif(frames, "flux.gif", fps=4)
    return gif_path, seed
55
+
56
# BUG FIX: the original first built `demo = gr.Interface(fn=predict, inputs="text",
# outputs="image")`, which was immediately shadowed by the `with gr.Blocks(...) as
# demo` below — dead code (and a mismatched interface), so it is removed.

# Narrow, centered layout for the main column.
css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""

examples = [
    "a cat waving its paws in the air",
    "a panda moving their hips from side to side",
]

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("Create GIFs with Flux-dev. Based on @fofr's [tweet](https://x.com/fofrAI/status/1828910395962343561)")
        with gr.Row():
            # NOTE(review): the first positional argument of gr.Text is `value`,
            # so this pre-fills the textbox with "Prompt" rather than labeling
            # it — confirm this is intended before changing it.
            prompt = gr.Text("Prompt", show_label=False, max_lines=1, placeholder="Enter your prompt")
            submit = gr.Button("Submit", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )

            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance Scale",
                    minimum=1,
                    maximum=15,
                    step=0.1,
                    value=3.5,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=28,
                )

        # NOTE(review): gr.Image's first positional argument is `value`; "GIF"
        # here is treated as an initial value/path, not a label — verify.
        output = gr.Image("GIF", show_label=False)
        gr.Examples(
            examples=examples,
            fn=predict,
            inputs=[prompt],
            outputs=[output, seed],
            cache_examples="lazy",
        )

    # Run predict on either the Submit button or pressing Enter in the textbox.
    gr.on(
        triggers=[submit.click, prompt.submit],
        fn=predict,
        inputs=[prompt, seed, randomize_seed, guidance_scale, num_inference_steps],
        outputs=[output, seed],
    )

demo.launch()
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ accelerate
2
+ git+https://github.com/huggingface/diffusers.git
3
+ torch
4
+ transformers==4.42.4
5
+ sentencepiece