Update app.py
app.py CHANGED
@@ -1,44 +1,41 @@
 import gradio as gr
 import torch
 from diffusers import FluxPriorReduxPipeline, FluxPipeline
-from diffusers.utils import load_image
-from huggingface_hub import login
 from PIL import Image
+from huggingface_hub import login
 import os

 # Set the Hugging Face Hub API key
 login(os.getenv("HF_API_KEY"))

-#
-pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
-    "black-forest-labs/FLUX.1-Redux-dev",
-    use_auth_token=True  # Authenticate with the API key
-).to("cpu")  # Changed to CPU
-
-# Load the main FLUX pipeline (CPU-compatible)
-pipe = FluxPipeline.from_pretrained(
-    "black-forest-labs/FLUX.1-dev",
-    use_auth_token=True,  # Authenticate with the API key
-    text_encoder=None,
-    text_encoder_2=None
-).to("cpu")  # Changed to CPU
-
+# To achieve lazy loading, the models are loaded inside the function
 def process_image(image_path):
-    #
+    # Load and resize the input image
     image = Image.open(image_path).convert("RGB")
+    image = image.resize((256, 256))  # Limit the size to 256x256 to save memory

-    # Prior Redux
+    # Load and run the Prior Redux pipeline
+    pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
+        "black-forest-labs/FLUX.1-Redux-dev",
+        use_auth_token=True
+    ).to("cpu")
     pipe_prior_output = pipe_prior_redux(image)

-    # FLUX
+    # Load and run the FLUX pipeline
+    pipe = FluxPipeline.from_pretrained(
+        "black-forest-labs/FLUX.1-dev",
+        use_auth_token=True,
+        text_encoder=None,
+        text_encoder_2=None
+    ).to("cpu")
     images = pipe(
         guidance_scale=2.5,
-        num_inference_steps=
-        generator=torch.Generator("cpu").manual_seed(0),  #
+        num_inference_steps=25,  # Fewer inference steps to save memory
+        generator=torch.Generator("cpu").manual_seed(0),  # Seed value for reproducibility
         **pipe_prior_output,
     ).images

-    #
+    # Return the resulting image
     return images[0]

 # Build the Gradio interface
@@ -47,8 +44,8 @@ def infer(image):
     return result_image

 with gr.Blocks() as demo:
-    gr.Markdown("# FLUX Image Generation App")
-
+    gr.Markdown("# FLUX Image Generation App (Optimized for CPU)")
+
     with gr.Row():
         input_image = gr.Image(type="filepath", label="Input Image")
         output_image = gr.Image(type="pil", label="Generated Image")