Spaces: Running on L40S
Commit: Update app.py (Browse files)
File changed: app.py
@@ -21,7 +21,16 @@ from diffusers import FluxPipeline
 21  from transformers import pipeline
 22  from typing import Tuple, Dict, Any  # Tuple import 추가
 23
 24
 25
 26  # Hugging Face 토큰 설정
 27  HF_TOKEN = os.getenv("HF_TOKEN")
@@ -328,15 +337,15 @@ if __name__ == "__main__":
 328      pipe = FluxPipeline.from_pretrained(
 329          "black-forest-labs/FLUX.1-dev",
 330          torch_dtype=torch.bfloat16,
-331          use_auth_token=HF_TOKEN,
-332          device_map="auto"
 333      )
 334
 335      # Hyper-SD LoRA 로드
 336      lora_path = hf_hub_download(
 337          "ByteDance/Hyper-SD",
 338          "Hyper-FLUX.1-dev-8steps-lora.safetensors",
-339          use_auth_token=HF_TOKEN
 340      )
 341      pipe.load_lora_weights(lora_path)
 342      pipe.fuse_lora(lora_scale=0.125)
|
|
21 |
from transformers import pipeline
|
22 |
from typing import Tuple, Dict, Any # Tuple import 추가
|
23 |
|
24 |
+
# CUDA 메모리 관리 설정
|
25 |
+
torch.cuda.empty_cache()
|
26 |
+
torch.backends.cuda.matmul.allow_tf32 = True
|
27 |
+
torch.backends.cudnn.benchmark = True
|
28 |
|
29 |
+
# 환경 변수 설정
|
30 |
+
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
|
31 |
+
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
|
32 |
+
os.environ['SPCONV_ALGO'] = 'native'
|
33 |
+
os.environ['SPARSE_BACKEND'] = 'native'
|
34 |
|
35 |
# Hugging Face 토큰 설정
|
36 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
|
|
337 |
pipe = FluxPipeline.from_pretrained(
|
338 |
"black-forest-labs/FLUX.1-dev",
|
339 |
torch_dtype=torch.bfloat16,
|
340 |
+
use_auth_token=HF_TOKEN,
|
341 |
+
device_map="balanced" # "auto"에서 "balanced"로 변경
|
342 |
)
|
343 |
|
344 |
# Hyper-SD LoRA 로드
|
345 |
lora_path = hf_hub_download(
|
346 |
"ByteDance/Hyper-SD",
|
347 |
"Hyper-FLUX.1-dev-8steps-lora.safetensors",
|
348 |
+
use_auth_token=HF_TOKEN
|
349 |
)
|
350 |
pipe.load_lora_weights(lora_path)
|
351 |
pipe.fuse_lora(lora_scale=0.125)
|