multimodalart (HF staff) committed
Commit c29fb74 (1 parent: 5cecf5c)

Update app.py

Files changed (1): app.py (+3, -3)
app.py CHANGED
@@ -10,13 +10,13 @@ dtype = torch.bfloat16
 device = "cuda"
 
 bfl_repo = "black-forest-labs/FLUX.1-schnell"
-scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained (bfl_repo, subfolder="scheduler", revision="refs/pr/1")
+scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(bfl_repo, subfolder="scheduler", revision="refs/pr/1")
 text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
 tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
 text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype, revision="refs/pr/1")
 tokenizer_2 = T5TokenizerFast.from_pretrained(bfl_repo, subfolder="tokenizer_2", torch_dtype=dtype, revision="refs/pr/1")
-vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-schnell", subfolder="vae", torch_dtype=dtype)
-transformer = FluxTransformer2DModel.from_pretrained("black-forest-labs/FLUX.1-schnell", subfolder="transformer", torch_dtype=dtype, revision="refs/pr/1")
+vae = AutoencoderKL.from_pretrained(bfl_repo, subfolder="vae", torch_dtype=dtype, revision="refs/pr/1")
+transformer = FluxTransformer2DModel.from_pretrained(bfl_repo, subfolder="transformer", torch_dtype=dtype, revision="refs/pr/1")
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
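For context on why these components are loaded one by one: elsewhere in app.py they are typically assembled into a diffusers FluxPipeline. The sketch below illustrates that pattern only; it is not code from this commit. The prompt, seed, and sampling settings are assumed values, and the scheduler, text encoders, tokenizers, vae, and transformer variables refer to the objects loaded in the diff above.

# Illustrative sketch (not part of this commit): wiring the components loaded
# above into a FluxPipeline. Prompt, seed, and step count are assumptions.
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline(
    scheduler=scheduler,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    text_encoder_2=text_encoder_2,
    tokenizer_2=tokenizer_2,
    vae=vae,
    transformer=transformer,
).to(device)

image = pipe(
    prompt="a tiny astronaut hatching from an egg on the moon",  # hypothetical prompt
    num_inference_steps=4,   # FLUX.1-schnell is a few-step distilled model
    guidance_scale=0.0,      # schnell does not use classifier-free guidance
    generator=torch.Generator(device=device).manual_seed(0),
).images[0]

Constructing the pipeline from pre-loaded parts like this is what lets the code pin each component to a specific dtype and revision (here "refs/pr/1"), which is the point of the three lines changed in this commit.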