kaytoo2022 committed
Commit 4aaf289 (verified) · Parent(s): 7efe5dc

Update app.py

Files changed (1): app.py (+4, -4)
app.py CHANGED
@@ -13,15 +13,15 @@ torch.backends.cuda.matmul.allow_tf32 = True
 base_model = "black-forest-labs/FLUX.1-dev"
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
 
-lora_repo = "kaytoo2022/kaytoo2022-flux"
+lora_repo = "kaytoo2022/jguan_35-flux"
 trigger_word = "" # Leave trigger_word blank if not used.
-pipe.load_lora_weights(lora_repo, adapter_name='kaytoo')
+pipe.load_lora_weights(lora_repo, adapter_name='jguan')
 
 # ghibsky
 lora_repo_2 = "aleksa-codes/flux-ghibsky-illustration"
 pipe.load_lora_weights(lora_repo_2, adapter_name='lora_2')
 
-pipe.set_adapters(["kaytoo", "lora_2"], adapter_weights=[0.85, 0.0])
+pipe.set_adapters(["jguan", "lora_2"], adapter_weights=[0.85, 0.0])
 
 pipe.to("cuda")
 
@@ -34,7 +34,7 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
 seed = random.randint(0, MAX_SEED)
 generator = torch.Generator(device="cuda").manual_seed(seed)
 
-pipe.set_adapters(["kaytoo", "lora_2"], adapter_weights=[lora_scale, lora_scale_2])
+pipe.set_adapters(["jguan", "lora_2"], adapter_weights=[lora_scale, lora_scale_2])
 
 # Update progress bar (0% at start)
 progress(0, "Starting image generation...")