Commit a001849
1 Parent(s): f87de06
Update app.py

app.py CHANGED
@@ -32,8 +32,8 @@ maximum_concepts = 3
 #Pre download the files
 if(is_gpu_associated):
     model_v1 = snapshot_download(repo_id="multimodalart/sd-fine-tunable")
-    model_v2 = snapshot_download(repo_id="stabilityai/stable-diffusion-2")
-    model_v2_512 = snapshot_download(repo_id="stabilityai/stable-diffusion-2-base")
+    model_v2 = snapshot_download(repo_id="stabilityai/stable-diffusion-2-1")
+    model_v2_512 = snapshot_download(repo_id="stabilityai/stable-diffusion-2-1-base")
     safety_checker = snapshot_download(repo_id="multimodalart/sd-sc")
     model_to_load = model_v1
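The pre-download block above relies on huggingface_hub.snapshot_download, which fetches a full model repo into the local cache and returns the path of that local snapshot; those returned strings are what model_to_load later holds. A minimal sketch of the same calls in isolation, assuming huggingface_hub is installed and the repos are publicly accessible:

from huggingface_hub import snapshot_download

# Each call downloads (or reuses) a cached copy of the repo and returns its local folder path.
model_v2 = snapshot_download(repo_id="stabilityai/stable-diffusion-2-1")
model_v2_512 = snapshot_download(repo_id="stabilityai/stable-diffusion-2-1-base")
print(model_v2, model_v2_512)  # local cache directories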
@@ -41,7 +41,7 @@ with zipfile.ZipFile("mix.zip", 'r') as zip_ref:
     zip_ref.extractall(".")
 
 def swap_text(option, base):
-    resize_width = 768 if base == "v2-768" else 512
+    resize_width = 768 if base == "v2-1-768" else 512
     mandatory_liability = "You must have the right to do so and you are liable for the images you use, example:"
     if(option == "object"):
         instance_prompt_example = "cttoy"
@@ -50,7 +50,7 @@ def swap_text(option, base):
     elif(option == "person"):
         instance_prompt_example = "julcto"
         freeze_for = 70
-        #show_prior_preservation = True if base != "v2-768" else False
+        #show_prior_preservation = True if base != "v2-1-768" else False
         show_prior_preservation=False
         if(show_prior_preservation):
             prior_preservation_box_update = gr.update(visible=show_prior_preservation)
@@ -67,7 +67,7 @@ def swap_base_model(selected_model):
     global model_to_load
     if(selected_model == "v1-5"):
         model_to_load = model_v1
-    elif(selected_model == "v2-768"):
+    elif(selected_model == "v2-1-768"):
         model_to_load = model_v2
     else:
         model_to_load = model_v2_512
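With the renamed choices, swap_base_model maps "v1-5" to model_v1, "v2-1-768" to model_v2, and anything else (i.e. "v2-1-512") to model_v2_512. The same selection can be restated as a dictionary lookup; this is only an illustration, not the app's code, and pick_model is a hypothetical name:

def pick_model(selected_model, model_v1, model_v2, model_v2_512):
    # Unknown values fall back to the 512 base, mirroring the else branch above.
    return {
        "v1-5": model_v1,
        "v2-1-768": model_v2,
        "v2-1-512": model_v2_512,
    }.get(selected_model, model_v2_512)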
@@ -96,11 +96,11 @@ def count_files(*inputs):
             its = 1.1
             if(experimental_faces):
                 its = 1
-        elif(selected_model == "v2-512"):
+        elif(selected_model == "v2-1-512"):
             its = 0.8
             if(experimental_faces):
                 its = 0.7
-        elif(selected_model == "v2-768"):
+        elif(selected_model == "v2-1-768"):
             its = 0.5
         summary_sentence = f'''You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps. The training should take around {round(Training_Steps/its, 2)} seconds, or {round((Training_Steps/its)/60, 2)} minutes.
         The setup, compression and uploading the model can take up to 20 minutes.<br>As the T4-Small GPU costs US$0.60 for 1h, <span style="font-size: 120%"><b>the estimated cost for this training is below US${round((((Training_Steps/its)/3600)+0.3+0.1)*0.60, 2)}.</b></span><br><br>
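In the estimate above, its is an iterations-per-second figure that depends on the selected base model (1.1 for v1-5, 0.8 for v2-1-512, 0.5 for v2-1-768, slightly lower on the first two when experimental faces are enabled), so the training time is Training_Steps / its seconds, and the cost line adds a flat 0.3 + 0.1 hours of setup/compression/upload overhead before multiplying by the US$0.60/h T4-Small rate. A worked example with a hypothetical Training_Steps of 1500 (an assumed value, not one taken from the app):

Training_Steps = 1500
its = 0.5                              # v2-1-768 without experimental faces
train_seconds = Training_Steps / its   # 3000.0 s
train_minutes = train_seconds / 60     # 50.0 min
# flat overhead of 0.3 h + 0.1 h, billed at the US$0.60/h T4-Small rate
cost = round(((train_seconds / 3600) + 0.3 + 0.1) * 0.60, 2)
print(train_minutes, cost)             # 50.0 0.74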
@@ -171,7 +171,7 @@ def train(*inputs):
     if os.path.exists("hastrained.success"): os.remove("hastrained.success")
     file_counter = 0
     which_model = inputs[-10]
-    resolution = 512 if which_model != "v2-768" else 768
+    resolution = 512 if which_model != "v2-1-768" else 768
     for i, input in enumerate(inputs):
         if(i < maximum_concepts-1):
             if(input):
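Here which_model is read from near the end of the packed Gradio inputs tuple, and the resolution rule trains at 768 px only for the v2-1-768 base; both v1-5 and v2-1-512 train at 512 px. A quick check of that rule for the three dropdown values:

for which_model in ["v1-5", "v2-1-512", "v2-1-768"]:
    resolution = 512 if which_model != "v2-1-768" else 768
    print(which_model, resolution)  # 512, 512, 768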
@@ -498,7 +498,7 @@ with gr.Blocks(css=css) as demo:
 
     with gr.Row() as what_are_you_training:
         type_of_thing = gr.Dropdown(label="What would you like to train?", choices=["object", "person", "style"], value="object", interactive=True)
-        base_model_to_use = gr.Dropdown(label="Which base model would you like to use?", choices=["v1-5", "v2-512", "v2-768"], value="v1-5", interactive=True)
+        base_model_to_use = gr.Dropdown(label="Which base model would you like to use?", choices=["v1-5", "v2-1-512", "v2-1-768"], value="v1-5", interactive=True)
 
     #Very hacky approach to emulate dynamically created Gradio components
     with gr.Row() as upload_your_concept:
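The dropdown values are plain strings, so every comparison in swap_text, swap_base_model, count_files, and train has to use exactly the same literals as the choices list. The event wiring is outside this diff, so the following is only a hedged sketch of how such a dropdown typically feeds a callback in Gradio; the simplified swap_base_model below is a stand-in, not the app's function:

import gradio as gr

model_to_load = "model_v1"  # stand-in for the global set up earlier in app.py

def swap_base_model(selected_model):
    # Simplified stand-in: the real function assigns one of the downloaded snapshot paths.
    global model_to_load
    model_to_load = selected_model

with gr.Blocks() as demo:
    base_model_to_use = gr.Dropdown(
        label="Which base model would you like to use?",
        choices=["v1-5", "v2-1-512", "v2-1-768"],
        value="v1-5",
        interactive=True,
    )
    # On change, the selected string is passed to the callback.
    base_model_to_use.change(fn=swap_base_model, inputs=base_model_to_use, outputs=None)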