Commit e89b310 (parent: 9110980): Update app.py

app.py CHANGED
@@ -39,7 +39,7 @@ model_to_load = model_v1
 with zipfile.ZipFile("mix.zip", 'r') as zip_ref:
     zip_ref.extractall(".")
 
-def swap_text(option):
+def swap_text(option, base):
     mandatory_liability = "You must have the right to do so and you are liable for the images you use, example:"
     if(option == "object"):
         instance_prompt_example = "cttoy"
@@ -48,6 +48,7 @@ def swap_text(option):
     elif(option == "person"):
         instance_prompt_example = "julcto"
         freeze_for = 70
+        show_prior_preservation = True if base != "v2-768" else False
         return [f"You are going to train a `person`(s), upload 10-20 images of each person you are planning on training on from different angles/perspectives. {mandatory_liability}:", '''<img src="file/person.png" />''', f"You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `{instance_prompt_example}` here). Images will be automatically cropped to 512x512.", freeze_for, gr.update(visible=True)]
     elif(option == "style"):
         instance_prompt_example = "trsldamrl"
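The new show_prior_preservation flag is derived from the base model but is not consumed by the return statement shown in this hunk, so it presumably feeds a prior-preservation control elsewhere in the function. Note the ternary reduces to the comparison itself; a minimal sketch (the helper name is illustrative, not from the commit):

def should_show_prior_preservation(base: str) -> bool:
    # Equivalent to the diff's `True if base != "v2-768" else False`;
    # the comparison already yields a bool. Only v2-768 hides the option.
    return base != "v2-768"

assert should_show_prior_preservation("v1-5")
assert not should_show_prior_preservation("v2-768")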
@@ -74,6 +75,8 @@ def count_files(*inputs):
         file_counter+=len(files)
     uses_custom = inputs[-1]
     type_of_thing = inputs[-4]
+    selected_model = inputs[-5]
+    experimental_faces = inputs[-6]
     if(uses_custom):
         Training_Steps = int(inputs[-3])
     else:
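count_files(*inputs) receives every wired component's value as one flat tuple, so the handler addresses the tail by negative index; the two values added here slot in at -5 and -6, leaving the existing -1/-3/-4 offsets untouched. A sketch of the convention (the order is taken from the .change wiring further down; the helper is illustrative):

def unpack_summary_inputs(inputs):
    # Order, per the .change calls below: file_collection values first, then
    # thing_experimental, base_model_to_use, type_of_thing, steps,
    # perc_txt_encoder, swap_auto_calculated.
    return {
        "files": inputs[:-6],              # one value per upload widget
        "experimental_faces": inputs[-6],  # new in this commit
        "selected_model": inputs[-5],      # new in this commit
        "type_of_thing": inputs[-4],
        "steps": inputs[-3],
        "perc_txt_encoder": inputs[-2],
        "uses_custom": inputs[-1],
    }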
@@ -83,9 +86,19 @@ def count_files(*inputs):
     elif(Training_Steps < 1400):
         Training_Steps=1400
     if(is_spaces):
-
-
-
+        if(selected_model == "v1-5"):
+            its = 1.1
+            if(experimental_faces):
+                its = 1
+        elif(selected_model == "v2-512"):
+            its = 0.8
+            if(experimental_faces):
+                its = 0.7
+        elif(selected_model == "v2-768"):
+            its = 0.5
+        summary_sentence = f'''You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps. The training should take around {round(Training_Steps/its, 2)} seconds, or {round((Training_Steps/its)/60, 2)} minutes.
+The setup, compression and uploading of the model can take up to 20 minutes.<br>As the T4-Small GPU costs US$0.60 for 1h, <span style="font-size: 120%"><b>the estimated cost for this training is US${round((((Training_Steps/its)/3600)+0.3+0.1)*0.60, 2)}.</b></span><br><br>
+If you check the box below, the GPU attribution will be automatically removed after training is done and the model is uploaded. If not, don't forget to come back here and swap the hardware back to CPU.<br><br>'''
     else:
         summary_sentence = f'''You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps.<br><br>'''
 
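The new Spaces branch encodes a simple cost model: its is the assumed training speed in iterations per second for the selected base model (lower for v2, lower again on the experimental faces path), wall-clock time is Training_Steps / its seconds, and the price adds a fixed 0.3 h + 0.1 h of setup/upload overhead before multiplying by the quoted T4-Small rate of US$0.60/h. A worked sketch of the same arithmetic:

def estimate_cost(training_steps: int, its: float) -> tuple[float, float]:
    # Mirrors the expressions embedded in the diff's summary_sentence.
    t4_small_usd_per_hour = 0.60          # rate quoted in the string
    seconds = training_steps / its        # its = iterations per second
    hours = seconds / 3600 + 0.3 + 0.1    # fixed setup/upload overhead
    return round(seconds / 60, 2), round(hours * t4_small_usd_per_hour, 2)

print(estimate_cost(1400, 1.1))  # v1-5 at the 1400-step floor: (21.21, 0.45)
print(estimate_cost(1400, 0.5))  # v2-768: (46.67, 0.71)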
@@ -548,7 +561,7 @@ with gr.Blocks(css=css) as demo:
     convert_button = gr.Button("Convert to CKPT", visible=False)
 
     #Swap the examples and the % of text encoder trained depending if it is an object, person or style
-    type_of_thing.change(fn=swap_text, inputs=[type_of_thing], outputs=[thing_description, thing_image_example, things_naming, perc_txt_encoder, thing_experimental], queue=False, show_progress=False)
+    type_of_thing.change(fn=swap_text, inputs=[type_of_thing, base_model_to_use], outputs=[thing_description, thing_image_example, things_naming, perc_txt_encoder, thing_experimental], queue=False, show_progress=False)
 
     #Swap the base model
     base_model_to_use.change(fn=swap_base_model, inputs=base_model_to_use, outputs=[])
@@ -556,10 +569,10 @@ with gr.Blocks(css=css) as demo:
     #Update the summary box below the UI according to how many images are uploaded and whether users are using custom settings or not
     for file in file_collection:
         #file.change(fn=update_steps,inputs=file_collection, outputs=steps)
-        file.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
+        file.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
 
-    steps.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
-    perc_txt_encoder.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
+    steps.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
+    perc_txt_encoder.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
 
     #Give more options if the user wants to finish everything after training
     if(is_spaces):
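All three triggers pass the same extended inputs list, and its order is exactly what the negative indices in count_files rely on. One way to keep the three calls from drifting apart is to build the list once; a refactoring sketch, not part of the commit:

# Build the shared inputs list once so the component order that count_files
# depends on stays identical across all three .change() registrations.
summary_inputs = (file_collection
                  + [thing_experimental, base_model_to_use, type_of_thing,
                     steps, perc_txt_encoder, swap_auto_calculated])
for trigger in file_collection + [steps, perc_txt_encoder]:
    trigger.change(fn=count_files, inputs=summary_inputs,
                   outputs=[training_summary, training_summary_text],
                   queue=False)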
|