Commit 3a37af7 (parent: 7b7018c), committed by zerhero

Add more default option variables

Files changed (6):
  1. app.py +46 -33
  2. config.py +5 -4
  3. gui.py +0 -1
  4. models/checkpoints.py +1 -0
  5. requirements.txt +2 -1
  6. utils/string_utils.py +16 -3
app.py CHANGED
@@ -18,6 +18,8 @@ from stablepy import (
     SDXL_TASKS,
 )
 from config import (
+    DEFAULT_STEPS,
+    DEFAULT_CFG,
     MINIMUM_IMAGE_NUMBER,
     MAXIMUM_IMAGE_NUMBER,
     DEFAULT_NEGATIVE_PROMPT,
@@ -34,6 +36,13 @@ from models.embeds import download_embeds
 from examples.examples import example_prompts
 from utils.download_utils import download_things
 from utils.model_utils import get_model_list
+import gradio as gr
+import logging
+from utils.string_utils import extract_parameters
+from stablepy import logger
+import diffusers
+import warnings
+from gui import GuiSD
 
 # LOAD ALL ENV TOKEN
 CIVITAI_API_KEY: str = os.environ.get("CIVITAI_API_KEY")
@@ -41,6 +50,7 @@ hf_token: str = os.environ.get("HF_TOKEN")
 
 task_model_list = list(task_stablepy.keys())
 
+# [Create directories]
 directory_models: str = 'models'
 os.makedirs(
     directory_models,
@@ -140,17 +150,9 @@ print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
 #######################
 # GUI
 #######################
-import gradio as gr
-import logging
-from utils.string_utils import extract_parameters
-from stablepy import logger
-
 logging.getLogger("diffusers").setLevel(logging.ERROR)
-import diffusers
 
 diffusers.utils.logging.set_verbosity(40)
-import warnings
-from gui import GuiSD
 
 warnings.filterwarnings(
     action="ignore",
@@ -171,7 +173,7 @@ warnings.filterwarnings(
 logger.setLevel(logging.DEBUG)
 
 # init GuiSD
-sd_gen: object = GuiSD(
+SD_GEN: GuiSD = GuiSD(
     model_list=model_list,
     task_stablepy=task_stablepy,
     lora_model_list=lora_model_list,
@@ -219,7 +221,6 @@ with gr.Blocks(css=CSS) as app:
 with gr.Tab("Generation"):
     with gr.Row():
         with gr.Column(scale=2):
-
             task_gui = gr.Dropdown(
                 label="Task",
                 choices=sdxl_task,
@@ -281,14 +282,14 @@ with gr.Blocks(css=CSS) as app:
     minimum=1,
     maximum=100,
     step=1,
-    value=43,
+    value=DEFAULT_STEPS,
     label="Steps"
 )
 cfg_gui = gr.Slider(
     minimum=0,
     maximum=30,
     step=0.5,
-    value=7.5,
+    value=DEFAULT_CFG,
     label="CFG"
 )
 sampler_gui = gr.Dropdown(
@@ -318,7 +319,7 @@ with gr.Blocks(css=CSS) as app:
 )
 with gr.Row():
     clip_skip_gui = gr.Checkbox(
-        value=True,
+        value=False,
         label="Layer 2 Clip Skip"
     )
     free_u_gui = gr.Checkbox(
@@ -344,7 +345,6 @@ with gr.Blocks(css=CSS) as app:
 
 parameters: dict = extract_parameters(base_prompt)
 for key, val in parameters.items():
-    # print(val)
     if key in valid_keys:
         if key == "Sampler":
             if val not in scheduler_names:
@@ -365,8 +365,6 @@ with gr.Blocks(css=CSS) as app:
 if key == "Seed":
     continue
 valid_receptors[key] = gr.update(value=val)
-# print(val, type(val))
-# print(valid_receptors)
 return [value for value in valid_receptors.values()]
 
 
@@ -409,6 +407,7 @@ with gr.Blocks(css=CSS) as app:
     value=1,
     label="Images"
 )
+
 prompt_s_options = [
     ("Classic format: (word:weight)", "Classic"),
     ("Compel format: (word)weight", "Compel"),
@@ -417,6 +416,7 @@ with gr.Blocks(css=CSS) as app:
     ("Classic-ignore", "Classic-ignore"),
     ("None", "None"),
 ]
+
 prompt_syntax_gui = gr.Dropdown(
     label="Prompt Syntax",
     choices=prompt_s_options,
@@ -448,6 +448,7 @@ with gr.Blocks(css=CSS) as app:
     value=1.4,
     label="Upscale by"
 )
+
 esrgan_tile_gui = gr.Slider(
     minimum=0,
     value=100,
@@ -571,7 +572,10 @@ with gr.Blocks(css=CSS) as app:
     ]
 )
 
-with gr.Accordion("IP-Adapter", open=False, visible=True):  # IP-Adapter
+with gr.Accordion(
+        "IP-Adapter",
+        open=False,
+        visible=True):  # IP-Adapter
     IP_MODELS = sorted(
         list(
             set(
@@ -773,9 +777,10 @@ with gr.Blocks(css=CSS) as app:
     open=False,
     visible=True):
 
+# noinspection PyBroadException
 try:
-    style_names_found = sd_gen.model.STYLE_NAMES
-except:
+    style_names_found = SD_GEN.model.STYLE_NAMES
+except Exception as e:
     style_names_found = STYLE_NAMES
 
 style_prompt_gui = gr.Dropdown(
@@ -790,18 +795,18 @@ with gr.Blocks(css=CSS) as app:
 
 
 def load_json_style_file(json):
-    if not sd_gen.model:
+    if not SD_GEN.model:
         gr.Info("First load the model")
         return gr.update(
             value=None,
             choices=STYLE_NAMES
         )
 
-    sd_gen.model.load_style_file(json)
-    gr.Info(f"{len(sd_gen.model.STYLE_NAMES)} styles loaded")
+    SD_GEN.model.load_style_file(json)
+    gr.Info(f"{len(SD_GEN.model.STYLE_NAMES)} styles loaded")
     return gr.update(
         value=None,
-        choices=sd_gen.model.STYLE_NAMES
+        choices=SD_GEN.model.STYLE_NAMES
     )
 
 
@@ -825,10 +830,16 @@ with gr.Blocks(css=CSS) as app:
     open=False,
     visible=True):
     # Adetailer Inpaint Only
-    adetailer_inpaint_only_gui = gr.Checkbox(label="Inpaint only", value=True)
+    adetailer_inpaint_only_gui = gr.Checkbox(
+        label="Inpaint only",
+        value=True
+    )
 
     # Adetailer Verbose
-    adetailer_verbose_gui = gr.Checkbox(label="Verbose", value=False)
+    adetailer_verbose_gui = gr.Checkbox(
+        label="Verbose",
+        value=False
+    )
 
     # Adetailer Sampler
     adetailer_sampler_options = ["Use same sampler"] + scheduler_names[:-1]
@@ -1003,9 +1014,9 @@ with gr.Blocks(css=CSS) as app:
     value=False,
     label="Retain Compel Previous Load"
 )
-retain_detailfix_model_previous_load_gui = gr.Checkbox(
+retain_detail_fix_model_previous_load_gui = gr.Checkbox(
     value=False,
-    label="Retain Detailfix Model Previous Load"
+    label="Retain Detail fix Model Previous Load"
 )
 retain_hires_model_previous_load_gui = gr.Checkbox(
     value=False,
@@ -1017,7 +1028,10 @@ with gr.Blocks(css=CSS) as app:
 )
 
 # example and Help Section
-with gr.Accordion("Examples and help", open=False, visible=True):
+with gr.Accordion(
+        "Examples and help",
+        open=False,
+        visible=True):
     gr.Markdown(
         """### Help:
         - The current space runs on a ZERO GPU which is assigned for approximately 60 seconds; Therefore, \
@@ -1046,7 +1060,7 @@ with gr.Blocks(css=CSS) as app:
 
 gr.Examples(
     examples=example_prompts,
-    fn=sd_gen.generate_pipeline,
+    fn=SD_GEN.generate_pipeline,
     inputs=[
         prompt_gui,
         neg_prompt_gui,
@@ -1095,7 +1109,6 @@ with gr.Blocks(css=CSS) as app:
 )
 
 with gr.Tab("Inpaint mask maker", render=True):
-
     def create_mask_now(img, invert):
         import numpy as np
         import time
@@ -1181,7 +1194,7 @@ with gr.Blocks(css=CSS) as app:
 )
 
 generate_button.click(
-    fn=sd_gen.load_new_model,
+    fn=SD_GEN.load_new_model,
     inputs=[
         model_name_gui,
         vae_model_gui,
@@ -1191,7 +1204,7 @@ with gr.Blocks(css=CSS) as app:
     queue=True,
     show_progress="minimal",
 ).success(
-    fn=sd_gen.generate_pipeline,
+    fn=SD_GEN.generate_pipeline,
     inputs=[
         prompt_gui,
         neg_prompt_gui,
@@ -1252,7 +1265,7 @@ with gr.Blocks(css=CSS) as app:
     save_generated_images_gui,
     image_storage_location_gui,
     retain_compel_previous_load_gui,
-    retain_detailfix_model_previous_load_gui,
+    retain_detail_fix_model_previous_load_gui,
     retain_hires_model_previous_load_gui,
     t2i_adapter_preprocessor_gui,
    adapter_conditioning_scale_gui,
config.py CHANGED
@@ -1,3 +1,7 @@
+# Default value
+DEFAULT_STEPS = 23
+DEFAULT_CFG = 0
+
 MINIMUM_IMAGE_NUMBER = 1
 MAXIMUM_IMAGE_NUMBER = 10
 
@@ -14,14 +18,11 @@ detailed kornea,
 fisheye,
 blush,
 parted lips, gorgeous lips, pink thin lips,
-detailed ear, human ears, human ear, highly detailed ears, highly detailed ear, detailed ears,
 perfect anatomy,
 
 five fingers,
 two hands,
-short girl, narrow body, detailed face, petite,
-medium boobs,
-(armpits, (naval), crop top), outdoor
+short girl, narrow body, detailed face, petite, ((naval)),
 """
 DEFAULT_NEGATIVE_PROMPT = """(EasyNegative:1.05), easynegative, bad_prompt_version2, (poorly rendered), ugly, disfigured,
 cross eyed, cloned face, bad symmetry, bad anatomy, low quality, blurry, text, watermark, logo,
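
For reference, a minimal sketch of how the new module-level defaults are consumed on the app.py side, mirroring the Steps/CFG hunk above. It assumes the repo's config module is importable; the standalone `demo` Blocks container is only illustrative, since the real app builds these sliders inside its existing layout.

import gradio as gr

from config import DEFAULT_STEPS, DEFAULT_CFG  # 23 and 0, as defined above

with gr.Blocks() as demo:
    steps_gui = gr.Slider(
        minimum=1,
        maximum=100,
        step=1,
        value=DEFAULT_STEPS,  # previously hard-coded to 43
        label="Steps"
    )
    cfg_gui = gr.Slider(
        minimum=0,
        maximum=30,
        step=0.5,
        value=DEFAULT_CFG,  # previously hard-coded to 7.5
        label="CFG"
    )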
gui.py CHANGED
@@ -23,7 +23,6 @@ class GuiSD:
     embed_list,
     stream=True):
     self.model = None
-
     print("Loading model...")
     self.model = Model_Diffusers(
         base_model_id="models/animaPencilXL_v500.safetensors",
models/checkpoints.py CHANGED
@@ -1,4 +1,5 @@
 CHECKPOINT_LIST = (
     "https://civitai.com/api/download/models/597138, "  # animaPencilXL
     "https://civitai.com/api/download/models/609052, "  # ponyPencilXL
+    "https://civitai.com/api/download/models/128713, "  # dreamShaper
 )
requirements.txt CHANGED
@@ -1,4 +1,5 @@
 git+https://github.com/R3gm/stablepy.git@test_stream
 torch==2.2.0
 gdown
-opencv-python
+opencv-python
+numpy
utils/string_utils.py CHANGED
@@ -34,17 +34,26 @@ def extract_parameters(input_string: str) -> dict:
         print("Steps not detected")
         parameters["neg_prompt"] = parm[1]
         return parameters
+
     parm = parm[1].split("Steps:")
     parameters["neg_prompt"] = parm[0]
    input_string = "Steps:" + parm[1]
 
     # Extracting Steps
-    steps_match = re.search(r'Steps: (\d+)', input_string)
+    steps_match = re.search(
+        r'Steps: (\d+)',
+        input_string
+    )
+
     if steps_match:
         parameters['Steps'] = int(steps_match.group(1))
 
     # Extracting Size
-    size_match = re.search(r'Size: (\d+x\d+)', input_string)
+    size_match = re.search(
+        r'Size: (\d+x\d+)',
+        input_string
+    )
+
     if size_match:
         parameters['Size'] = size_match.group(1)
         width, height = map(int, parameters['Size'].split('x'))
@@ -52,7 +61,11 @@ def extract_parameters(input_string: str) -> dict:
         parameters['height'] = height
 
     # Extracting other parameters
-    other_parameters = re.findall(r'(\w+): (.*?)(?=, \w+|$)', input_string)
+    other_parameters = re.findall(
+        r'(\w+): (.*?)(?=, \w+|$)',
+        input_string
+    )
+
     for param in other_parameters:
         parameters[param[0]] = param[1].strip('"')
 
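
For context on the regex reformatting above, a rough usage sketch of extract_parameters. The sample metadata string and its "Negative prompt:" line are assumptions about the A1111-style format the function targets, and the exact keys returned also depend on parts of the function not shown in this diff.

from utils.string_utils import extract_parameters  # assumes the repo root is on sys.path

# Hypothetical A1111-style generation metadata.
raw = (
    "1girl, masterpiece, best quality\n"
    "Negative prompt: lowres, bad anatomy\n"
    "Steps: 28, Sampler: Euler a, Seed: 12345, Size: 832x1216"
)

params = extract_parameters(raw)
# Per the hunks above, the function populates at least:
#   params['Steps']           -> 28 (int), via r'Steps: (\d+)'
#   params['Size']            -> '832x1216', plus integer 'width' and 'height'
#   other "Key: value" pairs  -> via r'(\w+): (.*?)(?=, \w+|$)'
print(params)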