John6666 committed · verified
Commit a9cf91e · Parent(s): b8fa141

Upload 4 files

Files changed (3):
  1. app.py +118 -63
  2. env.py +20 -0
  3. requirements.txt +1 -1
app.py CHANGED
@@ -25,6 +25,8 @@ from stablepy import (
 import time
 #import urllib.parse
 
+print(os.getenv("SPACES_ZERO_GPU"))
+
 PREPROCESSOR_CONTROLNET = {
     "openpose": [
         "Openpose",
@@ -32,14 +34,14 @@ PREPROCESSOR_CONTROLNET = {
     ],
     "scribble": [
         "HED",
-        "Pidinet",
+        "PidiNet",
         "None",
     ],
     "softedge": [
-        "Pidinet",
+        "PidiNet",
         "HED",
         "HED safe",
-        "Pidinet safe",
+        "PidiNet safe",
         "None",
     ],
     "segmentation": [
@@ -324,18 +326,21 @@ def extract_parameters(input_string):
     input_string = input_string.replace("\n", "")
 
     if "Negative prompt:" not in input_string:
-        print("Negative prompt not detected")
-        parameters["prompt"] = input_string
-        return parameters
+        if "Steps:" in input_string:
+            input_string = input_string.replace("Steps:", "Negative prompt: Steps:")
+        else:
+            print("Invalid metadata")
+            parameters["prompt"] = input_string
+            return parameters
 
     parm = input_string.split("Negative prompt:")
-    parameters["prompt"] = parm[0]
+    parameters["prompt"] = parm[0].strip()
     if "Steps:" not in parm[1]:
         print("Steps not detected")
-        parameters["neg_prompt"] = parm[1]
+        parameters["neg_prompt"] = parm[1].strip()
         return parameters
     parm = parm[1].split("Steps:")
-    parameters["neg_prompt"] = parm[0]
+    parameters["neg_prompt"] = parm[0].strip()
     input_string = "Steps:" + parm[1]
 
     # Extracting Steps
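Note on this hunk: metadata that contains "Steps:" but no "Negative prompt:" is now rewritten so the existing split logic still applies, instead of being returned as a bare prompt. A minimal standalone sketch of just this branch (a re-implementation for illustration, not the repo's full extract_parameters, which goes on to parse the "Steps: ..." key-value pairs):

def split_prompt_fields(input_string):
    # Mirrors the patched branch: inject a "Negative prompt:" marker
    # before "Steps:" when no negative prompt was recorded.
    parameters = {}
    input_string = input_string.replace("\n", "")
    if "Negative prompt:" not in input_string:
        if "Steps:" in input_string:
            input_string = input_string.replace("Steps:", "Negative prompt: Steps:")
        else:
            print("Invalid metadata")
            parameters["prompt"] = input_string
            return parameters
    parm = input_string.split("Negative prompt:")
    parameters["prompt"] = parm[0].strip()
    if "Steps:" not in parm[1]:
        parameters["neg_prompt"] = parm[1].strip()
        return parameters
    parm = parm[1].split("Steps:")
    parameters["neg_prompt"] = parm[0].strip()
    return parameters

print(split_prompt_fields("1girl, masterpiece Steps: 28, Sampler: Euler a"))
# {'prompt': '1girl, masterpiece', 'neg_prompt': ''}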
@@ -394,7 +399,8 @@ class GuiSD:
             retain_task_model_in_cache=False,
             device="cpu",
         )
-        self.model.device = torch.device("cpu") #
+        self.model.load_beta_styles()
+        #self.model.device = torch.device("cpu") #
 
     def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
 
@@ -722,7 +728,7 @@ class GuiSD:
         if msg_lora:
             info_state += msg_lora
 
-        info_state = info_state + "<br>" + "GENERATION DATA:<br>" + "<br>-------<br>".join(metadata).replace("\n", "<br>")
+        info_state = info_state + "<br>" + "GENERATION DATA:<br>" + metadata[0].replace("\n", "<br>") + "<br>-------<br>"
 
         download_links = "<br>".join(
             [
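In effect, the info panel now shows only the first image's generation data where it previously joined every entry. A toy comparison with illustrative metadata values:

metadata = ["Steps: 30\nSeed: 1", "Steps: 30\nSeed: 2"]  # illustrative values
old = "GENERATION DATA:<br>" + "<br>-------<br>".join(metadata).replace("\n", "<br>")
new = "GENERATION DATA:<br>" + metadata[0].replace("\n", "<br>") + "<br>-------<br>"
# old -> GENERATION DATA:<br>Steps: 30<br>Seed: 1<br>-------<br>Steps: 30<br>Seed: 2
# new -> GENERATION DATA:<br>Steps: 30<br>Seed: 1<br>-------<br>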
@@ -732,9 +738,9 @@ class GuiSD:
         )
         if save_generated_images:
             info_state += f"<br>{download_links}"
-
+
         img = save_images(img, metadata)
-
+
         yield img, info_state
 
 def update_task_options(model_name, task_name):
@@ -953,12 +959,12 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
     with gr.Row():
         seed_gui = gr.Number(minimum=-1, maximum=2**32-1, value=-1, label="Seed")
         pag_scale_gui = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
-        num_images_gui = gr.Slider(minimum=1, maximum=4, step=1, value=1, label="Images")
+        num_images_gui = gr.Slider(minimum=1, maximum=5, step=1, value=1, label="Images")
         clip_skip_gui = gr.Checkbox(value=False, label="Layer 2 Clip Skip")
         free_u_gui = gr.Checkbox(value=False, label="FreeU")
     with gr.Row():
         sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler a")
-        vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list)
+        vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list, value=vae_model_list[0])
         prompt_s_options = [
             ("Compel format: (word)weight", "Compel"),
             ("Classic format: (word:weight)", "Classic"),
@@ -969,29 +975,35 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
     ]
     prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=prompt_s_options, value=prompt_s_options[1][1])
 
-    with gr.Row(equal_height=False):
-        def run_set_params_gui(base_prompt):
-            valid_receptors = { # default values
-                "prompt": gr.update(value=base_prompt),
-                "neg_prompt": gr.update(value=""),
-                "Steps": gr.update(value=30),
-                "width": gr.update(value=1024),
-                "height": gr.update(value=1024),
-                "Seed": gr.update(value=-1),
-                "Sampler": gr.update(value="Euler a"),
-                "scale": gr.update(value=7.5), # cfg
-                "skip": gr.update(value=True),
-            }
-            valid_keys = list(valid_receptors.keys())
-
-            parameters = extract_parameters(base_prompt)
-            for key, val in parameters.items():
-                # print(val)
-                if key in valid_keys:
+    with gr.Row(equal_height=False):
+
+        def run_set_params_gui(base_prompt, name_model):
+            valid_receptors = { # default values
+                "prompt": gr.update(value=base_prompt),
+                "neg_prompt": gr.update(value=""),
+                "Steps": gr.update(value=30),
+                "width": gr.update(value=1024),
+                "height": gr.update(value=1024),
+                "Seed": gr.update(value=-1),
+                "Sampler": gr.update(value="Euler a"),
+                "scale": gr.update(value=7.), # cfg
+                "skip": gr.update(value=True),
+                "Model": gr.update(value=name_model),
+            }
+            valid_keys = list(valid_receptors.keys())
+
+            parameters = extract_parameters(base_prompt)
+
+            for key, val in parameters.items():
+                # print(val)
+                if key in valid_keys:
+                    try:
                         if key == "Sampler":
                             if val not in scheduler_names:
                                 continue
                         elif key == "skip":
+                            if "," in str(val):
+                                val = val.replace(",", "")
                             if int(val) >= 2:
                                 val = True
                         if key == "prompt":
@@ -999,43 +1011,52 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                             val = re.sub(r'<[^>]+>', '', val)
                             print("Removed LoRA written in the prompt")
                         if key in ["prompt", "neg_prompt"]:
-                            val = val.strip()
+                            val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
                         if key in ["Steps", "width", "height", "Seed"]:
                             val = int(val)
                         if key == "scale":
                             val = float(val)
+                        if key == "Model":
+                            filtered_models = [m for m in model_list if val in m]
+                            if filtered_models:
+                                val = filtered_models[0]
+                            else:
+                                val = name_model
                         if key == "Seed":
                             continue
                         valid_receptors[key] = gr.update(value=val)
                         # print(val, type(val))
             # print(valid_receptors)
-            return [value for value in valid_receptors.values()]
-
-        set_params_gui.click(
-            run_set_params_gui, [prompt_gui],[
-                prompt_gui,
-                neg_prompt_gui,
-                steps_gui,
-                img_width_gui,
-                img_height_gui,
-                seed_gui,
-                sampler_gui,
-                cfg_gui,
-                clip_skip_gui,
-            ],
-        )
-
-        def run_clear_prompt_gui():
-            return gr.update(value=""), gr.update(value="")
-        clear_prompt_gui.click(
-            run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
-        )
-
-        def run_set_random_seed():
-            return -1
-        set_random_seed.click(
-            run_set_random_seed, [], seed_gui
-        )
+                    except Exception as e:
+                        print(str(e))
+            return [value for value in valid_receptors.values()]
+
+        set_params_gui.click(
+            run_set_params_gui, [prompt_gui, model_name_gui], [
+                prompt_gui,
+                neg_prompt_gui,
+                steps_gui,
+                img_width_gui,
+                img_height_gui,
+                seed_gui,
+                sampler_gui,
+                cfg_gui,
+                clip_skip_gui,
+                model_name_gui,
+            ],
+        )
+
+        def run_clear_prompt_gui():
+            return gr.update(value=""), gr.update(value="")
+        clear_prompt_gui.click(
+            run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
+        )
+
+        def run_set_random_seed():
+            return -1
+        set_random_seed.click(
+            run_set_random_seed, [], seed_gui
+        )
 
     with gr.Accordion("LoRA", open=False, visible=True) as menu_lora:
         def lora_dropdown(label):
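Two details in this hunk are easy to miss. First, run_set_params_gui returns valid_receptors.values() positionally, so the outputs list wired into set_params_gui.click must keep the same order as the dict keys (model_name_gui was appended to both the inputs and the outputs). Second, the new "Model" branch picks a checkpoint by substring match against model_list; a standalone sketch of that lookup, with hypothetical list entries for illustration:

model_list = [  # hypothetical entries
    "stabilityai/stable-diffusion-xl-base-1.0",
    "cagliostrolab/animagine-xl-3.1",
]

def resolve_model(val, current):
    # First list entry containing the model name found in the metadata,
    # otherwise fall back to the currently selected model.
    filtered_models = [m for m in model_list if val in m]
    return filtered_models[0] if filtered_models else current

print(resolve_model("animagine-xl-3.1", model_list[0]))
# cagliostrolab/animagine-xl-3.1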
@@ -1227,7 +1248,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
         mode_ip2 = gr.Dropdown(value="style", label="Mode", choices=MODE_IP_OPTIONS)
         scale_ip2 = gr.Slider(minimum=0., maximum=2., step=0.01, value=0.7, label="Scale")
 
-    with gr.Accordion("T2I adapter", open=False, visible=True) as menu_t2i:
+    with gr.Accordion("T2I adapter", open=False, visible=False) as menu_t2i:
         t2i_adapter_preprocessor_gui = gr.Checkbox(value=True, label="T2i Adapter Preprocessor")
         with gr.Row():
             adapter_conditioning_scale_gui = gr.Slider(minimum=0, maximum=5., step=0.1, value=1, label="Adapter Conditioning Scale")
@@ -1368,6 +1389,11 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
         cache_examples=False,
         #elem_id="examples",
     )
+    gr.Markdown(
+        """### Resources
+        - You can also try the image generator in Colab’s free tier, which provides free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
+        """
+    )
 ## END MOD
 
 with gr.Tab("Inpaint mask maker", render=True):
@@ -1433,6 +1459,35 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
         return img_source, img_result
     btn_send.click(send_img, [img_source, img_result], [image_control, image_mask_gui])
 
+    with gr.Tab("PNG Info"):
+        def extract_exif_data(image):
+            if image is None: return ""
+
+            try:
+                metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
+
+                for key in metadata_keys:
+                    if key in image.info:
+                        return image.info[key]
+
+                return str(image.info)
+
+            except Exception as e:
+                return f"Error extracting metadata: {str(e)}"
+
+        with gr.Row():
+            with gr.Column():
+                image_metadata = gr.Image(label="Image with metadata", type="pil", sources=["upload"])
+
+            with gr.Column():
+                result_metadata = gr.Textbox(label="Metadata", show_label=True, show_copy_button=True, interactive=False, container=True, max_lines=99)
+
+        image_metadata.change(
+            fn=extract_exif_data,
+            inputs=[image_metadata],
+            outputs=[result_metadata],
+        )
+
 ## BEGIN MOD
 interface_mode_gui.change(
     change_interface_mode,
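The new tab relies on Pillow surfacing PNG text chunks (and similar per-format metadata) through the image's .info dict; A1111-style tools typically store generation data under the "parameters" key, which is why it heads the key list. A minimal standalone check (the file name is a placeholder):

from PIL import Image

# PNG tEXt/iTXt chunks such as "parameters" appear as entries
# in the .info dict once Pillow has opened the file.
with Image.open("sample.png") as im:
    print(im.info.get("parameters", im.info))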
 
env.py CHANGED
@@ -7,6 +7,11 @@ hf_read_token = os.environ.get('HF_READ_TOKEN') # only use for private repo
 # - **List Models**
 load_diffusers_format_model = [
     'stabilityai/stable-diffusion-xl-base-1.0',
+    'John6666/blue-pencil-flux1-v021-fp8-flux',
+    'John6666/wai-ani-flux-v10forfp8-fp8-flux',
+    'John6666/xe-anime-flux-v04-fp8-flux',
+    'John6666/lyh-anime-flux-v2a1-fp8-flux',
+    'John6666/carnival-unchained-v10-fp8-flux',
     'cagliostrolab/animagine-xl-3.1',
     'John6666/epicrealism-xl-v8kiss-sdxl',
     'misri/epicrealismXL_v7FinalDestination',
@@ -22,15 +27,30 @@ load_diffusers_format_model = [
     'WhiteAiZ/autismmixSDXL_autismmixConfetti_diffusers',
     'kitty7779/ponyDiffusionV6XL',
     'GraydientPlatformAPI/aniverse-pony',
+    'John6666/ras-real-anime-screencap-v1-sdxl',
+    'John6666/duchaiten-pony-xl-no-score-v60-sdxl',
     'John6666/mistoon-anime-ponyalpha-sdxl',
+    'John6666/3x3x3mixxl-v2-sdxl',
+    'John6666/3x3x3mixxl-3dv01-sdxl',
     'John6666/ebara-mfcg-pony-mix-v12-sdxl',
     'John6666/t-ponynai3-v51-sdxl',
+    'John6666/t-ponynai3-v65-sdxl',
+    'John6666/prefect-pony-xl-v3-sdxl',
     'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
     'John6666/wai-real-mix-v11-sdxl',
+    'John6666/wai-c-v6-sdxl',
+    'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
+    'John6666/photo-realistic-pony-v5-sdxl',
+    'John6666/pony-realism-v21main-sdxl',
+    'John6666/pony-realism-v22main-sdxl',
     'John6666/cyberrealistic-pony-v63-sdxl',
+    'John6666/cyberrealistic-pony-v64-sdxl',
     'GraydientPlatformAPI/realcartoon-pony-diffusion',
     'John6666/nova-anime-xl-pony-v5-sdxl',
     'John6666/autismmix-sdxl-autismmix-pony-sdxl',
+    'John6666/aimz-dream-real-pony-mix-v3-sdxl',
+    'John6666/duchaiten-pony-real-v11fix-sdxl',
+    'John6666/duchaiten-pony-real-v20-sdxl',
     'yodayo-ai/kivotos-xl-2.0',
     'yodayo-ai/holodayo-xl-2.1',
     'yodayo-ai/clandestine-xl-1.0',
 
requirements.txt CHANGED
@@ -11,4 +11,4 @@ huggingface_hub
 translatepy
 timm
 rapidfuzz
-sentencepiece
+sentencepiece