John6666 committed on
Commit b7ca65a · verified · 1 Parent(s): d2c7859

Delete tagger.py

Files changed (1)
  1. tagger.py +0 -556
tagger.py DELETED
@@ -1,556 +0,0 @@
import spaces
from PIL import Image
import torch
import gradio as gr
from transformers import AutoImageProcessor, AutoModelForImageClassification
from pathlib import Path


WD_MODEL_NAMES = ["p1atdev/wd-swinv2-tagger-v3-hf"]
WD_MODEL_NAME = WD_MODEL_NAMES[0]

device = "cuda" if torch.cuda.is_available() else "cpu"
default_device = device

try:
    wd_model = AutoModelForImageClassification.from_pretrained(WD_MODEL_NAME, trust_remote_code=True).to(default_device).eval()
    wd_processor = AutoImageProcessor.from_pretrained(WD_MODEL_NAME, trust_remote_code=True)
except Exception as e:
    print(e)
    wd_model = wd_processor = None
def _people_tag(noun: str, minimum: int = 1, maximum: int = 5):
    return (
        [f"1{noun}"]
        + [f"{num}{noun}s" for num in range(minimum + 1, maximum + 1)]
        + [f"{maximum+1}+{noun}s"]
    )


PEOPLE_TAGS = (
    _people_tag("girl") + _people_tag("boy") + _people_tag("other") + ["no humans"]
)


RATING_MAP = {
    "sfw": "safe",
    "general": "safe",
    "sensitive": "sensitive",
    "questionable": "nsfw",
    "explicit": "explicit, nsfw",
}
DANBOORU_TO_E621_RATING_MAP = {
    "sfw": "rating_safe",
    "general": "rating_safe",
    "safe": "rating_safe",
    "sensitive": "rating_safe",
    "nsfw": "rating_explicit",
    "explicit, nsfw": "rating_explicit",
    "explicit": "rating_explicit",
    "rating:safe": "rating_safe",
    "rating:general": "rating_safe",
    "rating:sensitive": "rating_safe",
    "rating:questionable, nsfw": "rating_explicit",
    "rating:explicit, nsfw": "rating_explicit",
}


# https://github.com/toriato/stable-diffusion-webui-wd14-tagger/blob/a9eacb1eff904552d3012babfa28b57e1d3e295c/tagger/ui.py#L368
kaomojis = [
    "0_0",
    "(o)_(o)",
    "+_+",
    "+_-",
    "._.",
    "<o>_<o>",
    "<|>_<|>",
    "=_=",
    ">_<",
    "3_3",
    "6_9",
    ">_o",
    "@_@",
    "^_^",
    "o_o",
    "u_u",
    "x_x",
    "|_|",
    "||_||",
]


def replace_underline(x: str):
    return x.strip().replace("_", " ") if x not in kaomojis else x.strip()

def to_list(s):
    return [x.strip() for x in s.split(",") if x.strip() != ""]

def list_sub(a, b):
    return [e for e in a if e not in b]


def list_uniq(l):
    return sorted(set(l), key=l.index)

def load_dict_from_csv(filename):
    dict = {}
    if not Path(filename).exists():
        if Path('./tagger/', filename).exists(): filename = str(Path('./tagger/', filename))
        else: return dict
    try:
        with open(filename, 'r', encoding="utf-8") as f:
            lines = f.readlines()
    except Exception:
        print(f"Failed to open dictionary file: {filename}")
        return dict
    for line in lines:
        parts = line.strip().split(',')
        if len(parts) < 2: continue  # skip blank or malformed rows
        dict[parts[0]] = parts[1]
    return dict


anime_series_dict = load_dict_from_csv('character_series_dict.csv')


def character_list_to_series_list(character_list):
    output_series_tag = []
    series_tag = ""
    series_dict = anime_series_dict
    for tag in character_list:
        series_tag = series_dict.get(tag, "")
        if tag.endswith(")"):
            tags = tag.split("(")
            character_tag = "(".join(tags[:-1])
            if character_tag.endswith(" "):
                character_tag = character_tag[:-1]
            series_tag = tags[-1].replace(")", "")

        if series_tag:
            output_series_tag.append(series_tag)

    return output_series_tag


def select_random_character(series: str, character: str):
    from random import seed, randrange
    seed()
    character_list = list(anime_series_dict.keys())
    character = character_list[randrange(len(character_list) - 1)]
    series = anime_series_dict.get(character.split(",")[0].strip(), "")
    return series, character

def danbooru_to_e621(dtag, e621_dict):
    def d_to_e(match, e621_dict):
        dtag = match.group(0)
        etag = e621_dict.get(replace_underline(dtag), "")
        if etag:
            return etag
        else:
            return dtag

    import re
    tag = re.sub(r'[\w ]+', lambda wrapper: d_to_e(wrapper, e621_dict), dtag, 2)
    return tag


danbooru_to_e621_dict = load_dict_from_csv('danbooru_e621.csv')


def convert_danbooru_to_e621_prompt(input_prompt: str = "", prompt_type: str = "danbooru"):
    if prompt_type == "danbooru": return input_prompt
    tags = input_prompt.split(",") if input_prompt else []
    people_tags: list[str] = []
    other_tags: list[str] = []
    rating_tags: list[str] = []

    e621_dict = danbooru_to_e621_dict
    for tag in tags:
        tag = replace_underline(tag)
        tag = danbooru_to_e621(tag, e621_dict)
        if tag in PEOPLE_TAGS:
            people_tags.append(tag)
        elif tag in DANBOORU_TO_E621_RATING_MAP.keys():
            rating_tags.append(DANBOORU_TO_E621_RATING_MAP.get(tag.replace(" ",""), ""))
        else:
            other_tags.append(tag)

    rating_tags = sorted(set(rating_tags), key=rating_tags.index)
    rating_tags = [rating_tags[0]] if rating_tags else []
    rating_tags = ["explicit, nsfw"] if rating_tags and rating_tags[0] == "explicit" else rating_tags

    output_prompt = ", ".join(people_tags + other_tags + rating_tags)

    return output_prompt


from translatepy import Translator
translator = Translator()
def translate_prompt_old(prompt: str = ""):
    def translate_to_english(input: str):
        try:
            output = str(translator.translate(input, 'English'))
        except Exception as e:
            output = input
            print(e)
        return output

    def is_japanese(s):
        import unicodedata
        for ch in s:
            name = unicodedata.name(ch, "")
            if "CJK UNIFIED" in name or "HIRAGANA" in name or "KATAKANA" in name:
                return True
        return False

    def to_list(s):
        return [x.strip() for x in s.split(",")]

    prompts = to_list(prompt)
    outputs = []
    for p in prompts:
        p = translate_to_english(p) if is_japanese(p) else p
        outputs.append(p)

    return ", ".join(outputs)


def translate_prompt(input: str):
    try:
        output = str(translator.translate(input, 'English'))
    except Exception as e:
        output = input
        print(e)
    return output


def translate_prompt_to_ja(prompt: str = ""):
    def translate_to_japanese(input: str):
        try:
            output = str(translator.translate(input, 'Japanese'))
        except Exception as e:
            output = input
            print(e)
        return output

    def is_japanese(s):
        import unicodedata
        for ch in s:
            name = unicodedata.name(ch, "")
            if "CJK UNIFIED" in name or "HIRAGANA" in name or "KATAKANA" in name:
                return True
        return False

    def to_list(s):
        return [x.strip() for x in s.split(",")]

    prompts = to_list(prompt)
    outputs = []
    for p in prompts:
        p = translate_to_japanese(p) if not is_japanese(p) else p
        outputs.append(p)

    return ", ".join(outputs)


def tags_to_ja(itag, dict):
    def t_to_j(match, dict):
        tag = match.group(0)
        ja = dict.get(replace_underline(tag), "")
        if ja:
            return ja
        else:
            return tag

    import re
    tag = re.sub(r'[\w ]+', lambda wrapper: t_to_j(wrapper, dict), itag, 2)

    return tag


def convert_tags_to_ja(input_prompt: str = ""):
    tags = input_prompt.split(",") if input_prompt else []
    out_tags = []

    tags_to_ja_dict = load_dict_from_csv('all_tags_ja_ext.csv')
    dict = tags_to_ja_dict
    for tag in tags:
        tag = replace_underline(tag)
        tag = tags_to_ja(tag, dict)
        out_tags.append(tag)

    return ", ".join(out_tags)


enable_auto_recom_prompt = True


animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
pony_nps = to_list("source_pony, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
other_ps = to_list("anime artwork, anime style, studio anime, highly detailed, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed")
other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
def insert_recom_prompt(prompt: str = "", neg_prompt: str = "", type: str = "None"):
    global enable_auto_recom_prompt
    prompts = to_list(prompt)
    neg_prompts = to_list(neg_prompt)

    prompts = list_sub(prompts, animagine_ps + pony_ps)
    neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps)

    last_empty_p = [""] if not prompts and type != "None" else []
    last_empty_np = [""] if not neg_prompts and type != "None" else []

    if type == "Auto":
        enable_auto_recom_prompt = True
    else:
        enable_auto_recom_prompt = False
        if type == "Animagine":
            prompts = prompts + animagine_ps
            neg_prompts = neg_prompts + animagine_nps
        elif type == "Pony":
            prompts = prompts + pony_ps
            neg_prompts = neg_prompts + pony_nps

    prompt = ", ".join(list_uniq(prompts) + last_empty_p)
    neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)

    return prompt, neg_prompt

def load_model_prompt_dict():
    import json
    dict = {}
    path = 'model_dict.json' if Path('model_dict.json').exists() else './tagger/model_dict.json'
    try:
        with open(path, encoding='utf-8') as f:
            dict = json.load(f)
    except Exception:
        pass
    return dict

model_prompt_dict = load_model_prompt_dict()

def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None"):
    if not model_name or not enable_auto_recom_prompt: return prompt, neg_prompt
    prompts = to_list(prompt)
    neg_prompts = to_list(neg_prompt)
    prompts = list_sub(prompts, animagine_ps + pony_ps + other_ps)
    neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + other_nps)
    last_empty_p = [""] if not prompts and model_name != "None" else []
    last_empty_np = [""] if not neg_prompts and model_name != "None" else []
    ps = []
    nps = []
    if model_name in model_prompt_dict.keys():
        ps = to_list(model_prompt_dict[model_name]["prompt"])
        nps = to_list(model_prompt_dict[model_name]["negative_prompt"])
    else:
        ps = default_ps
        nps = default_nps
    prompts = prompts + ps
    neg_prompts = neg_prompts + nps
    prompt = ", ".join(list_uniq(prompts) + last_empty_p)
    neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
    return prompt, neg_prompt

tag_group_dict = load_dict_from_csv('tag_group.csv')


def remove_specific_prompt(input_prompt: str = "", keep_tags: str = "all"):
    def is_dressed(tag):
        import re
        p = re.compile(r'dress|cloth|uniform|costume|vest|sweater|coat|shirt|jacket|blazer|apron|leotard|hood|sleeve|skirt|shorts|pant|loafer|ribbon|necktie|bow|collar|glove|sock|shoe|boots|wear|emblem')
        return p.search(tag)

    def is_background(tag):
        import re
        p = re.compile(r'background|outline|light|sky|build|day|screen|tree|city')
        return p.search(tag)

    un_tags = ['solo']
    group_list = ['groups', 'body_parts', 'attire', 'posture', 'objects', 'creatures', 'locations', 'disambiguation_pages', 'commonly_misused_tags', 'phrases', 'verbs_and_gerunds', 'subjective', 'nudity', 'sex_objects', 'sex', 'sex_acts', 'image_composition', 'artistic_license', 'text', 'year_tags', 'metatags']
    keep_group_dict = {
        "body": ['groups', 'body_parts'],
        "dress": ['groups', 'body_parts', 'attire'],
        "all": group_list,
    }

    def is_necessary(tag, keep_tags, group_dict):
        if keep_tags == "all":
            return True
        elif tag in un_tags or group_dict.get(tag, "") in explicit_group:
            return False
        elif keep_tags == "body" and is_dressed(tag):
            return False
        elif is_background(tag):
            return False
        else:
            return True

    if keep_tags == "all": return input_prompt
    keep_group = keep_group_dict.get(keep_tags, keep_group_dict["body"])
    explicit_group = list(set(group_list) ^ set(keep_group))

    tags = input_prompt.split(",") if input_prompt else []
    people_tags: list[str] = []
    other_tags: list[str] = []

    group_dict = tag_group_dict
    for tag in tags:
        tag = replace_underline(tag)
        if tag in PEOPLE_TAGS:
            people_tags.append(tag)
        elif is_necessary(tag, keep_tags, group_dict):
            other_tags.append(tag)

    output_prompt = ", ".join(people_tags + other_tags)

    return output_prompt


def sort_taglist(tags: list[str]):
    if not tags: return []
    character_tags: list[str] = []
    series_tags: list[str] = []
    people_tags: list[str] = []
    group_list = ['groups', 'body_parts', 'attire', 'posture', 'objects', 'creatures', 'locations', 'disambiguation_pages', 'commonly_misused_tags', 'phrases', 'verbs_and_gerunds', 'subjective', 'nudity', 'sex_objects', 'sex', 'sex_acts', 'image_composition', 'artistic_license', 'text', 'year_tags', 'metatags']
    group_tags = {}
    other_tags: list[str] = []
    rating_tags: list[str] = []

    group_dict = tag_group_dict
    group_set = set(group_dict.keys())
    character_set = set(anime_series_dict.keys())
    series_set = set(anime_series_dict.values())
    rating_set = set(DANBOORU_TO_E621_RATING_MAP.keys()) | set(DANBOORU_TO_E621_RATING_MAP.values())

    for tag in tags:
        tag = replace_underline(tag)
        if tag in PEOPLE_TAGS:
            people_tags.append(tag)
        elif tag in rating_set:
            rating_tags.append(tag)
        elif tag in group_set:
            elem = group_dict[tag]
            group_tags[elem] = group_tags[elem] + [tag] if elem in group_tags else [tag]
        elif tag in character_set:
            character_tags.append(tag)
        elif tag in series_set:
            series_tags.append(tag)
        else:
            other_tags.append(tag)

    output_group_tags: list[str] = []
    for k in group_list:
        output_group_tags.extend(group_tags.get(k, []))

    rating_tags = [rating_tags[0]] if rating_tags else []
    rating_tags = ["explicit, nsfw"] if rating_tags and rating_tags[0] == "explicit" else rating_tags

    output_tags = character_tags + series_tags + people_tags + output_group_tags + other_tags + rating_tags

    return output_tags


def sort_tags(tags: str):
    if not tags: return ""
    taglist: list[str] = []
    for tag in tags.split(","):
        taglist.append(tag.strip())
    taglist = list(filter(lambda x: x != "", taglist))
    return ", ".join(sort_taglist(taglist))


def postprocess_results(results: dict[str, float], general_threshold: float, character_threshold: float):
    results = {
        k: v for k, v in sorted(results.items(), key=lambda item: item[1], reverse=True)
    }

    rating = {}
    character = {}
    general = {}

    for k, v in results.items():
        if k.startswith("rating:"):
            rating[k.replace("rating:", "")] = v
            continue
        elif k.startswith("character:"):
            character[k.replace("character:", "")] = v
            continue

        general[k] = v

    character = {k: v for k, v in character.items() if v >= character_threshold}
    general = {k: v for k, v in general.items() if v >= general_threshold}

    return rating, character, general


def gen_prompt(rating: list[str], character: list[str], general: list[str]):
    people_tags: list[str] = []
    other_tags: list[str] = []
    rating_tag = RATING_MAP[rating[0]]

    for tag in general:
        if tag in PEOPLE_TAGS:
            people_tags.append(tag)
        else:
            other_tags.append(tag)

    all_tags = people_tags + other_tags

    return ", ".join(all_tags)


@spaces.GPU(duration=30)
def predict_tags(image: Image.Image, general_threshold: float = 0.3, character_threshold: float = 0.8):
    inputs = wd_processor.preprocess(image, return_tensors="pt")

    outputs = wd_model(**inputs.to(wd_model.device, wd_model.dtype))
    logits = torch.sigmoid(outputs.logits[0]) # take the first logits

    # get probabilities
    if device != default_device: wd_model.to(device=device)
    results = {
        wd_model.config.id2label[i]: float(logit.float()) for i, logit in enumerate(logits)
    }
    if device != default_device: wd_model.to(device=default_device)
    # rating, character, general
    rating, character, general = postprocess_results(
        results, general_threshold, character_threshold
    )
    prompt = gen_prompt(
        list(rating.keys()), list(character.keys()), list(general.keys())
    )
    output_series_tag = ""
    output_series_list = character_list_to_series_list(character.keys())
    if output_series_list:
        output_series_tag = output_series_list[0]
    else:
        output_series_tag = ""
    return output_series_tag, ", ".join(character.keys()), prompt, gr.update(interactive=True)

def predict_tags_wd(image: Image.Image, input_tags: str, algo: list[str], general_threshold: float = 0.3,
                    character_threshold: float = 0.8, input_series: str = "", input_character: str = ""):
    if "Use WD Tagger" not in algo and len(algo) != 0:
        return input_series, input_character, input_tags, gr.update(interactive=True)
    return predict_tags(image, general_threshold, character_threshold)


def compose_prompt_to_copy(character: str, series: str, general: str):
    characters = character.split(",") if character else []
    serieses = series.split(",") if series else []
    generals = general.split(",") if general else []
    tags = characters + serieses + generals
    cprompt = ",".join(tags) if tags else ""
    return cprompt