Spaces:
Running
Running
Super-squash branch 'main' using huggingface_hub
Browse files
Co-authored-by: p1atdev <[email protected]>
- .gitattributes +35 -0
- README.md +13 -0
- app.py +228 -0
- characterfull.txt +0 -0
- danbooru_e621.csv +0 -0
- e621_danbooru.csv +0 -0
- requirements.txt +1 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Danbooru to e621 Tag Converter
|
3 |
+
emoji: 😻
|
4 |
+
colorFrom: pink
|
5 |
+
colorTo: yellow
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 4.36.1
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
license: apache-2.0
|
11 |
+
---
|
12 |
+
|
13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import spaces # ZERO GPU
|
3 |
+
|
# ref: https://qiita.com/tregu148/items/fccccbbc47d966dd2fc2
def gradio_copy_text(_text):
    """Pop a 'Copied!' toast after the copy button is pressed.

    The clipboard write itself happens client-side via COPY_ACTION_JS; the
    textbox value *_text* is received from Gradio but intentionally unused.
    BUGFIX: dropped the incorrect ``_text: None`` annotation — the handler
    receives the textbox's string value, not ``None`` as a type.
    """
    gr.Info("Copied!")
7 |
+
|
# Client-side JS attached to the copy button: writes the prompt textbox value
# to the clipboard (clipboard access only exists in the browser, not server-side).
COPY_ACTION_JS = """\
(inputs, _outputs) => {
    // inputs is the string value of the input_text
    if (inputs.trim() !== "") {
        navigator.clipboard.writeText(inputs);
    }
}"""
15 |
+
|
16 |
+
def _people_tag(noun: str, minimum: int = 1, maximum: int = 5):
|
17 |
+
return (
|
18 |
+
[f"1{noun}"]
|
19 |
+
+ [f"{num}{noun}s" for num in range(minimum + 1, maximum + 1)]
|
20 |
+
+ [f"{maximum+1}+{noun}s"]
|
21 |
+
)
|
22 |
+
|
23 |
+
|
# Every recognized people-count tag ("1girl".."6+girls", same for boy/other)
# plus "no humans"; used to route these tags to the front of the prompt.
PEOPLE_TAGS = (
    _people_tag("girl") + _people_tag("boy") + _people_tag("other") + ["no humans"]
)
# Maps danbooru-style rating tags to Pony/e621 "rating_*" tags.
# NOTE(review): several keys contain ", " (e.g. "explicit, nsfw"); upstream
# input is split on "," before lookup, so those keys appear unreachable —
# confirm against the callers before relying on them.
RATING_MAP = {
    "safe": "rating_safe",
    "sensitive": "rating_safe",
    "nsfw": "rating_questionable",
    "explicit, nsfw": "rating_explicit",
    "explicit": "rating_explicit",
    "rating:safe": "rating_safe",
    "rating:general": "rating_safe",
    "rating:sensitive": "rating_safe",
    "rating:questionable, nsfw": "rating_explicit",
    "rating:explicit, nsfw": "rating_explicit",
}
39 |
+
|
# Markdown shown at the top of the page.
DESCRIPTION_MD = """
# Convert general Danbooru tags to Pony e621 tags
""".strip()

# Markdown credit line shown below the converter.
DESCRIPTION_MD2 = """
The dictionary was generated using the following repository: [ponapon280/danbooru-e621-converter](https://github.com/ponapon280/danbooru-e621-converter)
""".strip()
47 |
+
|
48 |
+
|
def character_list_to_series_list(character_list):
    """Return the series tags associated with the given character tags.

    For each character tag, the series is taken either from the trailing
    "(series)" suffix of the tag itself, or from the lookup table built out
    of characterfull.txt.
    """

    def get_series_dict():
        """Build a {character name: series} dict from characterfull.txt.

        Each line is expected to look like ``..., character \\(series\\), series``
        (comma-space separated, at least 3 fields) — assumption inferred from
        the parsing below; confirm against the data file.
        """
        import re

        # NOTE(review): opened without an explicit encoding — the file is read
        # with the platform default; consider encoding="utf-8".
        with open('characterfull.txt', 'r') as f:
            lines = f.readlines()

        series_dict = {}
        for line in lines:
            parts = line.strip().split(', ')
            if len(parts) >= 3:
                # Second-to-last field is the character name; drop escape backslashes.
                name = parts[-2].replace("\\", "")
                if name.endswith(")"):
                    # Strip a trailing "(...)" qualifier from the name, keeping
                    # everything before the last "(".
                    names = name.split("(")
                    character_name = "(".join(names[:-1])
                    if character_name.endswith(" "):
                        # Only rebinds `name` when a space precedes the "(" —
                        # otherwise the qualified name is kept as the key.
                        name = character_name[:-1]
                # Last field is the series; remove escaped parentheses.
                series = re.sub(r'\\[()]', '', parts[-1])
                series_dict[name] = series

        return series_dict

    output_series_tag = []
    series_tag = ""
    series_dict = get_series_dict()
    for tag in character_list:
        # Default: series from the lookup table ("" when unknown).
        series_tag = series_dict.get(tag, "")
        if tag.endswith(")"):
            # Tag carries its own "(series)" suffix — prefer that over the table.
            tags = tag.split("(")
            character_tag = "(".join(tags[:-1])
            if character_tag.endswith(" "):
                character_tag = character_tag[:-1]
            series_tag = tags[-1].replace(")", "")

        if series_tag:
            output_series_tag.append(series_tag)

    return output_series_tag
87 |
+
|
88 |
+
|
def get_e621_dict(path: str = 'danbooru_e621.csv'):
    """Load the danbooru -> e621 tag mapping.

    Reads *path* as a two-column CSV (danbooru tag, e621 tag) and returns it
    as a dict. The path is parameterized (default unchanged) so the loader
    can be pointed at other dictionaries and unit-tested.

    BUGFIX: uses the ``csv`` module instead of a naive ``split(',')`` so
    quoted fields containing commas parse correctly, and rows with fewer
    than two fields (e.g. blank lines) are skipped instead of raising
    ``IndexError``.
    """
    import csv

    e621_dict = {}
    # newline="" is the documented way to open files for the csv module.
    with open(path, 'r', encoding="utf-8", newline="") as f:
        for row in csv.reader(f):
            if len(row) >= 2:
                e621_dict[row[0]] = row[1]

    return e621_dict
99 |
+
|
100 |
+
|
def danbooru_to_e621(dtag, e621_dict):
    """Translate danbooru tag text to its e621 equivalent.

    Word-and-space runs in *dtag* are looked up in *e621_dict* (with
    underscores normalized to spaces); unknown runs are left untouched.
    At most the first two runs are rewritten.
    """
    import re

    def _translate(match):
        word = match.group(0)
        replacement = e621_dict.get(word.strip().replace("_", " "), "")
        return replacement if replacement else word

    return re.sub(r'[\w ]+', _translate, dtag, 2)
114 |
+
|
115 |
+
|
def nai_to_webui(ntag):
    """Convert NovelAI emphasis syntax to WebUI syntax.

    Each ``{...}`` / ``[...]`` group becomes ``(words:weight)``, where every
    ``{`` multiplies the weight by 1.05 and every ``[`` by 0.952.
    """
    import re

    def _rewrite(match):
        fragment = match.group(0)
        # Accumulate the weight one bracket at a time (matches the original
        # repeated-multiplication arithmetic exactly).
        weight = 1.0
        for _ in range(fragment.count("{")):
            weight *= 1.05
        for _ in range(fragment.count("[")):
            weight *= 0.952
        # Unwrap the brackets, keeping only the inner words.
        words = re.sub(r'(?:{+([\w ,_]+)}+)|(?:\[+([\w ,_]+)\]+)', r'\1\2', fragment, 2)
        return f"({words}:{weight:.2f})"

    return re.sub(r'(?:{+[\w ,_]+}+)|(?:\[+[\w ,_]+\]+)', _rewrite, ntag)
136 |
+
|
137 |
+
|
def animagine_prompt(character: list[str], general: list[str], tag_type):
    """Assemble the final prompt string.

    Each tag in *general* is mapped through the danbooru->e621 dictionary
    and then routed to people-count, rating, or other tags. The output is
    ordered: people, characters, series, other, rating (at most one rating
    kept). When *tag_type* is "NovelAI", NovelAI emphasis syntax is
    converted to WebUI syntax at the end.
    """
    people_tags: list[str] = []
    other_tags: list[str] = []
    rating_tags: list[str] = []

    e621_dict = get_e621_dict()
    for tag in general:
        tag = danbooru_to_e621(tag, e621_dict)
        if tag in PEOPLE_TAGS:
            people_tags.append(tag)
        elif tag in RATING_MAP:
            # BUGFIX: the lookup previously used tag.replace(" ", ""), which
            # can never match the space-containing keys that just passed the
            # membership test, so "" was appended instead of the mapped tag.
            rating_tags.append(RATING_MAP[tag])
        else:
            other_tags.append(tag)

    # De-duplicate while preserving first-seen order, then keep only the
    # first rating tag.
    rating_tags = sorted(set(rating_tags), key=rating_tags.index)
    rating_tags = [rating_tags[0]] if rating_tags else []

    output_series_tag = character_list_to_series_list(character)

    all_tag = ", ".join(people_tags + character + output_series_tag + other_tags + rating_tags)

    if tag_type == "NovelAI":
        all_tag = nai_to_webui(all_tag)

    return all_tag
164 |
+
|
165 |
+
|
def convert_tags(
    input_copyright="", input_character="", input_general="", tag_type="WebUI",
):
    """Gradio handler: build the prompt from the UI inputs.

    Returns the prompt string plus a component update enabling the copy
    button. NOTE(review): *input_copyright* is accepted (it is wired up in
    the UI) but currently unused by the conversion — confirm whether that
    is intentional.
    """
    # BUGFIX(idiom): was `character.append(input_character) if input_character
    # else []` — a conditional expression used as a statement whose value was
    # discarded. Same behavior, written as a plain conditional.
    character = [input_character] if input_character else []
    general = input_general.split(",") if input_general else []

    prompt = animagine_prompt(
        character, general, tag_type
    )

    return prompt, gr.update(interactive=True,)
179 |
+
|
180 |
+
|
def demo():
    """Build and return the Gradio Blocks UI for the tag converter."""
    with gr.Blocks() as ui:
        gr.Markdown(DESCRIPTION_MD)

        with gr.Row():
            # Left column: tag inputs and the convert trigger.
            with gr.Column():
                with gr.Group():
                    input_copyright = gr.Textbox(
                        label="Copyright tags",
                        placeholder="vocaloid",
                    )
                    input_character = gr.Textbox(
                        label="Character tags",
                        placeholder="hatsune miku",
                    )
                    input_general = gr.TextArea(
                        label="General tags",
                        lines=6,
                        placeholder="1girl, solo, ...",
                    )
                    tag_type = gr.Radio(
                        label="Style of tags",
                        choices=["WebUI", "NovelAI"],
                        value="WebUI",
                    )
                    start_btn = gr.Button(value="Convert", variant="primary")

            # Right column: read-only result plus copy-to-clipboard button
            # (disabled until a prompt has been produced).
            with gr.Column():
                with gr.Group():
                    prompt_text = gr.TextArea(label="Prompt", lines=6, interactive=False)
                    copy_btn = gr.Button(value="Copy to clipboard", interactive=False)

        gr.Markdown(DESCRIPTION_MD2)

        # Convert fills the prompt textbox and enables the copy button.
        start_btn.click(
            convert_tags,
            inputs=[input_copyright, input_character, input_general, tag_type],
            outputs=[
                prompt_text,
                copy_btn,
            ],
        )
        # Clipboard write happens in JS; the Python handler only shows a toast.
        copy_btn.click(gradio_copy_text, inputs=[prompt_text], js=COPY_ACTION_JS)

    return ui
226 |
+
|
# Script entry point: build the UI and serve it; queue() enables Gradio's
# request queuing before launch.
if __name__ == "__main__":
    demo().queue().launch()
characterfull.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
danbooru_e621.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
e621_danbooru.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
spaces
|