PennyJX committed on
Commit
3d7c07c
1 Parent(s): d301790

Upload 8 files

modules/textual_inversion/autocrop.py ADDED
@@ -0,0 +1,345 @@
1
+ import cv2
2
+ import requests
3
+ import os
4
+ import numpy as np
5
+ from PIL import ImageDraw
6
+ from modules import paths_internal
7
+ from pkg_resources import parse_version
8
+
9
+ GREEN = "#0F0"
10
+ BLUE = "#00F"
11
+ RED = "#F00"
12
+
13
+
14
+ def crop_image(im, settings):
15
+ """ Intelligently crop an image to the subject matter """
16
+
17
+ scale_by = 1
18
+ if is_landscape(im.width, im.height):
19
+ scale_by = settings.crop_height / im.height
20
+ elif is_portrait(im.width, im.height):
21
+ scale_by = settings.crop_width / im.width
22
+ elif is_square(im.width, im.height):
23
+ if is_square(settings.crop_width, settings.crop_height):
24
+ scale_by = settings.crop_width / im.width
25
+ elif is_landscape(settings.crop_width, settings.crop_height):
26
+ scale_by = settings.crop_width / im.width
27
+ elif is_portrait(settings.crop_width, settings.crop_height):
28
+ scale_by = settings.crop_height / im.height
29
+
30
+ im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
31
+ im_debug = im.copy()
32
+
33
+ focus = focal_point(im_debug, settings)
34
+
35
+ # take the focal point and turn it into crop coordinates that try to center over the focal
36
+ # point but then get adjusted back into the frame
37
+ y_half = int(settings.crop_height / 2)
38
+ x_half = int(settings.crop_width / 2)
39
+
40
+ x1 = focus.x - x_half
41
+ if x1 < 0:
42
+ x1 = 0
43
+ elif x1 + settings.crop_width > im.width:
44
+ x1 = im.width - settings.crop_width
45
+
46
+ y1 = focus.y - y_half
47
+ if y1 < 0:
48
+ y1 = 0
49
+ elif y1 + settings.crop_height > im.height:
50
+ y1 = im.height - settings.crop_height
51
+
52
+ x2 = x1 + settings.crop_width
53
+ y2 = y1 + settings.crop_height
54
+
55
+ crop = [x1, y1, x2, y2]
56
+
57
+ results = []
58
+
59
+ results.append(im.crop(tuple(crop)))
60
+
61
+ if settings.annotate_image:
62
+ d = ImageDraw.Draw(im_debug)
63
+ rect = list(crop)
64
+ rect[2] -= 1
65
+ rect[3] -= 1
66
+ d.rectangle(rect, outline=GREEN)
67
+ results.append(im_debug)
68
+ if settings.destop_view_image:
69
+ im_debug.show()
70
+
71
+ return results
72
+
73
+
74
+ def focal_point(im, settings):
75
+ corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
76
+ entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
77
+ face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []
78
+
79
+ pois = []
80
+
81
+ weight_pref_total = 0
82
+ if corner_points:
83
+ weight_pref_total += settings.corner_points_weight
84
+ if entropy_points:
85
+ weight_pref_total += settings.entropy_points_weight
86
+ if face_points:
87
+ weight_pref_total += settings.face_points_weight
88
+
89
+ corner_centroid = None
90
+ if corner_points:
91
+ corner_centroid = centroid(corner_points)
92
+ corner_centroid.weight = settings.corner_points_weight / weight_pref_total
93
+ pois.append(corner_centroid)
94
+
95
+ entropy_centroid = None
96
+ if entropy_points:
97
+ entropy_centroid = centroid(entropy_points)
98
+ entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
99
+ pois.append(entropy_centroid)
100
+
101
+ face_centroid = None
102
+ if face_points:
103
+ face_centroid = centroid(face_points)
104
+ face_centroid.weight = settings.face_points_weight / weight_pref_total
105
+ pois.append(face_centroid)
106
+
107
+ average_point = poi_average(pois, settings)
108
+
109
+ if settings.annotate_image:
110
+ d = ImageDraw.Draw(im)
111
+ max_size = min(im.width, im.height) * 0.07
112
+ if corner_centroid is not None:
113
+ color = BLUE
114
+ box = corner_centroid.bounding(max_size * corner_centroid.weight)
115
+ d.text((box[0], box[1] - 15), f"Edge: {corner_centroid.weight:.02f}", fill=color)
116
+ d.ellipse(box, outline=color)
117
+ if len(corner_points) > 1:
118
+ for f in corner_points:
119
+ d.rectangle(f.bounding(4), outline=color)
120
+ if entropy_centroid is not None:
121
+ color = "#ff0"
122
+ box = entropy_centroid.bounding(max_size * entropy_centroid.weight)
123
+ d.text((box[0], box[1] - 15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color)
124
+ d.ellipse(box, outline=color)
125
+ if len(entropy_points) > 1:
126
+ for f in entropy_points:
127
+ d.rectangle(f.bounding(4), outline=color)
128
+ if face_centroid is not None:
129
+ color = RED
130
+ box = face_centroid.bounding(max_size * face_centroid.weight)
131
+ d.text((box[0], box[1] - 15), f"Face: {face_centroid.weight:.02f}", fill=color)
132
+ d.ellipse(box, outline=color)
133
+ if len(face_points) > 1:
134
+ for f in face_points:
135
+ d.rectangle(f.bounding(4), outline=color)
136
+
137
+ d.ellipse(average_point.bounding(max_size), outline=GREEN)
138
+
139
+ return average_point
140
+
141
+
142
+ def image_face_points(im, settings):
143
+ if settings.dnn_model_path is not None:
144
+ detector = cv2.FaceDetectorYN.create(
145
+ settings.dnn_model_path,
146
+ "",
147
+ (im.width, im.height),
148
+ 0.9, # score threshold
149
+ 0.3, # nms threshold
150
+ 5000 # keep top k before nms
151
+ )
152
+ faces = detector.detect(np.array(im))
153
+ results = []
154
+ if faces[1] is not None:
155
+ for face in faces[1]:
156
+ x = face[0]
157
+ y = face[1]
158
+ w = face[2]
159
+ h = face[3]
160
+ results.append(
161
+ PointOfInterest(
162
+ int(x + (w * 0.5)), # face focus left/right is center
163
+ int(y + (h * 0.33)), # face focus up/down is close to the top of the head
164
+ size=w,
165
+ weight=1 / len(faces[1])
166
+ )
167
+ )
168
+ return results
169
+ else:
170
+ np_im = np.array(im)
171
+ gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
172
+
173
+ tries = [
174
+ [f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01],
175
+ [f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05],
176
+ [f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05],
177
+ [f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05],
178
+ [f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05],
179
+ [f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05],
180
+ [f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05],
181
+ [f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05]
182
+ ]
183
+ for t in tries:
184
+ classifier = cv2.CascadeClassifier(t[0])
185
+ minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
186
+ try:
187
+ faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
188
+ minNeighbors=7, minSize=(minsize, minsize),
189
+ flags=cv2.CASCADE_SCALE_IMAGE)
190
+ except Exception:
191
+ continue
192
+
193
+ if faces:
194
+ rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
195
+ return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0] - r[2]),
196
+ weight=1 / len(rects)) for r in rects]
197
+ return []
198
+
199
+
200
+ def image_corner_points(im, settings):
201
+ grayscale = im.convert("L")
202
+
203
+ # naive attempt at preventing focal points from collecting at watermarks near the bottom
204
+ gd = ImageDraw.Draw(grayscale)
205
+ gd.rectangle([0, im.height * .9, im.width, im.height], fill="#999")
206
+
207
+ np_im = np.array(grayscale)
208
+
209
+ points = cv2.goodFeaturesToTrack(
210
+ np_im,
211
+ maxCorners=100,
212
+ qualityLevel=0.04,
213
+ minDistance=min(grayscale.width, grayscale.height) * 0.06,
214
+ useHarrisDetector=False,
215
+ )
216
+
217
+ if points is None:
218
+ return []
219
+
220
+ focal_points = []
221
+ for point in points:
222
+ x, y = point.ravel()
223
+ focal_points.append(PointOfInterest(x, y, size=4, weight=1 / len(points)))
224
+
225
+ return focal_points
226
+
227
+
228
+ def image_entropy_points(im, settings):
229
+ landscape = im.height < im.width
230
+ portrait = im.height > im.width
231
+ if landscape:
232
+ move_idx = [0, 2]
233
+ move_max = im.size[0]
234
+ elif portrait:
235
+ move_idx = [1, 3]
236
+ move_max = im.size[1]
237
+ else:
238
+ return []
239
+
240
+ e_max = 0
241
+ crop_current = [0, 0, settings.crop_width, settings.crop_height]
242
+ crop_best = crop_current
243
+ while crop_current[move_idx[1]] < move_max:
244
+ crop = im.crop(tuple(crop_current))
245
+ e = image_entropy(crop)
246
+
247
+ if (e > e_max):
248
+ e_max = e
249
+ crop_best = list(crop_current)
250
+
251
+ crop_current[move_idx[0]] += 4
252
+ crop_current[move_idx[1]] += 4
253
+
254
+ x_mid = int(crop_best[0] + settings.crop_width / 2)
255
+ y_mid = int(crop_best[1] + settings.crop_height / 2)
256
+
257
+ return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
258
+
259
+
260
+ def image_entropy(im):
261
+ # greyscale image entropy
262
+ # band = np.asarray(im.convert("L"))
263
+ band = np.asarray(im.convert("1"), dtype=np.uint8)
264
+ hist, _ = np.histogram(band, bins=range(0, 256))
265
+ hist = hist[hist > 0]
266
+ return -np.log2(hist / hist.sum()).sum()
267
+
268
+
269
+ def centroid(pois):
270
+ x = [poi.x for poi in pois]
271
+ y = [poi.y for poi in pois]
272
+ return PointOfInterest(sum(x) / len(pois), sum(y) / len(pois))
273
+
274
+
275
+ def poi_average(pois, settings):
276
+ weight = 0.0
277
+ x = 0.0
278
+ y = 0.0
279
+ for poi in pois:
280
+ weight += poi.weight
281
+ x += poi.x * poi.weight
282
+ y += poi.y * poi.weight
283
+ avg_x = round(weight and x / weight)
284
+ avg_y = round(weight and y / weight)
285
+
286
+ return PointOfInterest(avg_x, avg_y)
287
+
288
+
289
+ def is_landscape(w, h):
290
+ return w > h
291
+
292
+
293
+ def is_portrait(w, h):
294
+ return h > w
295
+
296
+
297
+ def is_square(w, h):
298
+ return w == h
299
+
300
+
301
+ model_dir_opencv = os.path.join(paths_internal.models_path, 'opencv')
302
+ if parse_version(cv2.__version__) >= parse_version('4.8'):
303
+ model_file_path = os.path.join(model_dir_opencv, 'face_detection_yunet_2023mar.onnx')
304
+ model_url = 'https://github.com/opencv/opencv_zoo/blob/b6e370b10f641879a87890d44e42173077154a05/models/face_detection_yunet/face_detection_yunet_2023mar.onnx?raw=true'
305
+ else:
306
+ model_file_path = os.path.join(model_dir_opencv, 'face_detection_yunet.onnx')
307
+ model_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'
308
+
309
+
310
+ def download_and_cache_models():
311
+ if not os.path.exists(model_file_path):
312
+ os.makedirs(model_dir_opencv, exist_ok=True)
313
+ print(f"downloading face detection model from '{model_url}' to '{model_file_path}'")
314
+ response = requests.get(model_url)
315
+ with open(model_file_path, "wb") as f:
316
+ f.write(response.content)
317
+ return model_file_path
318
+
319
+
320
+ class PointOfInterest:
321
+ def __init__(self, x, y, weight=1.0, size=10):
322
+ self.x = x
323
+ self.y = y
324
+ self.weight = weight
325
+ self.size = size
326
+
327
+ def bounding(self, size):
328
+ return [
329
+ self.x - size // 2,
330
+ self.y - size // 2,
331
+ self.x + size // 2,
332
+ self.y + size // 2
333
+ ]
334
+
335
+
336
+ class Settings:
337
+ def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False, dnn_model_path=None):
338
+ self.crop_width = crop_width
339
+ self.crop_height = crop_height
340
+ self.corner_points_weight = corner_points_weight
341
+ self.entropy_points_weight = entropy_points_weight
342
+ self.face_points_weight = face_points_weight
343
+ self.annotate_image = annotate_image
344
+ self.desktop_view_image = False
345
+ self.dnn_model_path = dnn_model_path
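A minimal usage sketch of the module above, assuming the webui package layout is importable; the input filename is hypothetical, and download_and_cache_models() needs network access on first use to fetch the YuNet face model:

from PIL import Image
from modules.textual_inversion import autocrop

settings = autocrop.Settings(
    crop_width=512,
    crop_height=512,
    face_points_weight=0.9,            # favour detected faces
    entropy_points_weight=0.15,
    corner_points_weight=0.5,
    annotate_image=True,               # also return a debug image with the crop drawn
    dnn_model_path=autocrop.download_and_cache_models(),
)
im = Image.open("input.png")           # hypothetical source image
cropped, annotated = autocrop.crop_image(im, settings)
cropped.save("cropped.png")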
modules/textual_inversion/dataset.py ADDED
@@ -0,0 +1,246 @@
1
+ import os
2
+ import numpy as np
3
+ import PIL
4
+ import torch
5
+ from PIL import Image
6
+ from torch.utils.data import Dataset, DataLoader, Sampler
7
+ from torchvision import transforms
8
+ from collections import defaultdict
9
+ from random import shuffle, choices
10
+
11
+ import random
12
+ import tqdm
13
+ from modules import devices, shared
14
+ import re
15
+
16
+ from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
17
+
18
+ re_numbers_at_start = re.compile(r"^[-\d]+\s*")
19
+
20
+
21
+ class DatasetEntry:
22
+ def __init__(self, filename=None, filename_text=None, latent_dist=None, latent_sample=None, cond=None, cond_text=None, pixel_values=None, weight=None):
23
+ self.filename = filename
24
+ self.filename_text = filename_text
25
+ self.weight = weight
26
+ self.latent_dist = latent_dist
27
+ self.latent_sample = latent_sample
28
+ self.cond = cond
29
+ self.cond_text = cond_text
30
+ self.pixel_values = pixel_values
31
+
32
+
33
+ class PersonalizedBase(Dataset):
34
+ def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', varsize=False, use_weight=False):
35
+ re_word = re.compile(shared.opts.dataset_filename_word_regex) if shared.opts.dataset_filename_word_regex else None
36
+
37
+ self.placeholder_token = placeholder_token
38
+
39
+ self.flip = transforms.RandomHorizontalFlip(p=flip_p)
40
+
41
+ self.dataset = []
42
+
43
+ with open(template_file, "r") as file:
44
+ lines = [x.strip() for x in file.readlines()]
45
+
46
+ self.lines = lines
47
+
48
+ assert data_root, 'dataset directory not specified'
49
+ assert os.path.isdir(data_root), "Dataset directory doesn't exist"
50
+ assert os.listdir(data_root), "Dataset directory is empty"
51
+
52
+ self.image_paths = [os.path.join(data_root, file_path) for file_path in os.listdir(data_root)]
53
+
54
+ self.shuffle_tags = shuffle_tags
55
+ self.tag_drop_out = tag_drop_out
56
+ groups = defaultdict(list)
57
+
58
+ print("Preparing dataset...")
59
+ for path in tqdm.tqdm(self.image_paths):
60
+ alpha_channel = None
61
+ if shared.state.interrupted:
62
+ raise Exception("interrupted")
63
+ try:
64
+ image = Image.open(path)
65
+ #Currently does not work for single color transparency
66
+ #We would need to read image.info['transparency'] for that
67
+ if use_weight and 'A' in image.getbands():
68
+ alpha_channel = image.getchannel('A')
69
+ image = image.convert('RGB')
70
+ if not varsize:
71
+ image = image.resize((width, height), PIL.Image.BICUBIC)
72
+ except Exception:
73
+ continue
74
+
75
+ text_filename = f"{os.path.splitext(path)[0]}.txt"
76
+ filename = os.path.basename(path)
77
+
78
+ if os.path.exists(text_filename):
79
+ with open(text_filename, "r", encoding="utf8") as file:
80
+ filename_text = file.read()
81
+ else:
82
+ filename_text = os.path.splitext(filename)[0]
83
+ filename_text = re.sub(re_numbers_at_start, '', filename_text)
84
+ if re_word:
85
+ tokens = re_word.findall(filename_text)
86
+ filename_text = (shared.opts.dataset_filename_join_string or "").join(tokens)
87
+
88
+ npimage = np.array(image).astype(np.uint8)
89
+ npimage = (npimage / 127.5 - 1.0).astype(np.float32)
90
+
91
+ torchdata = torch.from_numpy(npimage).permute(2, 0, 1).to(device=device, dtype=torch.float32)
92
+ latent_sample = None
93
+
94
+ with devices.autocast():
95
+ latent_dist = model.encode_first_stage(torchdata.unsqueeze(dim=0))
96
+
97
+ #Perform latent sampling, even for random sampling.
98
+ #We need the sample dimensions for the weights
99
+ if latent_sampling_method == "deterministic":
100
+ if isinstance(latent_dist, DiagonalGaussianDistribution):
101
+ # Works only for DiagonalGaussianDistribution
102
+ latent_dist.std = 0
103
+ else:
104
+ latent_sampling_method = "once"
105
+ latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu)
106
+
107
+ if use_weight and alpha_channel is not None:
108
+ channels, *latent_size = latent_sample.shape
109
+ weight_img = alpha_channel.resize(latent_size)
110
+ npweight = np.array(weight_img).astype(np.float32)
111
+ #Repeat for every channel in the latent sample
112
+ weight = torch.tensor([npweight] * channels).reshape([channels] + latent_size)
113
+ #Normalize the weight to a minimum of 0 and a mean of 1, that way the loss will be comparable to default.
114
+ weight -= weight.min()
115
+ weight /= weight.mean()
116
+ elif use_weight:
117
+ #If an image does not have an alpha channel, add a ones weight map anyway so we can stack it later
118
+ weight = torch.ones(latent_sample.shape)
119
+ else:
120
+ weight = None
121
+
122
+ if latent_sampling_method == "random":
123
+ entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist, weight=weight)
124
+ else:
125
+ entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample, weight=weight)
126
+
127
+ if not (self.tag_drop_out != 0 or self.shuffle_tags):
128
+ entry.cond_text = self.create_text(filename_text)
129
+
130
+ if include_cond and not (self.tag_drop_out != 0 or self.shuffle_tags):
131
+ with devices.autocast():
132
+ entry.cond = cond_model([entry.cond_text]).to(devices.cpu).squeeze(0)
133
+ groups[image.size].append(len(self.dataset))
134
+ self.dataset.append(entry)
135
+ del torchdata
136
+ del latent_dist
137
+ del latent_sample
138
+ del weight
139
+
140
+ self.length = len(self.dataset)
141
+ self.groups = list(groups.values())
142
+ assert self.length > 0, "No images have been found in the dataset."
143
+ self.batch_size = min(batch_size, self.length)
144
+ self.gradient_step = min(gradient_step, self.length // self.batch_size)
145
+ self.latent_sampling_method = latent_sampling_method
146
+
147
+ if len(groups) > 1:
148
+ print("Buckets:")
149
+ for (w, h), ids in sorted(groups.items(), key=lambda x: x[0]):
150
+ print(f" {w}x{h}: {len(ids)}")
151
+ print()
152
+
153
+ def create_text(self, filename_text):
154
+ text = random.choice(self.lines)
155
+ tags = filename_text.split(',')
156
+ if self.tag_drop_out != 0:
157
+ tags = [t for t in tags if random.random() > self.tag_drop_out]
158
+ if self.shuffle_tags:
159
+ random.shuffle(tags)
160
+ text = text.replace("[filewords]", ','.join(tags))
161
+ text = text.replace("[name]", self.placeholder_token)
162
+ return text
163
+
164
+ def __len__(self):
165
+ return self.length
166
+
167
+ def __getitem__(self, i):
168
+ entry = self.dataset[i]
169
+ if self.tag_drop_out != 0 or self.shuffle_tags:
170
+ entry.cond_text = self.create_text(entry.filename_text)
171
+ if self.latent_sampling_method == "random":
172
+ entry.latent_sample = shared.sd_model.get_first_stage_encoding(entry.latent_dist).to(devices.cpu)
173
+ return entry
174
+
175
+
176
+ class GroupedBatchSampler(Sampler):
177
+ def __init__(self, data_source: PersonalizedBase, batch_size: int):
178
+ super().__init__(data_source)
179
+
180
+ n = len(data_source)
181
+ self.groups = data_source.groups
182
+ self.len = n_batch = n // batch_size
183
+ expected = [len(g) / n * n_batch * batch_size for g in data_source.groups]
184
+ self.base = [int(e) // batch_size for e in expected]
185
+ self.n_rand_batches = nrb = n_batch - sum(self.base)
186
+ self.probs = [e%batch_size/nrb/batch_size if nrb>0 else 0 for e in expected]
187
+ self.batch_size = batch_size
188
+
189
+ def __len__(self):
190
+ return self.len
191
+
192
+ def __iter__(self):
193
+ b = self.batch_size
194
+
195
+ for g in self.groups:
196
+ shuffle(g)
197
+
198
+ batches = []
199
+ for g in self.groups:
200
+ batches.extend(g[i*b:(i+1)*b] for i in range(len(g) // b))
201
+ for _ in range(self.n_rand_batches):
202
+ rand_group = choices(self.groups, self.probs)[0]
203
+ batches.append(choices(rand_group, k=b))
204
+
205
+ shuffle(batches)
206
+
207
+ yield from batches
208
+
209
+
210
+ class PersonalizedDataLoader(DataLoader):
211
+ def __init__(self, dataset, latent_sampling_method="once", batch_size=1, pin_memory=False):
212
+ super(PersonalizedDataLoader, self).__init__(dataset, batch_sampler=GroupedBatchSampler(dataset, batch_size), pin_memory=pin_memory)
213
+ if latent_sampling_method == "random":
214
+ self.collate_fn = collate_wrapper_random
215
+ else:
216
+ self.collate_fn = collate_wrapper
217
+
218
+
219
+ class BatchLoader:
220
+ def __init__(self, data):
221
+ self.cond_text = [entry.cond_text for entry in data]
222
+ self.cond = [entry.cond for entry in data]
223
+ self.latent_sample = torch.stack([entry.latent_sample for entry in data]).squeeze(1)
224
+ if all(entry.weight is not None for entry in data):
225
+ self.weight = torch.stack([entry.weight for entry in data]).squeeze(1)
226
+ else:
227
+ self.weight = None
228
+ #self.emb_index = [entry.emb_index for entry in data]
229
+ #print(self.latent_sample.device)
230
+
231
+ def pin_memory(self):
232
+ self.latent_sample = self.latent_sample.pin_memory()
233
+ return self
234
+
235
+ def collate_wrapper(batch):
236
+ return BatchLoader(batch)
237
+
238
+ class BatchLoaderRandom(BatchLoader):
239
+ def __init__(self, data):
240
+ super().__init__(data)
241
+
242
+ def pin_memory(self):
243
+ return self
244
+
245
+ def collate_wrapper_random(batch):
246
+ return BatchLoaderRandom(batch)
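A standalone sketch of the per-pixel loss-weight normalisation that PersonalizedBase applies to the alpha channel above: the weights are shifted to a minimum of 0 and scaled to a mean of 1 so the weighted loss stays on the same scale as the unweighted default (values here are illustrative only):

import torch

alpha = torch.tensor([[0.2, 0.8], [1.0, 0.6]])    # stand-in for a downscaled alpha channel
weight = alpha.repeat(4, 1, 1)                     # repeat for every latent channel
weight -= weight.min()
weight /= weight.mean()
print(weight.min().item(), weight.mean().item())   # -> 0.0 1.0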
modules/textual_inversion/image_embedding.py ADDED
@@ -0,0 +1,220 @@
1
+ import base64
2
+ import json
3
+ import warnings
4
+
5
+ import numpy as np
6
+ import zlib
7
+ from PIL import Image, ImageDraw
8
+ import torch
9
+
10
+
11
+ class EmbeddingEncoder(json.JSONEncoder):
12
+ def default(self, obj):
13
+ if isinstance(obj, torch.Tensor):
14
+ return {'TORCHTENSOR': obj.cpu().detach().numpy().tolist()}
15
+ return json.JSONEncoder.default(self, obj)
16
+
17
+
18
+ class EmbeddingDecoder(json.JSONDecoder):
19
+ def __init__(self, *args, **kwargs):
20
+ json.JSONDecoder.__init__(self, *args, object_hook=self.object_hook, **kwargs)
21
+
22
+ def object_hook(self, d):
23
+ if 'TORCHTENSOR' in d:
24
+ return torch.from_numpy(np.array(d['TORCHTENSOR']))
25
+ return d
26
+
27
+
28
+ def embedding_to_b64(data):
29
+ d = json.dumps(data, cls=EmbeddingEncoder)
30
+ return base64.b64encode(d.encode())
31
+
32
+
33
+ def embedding_from_b64(data):
34
+ d = base64.b64decode(data)
35
+ return json.loads(d, cls=EmbeddingDecoder)
36
+
37
+
38
+ def lcg(m=2**32, a=1664525, c=1013904223, seed=0):
39
+ while True:
40
+ seed = (a * seed + c) % m
41
+ yield seed % 255
42
+
43
+
44
+ def xor_block(block):
45
+ g = lcg()
46
+ randblock = np.array([next(g) for _ in range(np.prod(block.shape))]).astype(np.uint8).reshape(block.shape)
47
+ return np.bitwise_xor(block.astype(np.uint8), randblock & 0x0F)
48
+
49
+
50
+ def style_block(block, sequence):
51
+ im = Image.new('RGB', (block.shape[1], block.shape[0]))
52
+ draw = ImageDraw.Draw(im)
53
+ i = 0
54
+ for x in range(-6, im.size[0], 8):
55
+ for yi, y in enumerate(range(-6, im.size[1], 8)):
56
+ offset = 0
57
+ if yi % 2 == 0:
58
+ offset = 4
59
+ shade = sequence[i % len(sequence)]
60
+ i += 1
61
+ draw.ellipse((x+offset, y, x+6+offset, y+6), fill=(shade, shade, shade))
62
+
63
+ fg = np.array(im).astype(np.uint8) & 0xF0
64
+
65
+ return block ^ fg
66
+
67
+
68
+ def insert_image_data_embed(image, data):
69
+ d = 3
70
+ data_compressed = zlib.compress(json.dumps(data, cls=EmbeddingEncoder).encode(), level=9)
71
+ data_np_ = np.frombuffer(data_compressed, np.uint8).copy()
72
+ data_np_high = data_np_ >> 4
73
+ data_np_low = data_np_ & 0x0F
74
+
75
+ h = image.size[1]
76
+ next_size = data_np_low.shape[0] + (h-(data_np_low.shape[0] % h))
77
+ next_size = next_size + ((h*d)-(next_size % (h*d)))
78
+
79
+ data_np_low = np.resize(data_np_low, next_size)
80
+ data_np_low = data_np_low.reshape((h, -1, d))
81
+
82
+ data_np_high = np.resize(data_np_high, next_size)
83
+ data_np_high = data_np_high.reshape((h, -1, d))
84
+
85
+ edge_style = list(data['string_to_param'].values())[0].cpu().detach().numpy().tolist()[0][:1024]
86
+ edge_style = (np.abs(edge_style)/np.max(np.abs(edge_style))*255).astype(np.uint8)
87
+
88
+ data_np_low = style_block(data_np_low, sequence=edge_style)
89
+ data_np_low = xor_block(data_np_low)
90
+ data_np_high = style_block(data_np_high, sequence=edge_style[::-1])
91
+ data_np_high = xor_block(data_np_high)
92
+
93
+ im_low = Image.fromarray(data_np_low, mode='RGB')
94
+ im_high = Image.fromarray(data_np_high, mode='RGB')
95
+
96
+ background = Image.new('RGB', (image.size[0]+im_low.size[0]+im_high.size[0]+2, image.size[1]), (0, 0, 0))
97
+ background.paste(im_low, (0, 0))
98
+ background.paste(image, (im_low.size[0]+1, 0))
99
+ background.paste(im_high, (im_low.size[0]+1+image.size[0]+1, 0))
100
+
101
+ return background
102
+
103
+
104
+ def crop_black(img, tol=0):
105
+ mask = (img > tol).all(2)
106
+ mask0, mask1 = mask.any(0), mask.any(1)
107
+ col_start, col_end = mask0.argmax(), mask.shape[1]-mask0[::-1].argmax()
108
+ row_start, row_end = mask1.argmax(), mask.shape[0]-mask1[::-1].argmax()
109
+ return img[row_start:row_end, col_start:col_end]
110
+
111
+
112
+ def extract_image_data_embed(image):
113
+ d = 3
114
+ outarr = crop_black(np.array(image.convert('RGB').getdata()).reshape(image.size[1], image.size[0], d).astype(np.uint8)) & 0x0F
115
+ black_cols = np.where(np.sum(outarr, axis=(0, 2)) == 0)
116
+ if black_cols[0].shape[0] < 2:
117
+ print('No Image data blocks found.')
118
+ return None
119
+
120
+ data_block_lower = outarr[:, :black_cols[0].min(), :].astype(np.uint8)
121
+ data_block_upper = outarr[:, black_cols[0].max()+1:, :].astype(np.uint8)
122
+
123
+ data_block_lower = xor_block(data_block_lower)
124
+ data_block_upper = xor_block(data_block_upper)
125
+
126
+ data_block = (data_block_upper << 4) | (data_block_lower)
127
+ data_block = data_block.flatten().tobytes()
128
+
129
+ data = zlib.decompress(data_block)
130
+ return json.loads(data, cls=EmbeddingDecoder)
131
+
132
+
133
+ def caption_image_overlay(srcimage, title, footerLeft, footerMid, footerRight, textfont=None):
134
+ from modules.images import get_font
135
+ if textfont:
136
+ warnings.warn(
137
+ 'passing in a textfont to caption_image_overlay is deprecated and does nothing',
138
+ DeprecationWarning,
139
+ stacklevel=2,
140
+ )
141
+ from math import cos
142
+
143
+ image = srcimage.copy()
144
+ fontsize = 32
145
+ factor = 1.5
146
+ gradient = Image.new('RGBA', (1, image.size[1]), color=(0, 0, 0, 0))
147
+ for y in range(image.size[1]):
148
+ mag = 1-cos(y/image.size[1]*factor)
149
+ mag = max(mag, 1-cos((image.size[1]-y)/image.size[1]*factor*1.1))
150
+ gradient.putpixel((0, y), (0, 0, 0, int(mag*255)))
151
+ image = Image.alpha_composite(image.convert('RGBA'), gradient.resize(image.size))
152
+
153
+ draw = ImageDraw.Draw(image)
154
+
155
+ font = get_font(fontsize)
156
+ padding = 10
157
+
158
+ _, _, w, h = draw.textbbox((0, 0), title, font=font)
159
+ fontsize = min(int(fontsize * (((image.size[0]*0.75)-(padding*4))/w)), 72)
160
+ font = get_font(fontsize)
161
+ _, _, w, h = draw.textbbox((0, 0), title, font=font)
162
+ draw.text((padding, padding), title, anchor='lt', font=font, fill=(255, 255, 255, 230))
163
+
164
+ _, _, w, h = draw.textbbox((0, 0), footerLeft, font=font)
165
+ fontsize_left = min(int(fontsize * (((image.size[0]/3)-(padding))/w)), 72)
166
+ _, _, w, h = draw.textbbox((0, 0), footerMid, font=font)
167
+ fontsize_mid = min(int(fontsize * (((image.size[0]/3)-(padding))/w)), 72)
168
+ _, _, w, h = draw.textbbox((0, 0), footerRight, font=font)
169
+ fontsize_right = min(int(fontsize * (((image.size[0]/3)-(padding))/w)), 72)
170
+
171
+ font = get_font(min(fontsize_left, fontsize_mid, fontsize_right))
172
+
173
+ draw.text((padding, image.size[1]-padding), footerLeft, anchor='ls', font=font, fill=(255, 255, 255, 230))
174
+ draw.text((image.size[0]/2, image.size[1]-padding), footerMid, anchor='ms', font=font, fill=(255, 255, 255, 230))
175
+ draw.text((image.size[0]-padding, image.size[1]-padding), footerRight, anchor='rs', font=font, fill=(255, 255, 255, 230))
176
+
177
+ return image
178
+
179
+
180
+ if __name__ == '__main__':
181
+
182
+ testEmbed = Image.open('test_embedding.png')
183
+ data = extract_image_data_embed(testEmbed)
184
+ assert data is not None
185
+
186
+ data = embedding_from_b64(testEmbed.text['sd-ti-embedding'])
187
+ assert data is not None
188
+
189
+ image = Image.new('RGBA', (512, 512), (255, 255, 200, 255))
190
+ cap_image = caption_image_overlay(image, 'title', 'footerLeft', 'footerMid', 'footerRight')
191
+
192
+ test_embed = {'string_to_param': {'*': torch.from_numpy(np.random.random((2, 4096)))}}
193
+
194
+ embedded_image = insert_image_data_embed(cap_image, test_embed)
195
+
196
+ retrieved_embed = extract_image_data_embed(embedded_image)
197
+
198
+ assert str(retrieved_embed) == str(test_embed)
199
+
200
+ embedded_image2 = insert_image_data_embed(cap_image, retrieved_embed)
201
+
202
+ assert embedded_image == embedded_image2
203
+
204
+ g = lcg()
205
+ shared_random = np.array([next(g) for _ in range(100)]).astype(np.uint8).tolist()
206
+
207
+ reference_random = [253, 242, 127, 44, 157, 27, 239, 133, 38, 79, 167, 4, 177,
208
+ 95, 130, 79, 78, 14, 52, 215, 220, 194, 126, 28, 240, 179,
209
+ 160, 153, 149, 50, 105, 14, 21, 218, 199, 18, 54, 198, 193,
210
+ 38, 128, 19, 53, 195, 124, 75, 205, 12, 6, 145, 0, 28,
211
+ 30, 148, 8, 45, 218, 171, 55, 249, 97, 166, 12, 35, 0,
212
+ 41, 221, 122, 215, 170, 31, 113, 186, 97, 119, 31, 23, 185,
213
+ 66, 140, 30, 41, 37, 63, 137, 109, 216, 55, 159, 145, 82,
214
+ 204, 86, 73, 222, 44, 198, 118, 240, 97]
215
+
216
+ assert shared_random == reference_random
217
+
218
+ hunna_kay_random_sum = sum(np.array([next(g) for _ in range(100000)]).astype(np.uint8).tolist())
219
+
220
+ assert 12731374 == hunna_kay_random_sum
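A short sketch of how an embedding is read back from a shared preview image, mirroring the branching later used in textual_inversion.load_from_file; it uses the test_embedding.png added in this commit (path relative to the module directory), and the data may come either from the 'sd-ti-embedding' PNG text chunk or from the pixel blocks pasted on both sides of the image:

from PIL import Image
from modules.textual_inversion.image_embedding import embedding_from_b64, extract_image_data_embed

im = Image.open("test_embedding.png")
if hasattr(im, "text") and "sd-ti-embedding" in im.text:
    data = embedding_from_b64(im.text["sd-ti-embedding"])
else:
    data = extract_image_data_embed(im)
print(data.get("name", "<unnamed>") if data else "no embedding data found")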
modules/textual_inversion/learn_schedule.py ADDED
@@ -0,0 +1,81 @@
1
+ import tqdm
2
+
3
+
4
+ class LearnScheduleIterator:
5
+ def __init__(self, learn_rate, max_steps, cur_step=0):
6
+ """
7
+ specify learn_rate as "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000
8
+ """
9
+
10
+ pairs = learn_rate.split(',')
11
+ self.rates = []
12
+ self.it = 0
13
+ self.maxit = 0
14
+ try:
15
+ for pair in pairs:
16
+ if not pair.strip():
17
+ continue
18
+ tmp = pair.split(':')
19
+ if len(tmp) == 2:
20
+ step = int(tmp[1])
21
+ if step > cur_step:
22
+ self.rates.append((float(tmp[0]), min(step, max_steps)))
23
+ self.maxit += 1
24
+ if step > max_steps:
25
+ return
26
+ elif step == -1:
27
+ self.rates.append((float(tmp[0]), max_steps))
28
+ self.maxit += 1
29
+ return
30
+ else:
31
+ self.rates.append((float(tmp[0]), max_steps))
32
+ self.maxit += 1
33
+ return
34
+ assert self.rates
35
+ except (ValueError, AssertionError) as e:
36
+ raise Exception('Invalid learning rate schedule. It should be a number or, for example, like "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000.') from e
37
+
38
+
39
+ def __iter__(self):
40
+ return self
41
+
42
+ def __next__(self):
43
+ if self.it < self.maxit:
44
+ self.it += 1
45
+ return self.rates[self.it - 1]
46
+ else:
47
+ raise StopIteration
48
+
49
+
50
+ class LearnRateScheduler:
51
+ def __init__(self, learn_rate, max_steps, cur_step=0, verbose=True):
52
+ self.schedules = LearnScheduleIterator(learn_rate, max_steps, cur_step)
53
+ (self.learn_rate, self.end_step) = next(self.schedules)
54
+ self.verbose = verbose
55
+
56
+ if self.verbose:
57
+ print(f'Training at rate of {self.learn_rate} until step {self.end_step}')
58
+
59
+ self.finished = False
60
+
61
+ def step(self, step_number):
62
+ if step_number < self.end_step:
63
+ return False
64
+
65
+ try:
66
+ (self.learn_rate, self.end_step) = next(self.schedules)
67
+ except StopIteration:
68
+ self.finished = True
69
+ return False
70
+ return True
71
+
72
+ def apply(self, optimizer, step_number):
73
+ if not self.step(step_number):
74
+ return
75
+
76
+ if self.verbose:
77
+ tqdm.tqdm.write(f'Training at rate of {self.learn_rate} until step {self.end_step}')
78
+
79
+ for pg in optimizer.param_groups:
80
+ pg['lr'] = self.learn_rate
81
+
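A minimal sketch of the scheduler above with a throwaway optimizer: the rate string "0.005:100, 1e-3:1000" trains at 0.005 until step 100, then 1e-3 until step 1000, and apply() only touches the optimizer when a schedule boundary is crossed:

import torch
from modules.textual_inversion.learn_schedule import LearnRateScheduler

param = torch.nn.Parameter(torch.zeros(4))
optimizer = torch.optim.AdamW([param], lr=1.0)
scheduler = LearnRateScheduler("0.005:100, 1e-3:1000", max_steps=1000)

for step in range(1000):
    scheduler.apply(optimizer, step)   # updates the param group lr when the schedule advances
    if scheduler.finished:
        break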
modules/textual_inversion/logging.py ADDED
@@ -0,0 +1,64 @@
1
+ import datetime
2
+ import json
3
+ import os
4
+
5
+ saved_params_shared = {
6
+ "batch_size",
7
+ "clip_grad_mode",
8
+ "clip_grad_value",
9
+ "create_image_every",
10
+ "data_root",
11
+ "gradient_step",
12
+ "initial_step",
13
+ "latent_sampling_method",
14
+ "learn_rate",
15
+ "log_directory",
16
+ "model_hash",
17
+ "model_name",
18
+ "num_of_dataset_images",
19
+ "steps",
20
+ "template_file",
21
+ "training_height",
22
+ "training_width",
23
+ }
24
+ saved_params_ti = {
25
+ "embedding_name",
26
+ "num_vectors_per_token",
27
+ "save_embedding_every",
28
+ "save_image_with_stored_embedding",
29
+ }
30
+ saved_params_hypernet = {
31
+ "activation_func",
32
+ "add_layer_norm",
33
+ "hypernetwork_name",
34
+ "layer_structure",
35
+ "save_hypernetwork_every",
36
+ "use_dropout",
37
+ "weight_init",
38
+ }
39
+ saved_params_all = saved_params_shared | saved_params_ti | saved_params_hypernet
40
+ saved_params_previews = {
41
+ "preview_cfg_scale",
42
+ "preview_height",
43
+ "preview_negative_prompt",
44
+ "preview_prompt",
45
+ "preview_sampler_index",
46
+ "preview_seed",
47
+ "preview_steps",
48
+ "preview_width",
49
+ }
50
+
51
+
52
+ def save_settings_to_file(log_directory, all_params):
53
+ now = datetime.datetime.now()
54
+ params = {"datetime": now.strftime("%Y-%m-%d %H:%M:%S")}
55
+
56
+ keys = saved_params_all
57
+ if all_params.get('preview_from_txt2img'):
58
+ keys = keys | saved_params_previews
59
+
60
+ params.update({k: v for k, v in all_params.items() if k in keys})
61
+
62
+ filename = f'settings-{now.strftime("%Y-%m-%d-%H-%M-%S")}.json'
63
+ with open(os.path.join(log_directory, filename), "w") as file:
64
+ json.dump(params, file, indent=4)
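A small sketch of how the training code below uses this helper: train_embedding passes its whole locals() dict and only the whitelisted keys above survive into the JSON file (all values here are hypothetical):

import tempfile
from modules.textual_inversion.logging import save_settings_to_file

log_dir = tempfile.mkdtemp()
save_settings_to_file(log_dir, {
    "learn_rate": "0.005:100",
    "batch_size": 1,
    "steps": 1000,
    "embedding_name": "my-token",
    "some_unrelated_local": object(),   # ignored: not in any saved_params_* set
})
# writes settings-YYYY-MM-DD-HH-MM-SS.json into log_dir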
modules/textual_inversion/test_embedding.png ADDED

Git LFS Details

  • SHA256: ceb3de4098040013be6ce7169b0c0e67c2de86a8cfb43d02b16013f6af2d352e
  • Pointer size: 131 Bytes
  • Size of remote file: 489 kB
modules/textual_inversion/textual_inversion.py ADDED
@@ -0,0 +1,700 @@
1
+ import os
2
+ from collections import namedtuple
3
+ from contextlib import closing
4
+
5
+ import torch
6
+ import tqdm
7
+ import html
8
+ import datetime
9
+ import csv
10
+ import safetensors.torch
11
+
12
+ import numpy as np
13
+ from PIL import Image, PngImagePlugin
14
+ from torch.utils.tensorboard import SummaryWriter
15
+
16
+ from modules import shared, devices, sd_hijack, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors, hashes
17
+ import modules.textual_inversion.dataset
18
+ from modules.textual_inversion.learn_schedule import LearnRateScheduler
19
+
20
+ from modules.textual_inversion.image_embedding import embedding_to_b64, embedding_from_b64, insert_image_data_embed, extract_image_data_embed, caption_image_overlay
21
+ from modules.textual_inversion.logging import save_settings_to_file
22
+
23
+
24
+ TextualInversionTemplate = namedtuple("TextualInversionTemplate", ["name", "path"])
25
+ textual_inversion_templates = {}
26
+
27
+
28
+ def list_textual_inversion_templates():
29
+ textual_inversion_templates.clear()
30
+
31
+ for root, _, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir):
32
+ for fn in fns:
33
+ path = os.path.join(root, fn)
34
+
35
+ textual_inversion_templates[fn] = TextualInversionTemplate(fn, path)
36
+
37
+ return textual_inversion_templates
38
+
39
+
40
+ class Embedding:
41
+ def __init__(self, vec, name, step=None):
42
+ self.vec = vec
43
+ self.name = name
44
+ self.step = step
45
+ self.shape = None
46
+ self.vectors = 0
47
+ self.cached_checksum = None
48
+ self.sd_checkpoint = None
49
+ self.sd_checkpoint_name = None
50
+ self.optimizer_state_dict = None
51
+ self.filename = None
52
+ self.hash = None
53
+ self.shorthash = None
54
+
55
+ def save(self, filename):
56
+ embedding_data = {
57
+ "string_to_token": {"*": 265},
58
+ "string_to_param": {"*": self.vec},
59
+ "name": self.name,
60
+ "step": self.step,
61
+ "sd_checkpoint": self.sd_checkpoint,
62
+ "sd_checkpoint_name": self.sd_checkpoint_name,
63
+ }
64
+
65
+ torch.save(embedding_data, filename)
66
+
67
+ if shared.opts.save_optimizer_state and self.optimizer_state_dict is not None:
68
+ optimizer_saved_dict = {
69
+ 'hash': self.checksum(),
70
+ 'optimizer_state_dict': self.optimizer_state_dict,
71
+ }
72
+ torch.save(optimizer_saved_dict, f"{filename}.optim")
73
+
74
+ def checksum(self):
75
+ if self.cached_checksum is not None:
76
+ return self.cached_checksum
77
+
78
+ def const_hash(a):
79
+ r = 0
80
+ for v in a:
81
+ r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF
82
+ return r
83
+
84
+ self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}'
85
+ return self.cached_checksum
86
+
87
+ def set_hash(self, v):
88
+ self.hash = v
89
+ self.shorthash = self.hash[0:12]
90
+
91
+
92
+ class DirWithTextualInversionEmbeddings:
93
+ def __init__(self, path):
94
+ self.path = path
95
+ self.mtime = None
96
+
97
+ def has_changed(self):
98
+ if not os.path.isdir(self.path):
99
+ return False
100
+
101
+ mt = os.path.getmtime(self.path)
102
+ if self.mtime is None or mt > self.mtime:
103
+ return True
104
+
105
+ def update(self):
106
+ if not os.path.isdir(self.path):
107
+ return
108
+
109
+ self.mtime = os.path.getmtime(self.path)
110
+
111
+
112
+ class EmbeddingDatabase:
113
+ def __init__(self):
114
+ self.ids_lookup = {}
115
+ self.word_embeddings = {}
116
+ self.skipped_embeddings = {}
117
+ self.expected_shape = -1
118
+ self.embedding_dirs = {}
119
+ self.previously_displayed_embeddings = ()
120
+
121
+ def add_embedding_dir(self, path):
122
+ self.embedding_dirs[path] = DirWithTextualInversionEmbeddings(path)
123
+
124
+ def clear_embedding_dirs(self):
125
+ self.embedding_dirs.clear()
126
+
127
+ def register_embedding(self, embedding, model):
128
+ return self.register_embedding_by_name(embedding, model, embedding.name)
129
+
130
+ def register_embedding_by_name(self, embedding, model, name):
131
+ ids = model.cond_stage_model.tokenize([name])[0]
132
+ first_id = ids[0]
133
+ if first_id not in self.ids_lookup:
134
+ self.ids_lookup[first_id] = []
135
+ if name in self.word_embeddings:
136
+ # remove old one from the lookup list
137
+ lookup = [x for x in self.ids_lookup[first_id] if x[1].name!=name]
138
+ else:
139
+ lookup = self.ids_lookup[first_id]
140
+ if embedding is not None:
141
+ lookup += [(ids, embedding)]
142
+ self.ids_lookup[first_id] = sorted(lookup, key=lambda x: len(x[0]), reverse=True)
143
+ if embedding is None:
144
+ # unregister embedding with specified name
145
+ if name in self.word_embeddings:
146
+ del self.word_embeddings[name]
147
+ if len(self.ids_lookup[first_id])==0:
148
+ del self.ids_lookup[first_id]
149
+ return None
150
+ self.word_embeddings[name] = embedding
151
+ return embedding
152
+
153
+ def get_expected_shape(self):
154
+ vec = shared.sd_model.cond_stage_model.encode_embedding_init_text(",", 1)
155
+ return vec.shape[1]
156
+
157
+ def load_from_file(self, path, filename):
158
+ name, ext = os.path.splitext(filename)
159
+ ext = ext.upper()
160
+
161
+ if ext in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
162
+ _, second_ext = os.path.splitext(name)
163
+ if second_ext.upper() == '.PREVIEW':
164
+ return
165
+
166
+ embed_image = Image.open(path)
167
+ if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
168
+ data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
169
+ name = data.get('name', name)
170
+ else:
171
+ data = extract_image_data_embed(embed_image)
172
+ if data:
173
+ name = data.get('name', name)
174
+ else:
175
+ # if data is None, this is not an embedding, just a preview image
176
+ return
177
+ elif ext in ['.BIN', '.PT']:
178
+ data = torch.load(path, map_location="cpu")
179
+ elif ext in ['.SAFETENSORS']:
180
+ data = safetensors.torch.load_file(path, device="cpu")
181
+ else:
182
+ return
183
+
184
+ embedding = create_embedding_from_data(data, name, filename=filename, filepath=path)
185
+
186
+ if self.expected_shape == -1 or self.expected_shape == embedding.shape:
187
+ self.register_embedding(embedding, shared.sd_model)
188
+ else:
189
+ self.skipped_embeddings[name] = embedding
190
+
191
+ def load_from_dir(self, embdir):
192
+ if not os.path.isdir(embdir.path):
193
+ return
194
+
195
+ for root, _, fns in os.walk(embdir.path, followlinks=True):
196
+ for fn in fns:
197
+ try:
198
+ fullfn = os.path.join(root, fn)
199
+
200
+ if os.stat(fullfn).st_size == 0:
201
+ continue
202
+
203
+ self.load_from_file(fullfn, fn)
204
+ except Exception:
205
+ errors.report(f"Error loading embedding {fn}", exc_info=True)
206
+ continue
207
+
208
+ def load_textual_inversion_embeddings(self, force_reload=False):
209
+ if not force_reload:
210
+ need_reload = False
211
+ for embdir in self.embedding_dirs.values():
212
+ if embdir.has_changed():
213
+ need_reload = True
214
+ break
215
+
216
+ if not need_reload:
217
+ return
218
+
219
+ self.ids_lookup.clear()
220
+ self.word_embeddings.clear()
221
+ self.skipped_embeddings.clear()
222
+ self.expected_shape = self.get_expected_shape()
223
+
224
+ for embdir in self.embedding_dirs.values():
225
+ self.load_from_dir(embdir)
226
+ embdir.update()
227
+
228
+ # re-sort word_embeddings because load_from_dir may not load in alphabetic order.
229
+ # using a temporary copy so we don't reinitialize self.word_embeddings in case other objects have a reference to it.
230
+ sorted_word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())}
231
+ self.word_embeddings.clear()
232
+ self.word_embeddings.update(sorted_word_embeddings)
233
+
234
+ displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys()))
235
+ if shared.opts.textual_inversion_print_at_load and self.previously_displayed_embeddings != displayed_embeddings:
236
+ self.previously_displayed_embeddings = displayed_embeddings
237
+ print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}")
238
+ if self.skipped_embeddings:
239
+ print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings.keys())}")
240
+
241
+ def find_embedding_at_position(self, tokens, offset):
242
+ token = tokens[offset]
243
+ possible_matches = self.ids_lookup.get(token, None)
244
+
245
+ if possible_matches is None:
246
+ return None, None
247
+
248
+ for ids, embedding in possible_matches:
249
+ if tokens[offset:offset + len(ids)] == ids:
250
+ return embedding, len(ids)
251
+
252
+ return None, None
253
+
254
+
255
+ def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'):
256
+ cond_model = shared.sd_model.cond_stage_model
257
+
258
+ with devices.autocast():
259
+ cond_model([""]) # will send cond model to GPU if lowvram/medvram is active
260
+
261
+ #cond_model expects at least some text, so we provide '*' as backup.
262
+ embedded = cond_model.encode_embedding_init_text(init_text or '*', num_vectors_per_token)
263
+ vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device)
264
+
265
+ #Only copy if we provided an init_text, otherwise keep vectors as zeros
266
+ if init_text:
267
+ for i in range(num_vectors_per_token):
268
+ vec[i] = embedded[i * int(embedded.shape[0]) // num_vectors_per_token]
269
+
270
+ # Remove illegal characters from name.
271
+ name = "".join( x for x in name if (x.isalnum() or x in "._- "))
272
+ fn = os.path.join(shared.cmd_opts.embeddings_dir, f"{name}.pt")
273
+ if not overwrite_old:
274
+ assert not os.path.exists(fn), f"file {fn} already exists"
275
+
276
+ embedding = Embedding(vec, name)
277
+ embedding.step = 0
278
+ embedding.save(fn)
279
+
280
+ return fn
281
+
282
+
283
+ def create_embedding_from_data(data, name, filename='unknown embedding file', filepath=None):
284
+ if 'string_to_param' in data: # textual inversion embeddings
285
+ param_dict = data['string_to_param']
286
+ param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11
287
+ assert len(param_dict) == 1, 'embedding file has multiple terms in it'
288
+ emb = next(iter(param_dict.items()))[1]
289
+ vec = emb.detach().to(devices.device, dtype=torch.float32)
290
+ shape = vec.shape[-1]
291
+ vectors = vec.shape[0]
292
+ elif type(data) == dict and 'clip_g' in data and 'clip_l' in data: # SDXL embedding
293
+ vec = {k: v.detach().to(devices.device, dtype=torch.float32) for k, v in data.items()}
294
+ shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1]
295
+ vectors = data['clip_g'].shape[0]
296
+ elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: # diffuser concepts
297
+ assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
298
+
299
+ emb = next(iter(data.values()))
300
+ if len(emb.shape) == 1:
301
+ emb = emb.unsqueeze(0)
302
+ vec = emb.detach().to(devices.device, dtype=torch.float32)
303
+ shape = vec.shape[-1]
304
+ vectors = vec.shape[0]
305
+ else:
306
+ raise Exception(f"Couldn't identify {filename} as either a textual inversion embedding or a diffuser concept.")
307
+
308
+ embedding = Embedding(vec, name)
309
+ embedding.step = data.get('step', None)
310
+ embedding.sd_checkpoint = data.get('sd_checkpoint', None)
311
+ embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
312
+ embedding.vectors = vectors
313
+ embedding.shape = shape
314
+
315
+ if filepath:
316
+ embedding.filename = filepath
317
+ embedding.set_hash(hashes.sha256(filepath, "textual_inversion/" + name) or '')
318
+
319
+ return embedding
320
+
321
+
322
+ def write_loss(log_directory, filename, step, epoch_len, values):
323
+ if shared.opts.training_write_csv_every == 0:
324
+ return
325
+
326
+ if step % shared.opts.training_write_csv_every != 0:
327
+ return
328
+ write_csv_header = False if os.path.exists(os.path.join(log_directory, filename)) else True
329
+
330
+ with open(os.path.join(log_directory, filename), "a+", newline='') as fout:
331
+ csv_writer = csv.DictWriter(fout, fieldnames=["step", "epoch", "epoch_step", *(values.keys())])
332
+
333
+ if write_csv_header:
334
+ csv_writer.writeheader()
335
+
336
+ epoch = (step - 1) // epoch_len
337
+ epoch_step = (step - 1) % epoch_len
338
+
339
+ csv_writer.writerow({
340
+ "step": step,
341
+ "epoch": epoch,
342
+ "epoch_step": epoch_step,
343
+ **values,
344
+ })
345
+
346
+ def tensorboard_setup(log_directory):
347
+ os.makedirs(os.path.join(log_directory, "tensorboard"), exist_ok=True)
348
+ return SummaryWriter(
349
+ log_dir=os.path.join(log_directory, "tensorboard"),
350
+ flush_secs=shared.opts.training_tensorboard_flush_every)
351
+
352
+ def tensorboard_add(tensorboard_writer, loss, global_step, step, learn_rate, epoch_num):
353
+ tensorboard_add_scaler(tensorboard_writer, "Loss/train", loss, global_step)
354
+ tensorboard_add_scaler(tensorboard_writer, f"Loss/train/epoch-{epoch_num}", loss, step)
355
+ tensorboard_add_scaler(tensorboard_writer, "Learn rate/train", learn_rate, global_step)
356
+ tensorboard_add_scaler(tensorboard_writer, f"Learn rate/train/epoch-{epoch_num}", learn_rate, step)
357
+
358
+ def tensorboard_add_scaler(tensorboard_writer, tag, value, step):
359
+ tensorboard_writer.add_scalar(tag=tag,
360
+ scalar_value=value, global_step=step)
361
+
362
+ def tensorboard_add_image(tensorboard_writer, tag, pil_image, step):
363
+ # Convert a pil image to a torch tensor
364
+ img_tensor = torch.as_tensor(np.array(pil_image, copy=True))
365
+ img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0],
366
+ len(pil_image.getbands()))
367
+ img_tensor = img_tensor.permute((2, 0, 1))
368
+
369
+ tensorboard_writer.add_image(tag, img_tensor, global_step=step)
370
+
371
+ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_model_every, create_image_every, log_directory, name="embedding"):
372
+ assert model_name, f"{name} not selected"
373
+ assert learn_rate, "Learning rate is empty or 0"
374
+ assert isinstance(batch_size, int), "Batch size must be integer"
375
+ assert batch_size > 0, "Batch size must be positive"
376
+ assert isinstance(gradient_step, int), "Gradient accumulation step must be integer"
377
+ assert gradient_step > 0, "Gradient accumulation step must be positive"
378
+ assert data_root, "Dataset directory is empty"
379
+ assert os.path.isdir(data_root), "Dataset directory doesn't exist"
380
+ assert os.listdir(data_root), "Dataset directory is empty"
381
+ assert template_filename, "Prompt template file not selected"
382
+ assert template_file, f"Prompt template file {template_filename} not found"
383
+ assert os.path.isfile(template_file.path), f"Prompt template file {template_filename} doesn't exist"
384
+ assert steps, "Max steps is empty or 0"
385
+ assert isinstance(steps, int), "Max steps must be integer"
386
+ assert steps > 0, "Max steps must be positive"
387
+ assert isinstance(save_model_every, int), f"Save {name} must be integer"
388
+ assert save_model_every >= 0, f"Save {name} must be positive or 0"
389
+ assert isinstance(create_image_every, int), "Create image must be integer"
390
+ assert create_image_every >= 0, "Create image must be positive or 0"
391
+ if save_model_every or create_image_every:
392
+ assert log_directory, "Log directory is empty"
393
+
394
+
395
+ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_name, preview_cfg_scale, preview_seed, preview_width, preview_height):
396
+ from modules import processing
397
+
398
+ save_embedding_every = save_embedding_every or 0
399
+ create_image_every = create_image_every or 0
400
+ template_file = textual_inversion_templates.get(template_filename, None)
401
+ validate_train_inputs(embedding_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_embedding_every, create_image_every, log_directory, name="embedding")
402
+ template_file = template_file.path
403
+
404
+ shared.state.job = "train-embedding"
405
+ shared.state.textinfo = "Initializing textual inversion training..."
406
+ shared.state.job_count = steps
407
+
408
+ filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
409
+
410
+ log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name)
411
+ unload = shared.opts.unload_models_when_training
412
+
413
+ if save_embedding_every > 0:
414
+ embedding_dir = os.path.join(log_directory, "embeddings")
415
+ os.makedirs(embedding_dir, exist_ok=True)
416
+ else:
417
+ embedding_dir = None
418
+
419
+ if create_image_every > 0:
420
+ images_dir = os.path.join(log_directory, "images")
421
+ os.makedirs(images_dir, exist_ok=True)
422
+ else:
423
+ images_dir = None
424
+
425
+ if create_image_every > 0 and save_image_with_stored_embedding:
426
+ images_embeds_dir = os.path.join(log_directory, "image_embeddings")
427
+ os.makedirs(images_embeds_dir, exist_ok=True)
428
+ else:
429
+ images_embeds_dir = None
430
+
431
+ hijack = sd_hijack.model_hijack
432
+
433
+ embedding = hijack.embedding_db.word_embeddings[embedding_name]
434
+ checkpoint = sd_models.select_checkpoint()
435
+
436
+ initial_step = embedding.step or 0
437
+ if initial_step >= steps:
438
+ shared.state.textinfo = "Model has already been trained beyond specified max steps"
439
+ return embedding, filename
440
+
441
+ scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
442
+ clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \
443
+ torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \
444
+ None
445
+ if clip_grad:
446
+ clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False)
447
+ # dataset loading may take a while, so input validations and early returns should be done before this
448
+ shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
449
+ old_parallel_processing_allowed = shared.parallel_processing_allowed
450
+
451
+ if shared.opts.training_enable_tensorboard:
452
+ tensorboard_writer = tensorboard_setup(log_directory)
453
+
454
+ pin_memory = shared.opts.pin_memory
455
+
456
+ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight)
457
+
458
+ if shared.opts.save_training_settings_to_txt:
459
+ save_settings_to_file(log_directory, {**dict(model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()})
460
+
461
+ latent_sampling_method = ds.latent_sampling_method
462
+
463
+ dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
464
+
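+ # when unloading is enabled, disable parallel processing and keep the VAE (first_stage_model) on the CPU to free VRAM during training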
465
+ if unload:
466
+ shared.parallel_processing_allowed = False
467
+ shared.sd_model.first_stage_model.to(devices.cpu)
468
+
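+ # only the embedding vectors are optimized; the rest of the network stays frozen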
469
+ embedding.vec.requires_grad = True
470
+ optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0)
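+ # optionally resume the AdamW state from <embedding>.pt.optim when its stored hash matches the current embedding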
471
+ if shared.opts.save_optimizer_state:
472
+ optimizer_state_dict = None
473
+ if os.path.exists(f"{filename}.optim"):
474
+ optimizer_saved_dict = torch.load(f"{filename}.optim", map_location='cpu')
475
+ if embedding.checksum() == optimizer_saved_dict.get('hash', None):
476
+ optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
477
+
478
+ if optimizer_state_dict is not None:
479
+ optimizer.load_state_dict(optimizer_state_dict)
480
+ print("Loaded existing optimizer from checkpoint")
481
+ else:
482
+ print("No saved optimizer exists in checkpoint")
483
+
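+ # mixed-precision training: the GradScaler scales the loss so small fp16 gradients do not underflow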
484
+ scaler = torch.cuda.amp.GradScaler()
485
+
486
+ batch_size = ds.batch_size
487
+ gradient_step = ds.gradient_step
488
+ # images processed = batch_size * gradient_step * optimizer steps
489
+ steps_per_epoch = len(ds) // batch_size // gradient_step
490
+ max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step
491
+ loss_step = 0
492
+ _loss_step = 0  # internal accumulator for the current gradient accumulation window
493
+
494
+ last_saved_file = "<none>"
495
+ last_saved_image = "<none>"
496
+ forced_filename = "<none>"
497
+ embedding_yet_to_be_embedded = False
498
+
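+ # models with 'hybrid' or 'concat' conditioning (e.g. inpainting) also need image conditioning; it is built once below and reused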
499
+ is_training_inpainting_model = shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}
500
+ img_c = None
501
+
502
+ pbar = tqdm.tqdm(total=steps - initial_step)
503
+ try:
504
+ sd_hijack_checkpoint.add()
505
+
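+ # the loop below runs once per micro-batch; an optimizer step (and embedding.step) only advances every gradient_step micro-batches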
506
+ for _ in range((steps-initial_step) * gradient_step):
507
+ if scheduler.finished:
508
+ break
509
+ if shared.state.interrupted:
510
+ break
511
+ for j, batch in enumerate(dl):
512
+ # works as a drop_last=True for gradient accumulation
513
+ if j == max_steps_per_epoch:
514
+ break
515
+ scheduler.apply(optimizer, embedding.step)
516
+ if scheduler.finished:
517
+ break
518
+ if shared.state.interrupted:
519
+ break
520
+
521
+ if clip_grad:
522
+ clip_grad_sched.step(embedding.step)
523
+
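+ # forward pass under autocast: encode the caption, assemble the conditioning, and compute the loss, pre-divided by gradient_step for accumulation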
524
+ with devices.autocast():
525
+ x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
526
+ if use_weight:
527
+ w = batch.weight.to(devices.device, non_blocking=pin_memory)
528
+ c = shared.sd_model.cond_stage_model(batch.cond_text)
529
+
530
+ if is_training_inpainting_model:
531
+ if img_c is None:
532
+ img_c = processing.txt2img_image_conditioning(shared.sd_model, c, training_width, training_height)
533
+
534
+ cond = {"c_concat": [img_c], "c_crossattn": [c]}
535
+ else:
536
+ cond = c
537
+
538
+ if use_weight:
539
+ loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step
540
+ del w
541
+ else:
542
+ loss = shared.sd_model.forward(x, cond)[0] / gradient_step
543
+ del x
544
+
545
+ _loss_step += loss.item()
546
+ scaler.scale(loss).backward()
547
+
548
+ # keep accumulating gradients until a full gradient accumulation window has been processed
549
+ if (j + 1) % gradient_step != 0:
550
+ continue
551
+
552
+ if clip_grad:
553
+ clip_grad(embedding.vec, clip_grad_sched.learn_rate)
554
+
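+ # one real optimizer step per accumulation window: the scaler unscales and steps, then gradients are cleared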
555
+ scaler.step(optimizer)
556
+ scaler.update()
557
+ embedding.step += 1
558
+ pbar.update()
559
+ optimizer.zero_grad(set_to_none=True)
560
+ loss_step = _loss_step
561
+ _loss_step = 0
562
+
563
+ steps_done = embedding.step + 1
564
+
565
+ epoch_num = embedding.step // steps_per_epoch
566
+ epoch_step = embedding.step % steps_per_epoch
567
+
568
+ description = f"Training textual inversion [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}] loss: {loss_step:.7f}"
569
+ pbar.set_description(description)
570
+ if embedding_dir is not None and steps_done % save_embedding_every == 0:
571
+ # Before saving, change the embedding name to include the current step count.
572
+ embedding_name_every = f'{embedding_name}-{steps_done}'
573
+ last_saved_file = os.path.join(embedding_dir, f'{embedding_name_every}.pt')
574
+ save_embedding(embedding, optimizer, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True)
575
+ embedding_yet_to_be_embedded = True
576
+
577
+ write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, steps_per_epoch, {
578
+ "loss": f"{loss_step:.7f}",
579
+ "learn_rate": scheduler.learn_rate
580
+ })
581
+
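+ # every create_image_every steps, render a preview image with the current embedding (the VAE is moved back to the GPU for decoding)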
582
+ if images_dir is not None and steps_done % create_image_every == 0:
583
+ forced_filename = f'{embedding_name}-{steps_done}'
584
+ last_saved_image = os.path.join(images_dir, forced_filename)
585
+
586
+ shared.sd_model.first_stage_model.to(devices.device)
587
+
588
+ p = processing.StableDiffusionProcessingTxt2Img(
589
+ sd_model=shared.sd_model,
590
+ do_not_save_grid=True,
591
+ do_not_save_samples=True,
592
+ do_not_reload_embeddings=True,
593
+ )
594
+
595
+ if preview_from_txt2img:
596
+ p.prompt = preview_prompt
597
+ p.negative_prompt = preview_negative_prompt
598
+ p.steps = preview_steps
599
+ p.sampler_name = sd_samplers.samplers_map[preview_sampler_name.lower()]
600
+ p.cfg_scale = preview_cfg_scale
601
+ p.seed = preview_seed
602
+ p.width = preview_width
603
+ p.height = preview_height
604
+ else:
605
+ p.prompt = batch.cond_text[0]
606
+ p.steps = 20
607
+ p.width = training_width
608
+ p.height = training_height
609
+
610
+ preview_text = p.prompt
611
+
612
+ with closing(p):
613
+ processed = processing.process_images(p)
614
+ image = processed.images[0] if len(processed.images) > 0 else None
615
+
616
+ if unload:
617
+ shared.sd_model.first_stage_model.to(devices.cpu)
618
+
619
+ if image is not None:
620
+ shared.state.assign_current_image(image)
621
+
622
+ last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
623
+ last_saved_image += f", prompt: {preview_text}"
624
+
625
+ if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images:
626
+ tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", image, embedding.step)
627
+
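+ # optionally store the trained vectors inside the preview PNG (text chunk plus pixel-embedded data) so the image itself can be loaded as an embedding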
628
+ if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
629
+
630
+ last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png')
631
+
632
+ info = PngImagePlugin.PngInfo()
633
+ data = torch.load(last_saved_file)
634
+ info.add_text("sd-ti-embedding", embedding_to_b64(data))
635
+
636
+ title = f"<{data.get('name', '???')}>"
637
+
638
+ try:
639
+ vectorSize = list(data['string_to_param'].values())[0].shape[0]
640
+ except Exception:
641
+ vectorSize = '?'
642
+
643
+ checkpoint = sd_models.select_checkpoint()
644
+ footer_left = checkpoint.model_name
645
+ footer_mid = f'[{checkpoint.shorthash}]'
646
+ footer_right = f'{vectorSize}v {steps_done}s'
647
+
648
+ captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
649
+ captioned_image = insert_image_data_embed(captioned_image, data)
650
+
651
+ captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info)
652
+ embedding_yet_to_be_embedded = False
653
+
654
+ last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
655
+ last_saved_image += f", prompt: {preview_text}"
656
+
657
+ shared.state.job_no = embedding.step
658
+
659
+ shared.state.textinfo = f"""
660
+ <p>
661
+ Loss: {loss_step:.7f}<br/>
662
+ Step: {steps_done}<br/>
663
+ Last prompt: {html.escape(batch.cond_text[0])}<br/>
664
+ Last saved embedding: {html.escape(last_saved_file)}<br/>
665
+ Last saved image: {html.escape(last_saved_image)}<br/>
666
+ </p>
667
+ """
668
+ filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
669
+ save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True)
670
+ except Exception:
671
+ errors.report("Error training embedding", exc_info=True)
672
+ finally:
673
+ pbar.leave = False
674
+ pbar.close()
675
+ shared.sd_model.first_stage_model.to(devices.device)
676
+ shared.parallel_processing_allowed = old_parallel_processing_allowed
677
+ sd_hijack_checkpoint.remove()
678
+
679
+ return embedding, filename
680
+
681
+
682
+ def save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True):
683
+ old_embedding_name = embedding.name
684
+ old_sd_checkpoint = embedding.sd_checkpoint if hasattr(embedding, "sd_checkpoint") else None
685
+ old_sd_checkpoint_name = embedding.sd_checkpoint_name if hasattr(embedding, "sd_checkpoint_name") else None
686
+ old_cached_checksum = embedding.cached_checksum if hasattr(embedding, "cached_checksum") else None
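+ # remember the previous metadata so it can be restored if saving fails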
687
+ try:
688
+ embedding.sd_checkpoint = checkpoint.shorthash
689
+ embedding.sd_checkpoint_name = checkpoint.model_name
690
+ if remove_cached_checksum:
691
+ embedding.cached_checksum = None
692
+ embedding.name = embedding_name
693
+ embedding.optimizer_state_dict = optimizer.state_dict()
694
+ embedding.save(filename)
695
+ except:
696
+ embedding.sd_checkpoint = old_sd_checkpoint
697
+ embedding.sd_checkpoint_name = old_sd_checkpoint_name
698
+ embedding.name = old_embedding_name
699
+ embedding.cached_checksum = old_cached_checksum
700
+ raise
modules/textual_inversion/ui.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import html
2
+
3
+ import gradio as gr
4
+
5
+ import modules.textual_inversion.textual_inversion
6
+ from modules import sd_hijack, shared
7
+
8
+
9
+ def create_embedding(name, initialization_text, nvpt, overwrite_old):
10
+ filename = modules.textual_inversion.textual_inversion.create_embedding(name, nvpt, overwrite_old, init_text=initialization_text)
11
+
12
+ sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
13
+
14
+ return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", ""
15
+
16
+
17
+ def train_embedding(*args):
18
+
19
+ assert not shared.cmd_opts.lowvram, 'Training models with lowvram is not possible'
20
+
21
+ apply_optimizations = shared.opts.training_xattention_optimizations
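+ # unless the option to keep them is set, cross-attention optimizations are undone for training and reapplied once training ends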
22
+ try:
23
+ if not apply_optimizations:
24
+ sd_hijack.undo_optimizations()
25
+
26
+ embedding, filename = modules.textual_inversion.textual_inversion.train_embedding(*args)
27
+
28
+ res = f"""
29
+ Training {'interrupted' if shared.state.interrupted else 'finished'} at {embedding.step} steps.
30
+ Embedding saved to {html.escape(filename)}
31
+ """
32
+ return res, ""
33
+ except Exception:
34
+ raise
35
+ finally:
36
+ if not apply_optimizations:
37
+ sd_hijack.apply_optimizations()
38
+