PennyJX committed
Commit 34551e2
1 Parent(s): 674a390

Upload 2 files

Files changed (2)
  1. modules/api/api.py +791 -0
  2. modules/api/models.py +318 -0
modules/api/api.py ADDED
@@ -0,0 +1,791 @@
+ import base64
+ import io
+ import os
+ import time
+ import datetime
+ import uvicorn
+ import ipaddress
+ import requests
+ import gradio as gr
+ from threading import Lock
+ from io import BytesIO
+ from fastapi import APIRouter, Depends, FastAPI, Request, Response
+ from fastapi.security import HTTPBasic, HTTPBasicCredentials
+ from fastapi.exceptions import HTTPException
+ from fastapi.responses import JSONResponse
+ from fastapi.encoders import jsonable_encoder
+ from secrets import compare_digest
+
+ import modules.shared as shared
+ from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items, script_callbacks, generation_parameters_copypaste, sd_models
+ from modules.api import models
+ from modules.shared import opts
+ from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
+ from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
+ from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
+ from PIL import PngImagePlugin, Image
+ from modules.sd_models_config import find_checkpoint_config_near_filename
+ from modules.realesrgan_model import get_realesrgan_models
+ from modules import devices
+ from typing import Any
+ import piexif
+ import piexif.helper
+ from contextlib import closing
+
+
+ def script_name_to_index(name, scripts):
+     try:
+         return [script.title().lower() for script in scripts].index(name.lower())
+     except Exception as e:
+         raise HTTPException(status_code=422, detail=f"Script '{name}' not found") from e
+
+
+ def validate_sampler_name(name):
+     config = sd_samplers.all_samplers_map.get(name, None)
+     if config is None:
+         raise HTTPException(status_code=404, detail="Sampler not found")
+
+     return name
+
+
+ def setUpscalers(req: dict):
+     reqDict = vars(req)
+     reqDict['extras_upscaler_1'] = reqDict.pop('upscaler_1', None)
+     reqDict['extras_upscaler_2'] = reqDict.pop('upscaler_2', None)
+     return reqDict
+
+
+ def verify_url(url):
+     """Returns True if the url refers to a global resource."""
+
+     import socket
+     from urllib.parse import urlparse
+     try:
+         parsed_url = urlparse(url)
+         domain_name = parsed_url.netloc
+         host = socket.gethostbyname_ex(domain_name)
+         for ip in host[2]:
+             ip_addr = ipaddress.ip_address(ip)
+             if not ip_addr.is_global:
+                 return False
+     except Exception:
+         return False
+
+     return True
+
+
+ def decode_base64_to_image(encoding):
+     if encoding.startswith("http://") or encoding.startswith("https://"):
+         if not opts.api_enable_requests:
+             raise HTTPException(status_code=500, detail="Requests not allowed")
+
+         if opts.api_forbid_local_requests and not verify_url(encoding):
+             raise HTTPException(status_code=500, detail="Request to local resource not allowed")
+
+         headers = {'user-agent': opts.api_useragent} if opts.api_useragent else {}
+         response = requests.get(encoding, timeout=30, headers=headers)
+         try:
+             image = Image.open(BytesIO(response.content))
+             return image
+         except Exception as e:
+             raise HTTPException(status_code=500, detail="Invalid image url") from e
+
+     if encoding.startswith("data:image/"):
+         encoding = encoding.split(";")[1].split(",")[1]
+     try:
+         image = Image.open(BytesIO(base64.b64decode(encoding)))
+         return image
+     except Exception as e:
+         raise HTTPException(status_code=500, detail="Invalid encoded image") from e
+
+
+ def encode_pil_to_base64(image):
+     with io.BytesIO() as output_bytes:
+         if isinstance(image, str):
+             return image
+         if opts.samples_format.lower() == 'png':
+             use_metadata = False
+             metadata = PngImagePlugin.PngInfo()
+             for key, value in image.info.items():
+                 if isinstance(key, str) and isinstance(value, str):
+                     metadata.add_text(key, value)
+                     use_metadata = True
+             image.save(output_bytes, format="PNG", pnginfo=(metadata if use_metadata else None), quality=opts.jpeg_quality)
+
+         elif opts.samples_format.lower() in ("jpg", "jpeg", "webp"):
+             if image.mode == "RGBA":
+                 image = image.convert("RGB")
+             parameters = image.info.get('parameters', None)
+             exif_bytes = piexif.dump({
+                 "Exif": { piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(parameters or "", encoding="unicode") }
+             })
+             if opts.samples_format.lower() in ("jpg", "jpeg"):
+                 image.save(output_bytes, format="JPEG", exif = exif_bytes, quality=opts.jpeg_quality)
+             else:
+                 image.save(output_bytes, format="WEBP", exif = exif_bytes, quality=opts.jpeg_quality)
+
+         else:
+             raise HTTPException(status_code=500, detail="Invalid image format")
+
+         bytes_data = output_bytes.getvalue()
+
+     return base64.b64encode(bytes_data)
+
+
+ def api_middleware(app: FastAPI):
+     rich_available = False
+     try:
+         if os.environ.get('WEBUI_RICH_EXCEPTIONS', None) is not None:
+             import anyio # importing just so it can be placed on silent list
+             import starlette # importing just so it can be placed on silent list
+             from rich.console import Console
+             console = Console()
+             rich_available = True
+     except Exception:
+         pass
+
+     @app.middleware("http")
+     async def log_and_time(req: Request, call_next):
+         ts = time.time()
+         res: Response = await call_next(req)
+         duration = str(round(time.time() - ts, 4))
+         res.headers["X-Process-Time"] = duration
+         endpoint = req.scope.get('path', 'err')
+         if shared.cmd_opts.api_log and endpoint.startswith('/sdapi'):
+             print('API {t} {code} {prot}/{ver} {method} {endpoint} {cli} {duration}'.format(
+                 t=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
+                 code=res.status_code,
+                 ver=req.scope.get('http_version', '0.0'),
+                 cli=req.scope.get('client', ('0:0.0.0', 0))[0],
+                 prot=req.scope.get('scheme', 'err'),
+                 method=req.scope.get('method', 'err'),
+                 endpoint=endpoint,
+                 duration=duration,
+             ))
+         return res
+
+     def handle_exception(request: Request, e: Exception):
+         err = {
+             "error": type(e).__name__,
+             "detail": vars(e).get('detail', ''),
+             "body": vars(e).get('body', ''),
+             "errors": str(e),
+         }
+         if not isinstance(e, HTTPException): # do not print backtrace on known httpexceptions
+             message = f"API error: {request.method}: {request.url} {err}"
+             if rich_available:
+                 print(message)
+                 console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200]))
+             else:
+                 errors.report(message, exc_info=True)
+         return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err))
+
+     @app.middleware("http")
+     async def exception_handling(request: Request, call_next):
+         try:
+             return await call_next(request)
+         except Exception as e:
+             return handle_exception(request, e)
+
+     @app.exception_handler(Exception)
+     async def fastapi_exception_handler(request: Request, e: Exception):
+         return handle_exception(request, e)
+
+     @app.exception_handler(HTTPException)
+     async def http_exception_handler(request: Request, e: HTTPException):
+         return handle_exception(request, e)
+
+
+ class Api:
+     def __init__(self, app: FastAPI, queue_lock: Lock):
+         if shared.cmd_opts.api_auth:
+             self.credentials = {}
+             for auth in shared.cmd_opts.api_auth.split(","):
+                 user, password = auth.split(":")
+                 self.credentials[user] = password
+
+         self.router = APIRouter()
+         self.app = app
+         self.queue_lock = queue_lock
+         api_middleware(self.app)
+         self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=models.TextToImageResponse)
+         self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=models.ImageToImageResponse)
+         self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=models.ExtrasSingleImageResponse)
+         self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=models.ExtrasBatchImagesResponse)
+         self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=models.PNGInfoResponse)
+         self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=models.ProgressResponse)
+         self.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"])
+         self.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
+         self.add_api_route("/sdapi/v1/skip", self.skip, methods=["POST"])
+         self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel)
+         self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
+         self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel)
+         self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=list[models.SamplerItem])
+         self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=list[models.UpscalerItem])
+         self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=list[models.LatentUpscalerModeItem])
+         self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=list[models.SDModelItem])
+         self.add_api_route("/sdapi/v1/sd-vae", self.get_sd_vaes, methods=["GET"], response_model=list[models.SDVaeItem])
+         self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=list[models.HypernetworkItem])
+         self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=list[models.FaceRestorerItem])
+         self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=list[models.RealesrganItem])
+         self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=list[models.PromptStyleItem])
+         self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse)
+         self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"])
+         self.add_api_route("/sdapi/v1/refresh-vae", self.refresh_vae, methods=["POST"])
+         self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse)
+         self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse)
+         self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse)
+         self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse)
+         self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse)
+         self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"])
+         self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"])
+         self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList)
+         self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=list[models.ScriptInfo])
+         self.add_api_route("/sdapi/v1/extensions", self.get_extensions_list, methods=["GET"], response_model=list[models.ExtensionItem])
+
+         if shared.cmd_opts.api_server_stop:
+             self.add_api_route("/sdapi/v1/server-kill", self.kill_webui, methods=["POST"])
+             self.add_api_route("/sdapi/v1/server-restart", self.restart_webui, methods=["POST"])
+             self.add_api_route("/sdapi/v1/server-stop", self.stop_webui, methods=["POST"])
+
+         self.default_script_arg_txt2img = []
+         self.default_script_arg_img2img = []
+
+     def add_api_route(self, path: str, endpoint, **kwargs):
+         if shared.cmd_opts.api_auth:
+             return self.app.add_api_route(path, endpoint, dependencies=[Depends(self.auth)], **kwargs)
+         return self.app.add_api_route(path, endpoint, **kwargs)
+
+     def auth(self, credentials: HTTPBasicCredentials = Depends(HTTPBasic())):
+         if credentials.username in self.credentials:
+             if compare_digest(credentials.password, self.credentials[credentials.username]):
+                 return True
+
+         raise HTTPException(status_code=401, detail="Incorrect username or password", headers={"WWW-Authenticate": "Basic"})
+
+     def get_selectable_script(self, script_name, script_runner):
+         if script_name is None or script_name == "":
+             return None, None
+
+         script_idx = script_name_to_index(script_name, script_runner.selectable_scripts)
+         script = script_runner.selectable_scripts[script_idx]
+         return script, script_idx
+
+     def get_scripts_list(self):
+         t2ilist = [script.name for script in scripts.scripts_txt2img.scripts if script.name is not None]
+         i2ilist = [script.name for script in scripts.scripts_img2img.scripts if script.name is not None]
+
+         return models.ScriptsList(txt2img=t2ilist, img2img=i2ilist)
+
+     def get_script_info(self):
+         res = []
+
+         for script_list in [scripts.scripts_txt2img.scripts, scripts.scripts_img2img.scripts]:
+             res += [script.api_info for script in script_list if script.api_info is not None]
+
+         return res
+
+     def get_script(self, script_name, script_runner):
+         if script_name is None or script_name == "":
+             return None, None
+
+         script_idx = script_name_to_index(script_name, script_runner.scripts)
+         return script_runner.scripts[script_idx]
+
+     def init_default_script_args(self, script_runner):
+         #find max idx from the scripts in runner and generate a none array to init script_args
+         last_arg_index = 1
+         for script in script_runner.scripts:
+             if last_arg_index < script.args_to:
+                 last_arg_index = script.args_to
+         # None everywhere except position 0 to initialize script args
+         script_args = [None]*last_arg_index
+         script_args[0] = 0
+
+         # get default values
+         with gr.Blocks(): # will throw errors calling ui function without this
+             for script in script_runner.scripts:
+                 if script.ui(script.is_img2img):
+                     ui_default_values = []
+                     for elem in script.ui(script.is_img2img):
+                         ui_default_values.append(elem.value)
+                     script_args[script.args_from:script.args_to] = ui_default_values
+         return script_args
+
+     def init_script_args(self, request, default_script_args, selectable_scripts, selectable_idx, script_runner):
+         script_args = default_script_args.copy()
+         # position 0 in script_arg is the idx+1 of the selectable script that is going to be run when using scripts.scripts_*2img.run()
+         if selectable_scripts:
+             script_args[selectable_scripts.args_from:selectable_scripts.args_to] = request.script_args
+             script_args[0] = selectable_idx + 1
+
+         # Now check for always on scripts
+         if request.alwayson_scripts:
+             for alwayson_script_name in request.alwayson_scripts.keys():
+                 alwayson_script = self.get_script(alwayson_script_name, script_runner)
+                 if alwayson_script is None:
+                     raise HTTPException(status_code=422, detail=f"always on script {alwayson_script_name} not found")
+                 # Selectable script in always on script param check
+                 if alwayson_script.alwayson is False:
+                     raise HTTPException(status_code=422, detail="Cannot have a selectable script in the always on scripts params")
+                 # always on script with no arg should always run so you don't really need to add them to the requests
+                 if "args" in request.alwayson_scripts[alwayson_script_name]:
+                     # min between arg length in scriptrunner and arg length in the request
+                     for idx in range(0, min((alwayson_script.args_to - alwayson_script.args_from), len(request.alwayson_scripts[alwayson_script_name]["args"]))):
+                         script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx]
+         return script_args
+
+     def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI):
+         script_runner = scripts.scripts_txt2img
+         if not script_runner.scripts:
+             script_runner.initialize_scripts(False)
+             ui.create_ui()
+         if not self.default_script_arg_txt2img:
+             self.default_script_arg_txt2img = self.init_default_script_args(script_runner)
+         selectable_scripts, selectable_script_idx = self.get_selectable_script(txt2imgreq.script_name, script_runner)
+
+         populate = txt2imgreq.copy(update={ # Override __init__ params
+             "sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index),
+             "do_not_save_samples": not txt2imgreq.save_images,
+             "do_not_save_grid": not txt2imgreq.save_images,
+         })
+         if populate.sampler_name:
+             populate.sampler_index = None # prevent a warning later on
+
+         args = vars(populate)
+         args.pop('script_name', None)
+         args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them
+         args.pop('alwayson_scripts', None)
+
+         script_args = self.init_script_args(txt2imgreq, self.default_script_arg_txt2img, selectable_scripts, selectable_script_idx, script_runner)
+
+         send_images = args.pop('send_images', True)
+         args.pop('save_images', None)
+
+         with self.queue_lock:
+             with closing(StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args)) as p:
+                 p.is_api = True
+                 p.scripts = script_runner
+                 p.outpath_grids = opts.outdir_txt2img_grids
+                 p.outpath_samples = opts.outdir_txt2img_samples
+
+                 try:
+                     shared.state.begin(job="scripts_txt2img")
+                     if selectable_scripts is not None:
+                         p.script_args = script_args
+                         processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
+                     else:
+                         p.script_args = tuple(script_args) # Need to pass args as tuple here
+                         processed = process_images(p)
+                 finally:
+                     shared.state.end()
+                     shared.total_tqdm.clear()
+
+         b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
+
+         return models.TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())
+
+     def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI):
+         init_images = img2imgreq.init_images
+         if init_images is None:
+             raise HTTPException(status_code=404, detail="Init image not found")
+
+         mask = img2imgreq.mask
+         if mask:
+             mask = decode_base64_to_image(mask)
+
+         script_runner = scripts.scripts_img2img
+         if not script_runner.scripts:
+             script_runner.initialize_scripts(True)
+             ui.create_ui()
+         if not self.default_script_arg_img2img:
+             self.default_script_arg_img2img = self.init_default_script_args(script_runner)
+         selectable_scripts, selectable_script_idx = self.get_selectable_script(img2imgreq.script_name, script_runner)
+
+         populate = img2imgreq.copy(update={ # Override __init__ params
+             "sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index),
+             "do_not_save_samples": not img2imgreq.save_images,
+             "do_not_save_grid": not img2imgreq.save_images,
+             "mask": mask,
+         })
+         if populate.sampler_name:
+             populate.sampler_index = None # prevent a warning later on
+
+         args = vars(populate)
+         args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine.
+         args.pop('script_name', None)
+         args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them
+         args.pop('alwayson_scripts', None)
+
+         script_args = self.init_script_args(img2imgreq, self.default_script_arg_img2img, selectable_scripts, selectable_script_idx, script_runner)
+
+         send_images = args.pop('send_images', True)
+         args.pop('save_images', None)
+
+         with self.queue_lock:
+             with closing(StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args)) as p:
+                 p.init_images = [decode_base64_to_image(x) for x in init_images]
+                 p.is_api = True
+                 p.scripts = script_runner
+                 p.outpath_grids = opts.outdir_img2img_grids
+                 p.outpath_samples = opts.outdir_img2img_samples
+
+                 try:
+                     shared.state.begin(job="scripts_img2img")
+                     if selectable_scripts is not None:
+                         p.script_args = script_args
+                         processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
+                     else:
+                         p.script_args = tuple(script_args) # Need to pass args as tuple here
+                         processed = process_images(p)
+                 finally:
+                     shared.state.end()
+                     shared.total_tqdm.clear()
+
+         b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
+
+         if not img2imgreq.include_init_images:
+             img2imgreq.init_images = None
+             img2imgreq.mask = None
+
+         return models.ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js())
+
+     def extras_single_image_api(self, req: models.ExtrasSingleImageRequest):
+         reqDict = setUpscalers(req)
+
+         reqDict['image'] = decode_base64_to_image(reqDict['image'])
+
+         with self.queue_lock:
+             result = postprocessing.run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", save_output=False, **reqDict)
+
+         return models.ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1])
+
+     def extras_batch_images_api(self, req: models.ExtrasBatchImagesRequest):
+         reqDict = setUpscalers(req)
+
+         image_list = reqDict.pop('imageList', [])
+         image_folder = [decode_base64_to_image(x.data) for x in image_list]
+
+         with self.queue_lock:
+             result = postprocessing.run_extras(extras_mode=1, image_folder=image_folder, image="", input_dir="", output_dir="", save_output=False, **reqDict)
+
+         return models.ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1])
+
+     def pnginfoapi(self, req: models.PNGInfoRequest):
+         image = decode_base64_to_image(req.image.strip())
+         if image is None:
+             return models.PNGInfoResponse(info="")
+
+         geninfo, items = images.read_info_from_image(image)
+         if geninfo is None:
+             geninfo = ""
+
+         params = generation_parameters_copypaste.parse_generation_parameters(geninfo)
+         script_callbacks.infotext_pasted_callback(geninfo, params)
+
+         return models.PNGInfoResponse(info=geninfo, items=items, parameters=params)
+
+     def progressapi(self, req: models.ProgressRequest = Depends()):
+         # copy from check_progress_call of ui.py
+
+         if shared.state.job_count == 0:
+             return models.ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo)
+
+         # avoid dividing zero
+         progress = 0.01
+
+         if shared.state.job_count > 0:
+             progress += shared.state.job_no / shared.state.job_count
+         if shared.state.sampling_steps > 0:
+             progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
+
+         time_since_start = time.time() - shared.state.time_start
+         eta = (time_since_start/progress)
+         eta_relative = eta-time_since_start
+
+         progress = min(progress, 1)
+
+         shared.state.set_current_image()
+
+         current_image = None
+         if shared.state.current_image and not req.skip_current_image:
+             current_image = encode_pil_to_base64(shared.state.current_image)
+
+         return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo)
+
+     def interrogateapi(self, interrogatereq: models.InterrogateRequest):
+         image_b64 = interrogatereq.image
+         if image_b64 is None:
+             raise HTTPException(status_code=404, detail="Image not found")
+
+         img = decode_base64_to_image(image_b64)
+         img = img.convert('RGB')
+
+         # Override object param
+         with self.queue_lock:
+             if interrogatereq.model == "clip":
+                 processed = shared.interrogator.interrogate(img)
+             elif interrogatereq.model == "deepdanbooru":
+                 processed = deepbooru.model.tag(img)
+             else:
+                 raise HTTPException(status_code=404, detail="Model not found")
+
+         return models.InterrogateResponse(caption=processed)
+
+     def interruptapi(self):
+         shared.state.interrupt()
+
+         return {}
+
+     def unloadapi(self):
+         sd_models.unload_model_weights()
+
+         return {}
+
+     def reloadapi(self):
+         sd_models.send_model_to_device(shared.sd_model)
+
+         return {}
+
+     def skip(self):
+         shared.state.skip()
+
+     def get_config(self):
+         options = {}
+         for key in shared.opts.data.keys():
+             metadata = shared.opts.data_labels.get(key)
+             if(metadata is not None):
+                 options.update({key: shared.opts.data.get(key, shared.opts.data_labels.get(key).default)})
+             else:
+                 options.update({key: shared.opts.data.get(key, None)})
+
+         return options
+
+     def set_config(self, req: dict[str, Any]):
+         checkpoint_name = req.get("sd_model_checkpoint", None)
+         if checkpoint_name is not None and checkpoint_name not in sd_models.checkpoint_aliases:
+             raise RuntimeError(f"model {checkpoint_name!r} not found")
+
+         for k, v in req.items():
+             shared.opts.set(k, v, is_api=True)
+
+         shared.opts.save(shared.config_filename)
+         return
+
+     def get_cmd_flags(self):
+         return vars(shared.cmd_opts)
+
+     def get_samplers(self):
+         return [{"name": sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in sd_samplers.all_samplers]
+
+     def get_upscalers(self):
+         return [
+             {
+                 "name": upscaler.name,
+                 "model_name": upscaler.scaler.model_name,
+                 "model_path": upscaler.data_path,
+                 "model_url": None,
+                 "scale": upscaler.scale,
+             }
+             for upscaler in shared.sd_upscalers
+         ]
+
+     def get_latent_upscale_modes(self):
+         return [
+             {
+                 "name": upscale_mode,
+             }
+             for upscale_mode in [*(shared.latent_upscale_modes or {})]
+         ]
+
+     def get_sd_models(self):
+         import modules.sd_models as sd_models
+         return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in sd_models.checkpoints_list.values()]
+
+     def get_sd_vaes(self):
+         import modules.sd_vae as sd_vae
+         return [{"model_name": x, "filename": sd_vae.vae_dict[x]} for x in sd_vae.vae_dict.keys()]
+
+     def get_hypernetworks(self):
+         return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
+
+     def get_face_restorers(self):
+         return [{"name":x.name(), "cmd_dir": getattr(x, "cmd_dir", None)} for x in shared.face_restorers]
+
+     def get_realesrgan_models(self):
+         return [{"name":x.name,"path":x.data_path, "scale":x.scale} for x in get_realesrgan_models(None)]
+
+     def get_prompt_styles(self):
+         styleList = []
+         for k in shared.prompt_styles.styles:
+             style = shared.prompt_styles.styles[k]
+             styleList.append({"name":style[0], "prompt": style[1], "negative_prompt": style[2]})
+
+         return styleList
+
+     def get_embeddings(self):
+         db = sd_hijack.model_hijack.embedding_db
+
+         def convert_embedding(embedding):
+             return {
+                 "step": embedding.step,
+                 "sd_checkpoint": embedding.sd_checkpoint,
+                 "sd_checkpoint_name": embedding.sd_checkpoint_name,
+                 "shape": embedding.shape,
+                 "vectors": embedding.vectors,
+             }
+
+         def convert_embeddings(embeddings):
+             return {embedding.name: convert_embedding(embedding) for embedding in embeddings.values()}
+
+         return {
+             "loaded": convert_embeddings(db.word_embeddings),
+             "skipped": convert_embeddings(db.skipped_embeddings),
+         }
+
+     def refresh_checkpoints(self):
+         with self.queue_lock:
+             shared.refresh_checkpoints()
+
+     def refresh_vae(self):
+         with self.queue_lock:
+             shared_items.refresh_vae_list()
+
+     def create_embedding(self, args: dict):
+         try:
+             shared.state.begin(job="create_embedding")
+             filename = create_embedding(**args) # create empty embedding
+             sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used
+             return models.CreateResponse(info=f"create embedding filename: {filename}")
+         except AssertionError as e:
+             return models.TrainResponse(info=f"create embedding error: {e}")
+         finally:
+             shared.state.end()
+
+
+     def create_hypernetwork(self, args: dict):
+         try:
+             shared.state.begin(job="create_hypernetwork")
+             filename = create_hypernetwork(**args) # create empty embedding
+             return models.CreateResponse(info=f"create hypernetwork filename: {filename}")
+         except AssertionError as e:
+             return models.TrainResponse(info=f"create hypernetwork error: {e}")
+         finally:
+             shared.state.end()
+
+     def train_embedding(self, args: dict):
+         try:
+             shared.state.begin(job="train_embedding")
+             apply_optimizations = shared.opts.training_xattention_optimizations
+             error = None
+             filename = ''
+             if not apply_optimizations:
+                 sd_hijack.undo_optimizations()
+             try:
+                 embedding, filename = train_embedding(**args) # can take a long time to complete
+             except Exception as e:
+                 error = e
+             finally:
+                 if not apply_optimizations:
+                     sd_hijack.apply_optimizations()
+                 return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
+         except Exception as msg:
+             return models.TrainResponse(info=f"train embedding error: {msg}")
+         finally:
+             shared.state.end()
+
+     def train_hypernetwork(self, args: dict):
+         try:
+             shared.state.begin(job="train_hypernetwork")
+             shared.loaded_hypernetworks = []
+             apply_optimizations = shared.opts.training_xattention_optimizations
+             error = None
+             filename = ''
+             if not apply_optimizations:
+                 sd_hijack.undo_optimizations()
+             try:
+                 hypernetwork, filename = train_hypernetwork(**args)
+             except Exception as e:
+                 error = e
+             finally:
+                 shared.sd_model.cond_stage_model.to(devices.device)
+                 shared.sd_model.first_stage_model.to(devices.device)
+                 if not apply_optimizations:
+                     sd_hijack.apply_optimizations()
+                 shared.state.end()
+                 return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
+         except Exception as exc:
+             return models.TrainResponse(info=f"train embedding error: {exc}")
+         finally:
+             shared.state.end()
+
+     def get_memory(self):
+         try:
+             import os
+             import psutil
+             process = psutil.Process(os.getpid())
+             res = process.memory_info() # only rss is cross-platform guaranteed so we dont rely on other values
+             ram_total = 100 * res.rss / process.memory_percent() # and total memory is calculated as actual value is not cross-platform safe
+             ram = { 'free': ram_total - res.rss, 'used': res.rss, 'total': ram_total }
+         except Exception as err:
+             ram = { 'error': f'{err}' }
+         try:
+             import torch
+             if torch.cuda.is_available():
+                 s = torch.cuda.mem_get_info()
+                 system = { 'free': s[0], 'used': s[1] - s[0], 'total': s[1] }
+                 s = dict(torch.cuda.memory_stats(shared.device))
+                 allocated = { 'current': s['allocated_bytes.all.current'], 'peak': s['allocated_bytes.all.peak'] }
+                 reserved = { 'current': s['reserved_bytes.all.current'], 'peak': s['reserved_bytes.all.peak'] }
+                 active = { 'current': s['active_bytes.all.current'], 'peak': s['active_bytes.all.peak'] }
+                 inactive = { 'current': s['inactive_split_bytes.all.current'], 'peak': s['inactive_split_bytes.all.peak'] }
+                 warnings = { 'retries': s['num_alloc_retries'], 'oom': s['num_ooms'] }
+                 cuda = {
+                     'system': system,
+                     'active': active,
+                     'allocated': allocated,
+                     'reserved': reserved,
+                     'inactive': inactive,
+                     'events': warnings,
+                 }
+             else:
+                 cuda = {'error': 'unavailable'}
+         except Exception as err:
+             cuda = {'error': f'{err}'}
+         return models.MemoryResponse(ram=ram, cuda=cuda)
+
+     def get_extensions_list(self):
+         from modules import extensions
+         extensions.list_extensions()
+         ext_list = []
+         for ext in extensions.extensions:
+             ext: extensions.Extension
+             ext.read_info_from_repo()
+             if ext.remote is not None:
+                 ext_list.append({
+                     "name": ext.name,
+                     "remote": ext.remote,
+                     "branch": ext.branch,
+                     "commit_hash":ext.commit_hash,
+                     "commit_date":ext.commit_date,
+                     "version":ext.version,
+                     "enabled":ext.enabled
+                 })
+         return ext_list
+
+     def launch(self, server_name, port, root_path):
+         self.app.include_router(self.router)
+         uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=shared.cmd_opts.timeout_keep_alive, root_path=root_path)
+
+     def kill_webui(self):
+         restart.stop_program()
+
+     def restart_webui(self):
+         if restart.is_restartable():
+             restart.restart_program()
+         return Response(status_code=501)
+
+     def stop_webui(request):
+         shared.state.server_command = "stop"
+         return Response("Stopping.")
+
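For reference, here is a minimal client sketch against the txt2img route registered above; it is illustrative only and not part of the uploaded files. It assumes the WebUI was launched with the --api flag, is reachable at http://127.0.0.1:7860, and writes PNG output; the prompt and filenames are made up.

# Minimal txt2img client sketch (assumed host/port and settings; adjust to your setup).
import base64
import requests

BASE_URL = "http://127.0.0.1:7860"  # assumption: local server started with --api

payload = {
    "prompt": "a lighthouse at dusk",  # illustrative prompt
    "steps": 20,
    "sampler_name": "Euler",           # must be a name known to sd_samplers (see validate_sampler_name)
    "send_images": True,               # ask the API to return base64-encoded images
    "save_images": False,              # do not save results on the server side
}

# If the server was started with --api-auth user:password, also pass auth=("user", "password").
resp = requests.post(f"{BASE_URL}/sdapi/v1/txt2img", json=payload, timeout=120)
resp.raise_for_status()

data = resp.json()  # shape matches models.TextToImageResponse: images, parameters, info
for i, img_b64 in enumerate(data["images"]):
    with open(f"txt2img_{i}.png", "wb") as f:
        f.write(base64.b64decode(img_b64))

The same pattern applies to /sdapi/v1/img2img, which additionally expects init_images as a list of base64-encoded strings (see img2imgapi above).
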
modules/api/models.py ADDED
@@ -0,0 +1,318 @@
+ import inspect
+
+ from pydantic import BaseModel, Field, create_model
+ from typing import Any, Optional, Literal
+ from inflection import underscore
+ from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
+ from modules.shared import sd_upscalers, opts, parser
+
+ API_NOT_ALLOWED = [
+     "self",
+     "kwargs",
+     "sd_model",
+     "outpath_samples",
+     "outpath_grids",
+     "sampler_index",
+     # "do_not_save_samples",
+     # "do_not_save_grid",
+     "extra_generation_params",
+     "overlay_images",
+     "do_not_reload_embeddings",
+     "seed_enable_extras",
+     "prompt_for_display",
+     "sampler_noise_scheduler_override",
+     "ddim_discretize"
+ ]
+
+ class ModelDef(BaseModel):
+     """Assistance Class for Pydantic Dynamic Model Generation"""
+
+     field: str
+     field_alias: str
+     field_type: Any
+     field_value: Any
+     field_exclude: bool = False
+
+
+ class PydanticModelGenerator:
+     """
+     Takes in created classes and stubs them out in a way FastAPI/Pydantic is happy about:
+     source_data is a snapshot of the default values produced by the class
+     params are the names of the actual keys required by __init__
+     """
+
+     def __init__(
+         self,
+         model_name: str = None,
+         class_instance = None,
+         additional_fields = None,
+     ):
+         def field_type_generator(k, v):
+             field_type = v.annotation
+
+             if field_type == 'Image':
+                 # images are sent as base64 strings via API
+                 field_type = 'str'
+
+             return Optional[field_type]
+
+         def merge_class_params(class_):
+             all_classes = list(filter(lambda x: x is not object, inspect.getmro(class_)))
+             parameters = {}
+             for classes in all_classes:
+                 parameters = {**parameters, **inspect.signature(classes.__init__).parameters}
+             return parameters
+
+         self._model_name = model_name
+         self._class_data = merge_class_params(class_instance)
+
+         self._model_def = [
+             ModelDef(
+                 field=underscore(k),
+                 field_alias=k,
+                 field_type=field_type_generator(k, v),
+                 field_value=None if isinstance(v.default, property) else v.default
+             )
+             for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED
+         ]
+
+         for fields in additional_fields:
+             self._model_def.append(ModelDef(
+                 field=underscore(fields["key"]),
+                 field_alias=fields["key"],
+                 field_type=fields["type"],
+                 field_value=fields["default"],
+                 field_exclude=fields["exclude"] if "exclude" in fields else False))
+
+     def generate_model(self):
+         """
+         Creates a pydantic BaseModel
+         from the json and overrides provided at initialization
+         """
+         fields = {
+             d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias, exclude=d.field_exclude)) for d in self._model_def
+         }
+         DynamicModel = create_model(self._model_name, **fields)
+         DynamicModel.__config__.allow_population_by_field_name = True
+         DynamicModel.__config__.allow_mutation = True
+         return DynamicModel
+
+ StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator(
+     "StableDiffusionProcessingTxt2Img",
+     StableDiffusionProcessingTxt2Img,
+     [
+         {"key": "sampler_index", "type": str, "default": "Euler"},
+         {"key": "script_name", "type": str, "default": None},
+         {"key": "script_args", "type": list, "default": []},
+         {"key": "send_images", "type": bool, "default": True},
+         {"key": "save_images", "type": bool, "default": False},
+         {"key": "alwayson_scripts", "type": dict, "default": {}},
+     ]
+ ).generate_model()
+
+ StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
+     "StableDiffusionProcessingImg2Img",
+     StableDiffusionProcessingImg2Img,
+     [
+         {"key": "sampler_index", "type": str, "default": "Euler"},
+         {"key": "init_images", "type": list, "default": None},
+         {"key": "denoising_strength", "type": float, "default": 0.75},
+         {"key": "mask", "type": str, "default": None},
+         {"key": "include_init_images", "type": bool, "default": False, "exclude" : True},
+         {"key": "script_name", "type": str, "default": None},
+         {"key": "script_args", "type": list, "default": []},
+         {"key": "send_images", "type": bool, "default": True},
+         {"key": "save_images", "type": bool, "default": False},
+         {"key": "alwayson_scripts", "type": dict, "default": {}},
+     ]
+ ).generate_model()
+
+ class TextToImageResponse(BaseModel):
+     images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
+     parameters: dict
+     info: str
+
+ class ImageToImageResponse(BaseModel):
+     images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
+     parameters: dict
+     info: str
+
+ class ExtrasBaseRequest(BaseModel):
+     resize_mode: Literal[0, 1] = Field(default=0, title="Resize Mode", description="Sets the resize mode: 0 to upscale by upscaling_resize amount, 1 to upscale up to upscaling_resize_h x upscaling_resize_w.")
+     show_extras_results: bool = Field(default=True, title="Show results", description="Should the backend return the generated image?")
+     gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.")
+     codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.")
+     codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.")
+     upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=8, description="By how much to upscale the image, only used when resize_mode=0.")
+     upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.")
+     upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.")
+     upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?")
+     upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
+     upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
+     extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.")
+     upscale_first: bool = Field(default=False, title="Upscale first", description="Should the upscaler run before restoring faces?")
+
+ class ExtraBaseResponse(BaseModel):
+     html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.")
+
+ class ExtrasSingleImageRequest(ExtrasBaseRequest):
+     image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.")
+
+ class ExtrasSingleImageResponse(ExtraBaseResponse):
+     image: str = Field(default=None, title="Image", description="The generated image in base64 format.")
+
+ class FileData(BaseModel):
+     data: str = Field(title="File data", description="Base64 representation of the file")
+     name: str = Field(title="File name")
+
+ class ExtrasBatchImagesRequest(ExtrasBaseRequest):
+     imageList: list[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")
+
+ class ExtrasBatchImagesResponse(ExtraBaseResponse):
+     images: list[str] = Field(title="Images", description="The generated images in base64 format.")
+
+ class PNGInfoRequest(BaseModel):
+     image: str = Field(title="Image", description="The base64 encoded PNG image")
+
+ class PNGInfoResponse(BaseModel):
+     info: str = Field(title="Image info", description="A string with the parameters used to generate the image")
+     items: dict = Field(title="Items", description="A dictionary containing all the other fields the image had")
+     parameters: dict = Field(title="Parameters", description="A dictionary with parsed generation info fields")
+
+ class ProgressRequest(BaseModel):
+     skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization")
+
+ class ProgressResponse(BaseModel):
+     progress: float = Field(title="Progress", description="The progress with a range of 0 to 1")
+     eta_relative: float = Field(title="ETA in secs")
+     state: dict = Field(title="State", description="The current state snapshot")
+     current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.")
+     textinfo: str = Field(default=None, title="Info text", description="Info text used by WebUI.")
+
+ class InterrogateRequest(BaseModel):
+     image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.")
+     model: str = Field(default="clip", title="Model", description="The interrogate model used.")
+
+ class InterrogateResponse(BaseModel):
+     caption: str = Field(default=None, title="Caption", description="The generated caption for the image.")
+
+ class TrainResponse(BaseModel):
+     info: str = Field(title="Train info", description="Response string from train embedding or hypernetwork task.")
+
+ class CreateResponse(BaseModel):
+     info: str = Field(title="Create info", description="Response string from create embedding or hypernetwork task.")
+
+ fields = {}
+ for key, metadata in opts.data_labels.items():
+     value = opts.data.get(key)
+     optType = opts.typemap.get(type(metadata.default), type(metadata.default)) if metadata.default else Any
+
+     if metadata is not None:
+         fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
+     else:
+         fields.update({key: (Optional[optType], Field())})
+
+ OptionsModel = create_model("Options", **fields)
+
+ flags = {}
+ _options = vars(parser)['_option_string_actions']
+ for key in _options:
+     if(_options[key].dest != 'help'):
+         flag = _options[key]
+         _type = str
+         if _options[key].default is not None:
+             _type = type(_options[key].default)
+         flags.update({flag.dest: (_type, Field(default=flag.default, description=flag.help))})
+
+ FlagsModel = create_model("Flags", **flags)
+
+ class SamplerItem(BaseModel):
+     name: str = Field(title="Name")
+     aliases: list[str] = Field(title="Aliases")
+     options: dict[str, str] = Field(title="Options")
+
+ class UpscalerItem(BaseModel):
+     name: str = Field(title="Name")
+     model_name: Optional[str] = Field(title="Model Name")
+     model_path: Optional[str] = Field(title="Path")
+     model_url: Optional[str] = Field(title="URL")
+     scale: Optional[float] = Field(title="Scale")
+
+ class LatentUpscalerModeItem(BaseModel):
+     name: str = Field(title="Name")
+
+ class SDModelItem(BaseModel):
+     title: str = Field(title="Title")
+     model_name: str = Field(title="Model Name")
+     hash: Optional[str] = Field(title="Short hash")
+     sha256: Optional[str] = Field(title="sha256 hash")
+     filename: str = Field(title="Filename")
+     config: Optional[str] = Field(title="Config file")
+
+ class SDVaeItem(BaseModel):
+     model_name: str = Field(title="Model Name")
+     filename: str = Field(title="Filename")
+
+ class HypernetworkItem(BaseModel):
+     name: str = Field(title="Name")
+     path: Optional[str] = Field(title="Path")
+
+ class FaceRestorerItem(BaseModel):
+     name: str = Field(title="Name")
+     cmd_dir: Optional[str] = Field(title="Path")
+
+ class RealesrganItem(BaseModel):
+     name: str = Field(title="Name")
+     path: Optional[str] = Field(title="Path")
+     scale: Optional[int] = Field(title="Scale")
+
+ class PromptStyleItem(BaseModel):
+     name: str = Field(title="Name")
+     prompt: Optional[str] = Field(title="Prompt")
+     negative_prompt: Optional[str] = Field(title="Negative Prompt")
+
+
+ class EmbeddingItem(BaseModel):
+     step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available")
+     sd_checkpoint: Optional[str] = Field(title="SD Checkpoint", description="The hash of the checkpoint this embedding was trained on, if available")
+     sd_checkpoint_name: Optional[str] = Field(title="SD Checkpoint Name", description="The name of the checkpoint this embedding was trained on, if available. Note that this is the name that was used by the trainer; for a stable identifier, use `sd_checkpoint` instead")
+     shape: int = Field(title="Shape", description="The length of each individual vector in the embedding")
+     vectors: int = Field(title="Vectors", description="The number of vectors in the embedding")
+
+ class EmbeddingsResponse(BaseModel):
+     loaded: dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model")
+     skipped: dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")
+
+ class MemoryResponse(BaseModel):
+     ram: dict = Field(title="RAM", description="System memory stats")
+     cuda: dict = Field(title="CUDA", description="nVidia CUDA memory stats")
+
+
+ class ScriptsList(BaseModel):
+     txt2img: list = Field(default=None, title="Txt2img", description="Titles of scripts (txt2img)")
+     img2img: list = Field(default=None, title="Img2img", description="Titles of scripts (img2img)")
+
+
+ class ScriptArg(BaseModel):
+     label: str = Field(default=None, title="Label", description="Name of the argument in UI")
+     value: Optional[Any] = Field(default=None, title="Value", description="Default value of the argument")
+     minimum: Optional[Any] = Field(default=None, title="Minimum", description="Minimum allowed value for the argumentin UI")
+     maximum: Optional[Any] = Field(default=None, title="Minimum", description="Maximum allowed value for the argumentin UI")
+     step: Optional[Any] = Field(default=None, title="Minimum", description="Step for changing value of the argumentin UI")
+     choices: Optional[list[str]] = Field(default=None, title="Choices", description="Possible values for the argument")
+
+
+ class ScriptInfo(BaseModel):
+     name: str = Field(default=None, title="Name", description="Script name")
+     is_alwayson: bool = Field(default=None, title="IsAlwayson", description="Flag specifying whether this script is an alwayson script")
+     is_img2img: bool = Field(default=None, title="IsImg2img", description="Flag specifying whether this script is an img2img script")
+     args: list[ScriptArg] = Field(title="Arguments", description="List of script's arguments")
+
+ class ExtensionItem(BaseModel):
+     name: str = Field(title="Name", description="Extension name")
+     remote: str = Field(title="Remote", description="Extension Repository URL")
+     branch: str = Field(title="Branch", description="Extension Repository Branch")
+     commit_hash: str = Field(title="Commit Hash", description="Extension Repository Commit Hash")
+     version: str = Field(title="Version", description="Extension Version")
+     commit_date: str = Field(title="Commit Date", description="Extension Repository Commit Date")
+     enabled: bool = Field(title="Enabled", description="Flag specifying whether this extension is enabled")
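
As an aside on the dynamic request models above: PydanticModelGenerator ultimately relies on pydantic's create_model plus per-field aliases. The following self-contained sketch shows that pattern in miniature (pydantic v1 API, matching the __config__ usage in this file); ExampleProcessing and its parameters are hypothetical stand-ins for StableDiffusionProcessingTxt2Img, not code from this commit.

# Illustrative sketch of the create_model()/alias pattern used by PydanticModelGenerator.
# Assumptions: pydantic v1 and inflection are installed; ExampleProcessing is made up.
import inspect
from typing import Optional

from inflection import underscore
from pydantic import Field, create_model


class ExampleProcessing:
    def __init__(self, prompt: str = "", steps: int = 20, denoising_strength: float = 0.7):
        pass


# Turn each __init__ parameter into an optional field carrying its default value,
# keeping the original parameter name as the field alias (as the generator above does).
params = inspect.signature(ExampleProcessing.__init__).parameters
fields = {
    underscore(k): (Optional[v.annotation], Field(default=v.default, alias=k))
    for k, v in params.items()
    if k != "self"
}

ExampleProcessingAPI = create_model("ExampleProcessingAPI", **fields)
ExampleProcessingAPI.__config__.allow_population_by_field_name = True

req = ExampleProcessingAPI(prompt="a cat", steps=30)
print(req.dict())  # {'prompt': 'a cat', 'steps': 30, 'denoising_strength': 0.7}

In the real generator, fields listed in API_NOT_ALLOWED are skipped and the API-only extras (sampler_index, script_name, send_images, ...) are appended via additional_fields before create_model is called.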