Commit: added inference_steps

handler.py (+4 -2)

@@ -36,6 +36,8 @@ class EndpointHandler():
         negative_prompt = data.pop("negative_prompt", None)
         height = data.pop("height", 512)
         width = data.pop("width", 512)
+        inference_steps = data.pop("inference_steps", 25)
+
 
         guidance_scale = data.pop("guidance_scale", 7.5)
 
@@ -43,11 +45,11 @@ class EndpointHandler():
         with autocast(device.type):
             if negative_prompt is None:
                 print(str(inputs), str(height), str(width), str(guidance_scale))
-                image = self.pipe(prompt=inputs, height=height, width=width, guidance_scale=float(guidance_scale))
+                image = self.pipe(prompt=inputs, height=height, width=width, guidance_scale=float(guidance_scale),num_inference_steps=inference_steps)
                 image = image.images[0]
             else:
                 print(str(inputs), str(height), str(negative_prompt), str(width), str(guidance_scale))
-                image = self.pipe(prompt=inputs, negative_prompt=negative_prompt, height=height, width=width, guidance_scale=float(guidance_scale))
+                image = self.pipe(prompt=inputs, negative_prompt=negative_prompt, height=height, width=width, guidance_scale=float(guidance_scale),num_inference_steps=inference_steps)
                 image = image.images[0]
 
             # Encode image as base64