salahIguiliz committed
Commit c4810ac · 1 Parent(s): 691eb9b

Update app.py

Files changed (1)
  1. app.py +19 -9
app.py CHANGED
@@ -39,6 +39,7 @@ def generate_an_image_from_text(text, text_size_, width, lenght):
     # font def
     font_dir = ''
     # Get a list of all the font files in the directory
+    print("start generation")
     font_files = glob.glob(os.path.join(dir_path, 'fonts', '*.ttf'))
     # Get a list of font paths
     font_paths = []
@@ -55,22 +56,30 @@ def generate_an_image_from_text(text, text_size_, width, lenght):
     y = (image.height - text_size[1]) / 2
     # Draw the text on the image
     draw.text((x, y), text, fill=(0, 0, 0), font=font)
+    print("end generation")
+
     return image
 
 def to_Canny(image):
-    # Let's load the popular vermeer image
-    image = np.array(image)
+    print("start canny")
 
-    low_threshold = 100
-    high_threshold = 200
+    # Let's load the popular vermeer image
+    image = np.array(image)
+
+    low_threshold = 100
+    high_threshold = 200
+
+    image = cv2.Canny(image, low_threshold, high_threshold)
+    image = image[:, :, None]
+    image = np.concatenate([image, image, image], axis=2)
+    canny_image = Image.fromarray(image)
+    print("end canny")
 
-    image = cv2.Canny(image, low_threshold, high_threshold)
-    image = image[:, :, None]
-    image = np.concatenate([image, image, image], axis=2)
-    canny_image = Image.fromarray(image)
-    return canny_image
+    return canny_image
 
 def inference(prompt,canny_image,number,seed ):
+    print("start inference")
+
 
     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
     # This command loads the individual model components on GPU on-demand. So, we don't
@@ -84,6 +93,7 @@ def inference(prompt,canny_image,number,seed ):
     prompt = prompt
     out_image = pipe(
         prompt, num_inference_steps=20, generator=generator, image=image_, num_images_per_prompt=number)
+    print('end inference')
     return out_image
 
 def generate_images(image, prompt):
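
Note: the snippet below is a minimal, self-contained sketch of the Canny control-image preprocessing that this commit re-adds inside to_Canny, alongside the new print-based progress logging. It is not the app's exact code: the helper name make_canny_control_image and the synthetic demo image are illustrative, while the fixed thresholds (100/200) and the NumPy/OpenCV/PIL calls mirror the lines shown in the diff.

import cv2
import numpy as np
from PIL import Image, ImageDraw


def make_canny_control_image(image, low_threshold=100, high_threshold=200):
    # PIL image -> uint8 array (same conversion as to_Canny in app.py)
    arr = np.array(image)
    # Single-channel Canny edge map using the thresholds from the diff
    edges = cv2.Canny(arr, low_threshold, high_threshold)
    # Replicate the edge map to 3 channels, as ControlNet-style pipelines expect
    edges = edges[:, :, None]
    edges = np.concatenate([edges, edges, edges], axis=2)
    return Image.fromarray(edges)


# Hypothetical usage with a synthetic test image (not part of app.py)
if __name__ == "__main__":
    demo = Image.new("RGB", (256, 256), "white")
    ImageDraw.Draw(demo).rectangle([64, 64, 192, 192], outline="black", width=4)
    control = make_canny_control_image(demo)
    print(control.size, control.mode)  # (256, 256) RGB

The resulting 3-channel edge map corresponds to the canny_image that inference() passes to the diffusers pipeline (pipe), which the diff shows being reconfigured with UniPCMultistepScheduler before generation.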