jwengr committed · Commit 7e1a73f · verified · 1 Parent(s): ffd555e

Update README.md

Files changed (1):
  1. README.md +24 -12
README.md CHANGED
@@ -5,8 +5,21 @@ base_model:
 pipeline_tag: image-to-image
 ---
 ```
+import torch
+import numpy as np
+
+from PIL import Image
+from diffusers.utils import load_image
 from transformers import AutoConfig, AutoModel, ModelCard
 
+img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
+mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
+
+image_gray = load_image(img_url).resize((512, 512)).convert('L').convert('RGB')  # image must be 3-channel
+mask_image = load_image(mask_url).resize((512, 512))
+mask = (np.array(mask_image) > 128) * 1
+image_gray_masked = Image.fromarray(((1 - mask) * np.array(image_gray)).astype(np.uint8))
+
 # Load the gray-inpaint model
 gray_inpaintor = AutoModel.from_pretrained(
     'jwengr/stable-diffusion-2-gray-inpaint-to-rgb',
@@ -14,25 +27,24 @@ gray_inpaintor = AutoModel.from_pretrained(
     trust_remote_code=True,
 )
 
-# Load the gray2rgb model
+# Load the gray2rgb model
 gray2rgb = AutoModel.from_pretrained(
     'jwengr/stable-diffusion-2-gray-inpaint-to-rgb',
     subfolder='gray2rgb',
     trust_remote_code=True,
 )
 
-
-# Move models to GPU
-gray2rgb.to('cuda')
+# Move models to GPU
 gray_inpaintor.to('cuda')
+gray2rgb.to('cuda')
 
-# Enable memory-efficient attention for xFormers
-gray2rgb.unet.enable_xformers_memory_efficient_attention()
-gray_inpaintor.unet.enable_xformers_memory_efficient_attention()
+# Enable memory-efficient attention
+# gray2rgb.unet.enable_xformers_memory_efficient_attention()
+# gray_inpaintor.unet.enable_xformers_memory_efficient_attention()
 
-# Generate images using gray_inpaintor and gray2rgb
-image_gray_restored = gray_inpaintor(batch, seed=inpaint_seed)
-image_gray_restored = [img.convert('RGB') for img in image_gray_restored]
-image_restored_pil = gray2rgb(image_gray_restored)
-image_restored_pt = gray2rgb(image_gray_restored, output_type='pt')
+with torch.autocast('cuda', dtype=torch.bfloat16):
+    with torch.no_grad():
+        # Each model's input image can be a PIL.Image, a List[PIL.Image], or a preprocessed tensor (B,3,H,W); images must be 3-channel
+        image_gray_restored = gray_inpaintor(image_gray_masked, num_inference_steps=250, seed=10)[0].convert('L')  # a 'mask' arg can also be passed explicitly: Tensor (B,1,512,512)
+        image_restored = gray2rgb(image_gray_restored.convert('RGB'))
 ```
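
A note on the optional `mask` argument: the inline comment above says the inpainting call can also take the mask explicitly as a `(B,1,512,512)` tensor. Below is a minimal sketch of that call, assuming the updated README snippet has already been run (so `mask_url`, `image_gray_masked`, and `gray_inpaintor` exist and the model is on `cuda`), and assuming a 0/1 float tensor is expected; the README states only the shape, and the names `mask_np`/`mask_pt` are introduced here for illustration.

```
import torch
import numpy as np
from diffusers.utils import load_image

# Binarize the mask PNG, then add batch and channel dims -> (1, 1, 512, 512)
mask_np = np.array(load_image(mask_url).resize((512, 512)).convert('L')) > 128
mask_pt = torch.from_numpy(mask_np).float()[None, None].to('cuda')  # 0/1 float (assumed dtype)

with torch.autocast('cuda', dtype=torch.bfloat16):
    with torch.no_grad():
        image_gray_restored = gray_inpaintor(
            image_gray_masked,   # masked grayscale input built in the snippet above
            mask=mask_pt,        # explicit mask instead of letting the model infer it
            num_inference_steps=250,
            seed=10,
        )[0].convert('L')
```

If the model's remote code expects a different mask dtype or polarity (e.g. 1 = keep rather than 1 = inpaint), adjust the binarization accordingly; the keyword name `mask` itself comes from the README's comment.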