michaelapplydesign committed
Commit 04c7187 · 1 Parent(s): 8f75876
Files changed (2):
  1. app.py +28 -3
  2. pipelines.py +0 -2
app.py CHANGED
@@ -3,6 +3,15 @@ import numpy as np
 from models import make_inpainting
 import utils
 
+from transformers import MaskFormerImageProcessor, MaskFormerForInstanceSegmentation
+from PIL import Image
+import requests
+from transformers import pipeline
+import torch
+import random
+import io
+import base64
+
 def removeFurniture(input_img1,
                     input_img2,
                     positive_prompt,
@@ -36,8 +45,24 @@ def removeFurniture(input_img1,
 
     return retList
 
-def segmentation(image):
-    return image
+def imageToString(img):
+
+    output = io.BytesIO()
+    img.save(output, format="png")
+    return output.getvalue()
+
+def segmentation(img):
+    print("segmentation")
+
+    # semantic_segmentation = pipeline("image-segmentation", "nvidia/segformer-b1-finetuned-cityscapes-1024-1024")
+    semantic_segmentation = pipeline("image-segmentation", "facebook/maskformer-swin-large-ade")
+    results = semantic_segmentation(img)
+    for p in results:
+        p['mask'] = utils.image_to_byte_array(p['mask'])
+        p['mask'] = base64.b64encode(p['mask'])
+    #print(results)
+    return str(results)
+
 
 def upscale(image):
     return image
@@ -66,7 +91,7 @@ with gr.Blocks() as app:
             gr.Image(),
             gr.Image()])
         with gr.Column():
-            gr.Button("Segmentation").click(segmentation, inputs=gr.Image(type="pil"), outputs=gr.Image())
+            gr.Button("Segmentation").click(segmentation, inputs=gr.Image(type="pil"), outputs=gr.TextArea())
         with gr.Column():
            gr.Button("Upscale").click(upscale, inputs=gr.Image(type="pil"), outputs=gr.Image())
 
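Note on the new segmentation() endpoint: it returns str(results), i.e. the Python repr of a list of {'score', 'label', 'mask'} dicts in which each 'mask' has been run through utils.image_to_byte_array and then base64-encoded. The minimal sketch below is not part of the commit; it only illustrates how a caller might turn that string back into per-label PIL masks. It assumes utils.image_to_byte_array returns encoded image bytes (e.g. PNG, mirroring the imageToString helper added in the same change), and decode_segmentation_result is a hypothetical helper name.

import ast
import base64
import io

from PIL import Image

def decode_segmentation_result(result_str):
    # segmentation() returns str(results), a Python literal rather than JSON,
    # so parse it with ast.literal_eval instead of json.loads.
    results = ast.literal_eval(result_str)
    masks = {}
    for entry in results:
        # Each 'mask' is base64-encoded image bytes; decode and reload as a PIL image.
        mask_bytes = base64.b64decode(entry["mask"])
        masks[entry["label"]] = Image.open(io.BytesIO(mask_bytes))
    return masks

One design note visible in the diff itself: the transformers pipeline is constructed inside segmentation() on every button click, so each request reloads facebook/maskformer-swin-large-ade; building it once at module level would avoid that cost.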
pipelines.py CHANGED
@@ -2,8 +2,6 @@ import logging
 import torch
 import time
 from diffusers import StableDiffusionInpaintPipeline
-
-# from config import WIDTH, HEIGHT
 from helpers import flush
 
 LOGGING = logging.getLogger(__name__)