Qasaawaleid nateraw committed on
Commit
4d97e95
0 Parent(s):

Duplicate from nateraw/background-remover


Co-authored-by: Nate Raw <[email protected]>

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.github/workflows/check_size.yaml ADDED
@@ -0,0 +1,17 @@
+ name: Check file size
+
+ on:
+   pull_request:
+     branches: [main]
+
+   # to run this workflow manually from the Actions tab
+   workflow_dispatch:
+
+ jobs:
+   sync-to-hub:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Check large files
+         uses: ActionsDesk/[email protected]
+         with:
+           filesizelimit: 10485760 # = 10MB, so we can sync to HF spaces
.github/workflows/sync_to_hub.yaml ADDED
@@ -0,0 +1,20 @@
+ name: Sync to Hugging Face hub
+
+ on:
+   push:
+     branches: [main]
+
+   # to run this workflow manually from the Actions tab
+   workflow_dispatch:
+
+ jobs:
+   sync-to-hub:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v2
+         with:
+           fetch-depth: 0
+       - name: Push to hub
+         env:
+           HF_TOKEN: ${{ secrets.HF_TOKEN }}
+         run: git push https://nateraw:[email protected]/spaces/nateraw/background-remover main --force
README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ title: Background Remover
+ emoji: 🖼️✂️
+ colorFrom: blue
+ colorTo: red
+ sdk: gradio
+ sdk_version: 2.9.4
+ app_file: app.py
+ pinned: false
+ duplicated_from: nateraw/background-remover
+ ---
+
+ # background-remover
+
+ [![Generic badge](https://img.shields.io/badge/🤗-Open%20In%20Spaces-blue.svg)](https://huggingface.co/spaces/nateraw/background-remover)
+
+ A Gradio app to remove the background from an image
+
+ ---
+
+ Autogenerated using [this template](https://github.com/nateraw/spaces-template)
app.py ADDED
@@ -0,0 +1,127 @@
+ import cv2
+ import gradio as gr
+ import numpy as np
+ import onnxruntime
+ import requests
+ from huggingface_hub import hf_hub_download
+ from PIL import Image
+
+
+ # Get x_scale_factor & y_scale_factor to resize image
+ def get_scale_factor(im_h, im_w, ref_size=512):
+
+     if max(im_h, im_w) < ref_size or min(im_h, im_w) > ref_size:
+         if im_w >= im_h:
+             im_rh = ref_size
+             im_rw = int(im_w / im_h * ref_size)
+         elif im_w < im_h:
+             im_rw = ref_size
+             im_rh = int(im_h / im_w * ref_size)
+     else:
+         im_rh = im_h
+         im_rw = im_w
+
+     im_rw = im_rw - im_rw % 32
+     im_rh = im_rh - im_rh % 32
+
+     x_scale_factor = im_rw / im_w
+     y_scale_factor = im_rh / im_h
+
+     return x_scale_factor, y_scale_factor
+
+
+ MODEL_PATH = hf_hub_download('nateraw/background-remover-files', 'modnet.onnx', repo_type='dataset')
+
+
+ def main(image_path, threshold):
+
+     # read image
+     im = cv2.imread(image_path)
+     im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
+
+     # unify image channels to 3
+     if len(im.shape) == 2:
+         im = im[:, :, None]
+     if im.shape[2] == 1:
+         im = np.repeat(im, 3, axis=2)
+     elif im.shape[2] == 4:
+         im = im[:, :, 0:3]
+
+     # normalize values to scale it between -1 to 1
+     im = (im - 127.5) / 127.5
+
+     im_h, im_w, im_c = im.shape
+     x, y = get_scale_factor(im_h, im_w)
+
+     # resize image
+     im = cv2.resize(im, None, fx=x, fy=y, interpolation=cv2.INTER_AREA)
+
+     # prepare input shape
+     im = np.transpose(im)
+     im = np.swapaxes(im, 1, 2)
+     im = np.expand_dims(im, axis=0).astype('float32')
+
+     # Initialize session and get prediction
+     session = onnxruntime.InferenceSession(MODEL_PATH, None)
+     input_name = session.get_inputs()[0].name
+     output_name = session.get_outputs()[0].name
+     result = session.run([output_name], {input_name: im})
+
+     # refine matte
+     matte = (np.squeeze(result[0]) * 255).astype('uint8')
+     matte = cv2.resize(matte, dsize=(im_w, im_h), interpolation=cv2.INTER_AREA)
+
+     # HACK - Could probably just convert this to PIL instead of writing
+     cv2.imwrite('out.png', matte)
+
+     image = Image.open(image_path)
+     matte = Image.open('out.png')
+
+     # obtain predicted foreground
+     image = np.asarray(image)
+     if len(image.shape) == 2:
+         image = image[:, :, None]
+     if image.shape[2] == 1:
+         image = np.repeat(image, 3, axis=2)
+     elif image.shape[2] == 4:
+         image = image[:, :, 0:3]
+
+     b, g, r = cv2.split(image)
+
+     mask = np.asarray(matte)
+     a = np.ones(mask.shape, dtype='uint8') * 255
+     alpha_im = cv2.merge([b, g, r, a], 4)
+     bg = np.zeros(alpha_im.shape)
+     new_mask = np.stack([mask, mask, mask, mask], axis=2)
+     foreground = np.where(new_mask > threshold, alpha_im, bg).astype(np.uint8)
+
+     return Image.fromarray(foreground)
+
+
+ title = "MODNet Background Remover"
+ description = "Gradio demo for MODNet, a model that can remove the background from a given image. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
+ article = "<div style='text-align: center;'> <a href='https://github.com/ZHKKKe/MODNet' target='_blank'>Github Repo</a> | <a href='https://arxiv.org/abs/2011.11961' target='_blank'>MODNet: Real-Time Trimap-Free Portrait Matting via Objective Decomposition</a> </div>"
+
+ url = "https://huggingface.co/datasets/nateraw/background-remover-files/resolve/main/twitter_profile_pic.jpeg"
+ image = Image.open(requests.get(url, stream=True).raw)
+ image.save('twitter_profile_pic.jpg')
+
+ url = "https://upload.wikimedia.org/wikipedia/commons/8/8d/President_Barack_Obama.jpg"
+ image = Image.open(requests.get(url, stream=True).raw)
+ image.save('obama.jpg')
+
+ interface = gr.Interface(
+     fn=main,
+     inputs=[
+         gr.inputs.Image(type='filepath'),
+         gr.inputs.Slider(minimum=0, maximum=250, default=100, step=5, label='Mask Cutoff Threshold'),
+     ],
+     outputs='image',
+     examples=[['twitter_profile_pic.jpg', 120], ['obama.jpg', 155]],
+     title=title,
+     description=description,
+     article=article,
+ )
+
+ if __name__ == '__main__':
+     interface.launch(debug=True)
packages.txt ADDED
@@ -0,0 +1,3 @@
+ ffmpeg
+ libsm6
+ libxext6
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ onnxruntime==1.6.0
+ onnx
+ opencv-python
+ numpy
+ huggingface_hub