ozyman committed on
Commit 90a3b7b · 1 Parent(s): 622fc67

added columns, improved depth, added error flags

Files changed (1)
1. app.py +47 -37
app.py CHANGED
@@ -15,7 +15,7 @@ import pandas as pd
 from skimage.io import imread, imsave
 # from tddfa.TDDFA import TDDFA
 from tddfa.utils.depth import depth
-from tddfa.TDDFA import TDDFA
+from tddfa.TDDFA_ONNX import TDDFA_ONNX
 
 import torch.optim as optim
 from DSDG.DUM.models.CDCNs_u import Conv2d_cd, CDCN_u
@@ -29,13 +29,15 @@ import boto3
 import os
 os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
 os.environ['OMP_NUM_THREADS'] = '4'
+os.environ['AWS_ACCESS_KEY_ID'] = '<redacted>'
+os.environ['AWS_SECRET_ACCESS_KEY'] = '<redacted>'
 
 app_version = 'ddn1'
 
 device = torch.device("cpu")
 labels = ['Live', 'Spoof']
 pix_threshhold = 0.45
-dsdg_threshold = 0.003
+dsdg_threshold = 0.0015
 examples = [
     ['examples/1_1_21_2_33_scene_fake.jpg'],
     ['examples/frame150_real.jpg'],
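Note on the two credential lines added above (literal key values redacted here): boto3 reads AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY from the environment on its own, so the keys never need to appear in app.py. A minimal sketch, assuming the values are provided as Space/CI secrets rather than hardcoded in source:

import os
import boto3

# Fail fast if the secrets were not injected into the environment.
for var in ('AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'):
    if var not in os.environ:
        raise RuntimeError(f'{var} is not set; configure it as a secret, not in code')

s3 = boto3.client('s3')  # picks the credentials up from the environment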
@@ -53,9 +55,9 @@ deepix_model.load_state_dict(torch.load('./DeePixBiS/DeePixBiS.pth'))
 deepix_model.eval()
 
 
-depth_config_path = 'tddfa/configs/mb05_120x120.yml'  # 'tddfa/configs/mb1_120x120.yml'
+depth_config_path = 'tddfa/configs/mb1_120x120.yml'
 cfg = yaml.load(open(depth_config_path), Loader=yaml.SafeLoader)
-tddfa = TDDFA(gpu_mode=False, **cfg)
+tddfa = TDDFA_ONNX(gpu_mode=False, **cfg)
 
 
 cdcn_model = CDCN_u(basic_conv=Conv2d_cd, theta=0.7)
@@ -112,15 +114,18 @@ def find_largest_face(faces):
 
 
 def inference(img):
-    grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
+    if img is None:
+        return None, {}, None, None, {}, None, None
+    grey = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
     faces = faceClassifier.detectMultiScale(
         grey, scaleFactor=1.1, minNeighbors=4)
     face = find_largest_face(faces)
 
     if face is not None:
         x, y, w, h = face
-        faceRegion = img[y:y + h, x:x + w]
-        faceRegion = cv.cvtColor(faceRegion, cv.COLOR_BGR2RGB)
+        x2 = x + w
+        y2 = y + h
+        faceRegion = img[y:y2, x:x2]
         faceRegion = tfms(faceRegion)
         faceRegion = faceRegion.unsqueeze(0)
 
@@ -129,21 +134,19 @@ def inference(img):
         res_deepix = torch.mean(mask).item()
         cls_deepix = 'Real' if res_deepix >= pix_threshhold else 'Spoof'
 
-        label_deepix = f'{cls_deepix} {res_deepix:.2f}'
-        confidences_deepix = {label_deepix: res_deepix}
+        confidences_deepix = {'Real confidence': res_deepix}
         color_deepix = (0, 255, 0) if cls_deepix == 'Real' else (255, 0, 0)
-        img_deepix = cv.rectangle(img.copy(), (x, y), (x + w, y + h), color_deepix, 2)
-        cv.putText(img_deepix, label_deepix, (x, y + h + 30),
+        img_deepix = cv.rectangle(img.copy(), (x, y), (x2, y2), color_deepix, 2)
+        cv.putText(img_deepix, cls_deepix, (x, y2 + 30),
                    cv.FONT_HERSHEY_COMPLEX, 1, color_deepix)
 
         # else:
         dense_flag = True
-        boxes = list(face)
-        boxes.append(1)
-        param_lst, roi_box_lst = tddfa(img, [boxes])
+        box = [x, y, x2, y2, 1]
+        param_lst, roi_box_lst = tddfa(img, [box])
 
         ver_lst = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)
-        depth_img = depth(img, ver_lst, tddfa.tri, with_bg_flag=False)
+        depth_img = depth(img, ver_lst, tddfa.tri, with_bg_flag=True)
         with torch.no_grad():
             map_score_list = []
             image_x, map_x = prepare_data([img], [list(face)], [depth_img])
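For context on the box change above: OpenCV's detectMultiScale returns (x, y, w, h), while the old code appended a score to that tuple; the TDDFA call treats the first four box values as corners, so the new [x, y, x2, y2, 1] passes a corner-format box with a dummy detection score. A minimal sketch of that handoff (values illustrative, not from the app):

# (x, y, w, h) from the Haar cascade -> [x1, y1, x2, y2, score] for TDDFA
x, y, w, h = 64, 48, 120, 120
box = [x, y, x + w, y + h, 1]   # dummy detection score of 1

# then, as in the hunk above:
# param_lst, roi_box_lst = tddfa(img, [box])
# ver_lst = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=True)
# depth_img = depth(img, ver_lst, tddfa.tri, with_bg_flag=True)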
@@ -167,13 +170,12 @@ def inference(img):
             if res_dsdg > 10:
                 res_dsdg = 0.0
             cls_dsdg = 'Real' if res_dsdg >= dsdg_threshold else 'Spoof'
-            res_dsdg = res_dsdg * 100
+            res_dsdg = res_dsdg * 300
 
-            label_dsdg = f'{cls_dsdg} {res_dsdg:.2f}'
-            confidences_dsdg = {label_dsdg: res_deepix}
+            confidences_dsdg = {'Real confidence': res_dsdg}
             color_dsdg = (0, 255, 0) if cls_dsdg == 'Real' else (255, 0, 0)
-            img_dsdg = cv.rectangle(img.copy(), (x, y), (x + w, y + h), color_dsdg, 2)
-            cv.putText(img_dsdg, label_dsdg, (x, y + h + 30),
+            img_dsdg = cv.rectangle(img.copy(), (x, y), (x2, y2), color_dsdg, 2)
+            cv.putText(img_dsdg, cls_dsdg, (x, y2 + 30),
                        cv.FONT_HERSHEY_COMPLEX, 1, color_dsdg)
 
             cls_deepix, cls_dsdg = [1 if cls_ == 'Real' else 0 for cls_ in [cls_deepix, cls_dsdg]]
@@ -186,6 +188,12 @@ def inference(img):
 def upload_to_s3(image_array, app_version, *labels):
     folder = 'demo'
     bucket_name = 'livenessng'
+    if image_array is None:
+        return 'Error. Take a photo first.'
+    elif labels[-2] == -1:
+        return 'Error. Run the detection first.'
+    elif labels[0] is None:
+        return 'Error. Select the true label first.'
 
     # Initialize S3 client
     s3 = boto3.client('s3')
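A reading of the new guards above, given how the Flag button is wired in the last hunk (inputs are [input_img, app_version_block, radio] + labels): *labels receives the radio choice followed by the two hidden gr.Number flags, so labels[0] is None until a true label is picked and labels[-2] still holds the Numbers' default of -1 until inference has run. A small sketch with illustrative names and values:

def unpack(image_array, app_version, *labels):
    radio_choice, flag_deepix, flag_dsdg = labels   # names illustrative
    return radio_choice, flag_deepix, flag_dsdg

# Before "Run" is clicked the hidden flags keep their default of -1
# and the radio has no selection yet:
print(unpack(None, 'ddn1', None, -1, -1))   # -> (None, -1, -1)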
@@ -212,25 +220,27 @@ def upload_to_s3(image_array, app_version, *labels):
 demo = gr.Blocks()
 
 with demo:
-    input_img = gr.Image(source='webcam', shape=None, type='numpy')
-    btn_run = gr.Button(value="Run")
-    with gr.Column():
-        outputs=[
-            gr.Image(label='DeePixBiS', type='numpy'),
-            gr.Label(num_top_classes=2, label='DeePixBiS'),
-            gr.Image(label='DSDG', type='numpy'),
-            gr.Label(num_top_classes=2, label='DSDG')]
-    labels = [gr.Number(visible=False), gr.Number(visible=False)]
+    with gr.Row():
+        with gr.Column():
+            input_img = gr.Image(source='webcam', shape=None, type='numpy')
+            btn_run = gr.Button(value="Run")
+        with gr.Column():
+            outputs=[
+                gr.Image(label='DeePixBiS', type='numpy'),
+                gr.Label(num_top_classes=2, label='DeePixBiS'),
+                gr.Image(label='DSDG', type='numpy'),
+                gr.Label(num_top_classes=2, label='DSDG')]
+        with gr.Column():
+            radio = gr.Radio(
+                ["Real", "Spoof", "None"], label="True label", type='index')
+            flag = gr.Button(value="Flag")
+            status = gr.Textbox()
+            example_block = gr.Examples(examples, [input_img], outputs+labels)
+
+    labels = [gr.Number(visible=False, value=-1), gr.Number(visible=False, value=-1)]
     btn_run.click(inference, [input_img], outputs+labels)
-
     app_version_block = gr.Textbox(value=app_version, visible=False)
-    with gr.Column():
-        radio = gr.Radio(
-            ["Real", "Spoof", "None"], label="True label", type='index'
-        )
-        flag = gr.Button(value="Flag")
-        status = gr.Textbox()
-        flag.click(upload_to_s3, [input_img, app_version_block, radio]+labels, [status], show_progress=True)
+    flag.click(upload_to_s3, [input_img, app_version_block, radio]+labels, [status], show_progress=True)
 
 
 if __name__ == '__main__':
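The layout hunk above also turns the two hidden gr.Number components into lightweight state: "Run" writes the per-model decisions into them, and "Flag" reads them back together with the image and the radio choice. A self-contained sketch of that pattern in the same Gradio 3.x style (component names are illustrative, not taken from app.py):

import gradio as gr

def run(x):
    return x * 2                      # stands in for the model decision

def flag(choice, result):
    if result == -1:
        return 'Error. Run the detection first.'
    return f'flagged: choice={choice}, result={result}'

with gr.Blocks() as demo:
    inp = gr.Number(value=3)
    btn_run = gr.Button('Run')
    result = gr.Number(visible=False, value=-1)   # hidden flag filled by Run
    radio = gr.Radio(['Real', 'Spoof'], label='True label')
    btn_flag = gr.Button('Flag')
    status = gr.Textbox()

    btn_run.click(run, [inp], [result])
    btn_flag.click(flag, [radio, result], [status])

if __name__ == '__main__':
    demo.launch()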
 