Spaces (build status: Build error)
has12zen committed c379def (1 parent: 57d1faf): Test

Files changed:
- .gitignore +3 -0
- app.py +17 -0
- requirements.txt +5 -0
- utils.py +260 -0
.gitignore
ADDED
@@ -0,0 +1,3 @@
+A4/
+__pycache__/
+frame_processing.log
app.py
ADDED
@@ -0,0 +1,17 @@
+import gradio as gr
+from utils import *
+
+with gr.Blocks() as demo:
+    gr.Markdown("# RESIDUAL-BASED FORENSIC COMPARISON OF VIDEO SEQUENCES")
+    with gr.Tab(""):
+        with gr.Row():
+            with gr.Column():
+                v1 = gr.Video(label="Forged Video")
+                v2 = gr.Video(label="Original Video")
+                encrypt_output = gr.Video(label="Mahalanobis distance")
+                decrypt_output = gr.Textbox(lines=1, label="output")
+                encrypt_button = gr.Button("Process")
+
+    encrypt_button.click(final_main, inputs=[v1, v2], outputs=[encrypt_output, decrypt_output])
+
+demo.launch(share=False)
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+gradio
+opencv-python
+Pillow
+numpy
+matplotlib  # utils.py imports matplotlib.pyplot; the stdlib logging module needs no pip package
utils.py
ADDED
@@ -0,0 +1,260 @@
+import matplotlib.pyplot as plt
+from PIL import ImageFont
+from PIL import ImageDraw
+import multiprocessing
+from PIL import Image
+import numpy as np
+import itertools
+import logging
+import math
+import cv2
+import os
+
+
+logging.basicConfig(filename=f'{os.getcwd()}/frame_processing.log', level=logging.INFO)
+logging.info('Starting frame processing')
+fps = 0  # frame rate of the most recently read video
+
+def read_file(name):
+    """Read a video and return a list of (grayscale frame, index) tuples."""
+    global fps
+    cap = cv2.VideoCapture(name)
+    if not cap.isOpened():
+        logging.error("Cannot open video")
+        raise RuntimeError(f"Cannot open video: {name}")
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    frames = []
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            logging.info("Can't receive frame (stream end?). Exiting ...")
+            break
+        frames.append(frame)
+    cap.release()
+
+    for i in range(len(frames)):
+        frames[i] = cv2.cvtColor(frames[i], cv2.COLOR_BGR2GRAY)
+
+    frames_with_index = [(frame, i) for i, frame in enumerate(frames)]
+    return frames_with_index
+
+# Index table for the co-occurrence histograms: every 4-tuple over {0..4}
+# shares a bin with its mirror image, giving 325 symmetry classes in total.
+st = [0, 1, 2, 3, 4]
+dt = {}
+l = [tuple(i) for i in itertools.product(st, repeat=4) if tuple(reversed(i)) >= tuple(i)]
+for i in range(len(l)):
+    lt = l[i]
+    mirror = tuple(reversed(lt))
+    dt[mirror] = i
+    dt[lt] = i
+
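Why 325 bins: of the 5^4 = 625 possible 4-tuples over {0..4}, 25 are palindromes and the remaining 600 pair up with their mirror image, giving 25 + 600/2 = 325 symmetry classes. A minimal check of the table built above:

    import itertools
    classes = {min(tup, tuple(reversed(tup))) for tup in itertools.product(range(5), repeat=4)}
    assert len(classes) == 325  # matches np.zeros(325) in final() below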
+def calc_filtered_img(img):
+    """Third-order horizontal residual: r[i, j] = img[i, j-1] - 3*img[i, j] + 3*img[i, j+1] - img[i, j+2]."""
+    img = img.astype(np.int32)  # avoid uint8 overflow in the arithmetic below
+    residual_img = np.zeros(img.shape)
+    for i in range(img.shape[0]):
+        for j in range(img.shape[1]):
+            residual_img[i, j] = -3 * img[i, j]
+            if j > 0:
+                residual_img[i, j] += img[i, j - 1]
+            if j + 1 < img.shape[1]:
+                residual_img[i, j] += 3 * img[i, j + 1]
+            if j + 2 < img.shape[1]:
+                residual_img[i, j] -= img[i, j + 2]
+    return residual_img
+
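The nested loops above do O(H*W) Python-level work per frame. For the interior columns the same residual can be computed in one vectorized step; a sketch (the boundary columns, which the loop fills with partial sums, would still need the scalar path):

    import numpy as np

    def calc_filtered_img_fast(img):
        img = img.astype(np.float64)
        residual = np.zeros_like(img)
        # columns 1 .. W-3: all four taps j-1, j, j+1, j+2 are in range
        residual[:, 1:-2] = img[:, :-3] - 3 * img[:, 1:-2] + 3 * img[:, 2:-1] - img[:, 3:]
        return residual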
+def calc_q_t_img(img, q, t):
+    """Quantize by q and truncate to [-t, t]."""
+    qt_img = np.zeros(img.shape)
+    for i in range(img.shape[0]):
+        for j in range(img.shape[1]):
+            qt_img[i, j] = np.minimum(t, np.maximum(-t, np.round(img[i, j] / q)))
+    return qt_img
+
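With q = 3 and t = 2 (set just below), each residual is divided by q, rounded, and clipped to [-t, t], so every output value lands in the five-symbol alphabet {-2, ..., 2} that the index table above covers after the +t shift in process_frame. A quick worked example:

    import numpy as np
    q, t = 3, 2
    vals = np.array([7.0, -14.0, 1.0])
    print(np.minimum(t, np.maximum(-t, np.round(vals / q))))  # [ 2. -2.  0.]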
+def process_frame(frame_and_index):
+    """Residual-filter and quantize one frame, shifted into [0, 2t] for uint8 storage."""
+    frame, index = frame_and_index
+    filtered_image = calc_filtered_img(frame)
+    output_image = calc_q_t_img(filtered_image, q, t)
+    output_image = output_image + t  # shift values from [-t, t] to [0, 2t]
+    return output_image.astype(np.uint8)
+
+
+q = 3
+t = 2
+
+def process_video(frames_with_index):
+    num_processes = multiprocessing.cpu_count()
+    logging.info(f"Using {num_processes} processes")
+    pool = multiprocessing.Pool(num_processes)
+    # filter and quantize the frames in parallel
+    processed_frames = pool.map(process_frame, frames_with_index)
+    pool.close()
+    pool.join()
+    processed_frame_with_index = [(frame, i) for i, frame in enumerate(processed_frames)]
+    return processed_frame_with_index
+
+co_occurrence_matrix_size = 5
+co_occurrence_matrix_distance = 4
+
+def each_frame(frame_and_index, processed_frames):
+    """Build horizontal, vertical, and temporal co-occurrence histograms for one frame."""
+    frame, index = frame_and_index
+    # horizontal: 4-pixel runs along each row
+    freq_dict = {}
+    for i in range(frame.shape[0]):
+        row = frame[i]
+        for j in range(frame.shape[1] - co_occurrence_matrix_distance):
+            k1 = tuple(row[j:j + 4])
+            freq_dict[k1] = freq_dict.get(k1, 0) + 1
+    # vertical: 4-pixel runs along each column
+    freq_dict2 = {}
+    for j in range(frame.shape[1]):
+        column = frame[:, j]
+        for i in range(frame.shape[0] - co_occurrence_matrix_distance):
+            k2 = tuple(column[i:i + 4])
+            freq_dict2[k2] = freq_dict2.get(k2, 0) + 1
+    # temporal: the same pixel position across this frame and the next three
+    freq_dict3 = {}
+    if index < len(processed_frames) - 3:
+        for i in range(frame.shape[0]):
+            for j in range(frame.shape[1]):
+                k = (frame[i, j],
+                     processed_frames[index + 1][i, j],
+                     processed_frames[index + 2][i, j],
+                     processed_frames[index + 3][i, j])
+                freq_dict3[k] = freq_dict3.get(k, 0) + 1
+    logging.info(f"hist made for frame {index}")
+    return (freq_dict, freq_dict2, freq_dict3)
+
+def extract_video(processed_frame_with_index):
+    processed_frames = [frame for frame, index in processed_frame_with_index]
+    num_processes = multiprocessing.cpu_count()
+    logging.info(f"Using {num_processes} processes for histogram extraction")
+    pool = multiprocessing.Pool(num_processes)
+    # build the per-frame histograms in parallel
+    freq_dict_list = pool.starmap(each_frame, zip(processed_frame_with_index, itertools.repeat(processed_frames)))
+    pool.close()
+    pool.join()
+    return freq_dict_list
+
+def final(freq_dict_list):
+    """Convert per-frame histograms into 325-bin descriptors and estimate their mean/covariance."""
+    descriptors = []
+    for freq_dicts in freq_dict_list:
+        di1 = []
+        for freq_dict in freq_dicts:
+            frame = np.zeros(325)
+            for (k, v) in freq_dict.items():
+                frame[dt[k]] += v
+            di1.append(frame)
+        descriptors.append(di1)
+    descriptors = np.array(descriptors)  # shape: (num_frames, 3, 325)
+
+    # flattened statistics; with 1-D vectors np.matmul(tmp, tmp.T) is a dot
+    # product, so co_variance_1d accumulates a scalar total squared deviation
+    desc_1d = descriptors.reshape(descriptors.shape[0], -1)
+    mean_1d = np.mean(desc_1d, axis=0)
+    co_variance_1d = np.zeros((1, 1))
+    for frame in desc_1d:
+        tmp = frame - mean_1d
+        co_variance_1d += np.matmul(tmp, tmp.T)
+    co_variance_1d = co_variance_1d / len(desc_1d)
+
+    # per-histogram statistics: mean is (3, 325), covariance is (3, 3)
+    mean = np.mean(descriptors, axis=0)
+    co_variance = np.zeros((3, 3))
+    for frame in descriptors:
+        tmp = frame - mean
+        co_variance += np.matmul(tmp, tmp.T)
+    co_variance = co_variance / len(descriptors)
+    return (mean, co_variance, descriptors, mean_1d, co_variance_1d, desc_1d)
+
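The statistics returned here feed a Mahalanobis-style distance in final_main below: for a frame descriptor g with reference mean mu and covariance Sigma, d = sqrt(|(g - mu)^T Sigma^-1 (g - mu)|). A small sketch of the same computation with hypothetical toy values:

    import numpy as np
    g = np.array([1.0, 2.0, 3.0])    # toy descriptor (hypothetical)
    mu = np.array([0.0, 1.0, 1.0])   # reference mean
    S = np.diag([1.0, 4.0, 9.0])     # reference covariance
    d = np.sqrt((g - mu) @ np.linalg.inv(S) @ (g - mu))
    print(d)  # sqrt(1 + 1/4 + 4/9) ~ 1.30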
+def final_main(input1, input2):
+    f1 = read_file(input1)
+    of1 = read_file(input2)
+    pf1 = process_video(f1)
+    pof1 = process_video(of1)
+    fd1 = extract_video(pf1)
+    ofd1 = extract_video(pof1)
+    mean1, co_variance1, disc1, mean_1d_1, co_variance_1d_1, desc_1d_1 = final(fd1)
+    mean2, co_variance2, disc2, mean_1d_2, co_variance_1d_2, desc_1d_2 = final(ofd1)
+
+    # distance of every frame descriptor from the original video's statistics
+    inv_cov2 = np.linalg.inv(co_variance2)
+    distances = []
+    for index, disc in enumerate(disc1):
+        gm = disc - mean2
+        dm = np.matmul(np.matmul(gm.T, inv_cov2), gm)
+        distances.append(np.sqrt(np.abs(dm)))
+    distances = np.array(distances)
+
+    dist2 = []
+    for index, disc in enumerate(disc2):
+        gm = disc - mean2
+        dm = np.matmul(np.matmul(gm.T, inv_cov2), gm)
+        dist2.append(np.sqrt(np.abs(dm)))
+    dist2 = np.array(dist2)
+
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+    height = f1[0][0].shape[0] + of1[0][0].shape[0]
+    width = 325 + f1[0][0].shape[1]
+    video = cv2.VideoWriter('video.mp4', fourcc, 30, (width, height))
+    initial_diff, final_diff = 10000, -1
+
+    for index, dist in enumerate(distances):
+        heatmap = dist
+        frame, index = f1[index]
+        different = False
+        frame2 = of1[index][0] if index < len(of1) else np.zeros_like(frame)
+        if index < len(of1):
+            diff = dist - dist2[index]
+            if not np.allclose(diff, np.zeros(diff.shape)):
+                different = True
+                initial_diff = min(initial_diff, index)
+                final_diff = max(final_diff, index)
+
+        new_im = Image.new('RGB', (width, height))
+        new_im.paste(Image.fromarray(frame), (0, 0))
+        new_im.paste(Image.fromarray(frame2), (0, frame.shape[0]))
+        heatmapshow = cv2.normalize(heatmap, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
+        heatmapshow = cv2.applyColorMap(heatmapshow, cv2.COLORMAP_JET)
+        heatmapshow = cv2.cvtColor(heatmapshow, cv2.COLOR_BGR2RGB)  # applyColorMap returns BGR; PIL expects RGB
+        new_im.paste(Image.fromarray(heatmapshow), (frame.shape[1], 0))
+
+        draw = ImageDraw.Draw(new_im)
+        text = "The images are different." if different else "The images are the same."
+        # Pillow 10 removed ImageDraw.textsize; textbbox is the supported replacement
+        left, top, right, bottom = draw.textbbox((0, 0), text)
+        text_width, text_height = right - left, bottom - top
+        x = (new_im.width - text_width) / 2
+        y = (new_im.height - text_height) / 2
+        draw.text((x, y), text, fill=(255, 255, 255))
+
+        new_im = np.array(new_im)
+        video.write(cv2.cvtColor(new_im, cv2.COLOR_RGB2BGR))  # VideoWriter expects BGR
+    video.release()
+
+    outputString = ""
+    if initial_diff != 10000:
+        outputString += f"Initial difference at frame {initial_diff} at time {initial_diff / fps:.2f} seconds. "
+        outputString += f"Final difference at frame {final_diff} at time {final_diff / fps:.2f} seconds."
+    return ("video.mp4", outputString)
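Since app.py only wires final_main into the Gradio UI, the pipeline can also be exercised headlessly; a minimal sketch, assuming two local clips forged.mp4 and original.mp4 (hypothetical filenames):

    from utils import final_main

    video_path, summary = final_main("forged.mp4", "original.mp4")
    print(video_path)  # "video.mp4": stacked frames plus per-frame distance heatmap
    print(summary)     # first/last differing frame and timestamps; empty if none differ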