Prathmesh Patil
committed on
Delete code.py
code.py
DELETED
@@ -1,60 +0,0 @@
# Import the libraries needed for transfer learning with VGG16
import numpy as np
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Load the pre-trained VGG16 model (ImageNet weights, without the classification head)
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))

# Freeze the base model layers so only the new head is trained
for layer in base_model.layers:
    layer.trainable = False

# Add custom layers for face classification
x = base_model.output
x = Flatten()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(1, activation='sigmoid')(x)

# Create the final model
model = Model(inputs=base_model.input, outputs=predictions)

# Compile the model for binary (real vs. fake) classification
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Define data generators for training and validation, reusing VGG16's preprocessing
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)

train_data = data_generator.flow_from_directory(
    'img_for_deepfake_detection/train',
    target_size=(224, 224),
    batch_size=32,
    class_mode='binary',
)

valid_data = data_generator.flow_from_directory(
    'img_for_deepfake_detection/valid',
    target_size=(224, 224),
    batch_size=32,
    class_mode='binary',
)

# Train the model
model.fit(train_data, epochs=10, validation_data=valid_data)

# Evaluate the model on the validation data
loss, accuracy = model.evaluate(valid_data)
print(f'Validation Accuracy: {accuracy*100:.2f}%')
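For context, a minimal inference sketch of how the trained classifier could be applied to a single image after the script above finishes training. None of this was in the deleted file: the image path is hypothetical, and which folder name maps to label 1 depends on the alphabetical ordering used by flow_from_directory (check train_data.class_indices before interpreting the output).

# Assumed usage example (not from the deleted file): classify one image
# with the model trained above. 'sample_face.jpg' is a hypothetical path.
from tensorflow.keras.preprocessing import image as keras_image

img = keras_image.load_img('sample_face.jpg', target_size=(224, 224))
arr = keras_image.img_to_array(img)
arr = preprocess_input(np.expand_dims(arr, axis=0))  # same preprocessing as training
prob = float(model.predict(arr)[0][0])               # sigmoid output in [0, 1]
print(f'Probability of class 1 (see train_data.class_indices): {prob:.3f}')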