import os

import tensorflow as tf
from huggingface_hub import upload_folder
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Hugging Face access token, read from the 'accesstoken' environment variable
hf_token = os.getenv('accesstoken')


# Define data paths (modify as needed)
train_data_dir = 'train'
validation_data_dir = 'valid'
test_data_dir = 'valid'  # No separate test split here; the validation set is reused for the final evaluation

# Set image dimensions (adjust if necessary)
img_width, img_height = 224, 224  # VGG16 expects these dimensions

# Data augmentation for improved generalization (optional)
train_datagen = ImageDataGenerator(
    rescale=1./255,  # Normalize pixel values
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)

validation_datagen = ImageDataGenerator(rescale=1./255)  # Only rescale for validation

# Load training and validation data
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,  # Adjust batch size based on GPU memory
    class_mode='binary'  # Two classes: cat or dog
)

validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,
    class_mode='binary'
)
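
# Sanity-check the label mapping: flow_from_directory assigns class indices
# alphabetically from the subfolder names (e.g. {'cats': 0, 'dogs': 1},
# assuming the class subfolders are named 'cats' and 'dogs')
print('Class indices:', train_generator.class_indices)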

# Load pre-trained VGG16 model (without the top layers)
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))

# Freeze the base model layers so only the new classification head trains first
# (optional; see the fine-tuning sketch after the initial training run below)
base_model.trainable = False

# Add custom layers for classification on top of the pre-trained model
x = base_model.output
x = Flatten()(x)
predictions = Dense(1, activation='sigmoid')(x)  # Sigmoid for binary classification

# Create the final model
model = tf.keras.Model(inputs=base_model.input, outputs=predictions)

# Compile the model
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
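
# Optional: an early-stopping callback halts a longer run once validation loss
# stops improving; a sketch, pass callbacks=[early_stop] to model.fit below.
# early_stop = tf.keras.callbacks.EarlyStopping(
#     monitor='val_loss', patience=3, restore_best_weights=True)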

# Train the model
history = model.fit(
    train_generator,
    epochs=1,  # Adjust number of epochs based on dataset size and validation performance
    validation_data=validation_generator
)
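
# Optional fine-tuning sketch, as flagged at the freeze step above: unfreeze
# VGG16's last convolutional block and recompile with a much lower learning
# rate so the pre-trained weights are only nudged. The 'block5' prefix matches
# VGG16's layer names; the learning rate and epoch count are illustrative.
# base_model.trainable = True
# for layer in base_model.layers:
#     layer.trainable = layer.name.startswith('block5')
# model.compile(loss='binary_crossentropy',
#               optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),
#               metrics=['accuracy'])
# model.fit(train_generator, epochs=3, validation_data=validation_generator)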

# Evaluate the model on test data (optional)
test_generator = validation_datagen.flow_from_directory(
    test_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,
    class_mode='binary'
)

test_loss, test_acc = model.evaluate(test_generator)
print('Test accuracy:', test_acc)
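
# Single-image prediction sketch; the file path below is hypothetical, and the
# image needs the same 1/255 rescaling the generators applied during training.
# from tensorflow.keras.preprocessing import image
# import numpy as np
# img = image.load_img('valid/cats/example.jpg', target_size=(img_width, img_height))
# arr = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)
# prob = model.predict(arr)[0][0]  # sigmoid output; values near 1.0 mean class index 1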

# Export the model in TensorFlow's SavedModel format for the Hugging Face Hub
# upload below (tf.saved_model.save is preferred here over saving a bare .h5)
export_dir = 'saved_model'  # Directory that will hold the SavedModel
tf.saved_model.save(model, export_dir)
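
# Optional round-trip check: reload the export and confirm it still evaluates;
# tf.keras.models.load_model can read a SavedModel directory in TF 2.x.
# reloaded = tf.keras.models.load_model(export_dir)
# reloaded.evaluate(test_generator)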

# Upload the SavedModel directory to the Hugging Face Hub. upload_file() only
# pushes a single file, so upload_folder() is used for the directory. This
# assumes the repo okeowo1014/catsanddogs already exists (create_repo() from
# huggingface_hub can create it first); tags such as "image-classifier" belong
# in the repo's model card metadata, not in the upload call.
upload_folder(
    folder_path=export_dir,  # Point to the SavedModel directory
    repo_id="okeowo1014/catsanddogs",
    commit_message="cats and dog image classifier with transfer learning",
    token=hf_token
)
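
# Consumers can later fetch the model with snapshot_download, which pulls the
# whole repo into a local cache; a usage sketch, assuming the upload succeeded
# and the SavedModel files sit at the repo root.
# from huggingface_hub import snapshot_download
# local_dir = snapshot_download(repo_id="okeowo1014/catsanddogs")
# downloaded = tf.keras.models.load_model(local_dir)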