import os
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Flatten, Dense
from huggingface_hub import push_to_hub_keras
from huggingface_hub import HfApi  # only needed for the optional upload_folder path at the end
api = HfApi()
# Environment variable for Hugging Face token
sac = os.getenv('accesstoken')
# Define data paths (modify as needed)
train_data_dir = 'tt'
validation_data_dir = 'valid'
test_data_dir = 'valid'  # the validation set is reused as the test set
# Set image dimensions (adjust if necessary)
img_width, img_height = 224, 224 # VGG16 expects these dimensions
# Data augmentation for improved generalization (optional)
train_datagen = ImageDataGenerator(
    rescale=1./255,  # Normalize pixel values
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)
validation_datagen = ImageDataGenerator(rescale=1./255) # Only rescale for validation
# Load training and validation data
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,  # Adjust batch size based on GPU memory
    class_mode='binary'  # Two classes: cat or dog
)
validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,
    class_mode='binary'
)
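# The Hub tags near the end of this script mention class weights, but none are
# computed here. A sketch (an assumption, left commented out) of balanced weights
# derived from the training generator, which could be passed to model.fit via
# class_weight=...:
# import numpy as np
# counts = np.bincount(train_generator.classes)
# class_weight = {i: len(train_generator.classes) / (len(counts) * n)
#                 for i, n in enumerate(counts)}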
# Load pre-trained VGG16 model (without the top layers)
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
# Freeze the base model layers (optional - experiment with unfreezing for fine-tuning)
base_model.trainable = False
# Add custom layers for classification on top of the pre-trained model
x = base_model.output
x = Flatten()(x)
predictions = Dense(1, activation='sigmoid')(x) # Sigmoid for binary classification
# Create the final model
model = tf.keras.Model(inputs=base_model.input, outputs=predictions)
# Compile the model
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Train the model
history = model.fit(
    train_generator,
    epochs=1,  # Adjust the number of epochs based on dataset size and validation performance
    validation_data=validation_generator
)
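# Optional fine-tuning sketch (not part of the original script): once the new head
# has been trained, the last VGG16 convolutional block could be unfrozen and the
# model re-compiled with a small learning rate for a second round of training.
# base_model.trainable = True
# for layer in base_model.layers:
#     if not layer.name.startswith('block5'):
#         layer.trainable = False  # keep everything below block5 frozen
# model.compile(loss='binary_crossentropy',
#               optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),
#               metrics=['accuracy'])
# model.fit(train_generator, epochs=1, validation_data=validation_generator)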
# Evaluate the model on test data (optional)
test_generator = validation_datagen.flow_from_directory(
    test_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,
    class_mode='binary'
)
test_loss, test_acc = model.evaluate(test_generator)
print('Test accuracy:', test_acc)
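# Example single-image prediction (a sketch; 'sample.jpg' is a hypothetical path,
# not part of the original script):
# from tensorflow.keras.preprocessing import image
# import numpy as np
# print(train_generator.class_indices)  # folder-name -> 0/1 label mapping
# img = image.load_img('sample.jpg', target_size=(img_width, img_height))
# arr = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)
# prob = model.predict(arr)[0][0]  # sigmoid output = probability of class index 1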
# Export the model in TensorFlow SavedModel format for future use (optional).
# The SavedModel directory is only needed for the alternative upload_folder path at
# the end; push_to_hub_keras below takes the in-memory Keras model directly.
export_dir = 'saved_model'  # Directory for the SavedModel export
tf.saved_model.save(model, export_dir)
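# Optional sanity check (a sketch, not part of the original script): reload the
# exported SavedModel to confirm it is usable; tf.keras.models.load_model accepts
# a SavedModel directory in TF2.
# reloaded = tf.keras.models.load_model(export_dir)
# reloaded.summary()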
# Alternatively, save the Keras model to a local directory (commented out):
# model.save('kerascatanddog')
# Upload the model to your Hugging Face space repository
push_to_hub_keras(
    model,  # the Keras model object is pushed directly (not the SavedModel directory)
    repo_id="okeowo1014/kerascatanddog",
    commit_message="cats and dog image classifier with transfer learning",
    tags=["image-classifier", "data-augmentation", "class-weights"],
    include_optimizer=True,
    token=sac
)
# Alternative: upload the SavedModel directory to the Hugging Face Hub instead
# api.upload_folder(folder_path=export_dir, repo_id="okeowo1014/catsanddogs", token=sac)