import os

import tensorflow as tf
from huggingface_hub import push_to_hub_keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Flatten, Dense

# Hugging Face token read from an environment variable
sac = os.getenv('accesstoken')

# Define data paths (modify as needed)
train_data_dir = 'tt'
validation_data_dir = 'valid'
test_data_dir = 'valid'

# Set image dimensions (VGG16 expects 224x224 inputs)
img_width, img_height = 224, 224

# Data augmentation for improved generalization (optional)
train_datagen = ImageDataGenerator(
    rescale=1. / 255,  # Normalize pixel values to [0, 1]
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)

# Only rescale for validation; no augmentation
validation_datagen = ImageDataGenerator(rescale=1. / 255)

# Load training and validation data
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,  # Adjust batch size based on GPU memory
    class_mode='binary'  # Two classes: cat or dog
)

validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,
    class_mode='binary'
)

# Load the pre-trained VGG16 model (without the top layers)
base_model = VGG16(weights='imagenet', include_top=False,
                   input_shape=(img_width, img_height, 3))

# Freeze the base model layers (optional - experiment with unfreezing for fine-tuning)
base_model.trainable = False

# Add custom classification layers on top of the pre-trained model
x = base_model.output
x = Flatten()(x)
predictions = Dense(1, activation='sigmoid')(x)  # Sigmoid for binary classification

# Create the final model
model = tf.keras.Model(inputs=base_model.input, outputs=predictions)

# Compile the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train the model
history = model.fit(
    train_generator,
    epochs=1,  # Adjust number of epochs based on dataset size and validation performance
    validation_data=validation_generator
)

# Evaluate the model on test data (optional)
test_generator = validation_datagen.flow_from_directory(
    test_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,
    class_mode='binary'
)
test_loss, test_acc = model.evaluate(test_generator)
print('Test accuracy:', test_acc)

# Earlier saving experiments (kept for reference; a SavedModel export via
# tf.saved_model.save can also be used for Hub upload):
# export_dir = 'saved_model'  # directory for the SavedModel
# tf.saved_model.save(model, export_dir)
# model.save('cat_dog_classifier2.h5', save_format='h5')
# new_model = tf.keras.models.load_model('cat_dog_classifier2.h5')

# Show the model architecture
model.summary()

# Upload the model to your Hugging Face repository with full metadata (alternative):
# push_to_hub_keras(
#     model,
#     repo_id="okeowo1014/kerascatanddog.keras",
#     commit_message="cats and dogs image classifier with transfer learning",
#     tags=["image-classifier", "data-augmentation", "class-weights"],
#     include_optimizer=True,
#     token=sac,
# )

# Save the model in the native Keras format (.keras extension)
model.save('cat_dog_classifierss.keras')
# Equivalent: tf.keras.models.save_model(model, 'cat_dog_classifierss.keras')
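# -----------------------------------------------------------------------------
# Optional: class weights. The push_to_hub_keras metadata above tags the model
# with "class-weights", but the script never computes any. A minimal sketch,
# assuming scikit-learn is installed; to actually take effect, the resulting
# dict would be passed as model.fit(..., class_weight=class_weights) before
# training, not at this point in the script.
# -----------------------------------------------------------------------------
import numpy as np
from sklearn.utils.class_weight import compute_class_weight

labels = train_generator.classes  # one integer label per training image
weights = compute_class_weight(class_weight='balanced',
                               classes=np.unique(labels), y=labels)
class_weights = dict(enumerate(weights))  # e.g. {0: 1.02, 1: 0.98}
print('Balanced class weights:', class_weights)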
# Authenticate with Hugging Face (the token is read from the environment above)
# from huggingface_hub import login
# login(token=sac)

# Recreate the exact same model purely from the file
new_model = tf.keras.models.load_model('cat_dog_classifierss.keras')

# Push the model to the Hugging Face Hub (repo_id may be prefixed with an organization)
model_repo = push_to_hub_keras(new_model, repo_id="okeowo1014/kerascatanddog", token=sac)
print(f"Model pushed to Hugging Face Hub: {model_repo}")
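# -----------------------------------------------------------------------------
# Optional: reload the pushed model from the Hub as a round-trip sanity check.
# A minimal sketch using from_pretrained_keras, the download counterpart to
# push_to_hub_keras; it assumes the push above succeeded and that the repo is
# readable with the current credentials.
# -----------------------------------------------------------------------------
from huggingface_hub import from_pretrained_keras

hub_model = from_pretrained_keras("okeowo1014/kerascatanddog")
hub_model.summary()  # should match the architecture printed earlier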