"""Train a grayscale CNN fingerprint classifier and push it to the Hugging Face Hub."""

import os

import tensorflow as tf
from huggingface_hub import push_to_hub_keras
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Hugging Face access token comes from the environment so it is never hard-coded.
sac = os.getenv('accesstoken')

# Hyperparameters.
batch_size = 60
epochs = 10
learning_rate = 0.001
num_classes = 10  # fallback; overridden from the dataset's class count below

# Augmentation only on the training stream; validation/test get plain rescaling.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)
val_datagen = ImageDataGenerator(rescale=1. / 255)

# Directory layout expected by flow_from_directory: one sub-directory per class.
train_data_dir = "datasets"
val_data_dir = "first_ten_files"

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(224, 224),  # adjust image size as needed
    batch_size=batch_size,
    color_mode='grayscale',  # model's input_shape expects a single channel
)
val_generator = val_datagen.flow_from_directory(
    val_data_dir,
    target_size=(224, 224),
    batch_size=batch_size,
    color_mode='grayscale',
)
# NOTE(review): the test generator reads the same directory as validation, so
# the reported "test" accuracy mirrors validation accuracy — confirm intended.
test_generator = val_datagen.flow_from_directory(
    val_data_dir,
    target_size=(224, 224),
    batch_size=batch_size,
    color_mode='grayscale',
)

# Derive the class count from the data so the softmax layer always matches the
# directory structure (a mismatch with the hard-coded constant would make
# categorical_crossentropy fail at fit time).
num_classes = train_generator.num_classes

# Simple three-stage convolutional classifier over 224x224 grayscale images.
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(224, 224, 1)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(256, activation='relu'),
    Dense(num_classes, activation='softmax'),
])

model.compile(
    loss='categorical_crossentropy',  # generators yield one-hot labels by default
    optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
    metrics=['accuracy'],
)

# Train, then evaluate on the held-out stream.
model.fit(train_generator, epochs=epochs, validation_data=val_generator)

test_loss, test_acc = model.evaluate(test_generator)
# Fixed: the original string literal was broken across a physical line
# ('Test \n accuracy:'), which is a SyntaxError.
print('Test accuracy:', test_acc)

# Invert class_indices to map predicted index -> class name for inference.
labels = {v: k for k, v in train_generator.class_indices.items()}
print(labels)

# Persist locally, show the architecture, then publish to the Hub.
model.save('fingerprint_recognition_model.keras')
model.summary()

push_to_hub_keras(
    model,
    repo_id="okeowo1014/fingerprintgrecognizer",
    commit_message="Initial commit model",
    token=sac,
    include_optimizer=True,
)
print("Model pushed to Hugging Face Hub")