import os
import tensorflow as tf
from huggingface_hub import push_to_hub_keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Flatten, Dense


# Hugging Face access token, read from the 'accesstoken' environment variable
sac = os.getenv('accesstoken')
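
# Optional guard (a minimal sketch): failing fast here is friendlier than a
# confusing authentication error at upload time.
if sac is None:
    raise RuntimeError("Set the 'accesstoken' environment variable to a Hugging Face token.")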


# Define data paths (modify as needed; here the test set reuses the validation directory)
train_data_dir = 'tt'
validation_data_dir = 'valid'
test_data_dir = 'valid'

# Set image dimensions (adjust if necessary)
img_width, img_height = 224, 224  # VGG16 expects these dimensions

# Data augmentation for improved generalization (optional)
train_datagen = ImageDataGenerator(
    rescale=1./255,  # Normalize pixel values
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)

validation_datagen = ImageDataGenerator(rescale=1./255)  # Only rescale for validation

# Load training and validation data
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,  # Adjust batch size based on GPU memory
    class_mode='binary'  # Two classes: cat or dog
)
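
# Optional: the upload tags near the end of the script mention class weights,
# which this script does not compute. A minimal sketch of how they could be
# derived from the generator's labels (assumes scikit-learn is installed), then
# passed to model.fit(class_weight=class_weight):
# import numpy as np
# from sklearn.utils.class_weight import compute_class_weight
# classes = np.unique(train_generator.classes)
# weights = compute_class_weight('balanced', classes=classes, y=train_generator.classes)
# class_weight = dict(zip(classes, weights))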

validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,
    class_mode='binary'
)

# Load pre-trained VGG16 model (without the top layers)
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))

# Freeze the base model layers (optional - experiment with unfreezing for fine-tuning)
base_model.trainable = False

# Add custom layers for classification on top of the pre-trained model
x = base_model.output
x = Flatten()(x)
predictions = Dense(1, activation='sigmoid')(x)  # Sigmoid for binary classification

# Create the final model
model = tf.keras.Model(inputs=base_model.input, outputs=predictions)

# Compile the model
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
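
# Optional fine-tuning sketch (a possible second phase after the initial fit):
# unfreeze only the top convolutional block and recompile with a much lower
# learning rate, so the pre-trained weights are nudged rather than overwritten.
# for layer in base_model.layers:
#     layer.trainable = layer.name.startswith('block5')
# model.compile(loss='binary_crossentropy',
#               optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),
#               metrics=['accuracy'])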

# Train the model
history = model.fit(
    train_generator,
    epochs=1,  # Adjust number of epochs based on dataset size and validation performance
    validation_data=validation_generator
)
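
# The returned History object records per-epoch metrics, e.g. for a quick check:
# print(history.history['accuracy'], history.history.get('val_accuracy'))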

# Evaluate the model on test data (optional)
test_generator = validation_datagen.flow_from_directory(
    test_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,
    class_mode='binary'
)

test_loss, test_acc = model.evaluate(test_generator)
print('Test accuracy:', test_acc)
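
# Optional single-image inference sketch (assumes a local file 'sample.jpg';
# which label maps to index 1 depends on the directory names, see
# train_generator.class_indices):
# from tensorflow.keras.preprocessing import image
# import numpy as np
# img = image.load_img('sample.jpg', target_size=(img_width, img_height))
# arr = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)
# prob = float(model.predict(arr)[0][0])  # sigmoid output in [0, 1]
# print(train_generator.class_indices, prob)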

# Alternative export formats (kept for reference):
# SavedModel directory, e.g. for TF Serving:
#   tf.saved_model.save(model, 'saved_model')
# Legacy HDF5 file:
#   model.save('cat_dog_classifier2.h5', save_format='h5')
#   new_model = tf.keras.models.load_model('cat_dog_classifier2.h5')

# Show the model architecture
model.summary()

# # Upload the model to your Hugging Face space repository (direct push, without
# # the save/reload round-trip used below)
# push_to_hub_keras(
#     model,
#     repo_id="okeowo1014/kerascatanddog.keras",
#     commit_message="cats and dog image classifier with transfer learning",
#     tags=["image-classifier", "data-augmentation", "class-weights"],
#     include_optimizer=True,
#     token=sac,
# )
# Save the model in the native Keras format (.keras)
model.save('cat_dog_classifierss.keras')
# tf.keras.models.save_model(model, 'cat_dog_classifierss.keras')
# Authenticate with Hugging Face (alternative to passing token= explicitly)
# from huggingface_hub import login
# login(token=sac)
# Recreate the exact same model purely from the file
new_model = tf.keras.models.load_model('cat_dog_classifierss.keras')
# Push the model to Hugging Face Hub under a descriptive repo name
model_repo = push_to_hub_keras(new_model, repo_id="okeowo1014/kerascatanddog", token=sac)

print(f"Model pushed to Hugging Face Hub: {model_repo}")