import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, GlobalAveragePooling1D
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from huggingface_hub import push_to_hub_keras
# Read the Hugging Face token from an environment variable
hf_token = os.getenv('accesstoken')
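# 'accesstoken' must be set in the environment before running,
# e.g. export accesstoken=<your Hugging Face write token>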
# Sample data for sentiment analysis
texts = ["I love deep learning!", "I hate Mondays.", "This movie is fantastic.", "The weather is terrible."]
labels = np.array([1, 0, 1, 0]) # 1 for positive sentiment, 0 for negative sentiment
# Tokenize the texts
tokenizer = Tokenizer(num_words=1000, oov_token='<OOV>')
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
padded_sequences = pad_sequences(sequences, maxlen=10, padding='post', truncating='post')
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(padded_sequences, labels, test_size=0.2, random_state=42)
# Build the model
model = Sequential([
    Embedding(input_dim=1000, output_dim=16),  # map token ids to 16-dim vectors
    GlobalAveragePooling1D(),                  # average the embeddings across the sequence
    Dense(16, activation='relu'),
    Dense(1, activation='sigmoid')             # single probability for binary sentiment
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(X_train, y_train, epochs=5, batch_size=2)
# Evaluate the model
loss, accuracy = model.evaluate(X_test, y_test)
print(f'Accuracy: {accuracy * 100:.2f}%')
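# Note: with only four samples and test_size=0.2, the test set is a single example,
# so this accuracy is a smoke test rather than a meaningful evaluation.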
# Save the model with the correct filepath extension
model.save('my_custom_text_classifier.keras')
# tf.saved_model.save(model, "my_custom_text_classifier")
# Later, load the model and make predictions
loaded_model = tf.keras.models.load_model('my_custom_text_classifier.keras')
# loaded_model = tf.keras.layers.TFSMLayer("my_custom_text_classifier", call_endpoint="serving_default")
# Example prediction
new_texts = ["I'm feeling great!", "This book is boring."]
sequences = tokenizer.texts_to_sequences(new_texts)
padded_sequences = pad_sequences(sequences, maxlen=10, padding='post', truncating='post')
predictions = loaded_model.predict(padded_sequences)
print(predictions)
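# The sigmoid outputs are probabilities; a common (illustrative) convention is to
# threshold them at 0.5 to obtain 0/1 sentiment labels.
predicted_labels = (predictions > 0.5).astype(int)
print(predicted_labels)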
push_to_hub_keras(
    model,
    repo_id="okeowo1014/kerascatanddog",
    commit_message="Initial commit",
    token=hf_token
)
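# Once pushed, the model can be pulled back from the Hub elsewhere; this sketch
# assumes the repo above exists and is accessible to the caller:
# from huggingface_hub import from_pretrained_keras
# hub_model = from_pretrained_keras("okeowo1014/kerascatanddog")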