import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification, pipeline
# Sample data for sentiment analysis
texts = ["I love deep learning!", "I hate Mondays.", "This movie is fantastic.", "The weather is terrible."]
labels = [1, 0, 1, 0]  # 1 for positive sentiment, 0 for negative sentiment
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = TFAutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)
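# Note: the classification head on top of BERT is newly initialized here
# (Transformers logs a warning to that effect), so predictions are only
# meaningful after fine-tuning.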
# Tokenize the texts
inputs = tokenizer(texts, padding=True, truncation=True, return_tensors='tf')
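# For BERT, the encoded batch holds 'input_ids', 'token_type_ids', and
# 'attention_mask' tensors, one row per text:
print({name: tensor.shape for name, tensor in inputs.items()})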
# Compile the model (the model outputs logits, hence from_logits=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
# Train the model (convert the BatchEncoding to a plain dict and the labels
# to a tensor so Keras accepts them)
model.fit(dict(inputs), tf.constant(labels), epochs=3, batch_size=2)
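# For larger corpora, streaming the encodings through tf.data is a common
# alternative (a sketch, equivalent for this toy batch):
# dataset = tf.data.Dataset.from_tensor_slices((dict(inputs), labels)).batch(2)
# model.fit(dataset, epochs=3)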
# Save the fine-tuned model to a local directory (pushing to the Hugging Face
# Model Hub would use model.push_to_hub instead)
model.save_pretrained("./my-text-classifier")
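# Saving the tokenizer alongside the model makes the directory self-contained,
# so both can later be loaded from the same path:
tokenizer.save_pretrained("./my-text-classifier")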
# Load the saved model from disk
loaded_model = TFAutoModelForSequenceClassification.from_pretrained("./my-text-classifier")
# Use the loaded model for prediction
classifier = pipeline('text-classification', model=loaded_model, tokenizer=tokenizer)
result = classifier("I'm feeling great!")
print(result)
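# The classifier returns a list of dicts, e.g. [{'label': 'LABEL_1', 'score': 0.98}]
# (score illustrative). Without an id2label mapping the generic names
# LABEL_0/LABEL_1 are reported; one option is to set readable names before saving:
# model.config.id2label = {0: "NEGATIVE", 1: "POSITIVE"}
# model.config.label2id = {"NEGATIVE": 0, "POSITIVE": 1}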