import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification, pipeline
# Sample data for sentiment analysis
texts = ["I love deep learning!", "I hate Mondays.", "This movie is fantastic.", "The weather is terrible."]
labels = [1, 0, 1, 0] # 1 for positive sentiment, 0 for negative sentiment
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = TFAutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)
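# Note: the sequence-classification head on top of bert-base-uncased is freshly initialized,
# so transformers will warn that it should be fine-tuned before being used for predictions.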
# Tokenize the texts
inputs = tokenizer(texts, padding=True, truncation=True, return_tensors='tf')
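# `inputs` is a BatchEncoding of tf tensors (input_ids, token_type_ids, attention_mask),
# padded to the longest sentence in the batch.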
# Compile the model (a small learning rate is standard when fine-tuning BERT)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# Train the model (convert the BatchEncoding to a plain dict and the labels to a tensor for Keras)
model.fit(dict(inputs), tf.constant(labels), epochs=3, batch_size=2)
# Save the fine-tuned model and its tokenizer to a local directory
model.save_pretrained("./my-text-classifier")
tokenizer.save_pretrained("./my-text-classifier")
# Load the saved model from disk
loaded_model = TFAutoModelForSequenceClassification.from_pretrained("./my-text-classifier")
# Use the loaded model for prediction
classifier = pipeline('text-classification', model=loaded_model, tokenizer=tokenizer)
result = classifier("I'm feeling great!")
print(result)
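# The pipeline returns a list of dicts, e.g. [{'label': 'LABEL_1', 'score': ...}];
# with only four training sentences the exact label and score are illustrative, not meaningful.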