from transformers import AutoTokenizer
from huggingface_hub import from_pretrained_keras
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Load the Keras model from the Hugging Face Model Hub
model = from_pretrained_keras("okeowo1014/kerascatanddog")

# Alternative: load it as a transformers TF model instead
# from transformers import TFAutoModel
# model = TFAutoModel.from_pretrained('okeowo1014/catsandogs')

# Now you can use the loaded model for inference or fine-tuning.
# Example prediction
new_texts = ["I'm feeling great!", "This book is boring."]
tokenizer = AutoTokenizer.from_pretrained('okeowo1014/kerascatanddog')
# Tokenize to integer id sequences, then pad/truncate to a fixed length of 10
sequences = tokenizer(new_texts)["input_ids"]
padded_sequences = pad_sequences(sequences, maxlen=10, padding='post', truncating='post')
predictions = model.predict(padded_sequences)
print(predictions)
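
# Since from_pretrained_keras returns a regular Keras model, fine-tuning can
# follow the usual compile/fit workflow. This is a minimal sketch: the training
# texts, labels, loss, and output shape below are assumptions (a binary
# classifier with a single sigmoid output), not details confirmed by the repo.
import numpy as np

train_texts = ["What a lovely day!", "I did not enjoy that at all."]  # placeholder data
train_labels = np.array([1, 0])  # placeholder binary labels

# Reuse the same tokenization and padding as the prediction example above
encoded = tokenizer(train_texts)["input_ids"]
x_train = pad_sequences(encoded, maxlen=10, padding='post', truncating='post')

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, train_labels, epochs=3, batch_size=2)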