import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the tokenizer and the fine-tuned RoBERTa classifier from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("nebiyu29/fintunned-v2-roberta_GA")
model = AutoModelForSequenceClassification.from_pretrained("nebiyu29/fintunned-v2-roberta_GA")

def classify_text(text):
    """
    Tokenize the input text, run it through the model, and return the predicted label.
    """
    inputs = tokenizer(text, padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():  # inference only; no gradient tracking needed
        outputs = model(**inputs)
    logits = outputs.logits
    predictions = torch.argmax(logits, dim=-1)
    return model.config.id2label[predictions.item()]
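
# Optional sanity check: call classify_text directly before launching the UI.
# The sample sentence and the "positive" label shown are only illustrative;
# the actual label names come from model.config.id2label.
print(classify_text("I really enjoyed this movie!"))  # e.g. "positive"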

# Build a simple Gradio interface around the classifier
interface = gr.Interface(
    fn=classify_text,
    inputs="text",
    outputs="text",
    title="Text Classification Demo",
    description="Enter some text, and the model will classify it as positive, negative, or neutral.",
)

interface.launch()