from transformers import AutoModelForSequenceClassification, AutoTokenizer
import gradio as gr
import torch

model_name = "nebiyu29/hate_classifier"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# put the model in evaluation mode so training-only layers (e.g. dropout) are disabled
model.eval()


# accepts text as input and returns the score for each label
def model_classifier(text):
    if len(text) == 0:
        return f"the input text is {text}"
    # this is where the encoding happens; return PyTorch tensors for the model
    encoded_input = tokenizer(text, return_tensors="pt", truncation=True)
    # disable gradient tracking for inference
    with torch.no_grad():
        logits = model(**encoded_input).logits
    # convert logits to probabilities and pair each score with its label
    probs = torch.softmax(logits, dim=-1)[0]
    labels = [model.config.id2label[i] for i in range(probs.shape[-1])]
    return "\n".join(f"{label}: {prob:.4f}" for label, prob in zip(labels, probs.tolist()))


# a simple Gradio interface: text in, label scores out
demo = gr.Interface(
    fn=model_classifier,
    inputs=gr.Textbox(lines=5, label="Enter your text"),
    outputs=gr.Textbox(lines=5, label="Label scores"),
    title="Hate Classifier Demo App",
)

demo.launch()