File size: 1,962 Bytes
8ec0711
3711811
eeac5cc
3711811
ce5c0eb
a8c9879
 
8ec0711
959ecc7
a8c9879
8ec0711
2dab15b
 
 
 
8ec0711
 
 
011a88c
8439e92
 
011a88c
 
1994400
 
8439e92
 
3711811
 
 
ce5c0eb
a8c9879
 
 
 
ce5c0eb
8c2b131
 
 
c96e178
8c2b131
a8c9879
 
 
678e80d
ce5c0eb
a8c9879
 
082d447
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
from transformers import AutoModelForSequenceClassification,AutoTokenizer
from torch.nn.functional import softmax
import torch
import gradio as gr
import json

# Hugging Face Hub repo id of the fine-tuned hate-speech classifier.
model_name="nebiyu29/hate_classifier"
# Load (from cache or download) the tokenizer and sequence-classification
# head that match the checkpoint above.
tokenizer=AutoTokenizer.from_pretrained(model_name)
model=AutoModelForSequenceClassification.from_pretrained(model_name)

# Inference-only usage: gradients are disabled inside the classifier below
# (model.eval() + torch.no_grad()).

def model_classifier(text):
    """Classify `text` with the hate-speech model.

    Args:
        text: the raw input string to classify.

    Returns:
        A JSON string encoding a list of [label, probability] pairs
        (one per model label), or a plain message when `text` is empty.
    """
    # Guard clause: nothing to classify, so skip all model work.
    if len(text) == 0:
        return f"the input text is {text}"

    model.eval()  # inference mode: disable dropout etc.
    with torch.no_grad():  # no gradients needed for inference
        # return_tensors="pt" already yields batched (1, seq_len) tensors,
        # so no extra torch.tensor(...)/unsqueeze wrapping is needed.
        # (The original re-wrapped the attention mask into shape
        # (1, 1, seq_len), corrupting it before the forward pass.)
        encoded_input = tokenizer(
            text,
            return_tensors="pt",
            truncation=True,
            padding=True,
            max_length=512,
        )
        logits = model(**encoded_input).logits

        # Normalize logits into a probability distribution over labels.
        probs_label = softmax(logits, dim=-1)
        id2label = model.config.id2label
        return_probs = {id2label[i]: p.item() for i, p in enumerate(probs_label[0])}
        return json.dumps(list(return_probs.items()))




# Output component for the Gradio interface.
# A Dataframe variant was considered but left disabled:
#output_format=gr.Dataframe(row_count=(3,"dynamic"),col_count=(2,"dynamic"),label="label probabilities",headers=["label","probabilities"])

# The classifier returns a JSON string, so a plain textbox is used to show it.
output_format=gr.Textbox(label="label probabilities")           

# Gradio UI: free-text input -> JSON-encoded label probabilities from
# model_classifier.
demo=gr.Interface(
    fn=model_classifier,
    # Fixed label typo: "Enter you text" -> "Enter your text".
    inputs=gr.Textbox(lines=5,label="Enter your text"),
    outputs=output_format,
    title="Hate Classifier Demo App"
)
# share=True additionally serves the app over a temporary public
# *.gradio.live URL.
demo.launch(share=True)