nebiyu29 commited on
Commit
3711811
1 Parent(s): 082d447

the model classifier now returns a probability distribution over all the labels

Browse files
Files changed (1) hide show
  1. app.py +7 -3
app.py CHANGED
@@ -1,6 +1,7 @@
1
  from transformers import AutoModelForSequenceClassification,AutoTokenizer
2
- import gradio as gr
3
  import torch
 
4
 
5
  model_name="nebiyu29/hate_classifier"
6
  tokenizer=AutoTokenizer.from_pretrained(model_name)
@@ -15,8 +16,11 @@ def model_classifier(text):
15
  return f"the input text is {text}"
16
  else:
17
  encoded_input=tokenizer(text) #this is where the encoding happens
18
- scores=model(encoded)[0] #this is the score for each value
19
- return scores
 
 
 
20
 
21
 
22
 
 
1
  from transformers import AutoModelForSequenceClassification,AutoTokenizer
2
+ from torch.nn.functional import softmax
3
  import torch
4
+ import gradio as gr
5
 
6
  model_name="nebiyu29/hate_classifier"
7
  tokenizer=AutoTokenizer.from_pretrained(model_name)
 
16
  return f"the input text is {text}"
17
  else:
18
  encoded_input=tokenizer(text) #this is where the encoding happens
19
+ logits=model(**encoded) #this is the logits of the labels
20
+ probs_label=softmax(logits,dim=-1) #normalizing the logits into a probability distribution
21
+ id2label=model.config.id2label
22
+ return_probs={id2label[i]:probs.item() for i,probs in enumerate(probs_label[0])}
23
+ return return_probs
24
 
25
 
26