import streamlit as st
import numpy as np
from transformers import BertTokenizer, BertForSequenceClassification
import torch

# Cache the tokenizer and model so they are only loaded once per session.
# (On newer Streamlit versions, st.cache_resource replaces st.cache.)
@st.cache(allow_output_mutation=True)
def get_model():
    # The rest of this script works with PyTorch tensors (return_tensors='pt',
    # .detach()), so the PyTorch model class is used here instead of the TF
    # variant. If the checkpoint only ships TensorFlow weights, pass from_tf=True.
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    model = BertForSequenceClassification.from_pretrained("lfernandopg/Proyecto-Transformers")
    return tokenizer, model


tokenizer, model = get_model()

user_input = st.text_area('Enter Text to Analyze')
button = st.button("Analyze")

# Mapping from the model's output class index to a profession label.
d = {
    0: 'Accountant',
    1: 'Actuary',
    2: 'Biologist',
    3: 'Chemist',
    4: 'Civil engineer',
    5: 'Computer programmer',
    6: 'Data scientist',
    7: 'Database administrator',
    8: 'Dentist',
    9: 'Economist',
    10: 'Environmental engineer',
    11: 'Financial analyst',
    12: 'IT manager',
    13: 'Mathematician',
    14: 'Mechanical engineer',
    15: 'Physician assistant',
    16: 'Psychologist',
    17: 'Statistician',
    18: 'Systems analyst',
    19: 'Technical writer',
    20: 'Web developer'
}


if user_input and button:
    # Tokenize the input text and run it through the fine-tuned classifier.
    test_sample = tokenizer([user_input], padding=True, truncation=True, max_length=512, return_tensors='pt')
    output = model(**test_sample)
    st.write("Logits: ", output.logits)
    # The predicted class is the index with the highest logit.
    y_pred = np.argmax(output.logits.detach().numpy(), axis=1)
    st.write("Prediction: ", d[y_pred[0]])