added app
app.py
ADDED
@@ -0,0 +1,42 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../app.ipynb.

# %% auto 0
__all__ = ['tokenizer', 'device', 'model', 'CLASS_LABELS', 'sentence', 'label', 'examples', 'intf', 'classify_sentiment']

# %% ../app.ipynb 2
import gradio as gr
import torch
from layer import Model

# %% ../app.ipynb 3
from transformers import BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased')

# %% ../app.ipynb 4
device = "cuda" if torch.cuda.is_available() else "cpu"

model = torch.load('./model.pt', map_location=torch.device('cpu')).to(device)
model.eval()

# %% ../app.ipynb 5
CLASS_LABELS = ['Negative', 'Positive']

# %% ../app.ipynb 6
def classify_sentiment(sentence):
    tokens = tokenizer(sentence)
    # the model expects the true token count as the sequence length
    pred = model(torch.tensor([tokens['input_ids']]).to(device), [len(tokens['input_ids'])]).item()
    return dict(zip(CLASS_LABELS, [1 - pred, pred]))


# %% ../app.ipynb 7
sentence = gr.inputs.Textbox()
label = gr.outputs.Label()
examples = ['Movie is the best!', 'Worst movie ever.']

intf = gr.Interface(fn=classify_sentiment,
                    inputs=sentence,
                    outputs=label,
                    title='Sentiment analysis',
                    examples=examples)
intf.launch(inline=False)
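Note: app.py uses the legacy gr.inputs / gr.outputs namespaces, which only exist in older Gradio releases; requirements.txt does not pin gradio, so the installed version comes from the Space runtime. A minimal sketch of the equivalent interface for a newer Gradio version where those namespaces were removed (an assumption about the runtime, not part of this commit):

    # hypothetical port to a Gradio version without gr.inputs / gr.outputs
    intf = gr.Interface(
        fn=classify_sentiment,
        inputs=gr.Textbox(label='Sentence'),
        outputs=gr.Label(num_top_classes=2),
        title='Sentiment analysis',
        examples=['Movie is the best!', 'Worst movie ever.'],
    )
    intf.launch()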
layer.py
ADDED
@@ -0,0 +1,129 @@
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence


class DynamicLayerConfig:
    """
    Arguments for nn.Embedding layer:
        vocab_size - size of the vocabulary (number of unique tokens, depends on tokenizer configuration)
        embed_size - the number of features to represent one token
    Arguments for LSTM layer:
        hidden_size - the number of features in the hidden state
        proj_size - if > 0, will use LSTM with projections of corresponding size (instead of embed_size)
        num_layers - number of recurrent layers
        dropout - if non-zero, introduces a Dropout layer on the outputs of each LSTM layer except the last layer,
                  with dropout probability equal to dropout
        bidirectional - if True, becomes a bidirectional LSTM
    """
    def __init__(
        self,
        vocab_size: int,
        embed_size: int,
        hidden_size: int,
        proj_size: int = 0,
        num_layers: int = 1,
        dropout: float = 0.,
        bidirectional: bool = False
    ):
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.proj_size = proj_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.bidirectional = bidirectional


class DynamicLayerAttentionBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.proj_size = config.proj_size if config.proj_size != 0 else config.embed_size
        if config.bidirectional:
            self.hidden_size *= 2
            self.proj_size *= 2

        self.W_Q = nn.Linear(self.hidden_size, self.proj_size, bias=False)
        self.W_K = nn.Linear(self.hidden_size, self.proj_size, bias=False)
        self.W_V = nn.Linear(self.hidden_size, self.proj_size, bias=False)
    def forward(self, rnn_output):

        # project the LSTM outputs into query, key and value spaces
        Q = self.W_Q(rnn_output)
        K = self.W_K(rnn_output)
        V = self.W_V(rnn_output)

        # scaled dot-product self-attention over the time dimension
        d_k = K.size(-1)
        scores = torch.matmul(Q, K.transpose(1, 2)) / np.sqrt(d_k)
        alpha_n = F.softmax(scores, dim=-1)
        context = torch.matmul(alpha_n, V)

        # sum over the sequence dimension to obtain a fixed-size representation
        output = context.sum(1)

        return output, alpha_n


class DynamicLayer(nn.Module):
    def __init__(self, config: DynamicLayerConfig):
        super().__init__()

        self.config = config

        self.wte = nn.Embedding(self.config.vocab_size, self.config.embed_size)
        self.lstm = nn.LSTM(
            input_size=self.config.embed_size,
            hidden_size=self.config.hidden_size,
            proj_size=self.config.proj_size,
            num_layers=self.config.num_layers,
            dropout=self.config.dropout,
            bidirectional=self.config.bidirectional,
            batch_first=True,
        )
        self.attention = DynamicLayerAttentionBlock(self.config)

    """
    Arguments:
        input_ids - tensor of shape (batch_size, sequence_length). All values are in the interval [0, vocab_size).
            These indices are processed through nn.Embedding to obtain inputs_embeds of shape
            (batch_size, sequence_length, embed_size).
        input_lens - lengths of the unpadded sequences, used to pack the batch before it is fed to the LSTM.
    """
    def forward(
        self,
        input_ids: torch.LongTensor,
        input_lens: torch.LongTensor,
    ) -> torch.FloatTensor:

        input_embeds = self.wte(input_ids)

        input_packed = pack_padded_sequence(input_embeds, input_lens, batch_first=True, enforce_sorted=False)

        lstm_output, (hn, cn) = self.lstm(input_packed)

        output_padded, output_lengths = pad_packed_sequence(lstm_output, batch_first=True)

        output, _ = self.attention(output_padded)
        return output


class Model(nn.Module):
    def __init__(self, config: DynamicLayerConfig):
        super().__init__()
        self.proj_size = config.proj_size if config.proj_size != 0 else config.embed_size
        if config.bidirectional:
            self.proj_size *= 2
        self.dynamic_layer = DynamicLayer(config)
        self.fc = nn.Linear(self.proj_size, 1)

    def forward(
        self,
        input_ids: torch.LongTensor,
        input_lens: torch.LongTensor,
    ) -> torch.FloatTensor:

        fixed_sized = self.dynamic_layer(input_ids, input_lens)
        return torch.sigmoid(self.fc(fixed_sized))
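For reference, a minimal usage sketch of the classes above. The hyperparameter values are illustrative assumptions; the configuration actually used to train model.pt is not part of this commit:

    import torch
    from layer import DynamicLayerConfig, Model

    # illustrative values only; vocab_size is assumed to match the bert-base-cased tokenizer used in app.py
    config = DynamicLayerConfig(vocab_size=28996, embed_size=128, hidden_size=256,
                                num_layers=2, dropout=0.1, bidirectional=True)
    model = Model(config)

    # a padded batch of two sequences; input_lens holds the true lengths used by pack_padded_sequence
    input_ids = torch.randint(0, config.vocab_size, (2, 10))
    input_lens = torch.tensor([10, 7])
    probs = model(input_ids, input_lens)  # shape (2, 1); app.py interprets this as the probability of 'Positive'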
model.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f6c002c780dd99e67b019c9f68eb1f12c9801bbeb0393ae8d58c77f54ed6e6ae
size 16041171
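model.pt is stored through Git LFS, so the diff only shows the pointer file, not the roughly 16 MB of weights. Because app.py restores it with torch.load(...) and then calls .to(device) and .eval() on the result, the checkpoint presumably contains a whole pickled Model instance rather than a state_dict, which is why layer.py has to ship alongside app.py (unpickling needs to import layer.Model). A sketch of how such a checkpoint would be produced, assuming that interpretation and an illustrative config:

    import torch
    from layer import DynamicLayerConfig, Model

    model = Model(DynamicLayerConfig(vocab_size=28996, embed_size=128, hidden_size=256))  # illustrative values
    # ... training loop ...
    torch.save(model, 'model.pt')  # pickles the full module; layer.Model must be importable at load time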
requirements.txt
ADDED
@@ -0,0 +1,2 @@
torch
transformers