arubenruben committed
Commit 772fc4a
1 Parent(s): 9941bd6

commit files to HF hub

Files changed (4):
  1. config.json +1 -1
  2. srl.py +3 -11
  3. tokenizer.json +1 -1
  4. tokenizer_config.json +1 -1
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "PORTULAN/albertina-100m-portuguese-ptpt-encoder",
+  "_name_or_path": "liaad/propbank_br_srl_albertina_100m_portuguese_ptpt_encoder",
   "architectures": [
     "DebertaForTokenClassification"
   ],
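Note: the only change here repoints `_name_or_path` from the base PORTULAN encoder to the fine-tuned SRL checkpoint itself. A minimal loading sketch, assuming the repo id matches the new `_name_or_path` value:

# Minimal sketch: load the fine-tuned checkpoint by the id now stored in
# _name_or_path (assumed to be the hub repo id of this model); the config's
# architectures entry says it is a DebertaForTokenClassification head.
from transformers import AutoModelForTokenClassification, AutoTokenizer

model_id = "liaad/propbank_br_srl_albertina_100m_portuguese_ptpt_encoder"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForTokenClassification.from_pretrained(model_id)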
srl.py CHANGED
@@ -38,17 +38,7 @@ class SRLPipeline(Pipeline):
                 current_word = word_id
                 label = -100 if word_id is None else labels[word_id]
                 new_labels.append(label)
-            elif word_id is None:
-                # Special token
-                new_labels.append(-100)
             else:
-                """
-                # Same word as previous token
-                label = labels[word_id]
-                # If the label is B-XXX we change it to I-XXX
-                if label % 2 == 1:
-                    label += 1
-                """
                 new_labels.append(-100)
 
         results.append(new_labels)
@@ -121,8 +111,10 @@ class SRLPipeline(Pipeline):
             if label != -100:
                 true_predictions.append(self.label_names[prediction])
 
+        doc = self.nlp(self.text.strip())
+
         outputs.append({
-            "tokens": self.text.split(),
+            "tokens": [token.text for token in doc],
             "predictions": true_predictions,
             "verb": self.verbs[i]
         })
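Two things change here: the label-alignment loop drops a redundant `elif` branch (it appended the same -100 as the final `else`) along with a commented-out B-/I- relabeling block, and the output "tokens" now come from `self.nlp` (presumably a spaCy-style pipeline the class holds) instead of a bare `self.text.split()`, so the reported tokens match the tokenization the predictions were made over. A self-contained sketch of the rule the simplified loop computes, with a hypothetical helper name `align_labels`:

# Sketch of the simplified alignment rule above: the first subtoken of each
# word keeps that word's label; special tokens (word_id is None) and
# continuation subtokens are masked with -100.
def align_labels(word_ids, labels):
    new_labels, current_word = [], None
    for word_id in word_ids:
        if word_id != current_word:  # first subtoken of a new word
            current_word = word_id
            new_labels.append(-100 if word_id is None else labels[word_id])
        else:  # continuation subtoken of the same word (or repeated None)
            new_labels.append(-100)
    return new_labels

# e.g. word_ids [None, 0, 0, 1, None] with labels [3, 7]
# -> [-100, 3, -100, 7, -100]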
tokenizer.json CHANGED
@@ -52,7 +52,7 @@
   "normalizer": null,
   "pre_tokenizer": {
     "type": "ByteLevel",
-    "add_prefix_space": false,
+    "add_prefix_space": true,
     "trim_offsets": true,
     "use_regex": true
   },
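Flipping `add_prefix_space` to true matters for this pipeline: ByteLevel (GPT-2-style) pre-tokenizers encode a leading space into word-initial tokens, and the fast tokenizers in transformers refuse pre-tokenized input (`is_split_into_words=True`) unless the flag is set. A short sketch, assuming the hub repo id used above:

# With add_prefix_space=true in tokenizer.json, pre-split words can be fed
# directly, and word_ids() recovers the subtoken-to-word mapping the SRL
# pipeline relies on. (Repo id assumed to match _name_or_path above.)
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "liaad/propbank_br_srl_albertina_100m_portuguese_ptpt_encoder"
)
enc = tokenizer(["O", "gato", "dorme", "."], is_split_into_words=True)
print(enc.word_ids())  # e.g. [None, 0, 1, 2, 3, None]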
tokenizer_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "add_bos_token": false,
-  "add_prefix_space": false,
+  "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
       "content": "[PAD]",