Update README.md

README.md
---
tags:
- BERT
- Text Classification
language: ar
license: mit
datasets:
- ACE2005
---

# Arabic Relation Extraction Model

- Relation extraction model based on [GigaBERTv4](https://huggingface.co/lanwuwei/GigaBERT-v4-Arabic-and-English).
- ACE2005 training data: Arabic
- [Relation tags](https://www.ldc.upenn.edu/sites/www.ldc.upenn.edu/files/arabic-relations-guidelines-v6.5.pdf) include: Physical, Part-whole, Personal-Social, ORG-Affiliation, Agent-Artifact, Gen-Affiliation (see the snippet below for the label set stored in the checkpoint)
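
The relation classifier's label inventory can be read directly off the checkpoint config. A minimal sketch (the exact strings stored in `id2label` are an assumption about how the checkpoint was exported):

```python
from transformers import AutoModelForSequenceClassification

re_model = AutoModelForSequenceClassification.from_pretrained("ychenNLP/arabic-relation-extraction-ace-gigabert")
# id2label maps class indices to label strings, e.g. the ACE relation
# types listed above plus an 'O' class for "no relation" (assumed naming).
print(re_model.config.id2label)
```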

## Hyperparameters
- learning_rate=2e-5
- num_train_epochs=10
- weight_decay=0.01
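
These values map directly onto a `transformers` fine-tuning setup. A minimal sketch (only the three hyperparameters above come from this card; the output path and the rest of the trainer wiring are illustrative assumptions):

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="arabic-re-gigabert",  # assumed path, not from the card
    learning_rate=2e-5,               # from the card
    num_train_epochs=10,              # from the card
    weight_decay=0.01,                # from the card
)
```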

## ACE2005 Evaluation results (F1)

| Language | F1   |
|:--------:|:----:|
| Arabic   | 89.4 |
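
For reference, relation F1 is conventionally computed over predicted vs. gold labels for candidate entity pairs, ignoring the 'O' (no relation) class. A minimal sketch with scikit-learn (the label lists are placeholders; the exact scoring script behind the number above is not specified here):

```python
from sklearn.metrics import f1_score

# Placeholder gold/predicted relation labels, for illustration only.
gold = ["Physical", "O", "Part-whole", "O"]
pred = ["Physical", "O", "O", "O"]

# Micro-average over the real relation classes, excluding 'O'.
labels = sorted(l for l in set(gold) if l != "O")
print(f1_score(gold, pred, labels=labels, average="micro"))
```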

## How to use
```python
>>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer, AutoModelForSequenceClassification

>>> ner_model = AutoModelForTokenClassification.from_pretrained("ychenNLP/arabic-ner-ace-gigabert")
>>> ner_tokenizer = AutoTokenizer.from_pretrained("ychenNLP/arabic-ner-ace-gigabert")
>>> ner_pip = pipeline("ner", model=ner_model, tokenizer=ner_tokenizer, grouped_entities=True)

>>> re_model = AutoModelForSequenceClassification.from_pretrained("ychenNLP/arabic-relation-extraction-ace-gigabert")
>>> re_tokenizer = AutoTokenizer.from_pretrained("ychenNLP/arabic-relation-extraction-ace-gigabert")
>>> re_pip = pipeline("text-classification", model=re_model, tokenizer=re_tokenizer)

def process_ner_output(entity_mention, input):
    """Build one relation-classification input per entity pair by wrapping
    both mentions in <TYPE>...</TYPE> markers inside the original sentence."""
    re_input = []
    for idx1 in range(len(entity_mention) - 1):
        for idx2 in range(idx1 + 1, len(entity_mention)):
            ent_1 = entity_mention[idx1]
            ent_2 = entity_mention[idx2]

            ent_1_type = ent_1['entity_group']
            ent_2_type = ent_2['entity_group']
            ent_1_s = ent_1['start']
            ent_1_e = ent_1['end']
            ent_2_s = ent_2['start']
            ent_2_e = ent_2['end']
            new_re_input = ""
            # Walk the sentence character by character, emitting an opening
            # tag at each mention start and a closing tag at each mention end.
            for c_idx, c in enumerate(input):
                if c_idx == ent_1_s:
                    new_re_input += "<{}>".format(ent_1_type)
                elif c_idx == ent_1_e:
                    new_re_input += "</{}>".format(ent_1_type)
                elif c_idx == ent_2_s:
                    new_re_input += "<{}>".format(ent_2_type)
                elif c_idx == ent_2_e:
                    new_re_input += "</{}>".format(ent_2_type)
                new_re_input += c
            re_input.append({"re_input": new_re_input, "arg1": ent_1, "arg2": ent_2, "input": input})
    return re_input

def post_process_re_output(re_output, re_input, ner_output):
    """Keep only pairs the classifier labeled with a real relation (not 'O')
    and bundle them with the entity mentions and the original sentence."""
    final_output = []
    for idx, out in enumerate(re_output):
        if out["label"] != 'O':
            tmp = re_input[idx]
            tmp['relation_type'] = out
            tmp.pop('re_input', None)
            final_output.append(tmp)

    template = {"input": re_input[0]["input"],  # every pair carries the same sentence
                "entity": ner_output,
                "relation": final_output}

    return template

>>> input = "Hugging face is a French company in New york."
>>> output = ner_pip(input)

>>> re_input = process_ner_output(output, input)

>>> re_output = []
>>> for idx in range(len(re_input)):
...     tmp_re_output = re_pip(re_input[idx]["re_input"])
...     re_output.append(tmp_re_output[0])  # the pipeline returns a one-element list per string

>>> re_ner_output = post_process_re_output(re_output, re_input, output)
>>> print("Sentence: ", re_ner_output["input"])
>>> print("Entity: ", re_ner_output["entity"])
>>> print("Relation: ", re_ner_output["relation"])
```
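
Since GigaBERT is bilingual, the same two-step pipeline applies to Arabic input unchanged. An illustrative call, reusing the objects defined above (the sentence is our own example; outputs depend on the models):

```python
>>> arabic_input = "تقع الشركة في نيويورك."  # "The company is located in New York."
>>> arabic_entities = ner_pip(arabic_input)
>>> arabic_pairs = process_ner_output(arabic_entities, arabic_input)
```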

### BibTeX entry and citation info

```bibtex
@inproceedings{lan2020gigabert,
  author    = {Lan, Wuwei and Chen, Yang and Xu, Wei and Ritter, Alan},
  title     = {Giga{BERT}: Zero-shot Transfer Learning from {E}nglish to {A}rabic},
  booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
  year      = {2020}
}
```