path to file changed

legalglue.py CHANGED (+27 -19)

@@ -17,6 +17,7 @@
 import csv
 import json
 import textwrap
+import os
 
 import datasets
 
@@ -91,11 +92,11 @@ class LegalGlueConfig(datasets.BuilderConfig):
 
 
 
-class LexGLUE(datasets.GeneratorBasedBuilder):
+class LegalGLUE(datasets.GeneratorBasedBuilder):
     """LegalGLUE: A Benchmark Dataset for Legal Language Understanding"""
 
     BUILDER_CONFIGS = [
-        …
+        LegalGlueConfig(
             name="german_ler",
             description=textwrap.dedent(
                 """\
@@ -150,15 +151,16 @@ class LexGLUE(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        archive = dl_manager.download(self.config.data_url)
+        #archive = dl_manager.download(self.config.data_url)
         if self.config_name == "german_ler":
+            archive = dl_manager.download_and_extract(self.config.data_url)
             return datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filepath": self.config.data_files,
                     "split": "train",
-                    "…
+                    "archive": archive,
                 },
             )
         else:
@@ -192,23 +194,29 @@ class LexGLUE(datasets.GeneratorBasedBuilder):
             ),
         ]
 
-    def _generate_examples(self, filepath, split, …
+    def _generate_examples(self, filepath, split, archive):
         if self.config_name == "german_ler":
+
             texts, labels = [], []
-            for …
-                …
-            for …
-                …
+            for file in filepath:
+                path = os.path.join(archive, file)
+                with open(path, encoding="utf-8") as f:
+                    tokens = []
+                    tags = []
+                    for line in f:
+                        if line == "" or line == "\n":
+                            if tokens:
+                                texts.append(tokens)
+                                labels.append(tags)
+                                tokens = []
+                                tags = []
+                        else:
+                            token, tag = line.split()
+                            tokens.append(token)
+                            tags.append(tag.rstrip())
+                    texts.append(tokens)
+                    labels.append(tags)
+
             for i in enumerate(texts):
                 tokens = text[i]
                 ner_tags = labels[i]
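In the new _split_generators, dl_manager.download() is commented out in favour of dl_manager.download_and_extract(). The difference is that download() returns the path of the archive file itself, whose members are then only reachable through dl_manager.iter_archive(), while download_and_extract() also unpacks the archive and returns the path of the extracted directory; that is what lets _generate_examples join that path with each file name and open the result directly. A minimal sketch of the pattern, with split_and_read, data_url, and data_files as stand-ins for the config values rather than names from the script:

import os

def split_and_read(dl_manager, data_url, data_files):
    # download_and_extract() fetches the archive at data_url, unpacks it,
    # and returns the local path of the extracted directory.
    archive_dir = dl_manager.download_and_extract(data_url)
    # Every member named in data_files is now a plain file on disk,
    # so open() works without iter_archive().
    for name in data_files:
        with open(os.path.join(archive_dir, name), encoding="utf-8") as f:
            yield name, f.read()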
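The block added to _generate_examples implements a CoNLL-style reader: each line carries a whitespace-separated token and tag pair, and a blank line closes the current sentence. A standalone sketch of the same logic, with illustrative names that do not appear in the script:

def parse_conll(lines):
    """Split "token tag" lines into per-sentence token and tag lists."""
    sentences, tag_seqs = [], []
    tokens, tags = [], []
    for line in lines:
        if line.strip() == "":      # blank line ends the current sentence
            if tokens:
                sentences.append(tokens)
                tag_seqs.append(tags)
                tokens, tags = [], []
        else:
            token, tag = line.split()
            tokens.append(token)
            tags.append(tag)
    if tokens:                      # flush a final sentence that has no trailing blank line
        sentences.append(tokens)
        tag_seqs.append(tags)
    return sentences, tag_seqs

# parse_conll(["Bundes B-ORG", "gericht I-ORG", "", "Urteil O"])
# -> ([["Bundes", "gericht"], ["Urteil"]], [["B-ORG", "I-ORG"], ["O"]])

Note that the committed version appends tokens and tags unconditionally after the loop, so a file ending in a blank line would emit one empty example; the sketch guards the final flush with if tokens:.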
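The context lines at the bottom of the last hunk are unchanged by this commit but look broken: for i in enumerate(texts) binds i to an (index, sentence) tuple, and text[i] refers to a name that is never defined (the list is called texts). A corrected sketch of that tail; the yielded feature keys are assumptions, not taken from the script:

for idx, tokens in enumerate(texts):   # enumerate() yields (index, value) pairs
    ner_tags = labels[idx]
    # "tokens" and "ner_tags" as feature names are assumed here
    yield idx, {"tokens": tokens, "ner_tags": ner_tags}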