feat(data): change bmes to parquet
Browse files
data/{test.char.bmes → test.parquet}
RENAMED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:516e640ed45b46302dbe2adf133596e52b764f5de05d8a9da603b2b4db7ce202
|
3 |
+
size 47490
|
data/{train.char.bmes → train.parquet}
RENAMED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c94ad632fd3f9b1c32a42c55d443cba30cd2ada3d9940684326eb1ea1abfceea
|
3 |
+
size 214937
|
data/{validation.char.bmes → validation.parquet}
RENAMED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:2f95b23904d28860c1703fd8a3649d64a320011e5f4076a078c9680f4414cf95
|
3 |
+
size 46872
|
weibo.py
CHANGED
@@ -1,5 +1,6 @@
|
|
1 |
import datasets
|
2 |
from datasets.download.download_manager import DownloadManager
|
|
|
3 |
|
4 |
_DESCRIPTION = """\
|
5 |
The Weibo NER dataset is a Chinese Named Entity Recognition dataset
|
@@ -25,9 +26,9 @@ _CITATION = """\
|
|
25 |
|
26 |
_URL = "https://huggingface.co/datasets/minskiter/weibo/resolve/main/"
|
27 |
_URLS = {
|
28 |
-
"train": _URL + "data/train.
|
29 |
-
"validation": _URL + "data/validation.
|
30 |
-
"test": _URL + "data/test.
|
31 |
}
|
32 |
|
33 |
class WeiboNamedEntities(datasets.GeneratorBasedBuilder):
|
@@ -103,26 +104,13 @@ class WeiboNamedEntities(datasets.GeneratorBasedBuilder):
|
|
103 |
),
|
104 |
]
|
105 |
|
106 |
-
def _default_example(self):
|
107 |
-
return {"text": [], "labels": []}
|
108 |
-
|
109 |
def _generate_examples(self, filepath):
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
continue
|
120 |
-
char, label = line.split(" ")
|
121 |
-
char = char.strip()
|
122 |
-
if char == "":
|
123 |
-
char = " "
|
124 |
-
label = label.strip()
|
125 |
-
example["text"].append(char)
|
126 |
-
example["labels"].append(label)
|
127 |
-
if len(example["text"]) > 0:
|
128 |
-
yield _id, example
|
|
|
1 |
import datasets
|
2 |
from datasets.download.download_manager import DownloadManager
|
3 |
+
import pyarrow.parquet as pq
|
4 |
|
5 |
_DESCRIPTION = """\
|
6 |
The Weibo NER dataset is a Chinese Named Entity Recognition dataset
|
|
|
26 |
|
27 |
_URL = "https://huggingface.co/datasets/minskiter/weibo/resolve/main/"
|
28 |
_URLS = {
|
29 |
+
"train": _URL + "data/train.parquet",
|
30 |
+
"validation": _URL + "data/validation.parquet",
|
31 |
+
"test": _URL + "data/test.parquet",
|
32 |
}
|
33 |
|
34 |
class WeiboNamedEntities(datasets.GeneratorBasedBuilder):
|
|
|
104 |
),
|
105 |
]
|
106 |
|
|
|
|
|
|
|
107 |
def _generate_examples(self, filepath):
    """Stream (index, example) pairs from a Parquet data file.

    Rows are read in batches of 64 so the whole file is never
    materialized at once; each row dict is yielded unchanged with a
    sequential 0-based id as the example key.
    """
    parquet = pq.ParquetFile(filepath)
    index = 0
    for batch in parquet.iter_batches(batch_size=64):
        for record in batch.to_pylist():
            yield index, record
            index += 1
|
115 |
+
|
116 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|