fix(weibo.py): fix data format
Browse files- .gitignore +2 -1
- README.md +6 -1
- data/test.parquet +2 -2
- data/train.parquet +2 -2
- data/validation.parquet +2 -2
- weibo.py +1 -6
.gitignore
CHANGED
@@ -1,2 +1,3 @@
|
|
1 |
download.py
|
2 |
-
save.py
|
|
|
|
1 |
download.py
|
2 |
+
save.py
|
3 |
+
*.bmes
|
README.md
CHANGED
@@ -68,4 +68,9 @@ datasets = load_dataset("minskiter/weibo",save_infos=True)
|
|
68 |
train,validation,test = datasets['train'],datasets['validation'],datasets['test']
|
69 |
# convert label to str
|
70 |
print(train.features['labels'].feature.int2str(0))
|
71 |
-
```
|
|
|
|
|
|
|
|
|
|
|
|
68 |
train,validation,test = datasets['train'],datasets['validation'],datasets['test']
|
69 |
# convert label to str
|
70 |
print(train.features['labels'].feature.int2str(0))
|
71 |
+
```
|
72 |
+
|
73 |
+
### CHANGE LOGS
|
74 |
+
|
75 |
+
- 21/7/2023 v1.0.2 Fix data format.
|
76 |
+
- 16/7/2023 v1.0.0 Publish weibo data.
|
data/test.parquet
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:28f647d061337083af0b3ebd61f558c8de80720db0279e1505d6eb2a3a30dd89
|
3 |
+
size 35089
|
data/train.parquet
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8a0771b61f8ef925b66f579b66f39a3a55f8465b359eb2628c5d22074391dcab
|
3 |
+
size 147678
|
data/validation.parquet
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3682731b23d269edfd18e7325f05ec6f8c4b37f3c0262684978272d3bbfcb137
|
3 |
+
size 34581
|
weibo.py
CHANGED
@@ -1,7 +1,6 @@
|
|
1 |
import datasets
|
2 |
from datasets.download.download_manager import DownloadManager
|
3 |
import pyarrow.parquet as pq
|
4 |
-
import json
|
5 |
|
6 |
_DESCRIPTION = """\
|
7 |
The Weibo NER dataset is a Chinese Named Entity Recognition dataset
|
@@ -33,7 +32,7 @@ _URLS = {
|
|
33 |
}
|
34 |
|
35 |
class WeiboNamedEntities(datasets.GeneratorBasedBuilder):
|
36 |
-
VERSION = datasets.Version("1.0.0")
|
37 |
|
38 |
def _info(self):
|
39 |
return datasets.DatasetInfo(
|
@@ -112,11 +111,7 @@ class WeiboNamedEntities(datasets.GeneratorBasedBuilder):
|
|
112 |
rows = i.to_pylist()
|
113 |
for row in rows:
|
114 |
_id+=1
|
115 |
-
# fix string
|
116 |
-
row['text'] = row['text'].replace("'", '"')
|
117 |
-
row['text'] = json.loads(row['text'])
|
118 |
yield _id, row
|
119 |
-
|
120 |
|
121 |
|
122 |
|
|
|
1 |
import datasets
|
2 |
from datasets.download.download_manager import DownloadManager
|
3 |
import pyarrow.parquet as pq
|
|
|
4 |
|
5 |
_DESCRIPTION = """\
|
6 |
The Weibo NER dataset is a Chinese Named Entity Recognition dataset
|
|
|
32 |
}
|
33 |
|
34 |
class WeiboNamedEntities(datasets.GeneratorBasedBuilder):
|
35 |
+
VERSION = datasets.Version("1.0.2")
|
36 |
|
37 |
def _info(self):
|
38 |
return datasets.DatasetInfo(
|
|
|
111 |
rows = i.to_pylist()
|
112 |
for row in rows:
|
113 |
_id+=1
|
|
|
|
|
|
|
114 |
yield _id, row
|
|
|
115 |
|
116 |
|
117 |
|