devrim committed on
Commit
15c53c5
·
verified ·
1 Parent(s): 058b418

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -56,3 +56,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
56
  # Video files - compressed
57
  *.mp4 filter=lfs diff=lfs merge=lfs -text
58
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
56
  # Video files - compressed
57
  *.mp4 filter=lfs diff=lfs merge=lfs -text
58
  *.webm filter=lfs diff=lfs merge=lfs -text
59
+ train/partition_0.jsonl filter=lfs diff=lfs merge=lfs -text
60
+ train/partition_1.jsonl filter=lfs diff=lfs merge=lfs -text
goodwiki_long.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 Devrim Cavusoglu and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """Goodwiki Long Subset."""
18
+
19
+
20
+ import json
21
+
22
+ import datasets
23
+
24
# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)


# Human-readable summary surfaced through `datasets.DatasetInfo`.
_DESCRIPTION = """\
Dataset consisting of long wikipedia articles in markdown format.
"""

# Repo-relative paths of the JSONL shards, keyed by split name.
# `dl_manager.download_and_extract` resolves these against the dataset repo.
_URLS = {
    "train": [
        "train/partition_0.jsonl",
        "train/partition_1.jsonl",
    ]
}
37
+
38
+
39
class GoodWikiLongDatasetConfig(datasets.BuilderConfig):
    """Configuration for the GoodWiki long-article dataset."""

    def __init__(self, **kwargs):
        """Create a config.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)

    @property
    def features(self):
        # Schema of one example. ``url`` is always emitted as None by the
        # generator, hence the null-typed feature.
        text_value = datasets.Value("string")
        return {
            "id": text_value,
            "url": datasets.Value("null"),
            "title": text_value,
            "text": text_value,
            "revid": text_value,
            "description": text_value,
            "categories": datasets.Sequence(text_value),
        }
61
+
62
+
63
class GoodWikiLongDataset(datasets.GeneratorBasedBuilder):
    """Builder for the GoodWiki long-article dataset. Version 1.0."""

    BUILDER_CONFIGS = [
        GoodWikiLongDatasetConfig(
            version=datasets.Version("1.0.0", ""), description="Goodwiki Long Articles"
        )
    ]
    BUILDER_CONFIG_CLASS = GoodWikiLongDatasetConfig

    def _info(self):
        """Return dataset metadata: description plus the example schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(self.config.features),
        )

    def _split_generators(self, dl_manager):
        """Download the JSONL shards and expose a single train split."""
        downloaded = dl_manager.download_and_extract(_URLS)
        train_split = datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"filepath": downloaded["train"]},
        )
        return [train_split]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one or more JSONL shards.

        Each input line is one JSON object. Per example: ``pageid`` is
        renamed to ``id``, the markdown body is prefixed with the title as
        an H1 heading to form ``text``, and ``url`` is set to None.
        """
        logger.info("generating examples from = %s", filepath)
        # Accept either a single shard path or a list of them.
        shard_paths = [filepath] if isinstance(filepath, str) else filepath
        example_key = 0
        for shard_path in shard_paths:
            with open(shard_path, encoding="utf-8") as shard_file:
                for raw_line in shard_file:
                    record = json.loads(raw_line)
                    record["id"] = record.pop("pageid")
                    record["text"] = "# " + record["title"] + "\n\n" + record.pop("markdown")
                    record["url"] = None
                    yield example_key, record
                    example_key += 1
info.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "n_instances": 17854,
3
+ "avg_num_words": 5119.19239386132,
4
+ "min_token_length": 4096,
5
+ "min_char_length": 16384,
6
+ "source_dataset": {
7
+ "path": "euirim/goodwiki",
8
+ "name": null,
9
+ "split": "train"
10
+ }
11
+ }
section_histogram.png ADDED

Git LFS Details

  • SHA256: f503c31b073f9a35f8f583ffa48ab60780034a5308ecfb5d7a9e5058bd924bce
  • Pointer size: 130 Bytes
  • Size of remote file: 71 kB
train/partition_0.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b63921983f062515b9450af548a33e7e02d16c076bb452d515ed78d22336e666
3
+ size 384000160
train/partition_1.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c7723c2f44acf426d5b41f92971fc264d2d268ad2ae69457654afc88df6d9706
3
+ size 192850578