devrim committed on
Commit 6582f53 · 1 Parent(s): b81be6f

Upload folder using huggingface_hub
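The commit message says the files were pushed with huggingface_hub's folder upload. A minimal sketch of that call follows; the repo id and local folder path are illustrative placeholders, not values taken from this commit:

from huggingface_hub import HfApi

api = HfApi()
# repo_id and folder_path are placeholders for illustration only.
api.upload_folder(
    repo_id="devrim/russian-second-level",
    repo_type="dataset",
    folder_path="./russian_second_level",
    commit_message="Upload folder using huggingface_hub",
)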
.gitattributes CHANGED
@@ -53,3 +53,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ cultural_discourse_subject/train.json filter=lfs diff=lfs merge=lfs -text
+ cultural_discourse_type/train.json filter=lfs diff=lfs merge=lfs -text
cultural_discourse_subject/test.json ADDED
The diff for this file is too large to render. See raw diff
 
cultural_discourse_subject/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad7c120c12b77bdd93ec94678a35f72622570ef6706a26e7e15750f45ef582dd
+ size 20279680
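The train.json added above is a Git LFS pointer rather than the JSON itself: its three lines record the pointer spec version, the sha256 object id of the stored file, and the file size in bytes (about 20 MB). A small sketch of reading such a pointer, with an illustrative path and assuming the clone has not smudged the LFS file:

from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Parse the space-separated key/value lines of a Git LFS pointer file."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# Path is illustrative; prints e.g. "sha256:ad7c... 20279680".
pointer = read_lfs_pointer("cultural_discourse_subject/train.json")
print(pointer["oid"], pointer["size"])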
cultural_discourse_subject/val.json ADDED
The diff for this file is too large to render. See raw diff
 
cultural_discourse_type/test.json ADDED
The diff for this file is too large to render. See raw diff
 
cultural_discourse_type/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53e27ce1c4b6dd3f03c881db6ea0214fb37365d55c014218a028b156f694fca8
+ size 20279498
cultural_discourse_type/val.json ADDED
The diff for this file is too large to render. See raw diff
 
literary_text_type/test.json ADDED
The diff for this file is too large to render. See raw diff
 
literary_text_type/train.json ADDED
The diff for this file is too large to render. See raw diff
 
literary_text_type/val.json ADDED
The diff for this file is too large to render. See raw diff
 
russian_second_level.py ADDED
@@ -0,0 +1,182 @@
+ """Russian Literary Dataset from late 19th century up to early 20th century."""
+
+ import json
+ import os
+ import warnings
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import numpy as np
+ from transformers import PreTrainedTokenizerBase
+
+ _DESCRIPTION = """Second level categorization of Russian articles."""
+
+ _HOMEPAGE = ""
+
+ _LICENSE = ""
+
+ _NAMES = [
+     "cultural_discourse_subject",
+     "cultural_discourse_type",
+     "literary_text_type",
+ ]
+
+ # Label information as `(num_labels, is_multi_label)` tuples
+ _LABELS: Dict[str, Tuple[int, bool]] = {
+     "cultural_discourse_subject": (7, True),
+     "cultural_discourse_type": (7, True),
+     "literary_text_type": (5, False),
+ }
+
+
+ def generate_urls(name: str) -> Dict[str, str]:
+     return {
+         "train": os.path.join(name, "train.json"),
+         "val": os.path.join(name, "val.json"),
+         "test": os.path.join(name, "test.json"),
+     }
+
+
+ class NonwestlitSecondLevelConfig(datasets.BuilderConfig):
+     """BuilderConfig for Dataset."""
+
+     def __init__(
+         self, tokenizer: PreTrainedTokenizerBase = None, max_sequence_length: int = None, **kwargs
+     ):
+         """BuilderConfig for Dataset.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(NonwestlitSecondLevelConfig, self).__init__(**kwargs)
+         self.tokenizer = tokenizer
+         self.max_sequence_length = max_sequence_length
+
+     @property
+     def features(self):
+         if self.name == "literary_text_type":
+             labels = datasets.Value("uint8")
+         else:
+             labels = datasets.Sequence(datasets.Value("uint8"))
+         return {
+             "labels": labels,
+             "input_ids": datasets.Value("string"),
+             "title": datasets.Value("string"),
+             "iid": datasets.Value("uint32"),
+             "chunk_id": datasets.Value("uint32"),
+         }
+
+
+ class NonwestlitSecondLevelDataset(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("0.0.1")
+
+     BUILDER_CONFIGS = [
+         NonwestlitSecondLevelConfig(name=name, version=version, description=name)
+         for name, version in zip(_NAMES, [VERSION] * len(_NAMES))
+     ]
+     BUILDER_CONFIG_CLASS = NonwestlitSecondLevelConfig
+     __current_id = 1
+     __current_chunk_id = 1
+
+     @property
+     def __next_id(self):
+         cid = self.__current_id
+         self.__current_id += 1
+         return cid
+
+     @property
+     def __next_chunk_id(self):
+         cid = self.__current_chunk_id
+         self.__current_chunk_id += 1
+         return cid
+
+     @property
+     def label_info(self) -> Tuple[int, bool]:
+         return _LABELS[self.config.name]
+
+     def __reset_chunk_id(self):
+         self.__current_chunk_id = 1
+
+     def _info(self):
+         if self.config.tokenizer is None:
+             raise RuntimeError(
+                 "For HF Datasets and for chunking to be carried out, 'tokenizer' must be given."
+             )
+         if "llama" in self.config.tokenizer.name_or_path:
+             warnings.warn(
+                 "It is suggested to pass the 'max_sequence_length' argument for the Llama-2 model family. "
+                 "There might be errors in the data processing parts as `model_max_length` attributes are "
+                 "set to MAX_INT64 (?)."
+             )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(self.config.features),
+         )
+
+     def _split_generators(self, dl_manager):
+         urls = generate_urls(self.config.name)
+         data_dir = dl_manager.download_and_extract(urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"]}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir["val"]}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"]}
+             ),
+         ]
+
+     # Tokenize an article into max-length chunks and decode each chunk back to text.
+     def prepare_articles(self, article: str) -> List[str]:
+         tokenizer = self.config.tokenizer
+         model_inputs = tokenizer(
+             article,
+             truncation=True,
+             padding=True,
+             max_length=self.config.max_sequence_length,
+             return_overflowing_tokens=True,
+         )
+         return tokenizer.batch_decode(model_inputs["input_ids"], skip_special_tokens=True)
+
+     def _to_one_hot(self, labels: List[int], num_labels: int) -> List[float]:
+         x = np.zeros(num_labels, dtype=np.float16)
+         x[labels] = 1.0
+         return x.tolist()
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath: str):
+         with open(filepath, encoding="utf-8") as f:
+             dataset = json.load(f)
+
+         num_labels, multi_label = self.label_info
+         chunk_id = 0
+         for instance in dataset:
+             iid = instance.get("id", self.__next_id)
+             label = instance.get("label")
+             # Raw labels are 1-indexed; shift to 0-indexed and one-hot encode multi-label configs.
+             if label is None:
+                 if not multi_label:
+                     continue
+                 else:
+                     label = self._to_one_hot(labels=[], num_labels=num_labels)
+             elif isinstance(label, int):
+                 label = int(label) - 1
+             elif isinstance(label, str):
+                 if multi_label:
+                     label = [int(l) - 1 for l in label.split(",")]
+                     label = self._to_one_hot(label, num_labels)
+                 else:
+                     label = int(label) - 1
+
+             article = self.prepare_articles(instance["article"])
+             self.__reset_chunk_id()
+             for chunk in article:
+                 chunk_inputs = {
+                     "iid": iid,
+                     "chunk_id": self.__next_chunk_id,
+                     "title": instance["title"],
+                     "input_ids": chunk,
+                     "labels": label,
+                 }
+                 yield chunk_id, chunk_inputs
+                 chunk_id += 1
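
Since `_info` raises unless a tokenizer is supplied, the builder is meant to be loaded with a tokenizer passed through `load_dataset`'s config kwargs, which `datasets` forwards to `NonwestlitSecondLevelConfig`. A minimal usage sketch follows; the script path and tokenizer checkpoint are illustrative, and newer `datasets` releases may additionally require `trust_remote_code=True` for loading scripts:

from datasets import load_dataset
from transformers import AutoTokenizer

# Any PreTrainedTokenizerBase should work; this checkpoint name is only an example.
tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")

# Extra keyword arguments are forwarded to NonwestlitSecondLevelConfig.
dataset = load_dataset(
    "russian_second_level.py",       # local path to the builder script (illustrative)
    name="literary_text_type",       # or "cultural_discourse_subject" / "cultural_discourse_type"
    tokenizer=tokenizer,
    max_sequence_length=512,
)

# Each example is one tokenizer-sized chunk of an article.
print(dataset["train"][0]["iid"], dataset["train"][0]["chunk_id"])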