SH committed
Commit 8ab590c
1 Parent(s): ec9ee58

Upload safe .txt corpus

Files changed:
- .gitattributes +4 -0
- data/corpus_safe_txt_only.zip +3 -0
- fincorpus-de-10k.py +95 -0
.gitattributes CHANGED
@@ -53,3 +53,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+corpus_safe.zip filter=lfs diff=lfs merge=lfs -text
+corpus_safe_txt_only.zip filter=lfs diff=lfs merge=lfs -text
+data/corpus_safe.zip filter=lfs diff=lfs merge=lfs -text
+data/corpus_safe_txt_only.zip filter=lfs diff=lfs merge=lfs -text
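The four added patterns route the new archives through Git LFS: this is what "git lfs track" writes into .gitattributes, and any matching file is committed as a small pointer file (version/oid/size) while the actual bytes live on the LFS server. The pointer committed for the uploaded zip follows.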
data/corpus_safe_txt_only.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a79b7517b4874cd85b5b0858d6e7c07324c0a589f71cb0d6b03fc452b1cf0e1
+size 271752073
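For reference, the oid in the pointer is just the SHA-256 of the archive's contents and size is its byte count, so a download can be checked against the pointer. A minimal sketch, assuming a local copy of the zip (the path below is hypothetical):

    import hashlib
    import os

    def lfs_pointer_fields(path):
        # Hash in 1 MiB chunks so the ~272 MB archive never sits in memory.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return {"oid": "sha256:" + h.hexdigest(), "size": os.path.getsize(path)}

    # For an intact download this should print the oid/size shown above.
    print(lfs_pointer_fields("corpus_safe_txt_only.zip"))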
fincorpus-de-10k.py ADDED
@@ -0,0 +1,95 @@
import datasets
from datasets.tasks import LanguageModeling

# TODO
# - shows how to include metadata from a separate file: https://huggingface.co/datasets/SIA86/WaterFlowCountersRecognition/blob/e659c03dfc5e50dd08648b92d66b2f3f3ef560a4/WaterFlowCountersRecognition.py
# - shows how to add and use custom kwargs that we could use for globbing filenames: https://discuss.huggingface.co/t/using-config-kwargs-within-the-load-dataset/32112/3

_DATA_URL = "https://huggingface.co/datasets/anhaltai/fincorpus-de-10k/resolve/main/data/corpus_safe_txt_only.zip"

ALL_COLLECTIONS_NAME = "all"

# Top-level directories' names; each one becomes a loadable configuration,
# plus "all" for the whole corpus.
CONFIG_NAMES = {
    "Annual_reports",
    "BBK_monthly",
    "Base_prospectuses",
    "Final_terms",
    # "IFRS",
    # "Informational_materials",
    "Law",
    ALL_COLLECTIONS_NAME,
}


# TODO
_DESCRIPTION = """\
We introduce a predominantly German corpus comprising 12.5k PDF documents (and 10.5k extracted txt files) sourced from the financial domain. The extracted textual data encompasses more than 165 million tokens, derived predominantly from German and, to a lesser extent, bilingual documents.
We provide detailed information about the document types included in the corpus, such as final terms, base prospectuses, annual reports, information materials, law documents, international financial reporting standards, and monthly reports from the Bundesbank, accompanied by a comprehensive statistical analysis.
This version of the dataset excludes two collections, IFRS and Informational_materials, leaving only datasets definitely releasable with an open license.
"""

# TODO bibtex citation here
_CITATION = """ """


class FincorpusConfig(datasets.BuilderConfig):
    def __init__(self, generate_sentences=False, **kwargs):
        super(FincorpusConfig, self).__init__(
            version=datasets.Version("1.0.0"), **kwargs
        )
        # Not used yet; reserved for a planned sentence-splitting mode.
        self.generate_sentences = generate_sentences


class Fincorpus(datasets.GeneratorBasedBuilder):
    # VERSION = datasets.Version('1.0.0')

    BUILDER_CONFIGS = [
        FincorpusConfig(name=config_name) for config_name in CONFIG_NAMES
    ]
    DEFAULT_CONFIG_NAME = ALL_COLLECTIONS_NAME

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "filename": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            # citation=_CITATION,
            task_templates=[LanguageModeling(text_column="text")],
        )

    def _split_generators(self, dl_manager):
        # config_urls = _DATA_URL[self.config.name]
        config_url = _DATA_URL
        # Download without extracting; _generate_examples streams members
        # straight out of the zip via iter_archive().
        arch_path = dl_manager.download(config_url)
        # files_paths = dl_manager.download_and_extract(config_url)
        # subdir = self.config.name
        # clean_paths = [x for x in files_paths if x.startswith(subdir)]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": dl_manager.iter_archive(arch_path)},
            ),
        ]

    def _path_belongs_to_collection(self, path: str):
        # Archive layout is txt/<collection>/<file>.txt; the "all" config
        # keeps every member.
        subfolder_name = self.config.name
        if subfolder_name == ALL_COLLECTIONS_NAME:
            return True
        if path.startswith("txt/" + subfolder_name):
            return True
        return False

    def _generate_examples(self, files):
        # iter_archive() yields (relative_path, file_object) pairs.
        _id = 0
        for path, f in files:
            if not self._path_belongs_to_collection(path):
                continue
            text = f.read().decode("utf-8").strip()
            yield _id, {"text": text, "filename": path}
            _id += 1
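The script above is the dataset's loading script: each entry in CONFIG_NAMES becomes a configuration, and examples stream straight out of the downloaded zip. A usage sketch (recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets like this one):

    from datasets import load_dataset

    # "Law" is one of the CONFIG_NAMES; the default config is "all".
    ds = load_dataset("anhaltai/fincorpus-de-10k", "Law", split="train")

    print(ds[0]["filename"])    # e.g. a path under txt/Law/
    print(ds[0]["text"][:200])  # first 200 characters of the document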