tomersagi committed
Commit 4590a93 · 1 Parent(s): 88051b7
data/Mishnah_english.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c370361d5bc54750bd0657840e8b5c71270122f09169f0fcabc0065bffd1cc7b
+ size 7291639
data/Mishnah_hebrew.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:477612cd11799ac1103055438a5e56fe774be5595bf5e96848678cca09645aee
+ size 88534468
data/Talmud_english.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14c0a6e0ac18c3efc20b6f8f2a06347f4baa101158d68d2c764a44c546a8842d
+ size 50222575
data/Talmud_hebrew.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:751245b158e12fb0a9c27f731df216df0d3de9f5b4161a3184ac85144dcaeb80
+ size 294414866
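
The four .parquet entries above are Git LFS pointer files rather than the data itself: each records the pointer spec version, the sha256 object id of the payload, and its size in bytes (roughly 7 MB to 294 MB here), while the actual parquet bytes live in LFS storage and are fetched on checkout. As a minimal illustration (not part of this commit; parse_lfs_pointer is a hypothetical helper), such a pointer can be split into its three fields:

# Hypothetical helper: parse a Git LFS pointer file, which per
# https://git-lfs.github.com/spec/v1 is a series of "key value" lines.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

# e.g. parse_lfs_pointer("data/Mishnah_english.parquet")
# -> {"version": "https://git-lfs.github.com/spec/v1",
#     "oid": "sha256:c37036...", "size": "7291639"}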
scrape_script.py ADDED
@@ -0,0 +1,72 @@
+ import os
+ import sys
+ import pandas as pd
+ import pyarrow as pa
+ import pyarrow.parquet as pq
+ import json
+
+
+ def traverse_directory(root_path, callback):
+     # Walk the tree rooted at root_path and invoke callback on every file.
+     for dirpath, _, filenames in os.walk(root_path):
+         for filename in filenames:
+             file_path = os.path.join(dirpath, filename)
+             callback(file_path)
+
+
+ def process_file(file_path):
+     if not file_path.endswith(".txt"):
+         return
+
+     with open(file_path, "r", encoding="utf-8") as file:
+         content = file.read()
+
+     # The language is encoded in the immediate parent directory name
+     # ("English" or "Hebrew"); the corpus name is the first path component
+     # below root_directory (a global set in __main__ before traversal starts).
+     dirname = os.path.dirname(file_path)
+     dir_name = os.path.basename(dirname)
+     top_level_directory = os.path.relpath(dirname, root_directory).split(os.sep)[0]
+
+     if dir_name.lower() == "english":
+         append_to_parquet(content, file_path, "en", top_level_directory)
+     elif dir_name.lower() == "hebrew":
+         append_to_parquet(content, file_path, "he", top_level_directory)
+
+
+ def append_to_parquet(content, file_path, lang, top_level_directory):
+     data_dir = "data"
+     if not os.path.exists(data_dir):
+         os.makedirs(data_dir)
+
+     if lang == "en":
+         parquet_file = os.path.join(data_dir, f"{top_level_directory}_english.parquet")
+     elif lang == "he":
+         parquet_file = os.path.join(data_dir, f"{top_level_directory}_hebrew.parquet")
+     else:
+         return
+
+     # Each row stores the source path as a JSON metadata string plus the text.
+     metadata = {"file": file_path}
+     meta_json = json.dumps(metadata)
+
+     data = pd.DataFrame({"meta": [meta_json], "text": [content]})
+     table = pa.Table.from_pandas(data)
+
+     # Parquet files cannot be appended to in place, so an existing file is
+     # read back in full and rewritten with the new row concatenated (slow
+     # over a whole run, but simple enough for a one-off scrape).
+     if not os.path.exists(parquet_file) or os.path.getsize(parquet_file) == 0:
+         with pq.ParquetWriter(parquet_file, table.schema, compression="snappy") as writer:
+             writer.write_table(table)
+     else:
+         pf = pq.ParquetFile(parquet_file)
+         old_table = pf.read()
+         combined_table = pa.concat_tables([old_table, table])
+
+         with pq.ParquetWriter(parquet_file, combined_table.schema, compression="snappy") as writer:
+             writer.write_table(combined_table)
+
+     print(f"Successfully saved: {file_path}")
+
+
+ if __name__ == "__main__":
+     if len(sys.argv) != 2:
+         print("Usage: python scrape_script.py <root_directory_path>")
+         sys.exit(1)
+
+     root_directory = sys.argv[1]
+     traverse_directory(root_directory, process_file)
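
A quick way to sanity-check the scraper's output is to read one file back with pandas. This is a sketch, not part of the commit; it assumes the scraper has already been run (python scrape_script.py <root_directory_path>) and that the export tree produced data/Mishnah_english.parquet as above:

import json
import pandas as pd

# Read back one of the scraper's outputs and inspect its two columns.
df = pd.read_parquet("data/Mishnah_english.parquet")
print(len(df), "rows")
print(json.loads(df.iloc[0]["meta"]))  # {"file": "<source .txt path>"}
print(df.iloc[0]["text"][:200])        # first 200 characters of the text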
sefaria.py ADDED
@@ -0,0 +1,82 @@
+ import os
+ import pandas as pd
+
+ import datasets
+
+ _CITATION = """\
+ @misc{shmidman2022introducing,
+       title={Introducing BEREL: BERT Embeddings for Rabbinic-Encoded Language},
+       author={Avi Shmidman and Joshua Guedalia and Shaltiel Shmidman and Cheyn Shmuel Shmidman and Eli Handel and Moshe Koppel},
+       year={2022},
+       eprint={2208.01875},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This dataset is a Hugging Face reflection of the Sefaria database.
+ The following is the description of Sefaria from the sefaria.org website:
+
+ Sefaria is home to 3,000 years of Jewish texts. We are a non-profit organization
+ offering free access to texts, translations, and commentaries so that everyone
+ can participate in the ongoing process of studying, interpreting, and creating Torah.
+ """
+
+ _HOMEPAGE = "https://github.com/Sefaria/Sefaria-Export"
+ _LICENSE = "Each text is licensed separately, so there is no overall license for this repository."
+
+
+ class Sefaria(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="english", version=VERSION, description="This configuration contains only English texts"),
+         datasets.BuilderConfig(name="hebrew", version=VERSION, description="This configuration contains only Hebrew texts"),
+         datasets.BuilderConfig(name="all", version=VERSION, description="This configuration contains both English and Hebrew texts"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "all"
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "text": datasets.Value("string"),
+                 "meta": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dir = "data"
+
+         # Select the parquet files that match the requested configuration.
+         if self.config.name == "english":
+             filepaths = [os.path.join(data_dir, f) for f in os.listdir(data_dir) if f.endswith("_english.parquet")]
+         elif self.config.name == "hebrew":
+             filepaths = [os.path.join(data_dir, f) for f in os.listdir(data_dir) if f.endswith("_hebrew.parquet")]
+         else:
+             filepaths = [os.path.join(data_dir, f) for f in os.listdir(data_dir) if f.endswith(".parquet")]
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepaths": filepaths,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepaths):
+         for filepath in filepaths:
+             df = pd.read_parquet(filepath)
+             for idx, row in df.iterrows():
+                 # Prefix the row index with the file path so keys stay unique
+                 # across files, and yield the two declared features ("meta",
+                 # not "language", which the parquet files do not contain).
+                 yield f"{filepath}_{idx}", {
+                     "text": row["text"],
+                     "meta": row["meta"],
+                 }
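
With the parquet files in data/, the builder can be exercised locally by pointing datasets.load_dataset at the script. A minimal sketch, assuming it runs from the repository root with a datasets version that still supports script-based builders:

from datasets import load_dataset

# Load only the English texts via the "english" config defined above.
ds = load_dataset("./sefaria.py", "english", split="train")
print(ds)             # Dataset({features: ['text', 'meta'], ...})
print(ds[0]["meta"])  # JSON string carrying the source file path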