"""Hugging Face dataset loading script for the Sefaria text corpus.

Exposes the parquet dumps of the Sefaria database as a `datasets` builder
with three configurations: ``english``, ``hebrew``, and ``all``.
"""

import glob
import os

import datasets
import pandas as pd

_CITATION = """\
@misc{shmidman2022introducing,
    title={Introducing BEREL: BERT Embeddings for Rabbinic-Encoded Language},
    author={Avi Shmidman and Joshua Guedalia and Shaltiel Shmidman and Cheyn Shmuel Shmidman and Eli Handel and Moshe Koppel},
    year={2022},
    eprint={2208.01875},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
This Dataset is a Hugging Face Reflection of the Sefaria Database.
The following is the description of Sefaria from the sefaria.org website:

Sefaria is home to 3,000 years of Jewish texts. We are a non-profit organization offering free access to texts, translations, and commentaries so that everyone can participate in the ongoing process of studying, interpreting, and creating Torah
"""

_HOMEPAGE = "https://github.com/Sefaria/Sefaria-Export"

_LICENSE = "Each text is licensed separately, so there is no overall license for this repository."


class Sefaria(datasets.GeneratorBasedBuilder):
    """Builder yielding ``{"text", "meta"}`` examples from Sefaria parquet files."""

    VERSION = datasets.Version("1.0.3")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="english",
            version=VERSION,
            description="This configuration contains only English texts",
        ),
        datasets.BuilderConfig(
            name="hebrew",
            version=VERSION,
            description="This configuration contains only Hebrew texts",
        ),
        datasets.BuilderConfig(
            name="all",
            version=VERSION,
            description="This configuration contains both English and Hebrew texts",
        ),
    ]

    DEFAULT_CONFIG_NAME = "all"

    # Remote file patterns, one per configuration.
    # NOTE(review): DownloadManager.download_and_extract() does not expand
    # ``*`` glob patterns in URLs, and binary (LFS) parquet files on the Hub
    # are normally fetched via ``/resolve/`` rather than ``/raw/`` — confirm
    # these URLs against the repository's actual file listing.
    data_urls = {
        "english": "https://huggingface.co/datasets/mehdie/sefaria/raw/main/data/*_english.parquet",
        "hebrew": "https://huggingface.co/datasets/mehdie/sefaria/raw/main/data/*_hebrew.parquet",
        "all": "https://huggingface.co/datasets/mehdie/sefaria/raw/main/data/*.parquet",
    }

    def _info(self):
        """Return the dataset metadata: two string columns, ``text`` and ``meta``."""
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "meta": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the parquet files for the active config and declare one TRAIN split.

        Fix: the original referenced ``data_urls`` as a bare name, but it is a
        class attribute — that raised ``NameError``; it is now accessed via
        ``self.data_urls``.
        """
        if self.config.name in ("english", "hebrew"):
            data_dir = dl_manager.download_and_extract(self.data_urls[self.config.name])
        else:
            data_dir = dl_manager.download_and_extract(self.data_urls["all"])

        # Collect whatever parquet files ended up in the extracted directory.
        filepaths = glob.glob(os.path.join(data_dir, "*.parquet"))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": filepaths,
                },
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(key, example)`` pairs from every parquet file.

        Fix: the original used each DataFrame's own row index as the example
        key, which restarts at 0 per file and therefore produces duplicate
        keys across files (``datasets`` rejects those). A single counter
        spanning all files guarantees uniqueness.
        """
        key = 0
        for filepath in filepaths:
            df = pd.read_parquet(filepath)
            for _, row in df.iterrows():
                yield key, {
                    "text": row["text"],
                    "meta": row["meta"],
                }
                key += 1