"""Hugging Face dataset loading script for a parquet export of the Sefaria library.

Exposes three configurations ("english", "hebrew", "all") that select parquet
files from a local ``data/`` directory by filename suffix and yield
``{"text", "meta"}`` string records as a single TRAIN split.
"""

import logging
import os

import pandas as pd

import datasets

logger = logging.getLogger(__name__)

_CITATION = """\
@misc{shmidman2022introducing,
    title={Introducing BEREL: BERT Embeddings for Rabbinic-Encoded Language},
    author={Avi Shmidman and Joshua Guedalia and Shaltiel Shmidman and Cheyn Shmuel Shmidman and Eli Handel and Moshe Koppel},
    year={2022},
    eprint={2208.01875},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
This Dataset is a Hugging Face Reflection of the Sefaria Database. The following is the description of Sefaria from the sefaria.org website:

Sefaria is home to 3,000 years of Jewish texts. We are a non-profit organization offering free access to texts, translations, and commentaries so that everyone can participate in the ongoing process of studying, interpreting, and creating Torah
"""

_HOMEPAGE = "https://github.com/Sefaria/Sefaria-Export"

_LICENSE = "Each text is licensed separately, so there is no overall license for this repository."


class Sefaria(datasets.GeneratorBasedBuilder):
    """Builder for the Sefaria text corpus stored as local parquet files.

    Files are expected under ``data/`` and named ``*_english.parquet`` or
    ``*_hebrew.parquet``; the active config decides which suffix is loaded.
    """

    VERSION = datasets.Version("1.0.2")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="english", version=VERSION, description="This configuration contains only English texts"),
        datasets.BuilderConfig(name="hebrew", version=VERSION, description="This configuration contains only Hebrew texts"),
        datasets.BuilderConfig(name="all", version=VERSION, description="This configuration contains both English and Hebrew texts"),
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Return the dataset metadata: two string fields, ``text`` and ``meta``."""
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "meta": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Select the parquet files matching the active config.

        Returns a single TRAIN split whose ``gen_kwargs`` carry the file list.
        Files are sorted so that example order (and keys) is deterministic
        across machines — ``os.listdir`` order is OS-dependent.
        """
        data_dir = "data"
        # One suffix per config; "all" matches every parquet file, which
        # includes both the english and hebrew variants.
        if self.config.name == "english":
            suffix = "_english.parquet"
        elif self.config.name == "hebrew":
            suffix = "_hebrew.parquet"
        else:
            suffix = ".parquet"
        filepaths = sorted(
            os.path.join(data_dir, filename)
            for filename in os.listdir(data_dir)
            if filename.endswith(suffix)
        )
        logger.debug("Filepaths found: %s", filepaths)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": filepaths,
                },
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(key, example)`` pairs from each parquet file in order.

        ``key`` is a single counter that increases across ALL files: yielding
        the per-file DataFrame index (as the original code did) produces
        duplicate keys whenever more than one file is read, which makes the
        datasets library raise ``DuplicatedKeysError`` during generation.
        """
        key = 0
        for filepath in filepaths:
            df = pd.read_parquet(filepath)
            logger.debug("Reading file: %s, number of rows: %d", filepath, len(df))
            # itertuples avoids the per-row Series construction of iterrows;
            # the yielded values are identical.
            for row in df.itertuples(index=False):
                yield key, {
                    "text": row.text,
                    "meta": row.meta,
                }
                key += 1