# sefaria/scrape_script.py
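"""Build per-category Parquet training shards from a Sefaria text export.

Walks a directory tree of plain-text files, keeps only .txt files whose
parent folder is named "English" or "Hebrew", filters and cleans the
text, and appends each document as one row to a Parquet file under
./data named after the export's top-level directory.
"""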
import json
import os
import re
import sys

import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from langdetect import detect, LangDetectException

def traverse_directory(root_path, callback):
    for dirpath, _, filenames in os.walk(root_path):
        for filename in filenames:
            file_path = os.path.join(dirpath, filename)
            callback(file_path)

def process_file(file_path):
    """Route an English or Hebrew .txt file to the matching Parquet shard."""
    if not file_path.endswith(".txt"):
        return
    with open(file_path, "r", encoding="utf-8") as file:
        content = file.read()
    dirname = os.path.dirname(file_path)
    dir_name = os.path.basename(dirname)
    # root_directory is assigned in __main__ before the traversal starts.
    top_level_directory = os.path.relpath(dirname, root_directory).split(os.sep)[0]
    if dir_name.lower() == "english":
        append_to_parquet(content, file_path, "en", top_level_directory)
    elif dir_name.lower() == "hebrew":
        append_to_parquet(content, file_path, "he", top_level_directory)

def append_to_parquet(content, file_path, lang, top_level_directory):
    """Clean the text and append it as one row to the matching Parquet file."""
    data_dir = "data"
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    if lang == "en":
        parquet_file = os.path.join(data_dir, f"train_{top_level_directory}_english.parquet")
    elif lang == "he":
        parquet_file = os.path.join(data_dir, f"train_{top_level_directory}_hebrew.parquet")
    else:
        return
    # Skip content that langdetect identifies as French or Spanish.
    # Detection can raise on empty or featureless text, so treat that as a skip too.
    try:
        detected_lang = detect(content)
    except LangDetectException:
        return
    if detected_lang in ("fr", "es"):
        return
    # Apply cleaning rules
    content = re.sub(r"<span[^>]*>|</span>", "", content)  # Remove HTML span tags
    # Remove standalone chapter markers such as "Chapter 3" or "Halakhah 12"
    chapter_markers = ["Chapter", "Halakhah"]
    for marker in chapter_markers:
        content = re.sub(rf"^{marker} \d+$", "", content, flags=re.MULTILINE)
    metadata = {"file": file_path}
    meta_json = json.dumps(metadata)
    data = pd.DataFrame({"meta": [meta_json], "text": [content]})
    table = pa.Table.from_pandas(data)
    if not os.path.exists(parquet_file) or os.path.getsize(parquet_file) == 0:
        # First row for this shard: create the file.
        with pq.ParquetWriter(parquet_file, table.schema, compression="snappy") as writer:
            writer.write_table(table)
    else:
        # Parquet files cannot be appended to in place, so read the existing
        # rows, concatenate the new row, and rewrite the whole file.
        pf = pq.ParquetFile(parquet_file)
        old_table = pf.read()
        combined_table = pa.concat_tables([old_table, table])
        with pq.ParquetWriter(parquet_file, combined_table.schema, compression="snappy") as writer:
            writer.write_table(combined_table)
    print(f"Successfully saved: {file_path}")
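
# Convenience helper for eyeballing a produced shard. This is a sketch added
# for illustration, not part of the original script; the path passed in is
# whatever append_to_parquet produced, e.g. a hypothetical
# data/train_Tanakh_english.parquet.
def preview_shard(parquet_path, n=3):
    """Return the first n rows of a produced shard as a DataFrame."""
    return pd.read_parquet(parquet_path).head(n)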

if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: python scrape_script.py <root_directory_path>")
        sys.exit(1)
    root_directory = sys.argv[1]
    traverse_directory(root_directory, process_file)
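
# Example invocation (the path below is illustrative, assuming a local
# checkout of Sefaria's text export):
#
#   python scrape_script.py ./Sefaria-Export/txt
#
# Shards are then written under ./data, one Parquet file per top-level
# directory and language.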