import os
import shutil
import unittest

import pandas as pd

from scrape_script import append_to_parquet


class TestAppendToParquet(unittest.TestCase):
    def setUp(self):
        # Work in a throwaway directory so the test never touches real data.
        self.temp_dir = "temp_data"
        if not os.path.exists(self.temp_dir):
            os.makedirs(self.temp_dir)

    def tearDown(self):
        shutil.rmtree(self.temp_dir)

    def test_cleaning_rules(self):
        # Raw input containing HTML tags, a URL, and structural heading lines,
        # all of which append_to_parquet is expected to strip.
        content = (
            "<span>Test text with HTML tags.</span> "
            "https://example.com\n"
            "Chapter 1\n"
            "Halakhah 1\n"
            "<b>bold text</b> <strong>strong text</strong> <small>small text</small>"
        )
        file_path = "sample_file.txt"
        lang = "en"
        top_level_directory = "test"

        append_to_parquet(content, file_path, lang, top_level_directory, data_dir=self.temp_dir)

        # The output parquet file is named after the top-level directory and language.
        parquet_file = os.path.join(self.temp_dir, f"train_{top_level_directory}_english.parquet")
        self.assertTrue(os.path.exists(parquet_file))

        df = pd.read_parquet(parquet_file)
        self.assertEqual(len(df), 1)

        cleaned_content = df.loc[0, "text"]
        expected_cleaned_content = (
            "Test text with HTML tags. "
            "\n"
            "bold text strong text small text"
        )

        # Compare line by line, ignoring surrounding whitespace and blank lines.
        cleaned_content_lines = [line.strip() for line in cleaned_content.split("\n") if line.strip()]
        expected_cleaned_content_lines = [
            line.strip() for line in expected_cleaned_content.split("\n") if line.strip()
        ]
        self.assertEqual(len(cleaned_content_lines), len(expected_cleaned_content_lines))
        for clean_line, expected_line in zip(cleaned_content_lines, expected_cleaned_content_lines):
            self.assertEqual(clean_line, expected_line)


if __name__ == "__main__":
    unittest.main()
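

# ---------------------------------------------------------------------------
# Illustrative sketch only, not part of the original module. scrape_script's
# append_to_parquet is not shown in this file; the function below is a guess
# at what the test above expects: strip HTML tags, drop URLs and
# "Chapter N" / "Halakhah N" heading lines, and append one row to a parquet
# file named train_<top_level_directory>_<language>.parquet. The "file" and
# "lang" columns and the "en" -> "english" mapping are assumptions; only the
# "text" column and the filename pattern are confirmed by the test.
# ---------------------------------------------------------------------------
import re


_LANG_NAMES = {"en": "english", "he": "hebrew"}  # assumed mapping


def append_to_parquet_sketch(content, file_path, lang, top_level_directory, data_dir="data"):
    """Hypothetical stand-in for scrape_script.append_to_parquet."""
    # Remove HTML tags such as <span>, <b>, <strong>, <small>.
    text = re.sub(r"<[^>]+>", "", content)
    # Remove bare URLs.
    text = re.sub(r"https?://\S+", "", text)
    # Drop structural heading lines like "Chapter 1" or "Halakhah 1".
    kept_lines = [
        line for line in text.split("\n")
        if not re.match(r"^\s*(Chapter|Halakhah)\s+\d+\s*$", line)
    ]
    text = "\n".join(kept_lines)

    out_path = os.path.join(
        data_dir, f"train_{top_level_directory}_{_LANG_NAMES.get(lang, lang)}.parquet"
    )
    row = pd.DataFrame([{"text": text, "file": file_path, "lang": lang}])
    # Append to an existing parquet file if present, otherwise create it.
    if os.path.exists(out_path):
        row = pd.concat([pd.read_parquet(out_path), row], ignore_index=True)
    row.to_parquet(out_path, index=False)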