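"""Sample the CRAG task 1 & 2 dataset into smaller subsets.

Reads a (bz2-compressed) JSONL dump, splits it into a configurable number of
subsets via CragSampler, prints summary statistics, and writes each subset to
disk.
"""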
import json
import os

from crag_sampler import CragSampler


def run_crag_task_1_and_2(
    file_path: str,
    fields_to_extract: list[str] | None = None,
    n_subsets: int = 5,
    output_dir: str | None = None,
    compress: bool = True,
    n_processes: int | None = None,
    overwrite: bool = False,
) -> None:
    """Run the CRAG sampling pipeline for tasks 1 and 2.

    Args:
        file_path: Path to the input JSONL file (optionally bz2-compressed).
        fields_to_extract: Fields to extract from each JSONL record.
        n_subsets: Number of subsets to create.
        output_dir: Directory for the output subset files.
        compress: Whether to compress the output files.
        n_processes: Number of processes to use for parallel writing.
        overwrite: Whether to overwrite existing output files.
    """
    # Initialize the sampler over the input file, with caching enabled.
    sampler = CragSampler(
        input_file=file_path, required_fields=fields_to_extract, use_cache=True
    )

    # Place the subset index next to the input file as "<stem>_subsets.json".
    output_path = os.path.join(
        os.path.dirname(file_path),
        f"{os.path.splitext(os.path.basename(file_path))[0]}_subsets.json",
    )

    # Split the data into n_subsets, writing the subset index to output_path.
    subsets_data = sampler.create_subsets(n_subsets=n_subsets, output_path=output_path)

    # Print a summary of the subsets that were created.
    print(f"Created {subsets_data['metadata']['n_subsets']} subsets")
    print("\nGlobal statistics:")
    print(json.dumps(subsets_data["metadata"]["global_statistics"], indent=2))
    print("\nFirst subset statistics:")
    print(json.dumps(subsets_data["subsets"][0]["statistics"], indent=2))

    # Materialize each subset to disk, optionally compressed and in parallel.
    sampler.write_subsets(
        subsets_file=output_path,
        output_dir=output_dir,
        compress=compress,
        n_processes=n_processes,
        overwrite=overwrite,
    )


if __name__ == "__main__":
    file_path = "./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2"
    fields_to_extract = ["domain", "answer", "question_type", "static_or_dynamic"]
    n_subsets = 20
    output_dir = "./subset/crag_task_1_and_2"

    run_crag_task_1_and_2(
        file_path,
        fields_to_extract,
        n_subsets=n_subsets,
        output_dir=output_dir,
        overwrite=True,
    )