Datasets:

Modalities:
Text
Formats:
json
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:
File size: 2,095 Bytes
df57a76
 
 
 
 
 
 
 
 
4a87d90
df57a76
 
 
 
 
 
 
 
 
 
4a87d90
df57a76
 
 
 
 
 
 
 
 
 
4a87d90
df57a76
 
4a87d90
df57a76
 
4a87d90
 
df57a76
 
4a87d90
df57a76
4a87d90
 
 
df57a76
4a87d90
 
 
df57a76
 
 
 
 
 
 
 
 
 
 
4a87d90
 
df57a76
4a87d90
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
# Example usage
import json
import os
from typing import Optional

from crag_sampler import CragSampler


def run_crag_task_1_and_2(
    file_path: str,
    fields_to_extract: Optional[list[str]] = None,
    n_subsets: int = 5,
    output_dir: Optional[str] = None,
    compress: bool = True,
    n_processes: Optional[int] = None,
    overwrite: bool = False,
) -> None:
    """Run the CRAG sampling pipeline for tasks 1 and 2.

    Creates stratified subsets of the input JSONL file via ``CragSampler``,
    prints summary statistics, then writes each subset to disk.

    Args:
        file_path: Path to input JSONL file.
        fields_to_extract: List of fields to extract from JSONL; ``None``
            lets the sampler use its own defaults.
        n_subsets: Number of subsets to create.
        output_dir: Directory for output files; ``None`` uses the sampler's
            default location.
        compress: Whether to compress output files.
        n_processes: Number of processes for parallel processing; ``None``
            lets the sampler pick.
        overwrite: Whether to overwrite existing files.
    """
    # Initialize sampler (caching enabled to avoid re-reading the input).
    sampler = CragSampler(
        input_file=file_path, required_fields=fields_to_extract, use_cache=True
    )

    # Create output path for the subsets index, next to the input file.
    # NOTE(review): splitext strips only the LAST extension, so
    # "name.jsonl.bz2" yields "name.jsonl_subsets.json" — presumably
    # intentional; confirm before changing.
    output_path = os.path.join(
        os.path.dirname(file_path),
        f"{os.path.splitext(os.path.basename(file_path))[0]}_subsets.json",
    )

    # Create subsets (also persisted to output_path by the sampler).
    subsets_data = sampler.create_subsets(n_subsets=n_subsets, output_path=output_path)

    # Print statistics for a quick sanity check of the split.
    print(f"Created {subsets_data['metadata']['n_subsets']} subsets")
    print("\nGlobal statistics:")
    print(json.dumps(subsets_data["metadata"]["global_statistics"], indent=2))
    print("\nFirst subset statistics:")
    print(json.dumps(subsets_data["subsets"][0]["statistics"], indent=2))

    # Write subsets to files (optionally compressed / parallelized).
    sampler.write_subsets(
        subsets_file=output_path,
        output_dir=output_dir,
        compress=compress,
        n_processes=n_processes,
        overwrite=overwrite,
    )


# Example usage
if __name__ == "__main__":
    file_path = "./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2"
    fields_to_extract = ["domain", "answer", "question_type", "static_or_dynamic"]
    n_subsets = 20
    output_dir = "./subset/crag_task_1_and_2"

    # Fix: output_dir was previously defined but never forwarded, so the
    # configured output directory was silently ignored.
    run_crag_task_1_and_2(
        file_path,
        fields_to_extract,
        n_subsets=n_subsets,
        output_dir=output_dir,
        overwrite=True,
    )