mikeg112 committed
Commit 5926a2c · Parent(s): 2e5dd11

Add scripts

Files changed (4)
  1. .gitignore +8 -0
  2. rehydrate.py +47 -0
  3. upload.py +85 -0
  4. util.py +8 -0
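
A minimal sketch of how the added scripts fit together, assuming the default arguments, that `processed_sources.jsonl` and the processed JSONL files it references are present locally, and that the caller is authenticated with write access to persius/hicric:

```python
# Sketch only: end-to-end flow implied by the scripts in this commit.
from upload import construct_hf_dataset
from rehydrate import download_dir, repopulate_dir

# 1. Build one Hub config per partitioning category and push to persius/hicric.
construct_hf_dataset(metadata_file="processed_sources.jsonl")

# 2. Later, pull the configs back down as Arrow data and rewrite the original
#    ./data/processed JSONL layout from the stored relative paths.
download_dir(repo_name="persius/hicric", output_dir="./arrow_data")
repopulate_dir(hf_data_dir="./arrow_data", rehydrate_target_dir="./data/processed")
```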
.gitignore ADDED
@@ -0,0 +1,8 @@
+# Virtual environment
+.env
+
+# Local arrow copy of data
+arrow_data
+
+# pycache
+*.pyc
rehydrate.py ADDED
@@ -0,0 +1,47 @@
+import os
+import json
+from datasets import load_from_disk, load_dataset, DatasetDict
+from util import PARTITIONING_CATS
+
+
+def download_dir(repo_name: str = "persius/hicric", output_dir="./arrow_data"):
+    """Download the dir from HF hub without cloning, if you like, and save locally."""
+    ds_dict = DatasetDict()
+    for split in PARTITIONING_CATS:
+        ds = load_dataset(repo_name, name=split)
+        ds_dict[split] = ds
+    ds_dict.save_to_disk(output_dir)
+    return None
+
+
+def repopulate_dir(
+    hf_data_dir: str = "./arrow_data", rehydrate_target_dir: str = "./data/processed"
+):
+    """Rehydrate the HICRIC processed data dir from the HF Dataset.
+
+    This hydrates the data in the same format in which it was/is originally produced in
+    the HICRIC repository's code.
+    """
+
+    for split in PARTITIONING_CATS:
+        dataset = load_from_disk(os.path.join(hf_data_dir, split, "train"))
+        # Get individual lines
+        for instance in dataset:
+            # Extract the output file/directory associated with line
+            rel_path = instance["relative_path"]
+            output_file_path = os.path.join(rehydrate_target_dir, rel_path)
+            output_directory = os.path.join(
+                rehydrate_target_dir, os.path.dirname(rel_path)
+            )
+            os.makedirs(output_directory, exist_ok=True)
+
+            with open(output_file_path, "a") as writer:
+                writer.write(json.dumps(instance) + "\n")
+
+    print(f"Repopulated data saved to {rehydrate_target_dir}")
+    return None
+
+
+if __name__ == "__main__":
+    download_dir()
+    repopulate_dir()
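
If only one partition is needed, it can also be loaded straight from the Hub without rehydrating the directory layout. A small sketch, assuming the configs pushed by upload.py are reachable (the repo is pushed as private, so an authenticated token may be required):

```python
from datasets import load_dataset

# Each partitioning category is a named config on persius/hicric;
# "legal" is one of the categories listed in util.PARTITIONING_CATS.
legal = load_dataset("persius/hicric", name="legal")

# Records carry: text, tags, date_accessed, source_url, source_md5, relative_path.
print(legal["train"][0]["text"][:200])
```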
upload.py ADDED
@@ -0,0 +1,85 @@
+import json
+from datasets import Dataset, DatasetDict
+from util import PARTITIONING_CATS
+
+
+def construct_hf_dataset(metadata_file: str = "processed_sources.jsonl"):
+    """Construct a HF DatasetDict class from the HICRIC processed data dir, and push to hub."""
+
+    def data_generator(cat: str):
+        def validate_tags(tags, partitioning_cats=PARTITIONING_CATS):
+            # Find the intersection of the two lists
+            matches = [tag for tag in tags if tag in partitioning_cats]
+
+            # Raise an exception if there are none or two or more matches
+            if len(matches) == 0 or len(matches) >= 2:
+                raise ValueError(
+                    f"The list of tags must contain exactly one key from the partitioning categories: {partitioning_cats}."
+                )
+
+            return True  # If the tags are valid
+
+        # Open metadata file
+        with open(metadata_file, "r") as metadata_f:
+            for idx, line in enumerate(metadata_f):
+                obj = json.loads(line)
+                local_processed_path = obj["local_processed_path"]
+                file_tags = obj["tags"]
+                date_accessed = obj["date_accessed"]
+                url = obj["url"]
+                raw_md5 = obj["md5"]
+
+                # Only proceed for relevant partition cat
+                _valid = validate_tags(file_tags)
+                if cat not in file_tags:
+                    continue
+
+                # Read the JSONL file pointed to by the `local_processed_path`
+                with open(local_processed_path, "r") as data_file:
+                    for _idx, data_line in enumerate(data_file):
+                        data_obj = json.loads(data_line, strict=False)
+
+                        # Get line specific data
+                        text = data_obj.get("text", "")
+                        line_tags = data_obj.get("tags", [])
+                        if len(text) == 0:
+                            continue
+                        if len(line_tags) > 0:
+                            tags = file_tags + line_tags
+                        else:
+                            tags = file_tags
+
+                        rec = {
+                            "text": data_obj.get("text", ""),
+                            "tags": tags,
+                            "date_accessed": date_accessed,
+                            "source_url": url,
+                            "source_md5": raw_md5,
+                            "relative_path": local_processed_path,
+                        }
+
+                        # Add some specific partition keys
+                        if cat == "case-description":
+                            rec["decision"] = data_obj.get("decision", "unknown")
+                            rec["appeal_type"] = data_obj.get("appeal_type", "unknown")
+
+                        yield rec
+
+    # Create a DatasetDict to store sub-directory datasets
+    dataset_dict = DatasetDict()
+    for cat in PARTITIONING_CATS:
+        sub_dataset = Dataset.from_generator(
+            generator=data_generator, gen_kwargs={"cat": cat}
+        )
+        dataset_dict[cat] = sub_dataset
+
+    # Save each sub-directory dataset as a separate dataset within a DatasetDict
+    for k, v in dataset_dict.items():
+        v.push_to_hub("persius/hicric", k, private=True)
+    # dataset_dict.save_to_disk("./arrow_data")
+
+    return None
+
+
+if __name__ == "__main__":
+    construct_hf_dataset()
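
For reference, a sketch of the record shape `data_generator` expects, with keys taken from the reads above; all values here are hypothetical placeholders, not real entries:

```python
# Illustrative only: one line of processed_sources.jsonl as data_generator reads it.
example_metadata_record = {
    "local_processed_path": "data/processed/example/file.jsonl",  # hypothetical path
    "tags": ["legal"],            # must contain exactly one partitioning category
    "date_accessed": "YYYY-MM-DD",
    "url": "https://example.com/source",  # hypothetical source URL
    "md5": "<md5 of the raw source file>",
}

# Each line of the referenced JSONL file needs a non-empty "text" field, may add
# its own "tags", and for case-description sources may carry "decision"/"appeal_type".
example_data_line = {"text": "...", "tags": [], "decision": "unknown", "appeal_type": "unknown"}
```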
util.py ADDED
@@ -0,0 +1,8 @@
+PARTITIONING_CATS = [
+    "legal",
+    "regulatory-guidance",
+    "contract-coverage-rule-medical-policy",
+    "opinion-policy-summary",
+    "case-description",
+    "clinical-guidelines",
+]
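
These categories double as the Hub config names and as the partitioning tags checked by upload.py's `validate_tags`: every source's tag list must contain exactly one of them. A small sketch of that invariant, where "federal" stands in for a hypothetical non-partitioning tag:

```python
# Sketch of the tag invariant enforced during upload.
from util import PARTITIONING_CATS

def count_partition_tags(tags):
    # Number of tags that are also partitioning categories.
    return sum(tag in PARTITIONING_CATS for tag in tags)

assert count_partition_tags(["legal", "federal"]) == 1            # valid: exactly one
assert count_partition_tags(["legal", "case-description"]) == 2   # validate_tags would raise
```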