hicric/upload.py
import json
from datasets import Dataset, DatasetDict
from util import PARTITIONING_CATS
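# NOTE: PARTITIONING_CATS is imported from util.py. It is assumed to be a list of
# top-level category tags ("case-description" is one such category used below);
# validate_tags() enforces that each metadata record carries exactly one of them.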
def construct_hf_dataset(metadata_file: str = "processed_sources.jsonl"):
    """Construct a HF DatasetDict from the HICRIC processed data dir, and push it to the hub."""

    def data_generator(cat: str):
        def validate_tags(tags, partitioning_cats=PARTITIONING_CATS):
            # Find the intersection of the two lists
            matches = [tag for tag in tags if tag in partitioning_cats]
            # Raise an exception if there are none, or two or more, matches
            if len(matches) == 0 or len(matches) >= 2:
                raise ValueError(
                    f"The list of tags must contain exactly one key from the partitioning categories: {partitioning_cats}."
                )
            return True  # The tags are valid
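        # Each metadata line is a JSON object; a hypothetical record (field names
        # taken from the reads below, values are placeholders only) might look like:
        #   {"local_processed_path": "...", "tags": ["case-description"],
        #    "date_accessed": "...", "url": "...", "md5": "..."}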
        # Open metadata file
        with open(metadata_file, "r") as metadata_f:
            for idx, line in enumerate(metadata_f):
                obj = json.loads(line)
                local_processed_path = obj["local_processed_path"]
                file_tags = obj["tags"]
                date_accessed = obj["date_accessed"]
                url = obj["url"]
                raw_md5 = obj["md5"]

                # Only proceed for the relevant partition category
                _valid = validate_tags(file_tags)
                if cat not in file_tags:
                    continue
                # Read the JSONL file pointed to by `local_processed_path`
                with open(local_processed_path, "r") as data_file:
                    for _idx, data_line in enumerate(data_file):
                        data_obj = json.loads(data_line, strict=False)

                        # Get line-specific data
                        text = data_obj.get("text", "")
                        line_tags = data_obj.get("tags", [])
                        if len(text) == 0:
                            continue
                        if len(line_tags) > 0:
                            tags = file_tags + line_tags
                        else:
                            tags = file_tags

                        rec = {
                            "text": text,
                            "tags": tags,
                            "date_accessed": date_accessed,
                            "source_url": url,
                            "source_md5": raw_md5,
                            "relative_path": local_processed_path,
                        }

                        # Add some partition-specific keys
                        if cat == "case-description":
                            rec["decision"] = data_obj.get("decision", "unknown")
                            rec["appeal_type"] = data_obj.get("appeal_type", "unknown")

                        yield rec
    # Create a DatasetDict to store the sub-datasets, one per partitioning category
    dataset_dict = DatasetDict()
    for cat in PARTITIONING_CATS:
        sub_dataset = Dataset.from_generator(
            generator=data_generator, gen_kwargs={"cat": cat}
        )
        dataset_dict[cat] = sub_dataset

    # Push each sub-dataset to the hub as a separate configuration of the dataset
    for k, v in dataset_dict.items():
        v.push_to_hub("persius/hicric", k, private=True)

    # dataset_dict.save_to_disk("./arrow_data")
    return None
if __name__ == "__main__":
    construct_hf_dataset()
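
# A minimal sketch of reading one configuration back (not part of the upload script),
# assuming the caller is authenticated and has access to the private hub repo:
#
#   from datasets import load_dataset
#   ds = load_dataset("persius/hicric", "case-description", split="train")
#   print(ds[0]["tags"], ds[0]["decision"])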