Script to fast download data using multiple workers

#10
by riturajj - opened

The following code can be used to download the top 24 languages present in the dataset (or more, if you extend the list). It supports resuming interrupted runs and saves the raw data in jsonl format. Before downloading, you may have to fetch the HF dataset that contains the blob_ids needed to pull the actual file contents from AWS; see the sketch below. HF may block access when a very large number of read calls are made against the dataset in online mode, so I used a local copy of the dataset instead. I suggest about 120-130 for num_workers on a system with 128 GB of memory.
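If you need that local metadata copy first, here is a minimal sketch using huggingface_hub; the repo id (bigcode/the-stack-v2), the data/<Language>/ parquet layout, and the local paths are assumptions, so adapt them to the dataset variant you are actually mirroring.

```python
# Sketch only: repo_id, the data/<Language>/ layout, and the paths are assumptions.
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="bigcode/the-stack-v2",                   # assumed dataset repo holding the blob_id metadata
    repo_type="dataset",
    local_dir="/data/the-stack-v2-ids",               # hypothetical local target
    allow_patterns=["data/Python/*", "data/Java/*"],  # restrict to the languages you need
)
# --hf_data_dir below would then point at /data/the-stack-v2-ids/data,
# which contains one subfolder per language for load_dataset() to read.
```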

```python
import argparse
import json
import os
import boto3
import multiprocessing
from smart_open import open
from datasets import load_dataset
from botocore import UNSIGNED
from botocore.client import Config
from tqdm import tqdm

to_download = ["C", "Kotlin", "Rust", "C-Sharp", "Lua", "SQL", "C++", "PHP", "Shell", "Go", "Python", "Swift", "Java", "R", "TypeScript", "JavaScript", "Ruby", "AsciiDoc", "RDoc", "Text", "HTML", "RMarkdown", "reStructuredText", "Markdown"]

done_blobs = {}
def collect_downloaded_blob_ids(lang_subdir):
    global done_blobs
    done_blobs = {}
    if not os.path.exists(lang_subdir):
        return
    for filename in os.listdir(lang_subdir):
        try:
            if filename.startswith('done_') and filename.endswith('.json'):
                filepath = os.path.join(lang_subdir, filename)
                with open(filepath, 'r') as file:
                    data = json.load(file)
                    for blob_id in data['blob_id']:
                        done_blobs[blob_id] = 1
        except Exception:
            # skip unreadable or partially written done_*.json files
            continue
    print(f"Already downloaded blobs: {len(done_blobs)}")

def download_chunk(data_repo, download_folder, worker_id, num_workers):
    global done_blobs
    cur_done_blobs = []     #helpful in resuming the interrupted runs
    s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED))
    print(f"Starting {data_repo} download for {worker_id}")
    ds = load_dataset(data_repo, split="train", streaming=True)
    print(f"Filtering ds for {worker_id}")
    ds = ds.filter(lambda row, idx: idx % num_workers == worker_id, with_indices=True)

    data_jsonl = []
    for i, row in tqdm(enumerate(ds), desc=f"Worker {worker_id}"):
        blob_id, src_encoding, language = row["blob_id"], row["src_encoding"], row['language']
        if blob_id in done_blobs:
            #print(f"{blob_id} already downloaded")
            continue
        s3_url = f"s3://softwareheritage/content/{blob_id}"
        try:
            with open(s3_url, "rb", compression=".gz", transport_params={"client": s3}) as fin:
                content = fin.read().decode(src_encoding)
        except Exception as e:
            print(f"Exception occured: {e}")
            continue
        data_jsonl.append({"text": content})
        cur_done_blobs.append(blob_id)

        # store 8K records in each jsonl file
        if len(data_jsonl) == 8000:
            directory = os.path.join(download_folder, language)
            if not os.path.exists(directory):
                os.makedirs(directory)
            data_path = os.path.join(directory, blob_id + ".jsonl") #save with current blob_id for uniqueness
            write_dicts_to_jsonl(data_jsonl, data_path)
            data_jsonl = []

            #write blob_ids for blobs done being downloaded and written
            with open(data_path.replace(blob_id + ".jsonl", f"done_{blob_id}.json"), "w") as f:
                json.dump({"blob_id": cur_done_blobs}, f)
            cur_done_blobs = []

    # Save any remaining data
    if data_jsonl:
        directory = os.path.join(download_folder, language)
        if not os.path.exists(directory):
            os.makedirs(directory)
        data_path = os.path.join(directory, f"remaining_{worker_id}.jsonl")
        write_dicts_to_jsonl(data_jsonl, data_path)

        #write blob_ids for blobs done being downloaded and written
        with open(os.path.join(directory, f"done_{blob_id}.json"), "w") as f:
            json.dump({"blob_id": cur_done_blobs}, f)

def download_the_stack_v2(data_repo, download_folder, num_workers):
    for lang in to_download:
        lang_out_subdir = os.path.join(download_folder, lang)
        lang_subdir = os.path.join(data_repo, lang)
        collect_downloaded_blob_ids(lang_out_subdir)
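        # note: workers read the module-level done_blobs; this relies on fork-based
        # multiprocessing (the Linux default) copying it into each worker process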
        with multiprocessing.Pool(processes=num_workers) as pool:
            pool.starmap(download_chunk, [(lang_subdir, download_folder, i, num_workers) for i in range(num_workers)])

def write_dicts_to_jsonl(dict_list, jsonl_path):
    print("Writing ", jsonl_path)
    with open(jsonl_path, "w") as f:
        for d in dict_list:
            json.dump(d, f)
            f.write("\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="the-stack-v2 download entry.")
    parser.add_argument("--hf_data_dir", type=str)
    parser.add_argument("--download_folder", type=str)
    parser.add_argument("--num_workers", type=int)
    args = parser.parse_args()
    download_the_stack_v2(args.hf_data_dir, args.download_folder, args.num_workers)
```
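Example invocation (assuming the script is saved as, say, download_stack_v2.py, and that --hf_data_dir points at the directory containing the per-language subfolders): `python download_stack_v2.py --hf_data_dir /data/the-stack-v2-ids/data --download_folder /data/stack_v2_raw --num_workers 128`. Re-running the same command resumes from the done_*.json checkpoint files.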