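"""Remove samples with empty sentences from the STS22 dataset.

Loads the task via mteb, drops rows where "sentence1" or "sentence2" is empty,
logs every removal to remove_empty.log, and rewrites each split as a gzipped
JSONL file two directories above this script (assumed to be the dataset root).
"""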
from pathlib import Path

import mteb

log_file_path = Path("remove_empty.log")

# remove the log file if it already exists
if log_file_path.exists():
    log_file_path.unlink()

tasks = mteb.get_tasks(tasks=["STS22"])
task = tasks[0]
task.load_data()

def filter_sample(x):
    # keep a sample only if both sentences are non-empty; log anything dropped
    if len(x["sentence1"]) > 0 and len(x["sentence2"]) > 0:
        return True
    log = f"Filtered: {x['sentence1']} -- {x['sentence2']}"
    with open(log_file_path, "a") as f:
        f.write(log + "\n")
    print(log)
    return False

for hf_subset in task.dataset:
    _ds = task.dataset[hf_subset]
    for split in _ds:
        ds = _ds[split]
        # filter empty sentences
        n_samples = len(ds)
        ds = ds.filter(filter_sample)
        n_left = len(ds)
        log = f"Filtered {n_samples - n_left} samples from {n_samples} in {hf_subset} - {split}"
        with open(log_file_path, "a") as f:
            f.write(log + "\n")
        print(log)
        _ds[split] = ds
    task.dataset[hf_subset] = _ds

# write the filtered splits back as gzipped JSONL files, two directories above this script
save_path = Path(__file__).parent.parent

for hf_subset in task.dataset:
    _ds = task.dataset[hf_subset]
    for split in _ds:
        ds = _ds[split]
        ds.to_json(save_path / split / (hf_subset + ".jsonl.gz"), compression="gzip")
        log = f"Saved {hf_subset} - {split} to {save_path / split / (hf_subset + '.jsonl.gz')}"
        with open(log_file_path, "a") as f:
            f.write(log + "\n")
        print(log)