# tlunified-ner.py
import os
from typing import List
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """An annotated version of the TLUnified dataset from Cruz and Cheng (2021),
with named-entity labels for persons (PER), organizations (ORG), and locations (LOC)."""
_LICENSE = """GNU GPL v3.0"""
_CLASSES = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
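# The labels follow the IOB2 scheme: "B-" marks the first token of an entity,
# "I-" marks a continuation, and "O" marks a token outside any entity. For
# example (an illustrative sentence, not taken from the corpus):
#   ["Si", "Juan", "dela", "Cruz"] -> ["O", "B-PER", "I-PER", "I-PER"]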
_VERSION = "1.0"
class TLUnifiedNERConfig(datasets.BuilderConfig):
def __init__(self, **kwargs):
        super(TLUnifiedNERConfig, self).__init__(**kwargs)
class TLUnifiedNER(datasets.GeneratorBasedBuilder):
"""Contains an annotated version of the TLUnified dataset from Cruz and Cheng (2021)."""
VERSION = datasets.Version(_VERSION)
    def _info(self) -> "datasets.DatasetInfo":
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.feature.ClassLabel(names=_CLASSES)
),
}
),
supervised_keys=None,
)
def _split_generators(
self, dl_manager: "datasets.builder.DownloadManager"
) -> List["datasets.SplitGenerator"]:
"""Return a list of SplitGenerators that organizes the splits."""
# The file extracts into {train,dev,test}.spacy files. The _generate_examples function
# below will define how these files are parsed.
corpus_dir = "corpus/iob"
data_files = {
"train": os.path.join(corpus_dir, "train.iob"),
"dev": os.path.join(corpus_dir, "dev.iob"),
"test": os.path.join(corpus_dir, "test.iob"),
}
return [
# fmt: off
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
# fmt: on
]
def _generate_examples(self, filepath: str):
"""Defines how examples are parsed from the IOB file."""
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
guid = 0
tokens = []
ner_tags = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if tokens:
yield guid, {
"id": str(guid),
"tokens": tokens,
"ner_tags": ner_tags,
}
guid += 1
tokens = []
ner_tags = []
else:
                    # TLUnified-NER IOB files are tab-separated: one "token\tner_tag" pair per line.
token, ner_tag = line.split("\t")
tokens.append(token)
ner_tags.append(ner_tag.rstrip())
            # Yield the last example if the file does not end with a blank line.
if tokens:
yield guid, {
"id": str(guid),
"tokens": tokens,
"ner_tags": ner_tags,
}
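

if __name__ == "__main__":
    # Minimal usage sketch (an assumption, not part of the original loader):
    # load the dataset through this script and inspect the first training
    # example. This requires corpus/iob/{train,dev,test}.iob to exist relative
    # to the working directory.
    dataset = datasets.load_dataset(__file__)
    print(dataset["train"][0])
    # Expected shape: {"id": "0", "tokens": [...], "ner_tags": [...]}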