# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
# CoNLL-2012 Shared Task: Modeling Multilingual Unrestricted Coreference in OntoNotes

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{pradhan2012conll,
    title={CoNLL-2012 shared task: Modeling multilingual unrestricted coreference in OntoNotes},
    author={Pradhan, Sameer and Moschitti, Alessandro and Xue, Nianwen and Uryupina, Olga and Zhang, Yuchen},
    booktitle={Joint Conference on EMNLP and CoNLL - Shared Task},
    pages={1--40},
    year={2012}
}
"""

_DESCRIPTION = """\
The CoNLL-2012 shared task involved predicting coreference in English, Chinese, and Arabic, using the final
version, v5.0, of the OntoNotes corpus. It was a follow-on to the English-only task organized in 2011.
Until the creation of the OntoNotes corpus, resources in this sub-field of language processing were limited
to noun phrase coreference, often on a restricted set of entities, such as the ACE entities. OntoNotes
provides a large-scale corpus of general anaphoric coreference not restricted to noun phrases or to a
specified set of entity types, and covers multiple languages. OntoNotes also provides additional layers of
integrated annotation, capturing additional shallow semantic structure. The shared task paper describes the
OntoNotes annotation (coreference and other layers), then describes the parameters of the shared task
including the format, pre-processing information, and evaluation criteria, and presents and discusses the
results achieved by the participating systems. The task of coreference has had a complex evaluation
history. Potentially many evaluation conditions have, in the past, made it difficult to judge the
improvement of new algorithms over previously reported results. Having a standard test set and standard
evaluation parameters, all based on a resource that provides multiple integrated annotation layers
(syntactic parses, semantic roles, word senses, named entities and coreference) in multiple languages,
could support joint modeling and help ground and energize ongoing research in the task of entity and event
coreference.
For more details see https://aclanthology.org/W12-4501.pdf
"""
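
# Expected data file layout (an assumption inferred from `_generate_examples`
# below; the original script does not document it): one token per line, with
# tab-separated columns where column 0 is the token, column 1 its POS tag, and
# column 3 its NER tag (column 2 is read but unused here, shown as "..."), and
# sentences separated by blank lines or "-DOCSTART-" markers. A hypothetical
# fragment, with <TAB> standing in for a tab character:
#
#     John<TAB>NNP<TAB>...<TAB>B-PERSON
#     lives<TAB>VBZ<TAB>...<TAB>O
#     in<TAB>IN<TAB>...<TAB>O
#     Chicago<TAB>NNP<TAB>...<TAB>B-GPE
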
""" super(Conll2012Config, self).__init__(**kwargs) class Conll2012(datasets.GeneratorBasedBuilder): """Conll2012 dataset.""" BUILDER_CONFIGS = [ Conll2012Config(name="conll2012", version=datasets.Version("1.0.0"), description="Conll2012 dataset"), ] def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "id": datasets.Value("string"), "tokens": datasets.Sequence(datasets.Value("string")), "pos_tags": datasets.Sequence( datasets.features.ClassLabel( names=[ '$', "''", '*', ',', '-LRB-', '-RRB-', '.', ':', 'ADD', 'AFX', 'CC', 'CD', 'DT', 'EX', 'FW', 'HYPH', 'IN', 'JJ', 'JJR', 'JJS', 'LS', 'MD', 'NFP', 'NN', 'NNP', 'NNPS', 'NNS', 'PDT', 'POS', 'PRP', 'PRP$', 'RB', 'RBR', 'RBS', 'RP', 'SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'VERB', 'WDT', 'WP', 'WP$', 'WRB', 'XX', '``' ] ) ), "tags": datasets.Sequence( datasets.features.ClassLabel( names=[ 'O', 'B-CARDINAL', 'B-DATE', 'B-EVENT', 'B-FAC', 'B-GPE', 'B-LANGUAGE', 'B-LAW', 'B-LOC', 'B-MONEY', 'B-NORP', 'B-ORDINAL', 'B-ORG', 'B-PERCENT', 'B-PERSON', 'B-PRODUCT', 'B-QUANTITY', 'B-TIME', 'B-WORK_OF_ART', 'I-CARDINAL', 'I-DATE', 'I-EVENT', 'I-FAC', 'I-GPE', 'I-LANGUAGE', 'I-LAW', 'I-LOC', 'I-MONEY', 'I-NORP', 'I-ORDINAL', 'I-ORG', 'I-PERCENT', 'I-PERSON', 'I-PRODUCT', 'I-QUANTITY', 'I-TIME', 'I-WORK_OF_ART' ] ) ), } ), supervised_keys=None, homepage="https://catalog.ldc.upenn.edu/LDC2013T19", citation=_CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" urls_to_download = { 'train': 'train.txt', 'validation': 'validation.txt', 'test': 'test.txt', } downloaded_files = dl_manager.download_and_extract(urls_to_download) return [ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': downloaded_files['train']}), datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepath': downloaded_files['validation']}), datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': downloaded_files['test']}), ] def _generate_examples(self, filepath): logger.info("⏳ Generating examples from = %s", filepath) with open(filepath, encoding="utf-8") as f: lines = f.readlines() guid = 0 tokens = [] tags = [] pos_tags = [] for line in lines: if line.startswith("-DOCSTART-") or line.strip() == "" or line == "\n": if tokens: yield guid, { 'id': str(guid), 'tokens': tokens, 'tags': tags, 'pos_tags': pos_tags, } guid += 1 tokens = [] tags = [] pos_tags = [] else: # conll2012 tokens are tab-separated splits = line.split('\t') tokens.append(splits[0]) pos_tags.append(splits[1]) tags.append(splits[3].rstrip()) # last example yield guid, { 'id': str(guid), 'tokens': tokens, 'tags': tags, 'pos_tags': pos_tags, }