ArneBinder committed on
Commit 74b4882 · 1 parent: 982d568

add resolve_parts_of_same dataset variant


This adds a new dataset variant `resolve_parts_of_same` in which all spans connected via `parts_of_same` relations are merged: the smallest start index becomes the new start and the biggest end index becomes the new end, i.e. the maximum-coverage span is used. Note that this may create nested spans!
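For illustration, a minimal sketch of the merge rule with made-up offsets and labels (the actual logic lives in `_merge_spans_via_relation` in the diff below):

# toy sketch of the max-coverage merge; offsets and labels are hypothetical
import networkx as nx

# (start, end, label) fragments and the parts_of_same links between them
spans = [(10, 25, "own_claim"), (40, 60, "own_claim"), (70, 80, "data")]
parts_of_same = [((10, 25, "own_claim"), (40, 60, "own_claim"))]

g = nx.Graph()
g.add_nodes_from(spans)
g.add_edges_from(parts_of_same)

# each connected component becomes one span: smallest start, biggest end
merged = [
    (min(s[0] for s in comp), max(s[1] for s in comp), next(iter(comp))[2])
    for comp in nx.connected_components(g)
]
print(sorted(merged))  # [(10, 60, 'own_claim'), (70, 80, 'data')]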

Files changed (1): sciarg.py (+195, −20)
sciarg.py CHANGED
@@ -1,8 +1,13 @@
+import logging
+from typing import Sequence, Set, Tuple, Union
+
+import networkx as nx
 from pie_modules.document.processing import (
     RegexPartitioner,
     RelationArgumentSorter,
     TextSpanTrimmer,
 )
+from pytorch_ie.annotations import BinaryRelation, LabeledMultiSpan, LabeledSpan
 from pytorch_ie.core import Document
 from pytorch_ie.documents import (
     TextDocumentWithLabeledSpansAndBinaryRelations,
@@ -11,12 +16,130 @@ from pytorch_ie.documents import (
 
 from pie_datasets.builders import BratBuilder, BratConfig
 from pie_datasets.builders.brat import BratDocumentWithMergedSpans
+from pie_datasets.core.dataset import DocumentConvertersType
 from pie_datasets.document.processing import Caster, Pipeline
 
 URL = "http://data.dws.informatik.uni-mannheim.de/sci-arg/compiled_corpus.zip"
 SPLIT_PATHS = {"train": "compiled_corpus"}
 
 
+logger = logging.getLogger(__name__)
+
+
+def _merge_spans_via_relation(
+    spans: Sequence[LabeledSpan],
+    relations: Sequence[BinaryRelation],
+    link_relation_label: str,
+    create_multi_spans: bool = True,
+) -> Tuple[Union[Set[LabeledSpan], Set[LabeledMultiSpan]], Set[BinaryRelation]]:
+    # convert list of relations to a graph to easily calculate connected components to merge
+    g = nx.Graph()
+    link_relations = []
+    other_relations = []
+    for rel in relations:
+        if rel.label == link_relation_label:
+            link_relations.append(rel)
+            # never merge spans that have not the same label
+            if (
+                not (isinstance(rel.head, LabeledSpan) or isinstance(rel.tail, LabeledSpan))
+                or rel.head.label == rel.tail.label
+            ):
+                g.add_edge(rel.head, rel.tail)
+            else:
+                logger.debug(
+                    f"spans to merge do not have the same label, do not merge them: {rel.head}, {rel.tail}"
+                )
+        else:
+            other_relations.append(rel)
+
+    span_mapping = {}
+    connected_components: Set[LabeledSpan]
+    for connected_components in nx.connected_components(g):
+        # all spans in a connected component have the same label
+        label = list(span.label for span in connected_components)[0]
+        connected_components_sorted = sorted(connected_components, key=lambda span: span.start)
+        if create_multi_spans:
+            new_span = LabeledMultiSpan(
+                slices=tuple((span.start, span.end) for span in connected_components_sorted),
+                label=label,
+            )
+        else:
+            new_span = LabeledSpan(
+                start=min(span.start for span in connected_components_sorted),
+                end=max(span.end for span in connected_components_sorted),
+                label=label,
+            )
+        for span in connected_components_sorted:
+            span_mapping[span] = new_span
+    for span in spans:
+        if span not in span_mapping:
+            if create_multi_spans:
+                span_mapping[span] = LabeledMultiSpan(
+                    slices=((span.start, span.end),), label=span.label, score=span.score
+                )
+            else:
+                span_mapping[span] = LabeledSpan(
+                    start=span.start, end=span.end, label=span.label, score=span.score
+                )
+
+    new_spans = set(span_mapping.values())
+    new_relations = {
+        BinaryRelation(
+            head=span_mapping[rel.head],
+            tail=span_mapping[rel.tail],
+            label=rel.label,
+            score=rel.score,
+        )
+        for rel in other_relations
+    }
+
+    return new_spans, new_relations
+
+
+class SpansWithRelationsMerger:
+    """Merge spans that are connected via a specific relation type.
+
+    Args:
+        relation_layer: The name of the layer that contains the relations.
+        link_relation_label: The label of the relations that connect the spans.
+        create_multi_spans: If True, the merged spans are LabeledMultiSpans, otherwise LabeledSpans.
+    """
+
+    def __init__(
+        self,
+        relation_layer: str,
+        link_relation_label: str,
+        result_document_type: type[Document],
+        result_field_mapping: dict[str, str],
+        create_multi_spans: bool = True,
+    ):
+        self.relation_layer = relation_layer
+        self.link_relation_label = link_relation_label
+        self.create_multi_spans = create_multi_spans
+        self.result_document_type = result_document_type
+        self.result_field_mapping = result_field_mapping
+
+    def __call__(self, document: Document) -> Document:
+        relations: Sequence[BinaryRelation] = document[self.relation_layer]
+        spans: Sequence[LabeledSpan] = document[self.relation_layer].target_layer
+
+        new_spans, new_relations = _merge_spans_via_relation(
+            spans=spans,
+            relations=relations,
+            link_relation_label=self.link_relation_label,
+            create_multi_spans=self.create_multi_spans,
+        )
+
+        result = document.copy(with_annotations=False).as_type(new_type=self.result_document_type)
+        span_layer_name = document[self.relation_layer].target_name
+        result_span_layer_name = self.result_field_mapping[span_layer_name]
+        result_relation_layer_name = self.result_field_mapping[self.relation_layer]
+        result[result_span_layer_name].extend(new_spans)
+        result[result_relation_layer_name].extend(new_relations)
+
+        return result
+
+
 def get_common_pipeline_steps(target_document_type: type[Document]) -> dict:
     return dict(
         cast=Caster(
@@ -31,6 +154,36 @@ def get_common_pipeline_steps(target_document_type: type[Document]) -> dict:
     )
 
 
+def get_common_pipeline_steps_with_merge_multi_spans(
+    target_document_type: type[Document],
+) -> dict:
+    return dict(
+        merge_spans=SpansWithRelationsMerger(
+            relation_layer="relations",
+            link_relation_label="parts_of_same",
+            create_multi_spans=False,
+            result_document_type=target_document_type,
+            result_field_mapping={"spans": "labeled_spans", "relations": "binary_relations"},
+        ),
+        trim_adus=TextSpanTrimmer(layer="labeled_spans"),
+        sort_symmetric_relation_arguments=RelationArgumentSorter(
+            relation_layer="binary_relations",
+            label_whitelist=["parts_of_same", "semantically_same"],
+        ),
+    )
+
+
+class SciArgConfig(BratConfig):
+    def __init__(
+        self,
+        name: str,
+        resolve_parts_of_same: bool = False,
+        **kwargs,
+    ):
+        super().__init__(name=name, merge_fragmented_spans=True, **kwargs)
+        self.resolve_parts_of_same = resolve_parts_of_same
+
+
 class SciArg(BratBuilder):
     BASE_DATASET_PATH = "DFKI-SLT/brat"
     BASE_DATASET_REVISION = "844de61e8a00dc6a93fc29dc185f6e617131fbf1"
@@ -39,33 +192,55 @@ class SciArg(BratBuilder):
     # The span fragments in SciArg come just from the new line splits, so we can merge them.
     # Actual span fragments are annotated via "parts_of_same" relations.
     BUILDER_CONFIGS = [
-        BratConfig(name=BratBuilder.DEFAULT_CONFIG_NAME, merge_fragmented_spans=True),
+        SciArgConfig(name=BratBuilder.DEFAULT_CONFIG_NAME),
+        SciArgConfig(name="resolve_parts_of_same", resolve_parts_of_same=True),
     ]
     DOCUMENT_TYPES = {
         BratBuilder.DEFAULT_CONFIG_NAME: BratDocumentWithMergedSpans,
+        "resolve_parts_of_same": BratDocumentWithMergedSpans,
     }
 
     # we need to add None to the list of dataset variants to support the default dataset variant
     BASE_BUILDER_KWARGS_DICT = {
         dataset_variant: {"url": URL, "split_paths": SPLIT_PATHS}
-        for dataset_variant in ["default", "merge_fragmented_spans", None]
+        for dataset_variant in ["default", "resolve_parts_of_same", None]
    }
 
-    DOCUMENT_CONVERTERS = {
-        TextDocumentWithLabeledSpansAndBinaryRelations: Pipeline(
-            **get_common_pipeline_steps(TextDocumentWithLabeledSpansAndBinaryRelations)
-        ),
-        TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions: Pipeline(
-            **get_common_pipeline_steps(
-                TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions
-            ),
-            add_partitions=RegexPartitioner(
-                partition_layer_name="labeled_partitions",
-                pattern="<([^>/]+)>.*</\\1>",
-                label_group_id=1,
-                label_whitelist=["Title", "Abstract", "H1"],
-                skip_initial_partition=True,
-                strip_whitespace=True,
-            ),
-        ),
-    }
+    @property
+    def document_converters(self) -> DocumentConvertersType:
+        regex_partitioner = RegexPartitioner(
+            partition_layer_name="labeled_partitions",
+            pattern="<([^>/]+)>.*</\\1>",
+            label_group_id=1,
+            label_whitelist=["Title", "Abstract", "H1"],
+            skip_initial_partition=True,
+            strip_whitespace=True,
+        )
+        if not self.config.resolve_parts_of_same:
+            return {
+                TextDocumentWithLabeledSpansAndBinaryRelations: Pipeline(
+                    **get_common_pipeline_steps(TextDocumentWithLabeledSpansAndBinaryRelations)
+                ),
+                TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions: Pipeline(
+                    **get_common_pipeline_steps(
+                        TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions
+                    ),
+                    add_partitions=regex_partitioner,
+                ),
+            }
+        else:
+            return {
+                TextDocumentWithLabeledSpansAndBinaryRelations: Pipeline(
+                    **get_common_pipeline_steps_with_merge_multi_spans(
+                        TextDocumentWithLabeledSpansAndBinaryRelations
+                    )
+                ),
+                TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions: Pipeline(
+                    **get_common_pipeline_steps_with_merge_multi_spans(
+                        TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions
+                    ),
+                    add_partitions=regex_partitioner,
+                ),
+                # TODO: add TextDocumentWithLabeledMultiSpansAndBinaryRelations
+                # TODO: add TextDocumentWithLabeledMultiSpansBinaryRelationsAndLabeledPartitions
+            }