parquet-converter committed on
Commit 72bf295
1 Parent(s): eb58348

Update parquet files

README.md DELETED
@@ -1,618 +0,0 @@
1
- ---
2
- annotations_creators:
3
- - expert-generated
4
- language:
5
- - en
6
- language_creators:
7
- - found
8
- license: []
9
- multilinguality:
10
- - monolingual
11
- pretty_name: CrossRE is a cross-domain dataset for relation extraction
12
- size_categories:
13
- - 10K<n<100K
14
- source_datasets:
15
- - extended|cross_ner
16
- tags:
17
- - cross domain
18
- - ai
19
- - news
20
- - music
21
- - literature
22
- - politics
23
- - science
24
- task_categories:
25
- - text-classification
26
- task_ids:
27
- - multi-class-classification
28
- dataset_info:
29
- - config_name: ai
30
- features:
31
- - name: doc_key
32
- dtype: string
33
- - name: sentence
34
- sequence: string
35
- - name: ner
36
- sequence:
37
- - name: id-start
38
- dtype: int32
39
- - name: id-end
40
- dtype: int32
41
- - name: entity-type
42
- dtype: string
43
- - name: relations
44
- sequence:
45
- - name: id_1-start
46
- dtype: int32
47
- - name: id_1-end
48
- dtype: int32
49
- - name: id_2-start
50
- dtype: int32
51
- - name: id_2-end
52
- dtype: int32
53
- - name: relation-type
54
- dtype: string
55
- - name: Exp
56
- dtype: string
57
- - name: Un
58
- dtype: bool
59
- - name: SA
60
- dtype: bool
61
- splits:
62
- - name: train
63
- num_bytes: 62411
64
- num_examples: 100
65
- - name: validation
66
- num_bytes: 183717
67
- num_examples: 350
68
- - name: test
69
- num_bytes: 217353
70
- num_examples: 431
71
- download_size: 508107
72
- dataset_size: 463481
73
- - config_name: literature
74
- features:
75
- - name: doc_key
76
- dtype: string
77
- - name: sentence
78
- sequence: string
79
- - name: ner
80
- sequence:
81
- - name: id-start
82
- dtype: int32
83
- - name: id-end
84
- dtype: int32
85
- - name: entity-type
86
- dtype: string
87
- - name: relations
88
- sequence:
89
- - name: id_1-start
90
- dtype: int32
91
- - name: id_1-end
92
- dtype: int32
93
- - name: id_2-start
94
- dtype: int32
95
- - name: id_2-end
96
- dtype: int32
97
- - name: relation-type
98
- dtype: string
99
- - name: Exp
100
- dtype: string
101
- - name: Un
102
- dtype: bool
103
- - name: SA
104
- dtype: bool
105
- splits:
106
- - name: train
107
- num_bytes: 62699
108
- num_examples: 100
109
- - name: validation
110
- num_bytes: 246214
111
- num_examples: 400
112
- - name: test
113
- num_bytes: 264450
114
- num_examples: 416
115
- download_size: 635130
116
- dataset_size: 573363
117
- - config_name: music
118
- features:
119
- - name: doc_key
120
- dtype: string
121
- - name: sentence
122
- sequence: string
123
- - name: ner
124
- sequence:
125
- - name: id-start
126
- dtype: int32
127
- - name: id-end
128
- dtype: int32
129
- - name: entity-type
130
- dtype: string
131
- - name: relations
132
- sequence:
133
- - name: id_1-start
134
- dtype: int32
135
- - name: id_1-end
136
- dtype: int32
137
- - name: id_2-start
138
- dtype: int32
139
- - name: id_2-end
140
- dtype: int32
141
- - name: relation-type
142
- dtype: string
143
- - name: Exp
144
- dtype: string
145
- - name: Un
146
- dtype: bool
147
- - name: SA
148
- dtype: bool
149
- splits:
150
- - name: train
151
- num_bytes: 69846
152
- num_examples: 100
153
- - name: validation
154
- num_bytes: 261497
155
- num_examples: 350
156
- - name: test
157
- num_bytes: 312165
158
- num_examples: 399
159
- download_size: 726956
160
- dataset_size: 643508
161
- - config_name: news
162
- features:
163
- - name: doc_key
164
- dtype: string
165
- - name: sentence
166
- sequence: string
167
- - name: ner
168
- sequence:
169
- - name: id-start
170
- dtype: int32
171
- - name: id-end
172
- dtype: int32
173
- - name: entity-type
174
- dtype: string
175
- - name: relations
176
- sequence:
177
- - name: id_1-start
178
- dtype: int32
179
- - name: id_1-end
180
- dtype: int32
181
- - name: id_2-start
182
- dtype: int32
183
- - name: id_2-end
184
- dtype: int32
185
- - name: relation-type
186
- dtype: string
187
- - name: Exp
188
- dtype: string
189
- - name: Un
190
- dtype: bool
191
- - name: SA
192
- dtype: bool
193
- splits:
194
- - name: train
195
- num_bytes: 49102
196
- num_examples: 164
197
- - name: validation
198
- num_bytes: 77952
199
- num_examples: 350
200
- - name: test
201
- num_bytes: 96301
202
- num_examples: 400
203
- download_size: 239763
204
- dataset_size: 223355
205
- - config_name: politics
206
- features:
207
- - name: doc_key
208
- dtype: string
209
- - name: sentence
210
- sequence: string
211
- - name: ner
212
- sequence:
213
- - name: id-start
214
- dtype: int32
215
- - name: id-end
216
- dtype: int32
217
- - name: entity-type
218
- dtype: string
219
- - name: relations
220
- sequence:
221
- - name: id_1-start
222
- dtype: int32
223
- - name: id_1-end
224
- dtype: int32
225
- - name: id_2-start
226
- dtype: int32
227
- - name: id_2-end
228
- dtype: int32
229
- - name: relation-type
230
- dtype: string
231
- - name: Exp
232
- dtype: string
233
- - name: Un
234
- dtype: bool
235
- - name: SA
236
- dtype: bool
237
- splits:
238
- - name: train
239
- num_bytes: 76004
240
- num_examples: 101
241
- - name: validation
242
- num_bytes: 277633
243
- num_examples: 350
244
- - name: test
245
- num_bytes: 295294
246
- num_examples: 400
247
- download_size: 726427
248
- dataset_size: 648931
249
- - config_name: science
250
- features:
251
- - name: doc_key
252
- dtype: string
253
- - name: sentence
254
- sequence: string
255
- - name: ner
256
- sequence:
257
- - name: id-start
258
- dtype: int32
259
- - name: id-end
260
- dtype: int32
261
- - name: entity-type
262
- dtype: string
263
- - name: relations
264
- sequence:
265
- - name: id_1-start
266
- dtype: int32
267
- - name: id_1-end
268
- dtype: int32
269
- - name: id_2-start
270
- dtype: int32
271
- - name: id_2-end
272
- dtype: int32
273
- - name: relation-type
274
- dtype: string
275
- - name: Exp
276
- dtype: string
277
- - name: Un
278
- dtype: bool
279
- - name: SA
280
- dtype: bool
281
- splits:
282
- - name: train
283
- num_bytes: 63876
284
- num_examples: 103
285
- - name: validation
286
- num_bytes: 224402
287
- num_examples: 351
288
- - name: test
289
- num_bytes: 249075
290
- num_examples: 400
291
- download_size: 594058
292
- dataset_size: 537353
293
- ---
294
- # Dataset Card for CrossRE
295
- ## Table of Contents
296
- - [Table of Contents](#table-of-contents)
297
- - [Dataset Description](#dataset-description)
298
- - [Dataset Summary](#dataset-summary)
299
- - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
300
- - [Languages](#languages)
301
- - [Dataset Structure](#dataset-structure)
302
- - [Data Instances](#data-instances)
303
- - [Data Fields](#data-fields)
304
- - [Data Splits](#data-splits)
305
- - [Dataset Creation](#dataset-creation)
306
- - [Curation Rationale](#curation-rationale)
307
- - [Source Data](#source-data)
308
- - [Annotations](#annotations)
309
- - [Personal and Sensitive Information](#personal-and-sensitive-information)
310
- - [Considerations for Using the Data](#considerations-for-using-the-data)
311
- - [Social Impact of Dataset](#social-impact-of-dataset)
312
- - [Discussion of Biases](#discussion-of-biases)
313
- - [Other Known Limitations](#other-known-limitations)
314
- - [Additional Information](#additional-information)
315
- - [Dataset Curators](#dataset-curators)
316
- - [Licensing Information](#licensing-information)
317
- - [Citation Information](#citation-information)
318
- - [Contributions](#contributions)
319
-
320
- ## Dataset Description
321
- - **Repository:** [CrossRE](https://github.com/mainlp/CrossRE)
322
- - **Paper:** [CrossRE: A Cross-Domain Dataset for Relation Extraction](https://arxiv.org/abs/2210.09345)
323
-
324
- ### Dataset Summary
325
- CrossRE is a new, freely-available cross-domain benchmark for RE, which comprises six distinct text domains and includes
326
- multilabel annotations. It includes the following domains: news, politics, natural science, music, literature and
327
- artificial intelligence. The semantic relations are annotated on top of CrossNER (Liu et al., 2021), a cross-domain
328
- dataset for NER which contains domain-specific entity types.
329
- The dataset contains 17 relation labels for the six domains: PART-OF, PHYSICAL, USAGE, ROLE, SOCIAL,
330
- GENERAL-AFFILIATION, COMPARE, TEMPORAL, ARTIFACT, ORIGIN, TOPIC, OPPOSITE, CAUSE-EFFECT, WIN-DEFEAT, TYPEOF, NAMED, and
331
- RELATED-TO.
332
-
333
- For details, see the paper: https://arxiv.org/abs/2210.09345
334
-
335
- ### Supported Tasks and Leaderboards
336
-
337
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
338
-
339
- ### Languages
340
-
341
- The language data in CrossRE is in English (BCP-47 en).
342
-
343
- ## Dataset Structure
344
-
345
- ### Data Instances
346
-
347
- #### news
348
- - **Size of downloaded dataset files:** 0.24 MB
349
- - **Size of the generated dataset:** 0.22 MB
350
-
351
- An example of 'train' looks as follows:
352
- ```python
353
- {
354
- "doc_key": "news-train-1",
355
- "sentence": ["EU", "rejects", "German", "call", "to", "boycott", "British", "lamb", "."],
356
- "ner": [
357
- {"id-start": 0, "id-end": 0, "entity-type": "organisation"},
358
- {"id-start": 2, "id-end": 3, "entity-type": "misc"},
359
- {"id-start": 6, "id-end": 7, "entity-type": "misc"}
360
- ],
361
- "relations": [
362
- {"id_1-start": 0, "id_1-end": 0, "id_2-start": 2, "id_2-end": 3, "relation-type": "opposite", "Exp": "rejects", "Un": False, "SA": False},
363
- {"id_1-start": 2, "id_1-end": 3, "id_2-start": 6, "id_2-end": 7, "relation-type": "opposite", "Exp": "calls_for_boycot_of", "Un": False, "SA": False},
364
- {"id_1-start": 2, "id_1-end": 3, "id_2-start": 6, "id_2-end": 7, "relation-type": "topic", "Exp": "", "Un": False, "SA": False}
365
- ]
366
- }
367
- ```
368
-
369
- #### politics
370
- - **Size of downloaded dataset files:** 0.73 MB
371
- - **Size of the generated dataset:** 0.65 MB
372
-
373
- An example of 'train' looks as follows:
374
- ```python
375
- {
376
- "doc_key": "politics-train-1",
377
- "sentence": ["Parties", "with", "mainly", "Eurosceptic", "views", "are", "the", "ruling", "United", "Russia", ",", "and", "opposition", "parties", "the", "Communist", "Party", "of", "the", "Russian", "Federation", "and", "Liberal", "Democratic", "Party", "of", "Russia", "."],
378
- "ner": [
379
- {"id-start": 8, "id-end": 9, "entity-type": "politicalparty"},
380
- {"id-start": 15, "id-end": 20, "entity-type": "politicalparty"},
381
- {"id-start": 22, "id-end": 26, "entity-type": "politicalparty"}
382
- ],
383
- "relations": [
384
- {"id_1-start": 8, "id_1-end": 9, "id_2-start": 15, "id_2-end": 20, "relation-type": "opposite", "Exp": "in_opposition", "Un": False, "SA": False},
385
- {"id_1-start": 8, "id_1-end": 9, "id_2-start": 22, "id_2-end": 26, "relation-type": "opposite", "Exp": "in_opposition", "Un": False, "SA": False}
386
- ]
387
- }
388
- ```
389
-
390
- #### science
391
- - **Size of downloaded dataset files:** 0.59 MB
392
- - **Size of the generated dataset:** 0.54 MB
393
-
394
- An example of 'train' looks as follows:
395
- ```python
396
- {
397
- "doc_key": "science-train-1",
398
- "sentence": ["They", "may", "also", "use", "Adenosine", "triphosphate", ",", "Nitric", "oxide", ",", "and", "ROS", "for", "signaling", "in", "the", "same", "ways", "that", "animals", "do", "."],
399
- "ner": [
400
- {"id-start": 4, "id-end": 5, "entity-type": "chemicalcompound"},
401
- {"id-start": 7, "id-end": 8, "entity-type": "chemicalcompound"},
402
- {"id-start": 11, "id-end": 11, "entity-type": "chemicalcompound"}
403
- ],
404
- "relations": []
405
- }
406
- ```
407
-
408
- #### music
409
- - **Size of downloaded dataset files:** 0.73 MB
410
- - **Size of the generated dataset:** 0.64 MB
411
-
412
- An example of 'train' looks as follows:
413
- ```python
414
- {
415
- "doc_key": "music-train-1",
416
- "sentence": ["In", "2003", ",", "the", "Stade", "de", "France", "was", "the", "primary", "site", "of", "the", "2003", "World", "Championships", "in", "Athletics", "."],
417
- "ner": [
418
- {"id-start": 4, "id-end": 6, "entity-type": "location"},
419
- {"id-start": 13, "id-end": 17, "entity-type": "event"}
420
- ],
421
- "relations": [
422
- {"id_1-start": 13, "id_1-end": 17, "id_2-start": 4, "id_2-end": 6, "relation-type": "physical", "Exp": "", "Un": False, "SA": False}
423
- ]
424
- }
425
- ```
426
-
427
- #### literature
428
- - **Size of downloaded dataset files:** 0.64 MB
429
- - **Size of the generated dataset:** 0.57 MB
430
-
431
- An example of 'train' looks as follows:
432
- ```python
433
- {
434
- "doc_key": "literature-train-1",
435
- "sentence": ["In", "1351", ",", "during", "the", "reign", "of", "Emperor", "Toghon", "Temür", "of", "the", "Yuan", "dynasty", ",", "93rd-generation", "descendant", "Kong", "Huan", "(", "孔浣", ")", "'", "s", "2nd", "son", "Kong", "Shao", "(", "孔昭", ")", "moved", "from", "China", "to", "Korea", "during", "the", "Goryeo", ",", "and", "was", "received", "courteously", "by", "Princess", "Noguk", "(", "the", "Mongolian-born", "wife", "of", "the", "future", "king", "Gongmin", ")", "."],
436
- "ner": [
437
- {"id-start": 7, "id-end": 9, "entity-type": "person"},
438
- {"id-start": 12, "id-end": 13, "entity-type": "country"},
439
- {"id-start": 17, "id-end": 18, "entity-type": "writer"},
440
- {"id-start": 20, "id-end": 20, "entity-type": "writer"},
441
- {"id-start": 26, "id-end": 27, "entity-type": "writer"},
442
- {"id-start": 29, "id-end": 29, "entity-type": "writer"},
443
- {"id-start": 33, "id-end": 33, "entity-type": "country"},
444
- {"id-start": 35, "id-end": 35, "entity-type": "country"},
445
- {"id-start": 38, "id-end": 38, "entity-type": "misc"},
446
- {"id-start": 45, "id-end": 46, "entity-type": "person"},
447
- {"id-start": 49, "id-end": 50, "entity-type": "misc"},
448
- {"id-start": 55, "id-end": 55, "entity-type": "person"}
449
- ],
450
- "relations": [
451
- {"id_1-start": 7, "id_1-end": 9, "id_2-start": 12, "id_2-end": 13, "relation-type": "role", "Exp": "", "Un": False, "SA": False},
452
- {"id_1-start": 7, "id_1-end": 9, "id_2-start": 12, "id_2-end": 13, "relation-type": "temporal", "Exp": "", "Un": False, "SA": False},
453
- {"id_1-start": 17, "id_1-end": 18, "id_2-start": 26, "id_2-end": 27, "relation-type": "social", "Exp": "family", "Un": False, "SA": False},
454
- {"id_1-start": 20, "id_1-end": 20, "id_2-start": 17, "id_2-end": 18, "relation-type": "named", "Exp": "", "Un": False, "SA": False},
455
- {"id_1-start": 26, "id_1-end": 27, "id_2-start": 33, "id_2-end": 33, "relation-type": "physical", "Exp": "", "Un": False, "SA": False},
456
- {"id_1-start": 26, "id_1-end": 27, "id_2-start": 35, "id_2-end": 35, "relation-type": "physical", "Exp": "", "Un": False, "SA": False},
457
- {"id_1-start": 26, "id_1-end": 27, "id_2-start": 38, "id_2-end": 38, "relation-type": "temporal", "Exp": "", "Un": False, "SA": False},
458
- {"id_1-start": 26, "id_1-end": 27, "id_2-start": 45, "id_2-end": 46, "relation-type": "social", "Exp": "greeted_by", "Un": False, "SA": False},
459
- {"id_1-start": 29, "id_1-end": 29, "id_2-start": 26, "id_2-end": 27, "relation-type": "named", "Exp": "", "Un": False, "SA": False},
460
- {"id_1-start": 45, "id_1-end": 46, "id_2-start": 55, "id_2-end": 55, "relation-type": "social", "Exp": "marriage", "Un": False, "SA": False},
461
- {"id_1-start": 49, "id_1-end": 50, "id_2-start": 45, "id_2-end": 46, "relation-type": "named", "Exp": "", "Un": False, "SA": False}
462
- ]
463
- }
464
- ```
465
-
466
- #### ai
467
- - **Size of downloaded dataset files:** 0.51 MB
468
- - **Size of the generated dataset:** 0.46 MB
469
-
470
- An example of 'train' looks as follows:
471
- ```python
472
- {
473
- "doc_key": "ai-train-1",
474
- "sentence": ["Popular", "approaches", "of", "opinion-based", "recommender", "system", "utilize", "various", "techniques", "including", "text", "mining", ",", "information", "retrieval", ",", "sentiment", "analysis", "(", "see", "also", "Multimodal", "sentiment", "analysis", ")", "and", "deep", "learning", "X.Y.", "Feng", ",", "H.", "Zhang", ",", "Y.J.", "Ren", ",", "P.H.", "Shang", ",", "Y.", "Zhu", ",", "Y.C.", "Liang", ",", "R.C.", "Guan", ",", "D.", "Xu", ",", "(", "2019", ")", ",", ",", "21", "(", "5", ")", ":", "e12957", "."],
475
- "ner": [
476
- {"id-start": 3, "id-end": 5, "entity-type": "product"},
477
- {"id-start": 10, "id-end": 11, "entity-type": "field"},
478
- {"id-start": 13, "id-end": 14, "entity-type": "task"},
479
- {"id-start": 16, "id-end": 17, "entity-type": "task"},
480
- {"id-start": 21, "id-end": 23, "entity-type": "task"},
481
- {"id-start": 26, "id-end": 27, "entity-type": "field"},
482
- {"id-start": 28, "id-end": 29, "entity-type": "researcher"},
483
- {"id-start": 31, "id-end": 32, "entity-type": "researcher"},
484
- {"id-start": 34, "id-end": 35, "entity-type": "researcher"},
485
- {"id-start": 37, "id-end": 38, "entity-type": "researcher"},
486
- {"id-start": 40, "id-end": 41, "entity-type": "researcher"},
487
- {"id-start": 43, "id-end": 44, "entity-type": "researcher"},
488
- {"id-start": 46, "id-end": 47, "entity-type": "researcher"},
489
- {"id-start": 49, "id-end": 50, "entity-type": "researcher"}
490
- ],
491
- "relations": [
492
- {"id_1-start": 3, "id_1-end": 5, "id_2-start": 10, "id_2-end": 11, "relation-type": "part-of", "Exp": "", "Un": False, "SA": False},
493
- {"id_1-start": 3, "id_1-end": 5, "id_2-start": 10, "id_2-end": 11, "relation-type": "usage", "Exp": "", "Un": False, "SA": False},
494
- {"id_1-start": 3, "id_1-end": 5, "id_2-start": 13, "id_2-end": 14, "relation-type": "part-of", "Exp": "", "Un": False, "SA": False},
495
- {"id_1-start": 3, "id_1-end": 5, "id_2-start": 13, "id_2-end": 14, "relation-type": "usage", "Exp": "", "Un": False, "SA": False},
496
- {"id_1-start": 3, "id_1-end": 5, "id_2-start": 16, "id_2-end": 17, "relation-type": "part-of", "Exp": "", "Un": False, "SA": False},
497
- {"id_1-start": 3, "id_1-end": 5, "id_2-start": 16, "id_2-end": 17, "relation-type": "usage", "Exp": "", "Un": False, "SA": False},
498
- {"id_1-start": 3, "id_1-end": 5, "id_2-start": 26, "id_2-end": 27, "relation-type": "part-of", "Exp": "", "Un": False, "SA": False},
499
- {"id_1-start": 3, "id_1-end": 5, "id_2-start": 26, "id_2-end": 27, "relation-type": "usage", "Exp": "", "Un": False, "SA": False},
500
- {"id_1-start": 21, "id_1-end": 23, "id_2-start": 16, "id_2-end": 17, "relation-type": "part-of", "Exp": "", "Un": False, "SA": False},
501
- {"id_1-start": 21, "id_1-end": 23, "id_2-start": 16, "id_2-end": 17, "relation-type": "type-of", "Exp": "", "Un": False, "SA": False}
502
- ]
503
- }
504
- ```
505
-
506
- ### Data Fields
507
-
508
- The data fields are the same among all splits.
509
- - `doc_key`: the instance id of this sentence, a `string` feature.
510
- - `sentence`: the list of tokens of this sentence, obtained with spaCy, a `list` of `string` features.
511
- - `ner`: the list of named entities in this sentence, a `list` of `dict` features.
512
- - `id-start`: the start index of the entity, an `int` feature.
513
- - `id-end`: the end index of the entity, an `int` feature.
514
- - `entity-type`: the type of the entity, a `string` feature.
515
- - `relations`: the list of relations in this sentence, a `list` of `dict` features.
516
- - `id_1-start`: the start index of the first entity, an `int` feature.
517
- - `id_1-end`: the end index of the first entity, an `int` feature.
518
- - `id_2-start`: the start index of the second entity, an `int` feature.
519
- - `id_2-end`: the end index of the second entity, an `int` feature.
520
- - `relation-type`: the type of the relation, a `string` feature.
521
- - `Exp`: the explanation of the relation type assigned, a `string` feature.
522
- - `Un`: uncertainty of the annotator, a `bool` feature.
523
- - `SA`: existence of syntax ambiguity which poses a challenge for the annotator, a `bool` feature.
524
-
525
- ### Data Splits
526
- #### Sentences
527
- | | Train | Dev | Test | Total |
528
- |--------------|---------|---------|---------|---------|
529
- | news | 164 | 350 | 400 | 914 |
530
- | politics | 101 | 350 | 400 | 851 |
531
- | science | 103 | 351 | 400 | 854 |
532
- | music | 100 | 350 | 399 | 849 |
533
- | literature | 100 | 400 | 416 | 916 |
534
- | ai | 100 | 350 | 431 | 881 |
535
- | ------------ | ------- | ------- | ------- | ------- |
536
- | total | 668 | 2,151 | 2,446 | 5,265 |
537
-
538
- #### Relations
539
- | | Train | Dev | Test | Total |
540
- |--------------|---------|---------|---------|---------|
541
- | news | 175 | 300 | 396 | 871 |
542
- | politics | 502 | 1,616 | 1,831 | 3,949 |
543
- | science | 355 | 1,340 | 1,393 | 3,088 |
544
- | music | 496 | 1,861 | 2,333 | 4,690 |
545
- | literature | 397 | 1,539 | 1,591 | 3,527 |
546
- | ai | 350 | 1,006 | 1,127 | 2,483 |
547
- | ------------ | ------- | ------- | ------- | ------- |
548
- | total | 2,275 | 7,662 | 8,671 | 18,608 |
549
-
550
- ## Dataset Creation
551
-
552
- ### Curation Rationale
553
-
554
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
555
-
556
- ### Source Data
557
-
558
- #### Initial Data Collection and Normalization
559
-
560
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
561
-
562
- #### Who are the source language producers?
563
-
564
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
565
-
566
- ### Annotations
567
-
568
- #### Annotation process
569
-
570
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
571
-
572
- #### Who are the annotators?
573
-
574
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
575
-
576
- ### Personal and Sensitive Information
577
-
578
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
579
-
580
- ## Considerations for Using the Data
581
-
582
- ### Social Impact of Dataset
583
-
584
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
585
-
586
- ### Discussion of Biases
587
-
588
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
589
-
590
- ### Other Known Limitations
591
-
592
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
593
-
594
- ## Additional Information
595
-
596
- ### Dataset Curators
597
-
598
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
599
-
600
- ### Licensing Information
601
-
602
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
603
-
604
- ### Citation Information
605
-
606
- ```
607
- @inproceedings{bassignana-plank-2022-crossre,
608
- title = "Cross{RE}: A {C}ross-{D}omain {D}ataset for {R}elation {E}xtraction",
609
- author = "Bassignana, Elisa and Plank, Barbara",
610
- booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
611
- year = "2022",
612
- publisher = "Association for Computational Linguistics"
613
- }
614
- ```
615
-
616
- ### Contributions
617
-
618
- Thanks to [@phucdev](https://github.com/phucdev) for adding this dataset.
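The dataset card above describes the per-field schema. As a minimal sketch, the converted parquet splits of the `ai` configuration added in this commit can be loaded with the `datasets` parquet builder and the fields inspected as follows; this assumes a local clone of this repository with the Git LFS objects pulled, so the pointer files below resolve to actual parquet data.

```python
# Minimal sketch: load the parquet splits added in this commit for the "ai" configuration.
# Assumes a local clone of this repository with Git LFS objects pulled.
from datasets import load_dataset

data_files = {
    "train": "ai/cross_re-train.parquet",
    "validation": "ai/cross_re-validation.parquet",
    "test": "ai/cross_re-test.parquet",
}
ds = load_dataset("parquet", data_files=data_files)

example = ds["train"][0]
print(example["doc_key"])    # instance id, e.g. "ai-train-1"
print(example["sentence"])   # token list of the sentence
print(example["ner"])        # entity spans: id-start, id-end, entity-type
print(example["relations"])  # relation spans plus relation-type, Exp, Un, SA
```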
ai/cross_re-test.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd411749ecf14e06690c63475898868e1fcd85c6f033180ae0455b997953ce39
3
+ size 73439
ai/cross_re-train.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a2ed3364ab2aaf764c8f0f55f7c45eff8e6be3400596eff7e2639c9bafe4425
3
+ size 30176
ai/cross_re-validation.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef55a77ac7eab477cd16cdd4e0d712bd233de568e8cccd252ed015f32ca761aa
3
+ size 63837
cross_re.py DELETED
@@ -1,184 +0,0 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- """CrossRE is a cross-domain dataset for relation extraction"""
15
-
16
-
17
- import json
18
- import datasets
19
-
20
-
21
- _CITATION = """\
22
- @inproceedings{bassignana-plank-2022-crossre,
23
- title = "Cross{RE}: A {C}ross-{D}omain {D}ataset for {R}elation {E}xtraction",
24
- author = "Bassignana, Elisa and Plank, Barbara",
25
- booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
26
- year = "2022",
27
- publisher = "Association for Computational Linguistics"
28
- }
29
- """
30
-
31
- _DESCRIPTION = """\
32
- CrossRE is a new, freely-available cross-domain benchmark for RE, which comprises six distinct text domains and includes
33
- multilabel annotations. It includes the following domains: news, politics, natural science, music, literature and
34
- artificial intelligence. The semantic relations are annotated on top of CrossNER (Liu et al., 2021), a cross-domain
35
- dataset for NER which contains domain-specific entity types.
36
- The dataset contains 17 relation labels for the six domains: PART-OF, PHYSICAL, USAGE, ROLE, SOCIAL,
37
- GENERAL-AFFILIATION, COMPARE, TEMPORAL, ARTIFACT, ORIGIN, TOPIC, OPPOSITE, CAUSE-EFFECT, WIN-DEFEAT, TYPEOF, NAMED, and
38
- RELATED-TO.
39
-
40
- For details, see the paper: https://arxiv.org/abs/2210.09345
41
- """
42
-
43
- _HOMEPAGE = "https://github.com/mainlp/CrossRE"
44
-
45
- # TODO: Add the licence for the dataset here if you can find it
46
- _LICENSE = ""
47
-
48
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
49
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
50
- _URLS = {
51
- "news": {
52
- "train": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/news-train.json",
53
- "validation": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/news-dev.json",
54
- "test": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/news-test.json",
55
- },
56
- "politics": {
57
- "train": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/politics-train.json",
58
- "validation": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/politics-dev.json",
59
- "test": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/politics-test.json",
60
- },
61
- "science": {
62
- "train": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/science-train.json",
63
- "validation": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/science-dev.json",
64
- "test": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/science-test.json",
65
- },
66
- "music": {
67
- "train": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/music-train.json",
68
- "validation": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/music-dev.json",
69
- "test": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/music-test.json",
70
- },
71
- "literature": {
72
- "train": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/literature-train.json",
73
- "validation": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/literature-dev.json",
74
- "test": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/literature-test.json",
75
- },
76
- "ai": {
77
- "train": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/ai-train.json",
78
- "validation": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/ai-dev.json",
79
- "test": "https://raw.githubusercontent.com/mainlp/CrossRE/main/crossre_data/ai-test.json",
80
- },
81
- }
82
-
83
-
84
- class CrossRE(datasets.GeneratorBasedBuilder):
85
- """CrossRE is a cross-domain dataset for relation extraction"""
86
-
87
- VERSION = datasets.Version("1.1.0")
88
-
89
- BUILDER_CONFIGS = [
90
- datasets.BuilderConfig(name="news", version=VERSION,
91
- description="This part of CrossRE covers data from the news domain"),
92
- datasets.BuilderConfig(name="politics", version=VERSION,
93
- description="This part of CrossRE covers data from the politics domain"),
94
- datasets.BuilderConfig(name="science", version=VERSION,
95
- description="This part of CrossRE covers data from the science domain"),
96
- datasets.BuilderConfig(name="music", version=VERSION,
97
- description="This part of CrossRE covers data from the music domain"),
98
- datasets.BuilderConfig(name="literature", version=VERSION,
99
- description="This part of CrossRE covers data from the literature domain"),
100
- datasets.BuilderConfig(name="ai", version=VERSION,
101
- description="This part of CrossRE covers data from the AI domain"),
102
- ]
103
-
104
- def _info(self):
105
- features = datasets.Features(
106
- {
107
- "doc_key": datasets.Value("string"),
108
- "sentence": datasets.Sequence(datasets.Value("string")),
109
- "ner": [{
110
- "id-start": datasets.Value("int32"),
111
- "id-end": datasets.Value("int32"),
112
- "entity-type": datasets.Value("string"),
113
- }],
114
- "relations": [{
115
- "id_1-start": datasets.Value("int32"),
116
- "id_1-end": datasets.Value("int32"),
117
- "id_2-start": datasets.Value("int32"),
118
- "id_2-end": datasets.Value("int32"),
119
- "relation-type": datasets.Value("string"),
120
- "Exp": datasets.Value("string"), # Explanation of the relation type assigned
121
- "Un": datasets.Value("bool"), # Uncertainty of the annotator
122
- "SA": datasets.Value("bool"), # Syntax Ambiguity which poses a challenge for the annotator
123
- }]
124
- }
125
- )
126
- return datasets.DatasetInfo(
127
- # This is the description that will appear on the datasets page.
128
- description=_DESCRIPTION,
129
- # This defines the different columns of the dataset and their types
130
- features=features, # Here we define them above because they are different between the two configurations
131
- # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
132
- # specify them. They'll be used if as_supervised=True in builder.as_dataset.
133
- # supervised_keys=("sentence", "label"),
134
- # Homepage of the dataset for documentation
135
- homepage=_HOMEPAGE,
136
- # License for the dataset if available
137
- license=_LICENSE,
138
- # Citation for the dataset
139
- citation=_CITATION,
140
- )
141
-
142
- def _split_generators(self, dl_manager):
143
- # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
144
-
145
- # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
146
- # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
147
- # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
148
- urls = _URLS[self.config.name]
149
- downloaded_files = dl_manager.download_and_extract(urls)
150
- return [datasets.SplitGenerator(name=i, gen_kwargs={"filepath": downloaded_files[str(i)]})
151
- for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
152
-
153
- # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
154
- def _generate_examples(self, filepath):
155
- # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
156
- with open(filepath, encoding="utf-8") as f:
157
- for row in f:
158
- doc = json.loads(row)
159
- doc_key = doc["doc_key"]
160
- ner = []
161
- for entity in doc["ner"]:
162
- ner.append({
163
- "id-start": entity[0],
164
- "id-end": entity[1],
165
- "entity-type": entity[2],
166
- })
167
- relations = []
168
- for relation in doc["relations"]:
169
- relations.append({
170
- "id_1-start": relation[0],
171
- "id_1-end": relation[1],
172
- "id_2-start": relation[2],
173
- "id_2-end": relation[3],
174
- "relation-type": relation[4],
175
- "Exp": relation[5],
176
- "Un": relation[6],
177
- "SA": relation[7],
178
- })
179
- yield doc_key, {
180
- "doc_key": doc_key,
181
- "sentence": doc["sentence"],
182
- "ner": ner,
183
- "relations": relations
184
- }
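The `_generate_examples` method of the removed loading script maps positional entries from the raw CrossRE JSON-lines files to the named fields used in the dataset card. A minimal sketch of that mapping, with a raw line reconstructed from the news-train-1 example above for illustration only (not copied from the source files):

```python
# Illustration of the positional format _generate_examples expects, reconstructed
# from the news-train-1 example in the dataset card above (not copied from the raw files).
import json

raw_line = json.dumps({
    "doc_key": "news-train-1",
    "sentence": ["EU", "rejects", "German", "call", "to", "boycott", "British", "lamb", "."],
    # ner entries are [id-start, id-end, entity-type]
    "ner": [[0, 0, "organisation"], [2, 3, "misc"], [6, 7, "misc"]],
    # relation entries are [id_1-start, id_1-end, id_2-start, id_2-end, relation-type, Exp, Un, SA]
    "relations": [[0, 0, 2, 3, "opposite", "rejects", False, False]],
})

doc = json.loads(raw_line)
ner = [{"id-start": e[0], "id-end": e[1], "entity-type": e[2]} for e in doc["ner"]]
relations = [
    {"id_1-start": r[0], "id_1-end": r[1], "id_2-start": r[2], "id_2-end": r[3],
     "relation-type": r[4], "Exp": r[5], "Un": r[6], "SA": r[7]}
    for r in doc["relations"]
]
```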
literature/cross_re-test.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dcb556971c1d6dfd1d36d3e07f37c88768cc382d05885e79349a04379726c1e4
3
+ size 92972
literature/cross_re-train.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:50cfc30507db93eaeebbfe6e2db96f33699ce80910fcafb28d39f0d4b8b200f4
3
+ size 31707
literature/cross_re-validation.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:49f203bd4b4cac1f52e334fad438fa3185c2fe967cba6e0915928b0b9bdebd32
3
+ size 88003
music/cross_re-test.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c41ee6ee67cc5c922ca56be8a050733965c31f84ff4ad81711a8b48fdb940cd
3
+ size 97362
music/cross_re-train.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:121b99bfc1e330e4fd7b5bc04e5819fe879b6f06a7dd330a16ce6d25cd683c97
3
+ size 31189
music/cross_re-validation.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4a8e3712bb091ccc8ea940c5db2a379e63bd2f5a8095c58187d13965f4f9549
3
+ size 82913
news/cross_re-test.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef19485dc80680c8111373dcdf618ecd656cf72aad6b2600f893ad33d0f88ebe
3
+ size 37823
news/cross_re-train.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8679cf6196ff3ff73877f4e75a5b6c5dfa152e1f2854d1ec4da061fdaaa35b6b
3
+ size 25595
news/cross_re-validation.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f3e4fce26587222d90f89a474b3e10509cd2e64e2164d9a8ce41ee8a083189d
3
+ size 31783
politics/cross_re-test.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13e746d4149b818ec037edb07085fb23e7ee6b778996af0bc549882b1a12df97
3
+ size 88524
politics/cross_re-train.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e9afc704bae4008db5576762c8e960990f14c701114c44f0afb5025fc367ded
3
+ size 32683
politics/cross_re-validation.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6afb38f720b740d9060e111c21ef093a04b1b2a8e27002b21869c4f8e02a7bdc
3
+ size 78847
science/cross_re-test.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dc8afeaa0d75821942580df5ca29546ff0c1b2e2b8a0e98579639fe31be1216
3
+ size 88666
science/cross_re-train.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f2c88356dce50a3f8eb04be75fce89209efa2558d5217c184fabd8cbf7aa6f1
3
+ size 31209
science/cross_re-validation.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93a1d22ae67bd401e1e24e32f5e7421035a7f09594b3dc905529302fffe229a3
3
+ size 77777
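As a quick sanity check, the row counts of the added parquet files can be compared against the sentence counts in the dataset card; a minimal sketch with pandas for the `science` configuration, again assuming a local clone with Git LFS objects pulled:

```python
# Compare parquet row counts against the sentence counts in the dataset card (science config).
# Assumes a local clone of this repository with Git LFS objects pulled.
import pandas as pd

for split, expected in [("train", 103), ("validation", 351), ("test", 400)]:
    df = pd.read_parquet(f"science/cross_re-{split}.parquet")
    print(split, len(df), "expected", expected)
```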