rfernand committed on
Commit
94c28d3
1 Parent(s): 4909b02

Upload 3 files

Files changed (1)
  1. basic_sentence_transforms.py +47 -5
basic_sentence_transforms.py CHANGED
@@ -1,4 +1,4 @@
-# basic-sentence-transforms.py: the HF datasets "loading script" for the NC_PAT dataset (defines configurations/tasks, columns, etc.)
+# basic-sentence-transforms.py: the HF datasets "loading script" for the internal NC_PAT dataset (defines configurations/tasks, columns, etc.)
 import os
 import json
 import datasets
@@ -31,7 +31,7 @@ dir_only = {
     "direction": datasets.Value("string"),
 }
 
-configs = [
+warmup_configs = [
     {"name": "car_cdr_cons",
      "desc": "small phrase translation tasks that require only: CAR, CDR, or CAR+CDR+CONS operations",
      "features": samp_class},
@@ -79,6 +79,9 @@ configs = [
     {"name": "car_cdr_seq_path_tuc",
      "desc": "same task as car_cdr_seq_path, but requires mapping lowercase fillers to their uppercase tokens",
      "features": count_class},
+]
+
+core_configs = [
 
     {"name": "active_active_stb",
      "desc": "active sentence translation, from sentence to parenthesized tree form, both directions",
@@ -87,6 +90,15 @@ configs = [
     {"name": "active_active_stb_40k",
      "desc": "same task as active_active_stb, but train samples increased from 10K to 40K",
      "features": dir_only},
+
+
+    {"name": "active_logical_ssb",
+     "desc": "active to logical sentence translation, in both directions",
+     "features": dir_only},
+
+    {"name": "active_logical_ssb_40k",
+     "desc": "same task as active_logical_ssb, but train samples increased from 10K to 40K",
+     "features": dir_only},
 
     {"name": "active_logical_ttb",
      "desc": "active to logical tree translation, in both directions",
@@ -95,7 +107,8 @@ configs = [
     {"name": "active_logical_ttb_40k",
      "desc": "same task as active_logical_ttb, but train samples increased from 10K to 40K",
      "features": dir_only},
-
+
+
     {"name": "active_passive_ssb",
      "desc": "active to passive sentence translation, in both directions",
      "features": dir_only},
@@ -111,6 +124,15 @@ configs = [
     {"name": "active_passive_ttb_40k",
      "desc": "same task as active_passive_ttb, but train samples increased from 10K to 40K",
      "features": dir_only},
+
+
+    {"name": "actpass_logical_ss",
+     "desc": "mixture of active to logical and passive to logical sentence translations, single direction",
+     "features": no_extra},
+
+    {"name": "actpass_logical_ss_40k",
+     "desc": "same task as actpass_logical_ss, but train samples increased from 10K to 40K",
+     "features": no_extra},
 
     {"name": "actpass_logical_tt",
      "desc": "mixture of active to logical and passive to logical tree translations, single direction",
@@ -120,6 +142,23 @@ configs = [
      "desc": "same task as actpass_logical_tt, but train samples increased from 10K to 40K",
      "features": no_extra},
 
+    {"name": "logical_logical_stb",
+     "desc": "logical form sentence translation, from sentence to parenthesized tree form, both directions",
+     "features": dir_only},
+
+    {"name": "alogical_logical_stb_40k",
+     "desc": "same task as logical_logical_stb, but train samples increased from 10K to 40K",
+     "features": dir_only},
+
+
+    {"name": "passive_logical_ssb",
+     "desc": "passive to logical sentence translation, in both directions",
+     "features": dir_only},
+
+    {"name": "passive_logical_ssb_40k",
+     "desc": "same task as passive_logical_ssb, but train samples increased from 10K to 40K",
+     "features": dir_only},
+
     {"name": "passive_logical_ttb",
      "desc": "passive to logical tree translation, in both directions",
      "features": dir_only},
@@ -137,13 +176,16 @@ configs = [
      "features": dir_only},
 ]
 
+configs = warmup_configs + core_configs
+
 class BasicSentenceTransformsConfig(datasets.BuilderConfig):
     """BuilderConfig for basic_sentence_transforms dataset."""
 
     def __init__(self, features=None, **kwargs):
         # Version history:
         # 0.0.18: Initial version released to HF datasets
-        super().__init__(version=datasets.Version("0.0.18"), **kwargs)
+        # 0.0.21: release V21 of NC_PAT dataset
+        super().__init__(version=datasets.Version("0.0.21"), **kwargs)
 
         self.features = features
         self.label_classes = None
@@ -166,7 +208,7 @@ class BasicSentenceTransformsConfig(datasets.BuilderConfig):
 
 class BasicSentenceTransforms(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [BasicSentenceTransformsConfig(name=c["name"], description=c["desc"], features=c["features"]) for c in configs]
-    VERSION = datasets.Version("0.0.18")
+    VERSION = datasets.Version("0.0.21")
 
     def _info(self):
         # features are now required here, so get them from the current CONFIG (following code example from super_glue.py)
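The trailing comment says that _info() now pulls its features from the currently selected config, following the super_glue.py loading-script example. A minimal sketch of that pattern, not the author's exact code; the real script presumably merges the per-config extras (such as "direction") with the base input/target columns:

    def _info(self):
        # self.config is the BasicSentenceTransformsConfig selected by name;
        # self.config.features holds the per-config column dict
        # (samp_class, count_class, dir_only, or no_extra above).
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(self.config.features),
            version=self.VERSION,
        )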
 
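With the version bump to 0.0.21, every entry in configs (warmup and core) becomes a selectable Hub configuration, including the sentence-level tasks added in this commit. A hedged usage sketch, assuming the dataset is published as rfernand/basic_sentence_transforms (the id is inferred from the committer and script name, not stated in this diff); depending on your datasets release, script-backed datasets may require trust_remote_code=True, and very recent releases may not support loading scripts at all:

    import datasets

    # Load one of the configurations added in this commit (repo id assumed).
    ds = datasets.load_dataset(
        "rfernand/basic_sentence_transforms",
        "active_logical_ssb",
        trust_remote_code=True,
    )
    print(ds)              # DatasetDict with the splits defined by the loading script
    print(ds["train"][0])  # one sample; columns follow the config's feature dict

    # List the available configuration names without downloading the data
    # (may also need trust_remote_code=True on recent releases).
    print(datasets.get_dataset_config_names("rfernand/basic_sentence_transforms"))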