parquet-converter committed
Commit 7d58abd
Parent: 25cdc80

Update parquet files

README.md DELETED
@@ -1,539 +0,0 @@
1
- ---
2
- annotations_creators:
3
- - found
4
- language_creators:
5
- - found
6
- language:
7
- - code
8
- - en
9
- license:
10
- - c-uda
11
- multilinguality:
12
- - other-programming-languages
13
- size_categories:
14
- - 100K<n<1M
15
- - 10K<n<100K
16
- source_datasets:
17
- - original
18
- task_categories:
19
- - translation
20
- task_ids: []
21
- pretty_name: CodeXGlueCtCodeToText
22
- configs:
23
- - go
24
- - java
25
- - javascript
26
- - php
27
- - python
28
- - ruby
29
- tags:
30
- - code-to-text
31
- dataset_info:
32
- - config_name: go
33
- features:
34
- - name: id
35
- dtype: int32
36
- - name: repo
37
- dtype: string
38
- - name: path
39
- dtype: string
40
- - name: func_name
41
- dtype: string
42
- - name: original_string
43
- dtype: string
44
- - name: language
45
- dtype: string
46
- - name: code
47
- dtype: string
48
- - name: code_tokens
49
- sequence: string
50
- - name: docstring
51
- dtype: string
52
- - name: docstring_tokens
53
- sequence: string
54
- - name: sha
55
- dtype: string
56
- - name: url
57
- dtype: string
58
- splits:
59
- - name: train
60
- num_bytes: 342244027
61
- num_examples: 167288
62
- - name: validation
63
- num_bytes: 13721912
64
- num_examples: 7325
65
- - name: test
66
- num_bytes: 16328458
67
- num_examples: 8122
68
- download_size: 499922799
69
- dataset_size: 372294397
70
- - config_name: java
71
- features:
72
- - name: id
73
- dtype: int32
74
- - name: repo
75
- dtype: string
76
- - name: path
77
- dtype: string
78
- - name: func_name
79
- dtype: string
80
- - name: original_string
81
- dtype: string
82
- - name: language
83
- dtype: string
84
- - name: code
85
- dtype: string
86
- - name: code_tokens
87
- sequence: string
88
- - name: docstring
89
- dtype: string
90
- - name: docstring_tokens
91
- sequence: string
92
- - name: sha
93
- dtype: string
94
- - name: url
95
- dtype: string
96
- splits:
97
- - name: train
98
- num_bytes: 452554719
99
- num_examples: 164923
100
- - name: validation
101
- num_bytes: 13366396
102
- num_examples: 5183
103
- - name: test
104
- num_bytes: 29080857
105
- num_examples: 10955
106
- download_size: 1072966017
107
- dataset_size: 495001972
108
- - config_name: javascript
109
- features:
110
- - name: id
111
- dtype: int32
112
- - name: repo
113
- dtype: string
114
- - name: path
115
- dtype: string
116
- - name: func_name
117
- dtype: string
118
- - name: original_string
119
- dtype: string
120
- - name: language
121
- dtype: string
122
- - name: code
123
- dtype: string
124
- - name: code_tokens
125
- sequence: string
126
- - name: docstring
127
- dtype: string
128
- - name: docstring_tokens
129
- sequence: string
130
- - name: sha
131
- dtype: string
132
- - name: url
133
- dtype: string
134
- splits:
135
- - name: train
136
- num_bytes: 160860743
137
- num_examples: 58025
138
- - name: validation
139
- num_bytes: 10337396
140
- num_examples: 3885
141
- - name: test
142
- num_bytes: 10190765
143
- num_examples: 3291
144
- download_size: 1677110214
145
- dataset_size: 181388904
146
- - config_name: php
147
- features:
148
- - name: id
149
- dtype: int32
150
- - name: repo
151
- dtype: string
152
- - name: path
153
- dtype: string
154
- - name: func_name
155
- dtype: string
156
- - name: original_string
157
- dtype: string
158
- - name: language
159
- dtype: string
160
- - name: code
161
- dtype: string
162
- - name: code_tokens
163
- sequence: string
164
- - name: docstring
165
- dtype: string
166
- - name: docstring_tokens
167
- sequence: string
168
- - name: sha
169
- dtype: string
170
- - name: url
171
- dtype: string
172
- splits:
173
- - name: train
174
- num_bytes: 614655799
175
- num_examples: 241241
176
- - name: validation
177
- num_bytes: 33283149
178
- num_examples: 12982
179
- - name: test
180
- num_bytes: 35375097
181
- num_examples: 14014
182
- download_size: 864290912
183
- dataset_size: 683314045
184
- - config_name: python
185
- features:
186
- - name: id
187
- dtype: int32
188
- - name: repo
189
- dtype: string
190
- - name: path
191
- dtype: string
192
- - name: func_name
193
- dtype: string
194
- - name: original_string
195
- dtype: string
196
- - name: language
197
- dtype: string
198
- - name: code
199
- dtype: string
200
- - name: code_tokens
201
- sequence: string
202
- - name: docstring
203
- dtype: string
204
- - name: docstring_tokens
205
- sequence: string
206
- - name: sha
207
- dtype: string
208
- - name: url
209
- dtype: string
210
- splits:
211
- - name: train
212
- num_bytes: 813664500
213
- num_examples: 251820
214
- - name: validation
215
- num_bytes: 46888668
216
- num_examples: 13914
217
- - name: test
218
- num_bytes: 50659792
219
- num_examples: 14918
220
- download_size: 953306861
221
- dataset_size: 911212960
222
- - config_name: ruby
223
- features:
224
- - name: id
225
- dtype: int32
226
- - name: repo
227
- dtype: string
228
- - name: path
229
- dtype: string
230
- - name: func_name
231
- dtype: string
232
- - name: original_string
233
- dtype: string
234
- - name: language
235
- dtype: string
236
- - name: code
237
- dtype: string
238
- - name: code_tokens
239
- sequence: string
240
- - name: docstring
241
- dtype: string
242
- - name: docstring_tokens
243
- sequence: string
244
- - name: sha
245
- dtype: string
246
- - name: url
247
- dtype: string
248
- splits:
249
- - name: train
250
- num_bytes: 51956595
251
- num_examples: 24927
252
- - name: validation
253
- num_bytes: 2821089
254
- num_examples: 1400
255
- - name: test
256
- num_bytes: 2671603
257
- num_examples: 1261
258
- download_size: 124154892
259
- dataset_size: 57449287
260
- ---
261
- # Dataset Card for "code_x_glue_ct_code_to_text"
262
-
263
- ## Table of Contents
264
- - [Dataset Description](#dataset-description)
265
- - [Dataset Summary](#dataset-summary)
266
- - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
267
- - [Languages](#languages)
268
- - [Dataset Structure](#dataset-structure)
269
- - [Data Instances](#data-instances)
270
- - [Data Fields](#data-fields)
271
- - [Data Splits](#data-splits)
272
- - [Dataset Creation](#dataset-creation)
273
- - [Curation Rationale](#curation-rationale)
274
- - [Source Data](#source-data)
275
- - [Annotations](#annotations)
276
- - [Personal and Sensitive Information](#personal-and-sensitive-information)
277
- - [Considerations for Using the Data](#considerations-for-using-the-data)
278
- - [Social Impact of Dataset](#social-impact-of-dataset)
279
- - [Discussion of Biases](#discussion-of-biases)
280
- - [Other Known Limitations](#other-known-limitations)
281
- - [Additional Information](#additional-information)
282
- - [Dataset Curators](#dataset-curators)
283
- - [Licensing Information](#licensing-information)
284
- - [Citation Information](#citation-information)
285
- - [Contributions](#contributions)
286
-
287
- ## Dataset Description
288
-
289
- - **Homepage:** https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text
290
-
291
- ### Dataset Summary
292
-
293
- CodeXGLUE code-to-text dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text
294
-
295
- The dataset is derived from CodeSearchNet and filtered as follows:
296
- - Remove examples whose code cannot be parsed into an abstract syntax tree.
297
- - Remove examples whose documentation is shorter than 3 tokens or longer than 256 tokens.
298
- - Remove examples whose documentation contains special tokens (e.g. <img ...> or https:...).
299
- - Remove examples whose documentation is not written in English.
300
-
301
- ### Supported Tasks and Leaderboards
302
-
303
- - `machine-translation`: The dataset can be used to train a model for automatically generating **English** docstrings for code.
304
-
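A minimal loading sketch for this task, assuming the Hugging Face `datasets` library and the dataset id from the card title (any config from the YAML header above can stand in for `python`):

```
from datasets import load_dataset

# Load one language configuration; "python" can be swapped for go, java, javascript, php or ruby.
ds = load_dataset("code_x_glue_ct_code_to_text", "python")

example = ds["train"][0]
print(example["code"][:200])   # model input: the function source
print(example["docstring"])    # target: the English docstring
```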
305
- ### Languages
306
-
307
- - Go **programming** language
308
- - Java **programming** language
309
- - Javascript **programming** language
310
- - PHP **programming** language
311
- - Python **programming** language
312
- - Ruby **programming** language
313
- - English **natural** language
314
-
315
- ## Dataset Structure
316
-
317
- ### Data Instances
318
-
319
- #### go
320
-
321
- An example from the 'test' split looks as follows.
322
- ```
323
- {
324
- "code": "func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {\n\topts := &stmOptions{ctx: c.Ctx()}\n\tfor _, f := range so {\n\t\tf(opts)\n\t}\n\tif len(opts.prefetch) != 0 {\n\t\tf := apply\n\t\tapply = func(s STM) error {\n\t\t\ts.Get(opts.prefetch...)\n\t\t\treturn f(s)\n\t\t}\n\t}\n\treturn runSTM(mkSTM(c, opts), apply)\n}",
325
- "code_tokens": ["func", "NewSTM", "(", "c", "*", "v3", ".", "Client", ",", "apply", "func", "(", "STM", ")", "error", ",", "so", "...", "stmOption", ")", "(", "*", "v3", ".", "TxnResponse", ",", "error", ")", "{", "opts", ":=", "&", "stmOptions", "{", "ctx", ":", "c", ".", "Ctx", "(", ")", "}", "\n", "for", "_", ",", "f", ":=", "range", "so", "{", "f", "(", "opts", ")", "\n", "}", "\n", "if", "len", "(", "opts", ".", "prefetch", ")", "!=", "0", "{", "f", ":=", "apply", "\n", "apply", "=", "func", "(", "s", "STM", ")", "error", "{", "s", ".", "Get", "(", "opts", ".", "prefetch", "...", ")", "\n", "return", "f", "(", "s", ")", "\n", "}", "\n", "}", "\n", "return", "runSTM", "(", "mkSTM", "(", "c", ",", "opts", ")", ",", "apply", ")", "\n", "}"],
326
- "docstring": "// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.",
327
- "docstring_tokens": ["NewSTM", "initiates", "a", "new", "STM", "instance", "using", "serializable", "snapshot", "isolation", "by", "default", "."],
328
- "func_name": "NewSTM",
329
- "id": 0,
330
- "language": "go",
331
- "original_string": "func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {\n\topts := &stmOptions{ctx: c.Ctx()}\n\tfor _, f := range so {\n\t\tf(opts)\n\t}\n\tif len(opts.prefetch) != 0 {\n\t\tf := apply\n\t\tapply = func(s STM) error {\n\t\t\ts.Get(opts.prefetch...)\n\t\t\treturn f(s)\n\t\t}\n\t}\n\treturn runSTM(mkSTM(c, opts), apply)\n}",
332
- "path": "clientv3/concurrency/stm.go",
333
- "repo": "etcd-io/etcd",
334
- "sha": "616592d9ba993e3fe9798eef581316016df98906",
335
- "url": "https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/stm.go#L89-L102"
336
- }
337
- ```
338
-
339
- #### java
340
-
341
- An example from the 'test' split looks as follows.
342
- ```
343
- {
344
- "code": "protected final void fastPathOrderedEmit(U value, boolean delayError, Disposable disposable) {\n final Observer<? super V> observer = downstream;\n final SimplePlainQueue<U> q = queue;\n\n if (wip.get() == 0 && wip.compareAndSet(0, 1)) {\n if (q.isEmpty()) {\n accept(observer, value);\n if (leave(-1) == 0) {\n return;\n }\n } else {\n q.offer(value);\n }\n } else {\n q.offer(value);\n if (!enter()) {\n return;\n }\n }\n QueueDrainHelper.drainLoop(q, observer, delayError, disposable, this);\n }",
345
- "code_tokens": ["protected", "final", "void", "fastPathOrderedEmit", "(", "U", "value", ",", "boolean", "delayError", ",", "Disposable", "disposable", ")", "{", "final", "Observer", "<", "?", "super", "V", ">", "observer", "=", "downstream", ";", "final", "SimplePlainQueue", "<", "U", ">", "q", "=", "queue", ";", "if", "(", "wip", ".", "get", "(", ")", "==", "0", "&&", "wip", ".", "compareAndSet", "(", "0", ",", "1", ")", ")", "{", "if", "(", "q", ".", "isEmpty", "(", ")", ")", "{", "accept", "(", "observer", ",", "value", ")", ";", "if", "(", "leave", "(", "-", "1", ")", "==", "0", ")", "{", "return", ";", "}", "}", "else", "{", "q", ".", "offer", "(", "value", ")", ";", "}", "}", "else", "{", "q", ".", "offer", "(", "value", ")", ";", "if", "(", "!", "enter", "(", ")", ")", "{", "return", ";", "}", "}", "QueueDrainHelper", ".", "drainLoop", "(", "q", ",", "observer", ",", "delayError", ",", "disposable", ",", "this", ")", ";", "}"],
346
- "docstring": "Makes sure the fast-path emits in order.\n@param value the value to emit or queue up\n@param delayError if true, errors are delayed until the source has terminated\n@param disposable the resource to dispose if the drain terminates",
347
- "docstring_tokens": ["Makes", "sure", "the", "fast", "-", "path", "emits", "in", "order", "."],
348
- "func_name": "QueueDrainObserver.fastPathOrderedEmit",
349
- "id": 0,
350
- "language": "java",
351
- "original_string": "protected final void fastPathOrderedEmit(U value, boolean delayError, Disposable disposable) {\n final Observer<? super V> observer = downstream;\n final SimplePlainQueue<U> q = queue;\n\n if (wip.get() == 0 && wip.compareAndSet(0, 1)) {\n if (q.isEmpty()) {\n accept(observer, value);\n if (leave(-1) == 0) {\n return;\n }\n } else {\n q.offer(value);\n }\n } else {\n q.offer(value);\n if (!enter()) {\n return;\n }\n }\n QueueDrainHelper.drainLoop(q, observer, delayError, disposable, this);\n }",
352
- "path": "src/main/java/io/reactivex/internal/observers/QueueDrainObserver.java",
353
- "repo": "ReactiveX/RxJava",
354
- "sha": "ac84182aa2bd866b53e01c8e3fe99683b882c60e",
355
- "url": "https://github.com/ReactiveX/RxJava/blob/ac84182aa2bd866b53e01c8e3fe99683b882c60e/src/main/java/io/reactivex/internal/observers/QueueDrainObserver.java#L88-L108"
356
- }
357
- ```
358
-
359
- #### javascript
360
-
361
- An example from the 'test' split looks as follows.
362
- ```
363
- {
364
- "code": "function createInstance(defaultConfig) {\n var context = new Axios(defaultConfig);\n var instance = bind(Axios.prototype.request, context);\n\n // Copy axios.prototype to instance\n utils.extend(instance, Axios.prototype, context);\n\n // Copy context to instance\n utils.extend(instance, context);\n\n return instance;\n}",
365
- "code_tokens": ["function", "createInstance", "(", "defaultConfig", ")", "{", "var", "context", "=", "new", "Axios", "(", "defaultConfig", ")", ";", "var", "instance", "=", "bind", "(", "Axios", ".", "prototype", ".", "request", ",", "context", ")", ";", "// Copy axios.prototype to instance", "utils", ".", "extend", "(", "instance", ",", "Axios", ".", "prototype", ",", "context", ")", ";", "// Copy context to instance", "utils", ".", "extend", "(", "instance", ",", "context", ")", ";", "return", "instance", ";", "}"],
366
- "docstring": "Create an instance of Axios\n\n@param {Object} defaultConfig The default config for the instance\n@return {Axios} A new instance of Axios",
367
- "docstring_tokens": ["Create", "an", "instance", "of", "Axios"],
368
- "func_name": "createInstance",
369
- "id": 0,
370
- "language": "javascript",
371
- "original_string": "function createInstance(defaultConfig) {\n var context = new Axios(defaultConfig);\n var instance = bind(Axios.prototype.request, context);\n\n // Copy axios.prototype to instance\n utils.extend(instance, Axios.prototype, context);\n\n // Copy context to instance\n utils.extend(instance, context);\n\n return instance;\n}",
372
- "path": "lib/axios.js",
373
- "repo": "axios/axios",
374
- "sha": "92d231387fe2092f8736bc1746d4caa766b675f5",
375
- "url": "https://github.com/axios/axios/blob/92d231387fe2092f8736bc1746d4caa766b675f5/lib/axios.js#L15-L26"
376
- }
377
- ```
378
-
379
- #### php
380
-
381
- An example from the 'train' split looks as follows.
382
- ```
383
- {
384
- "code": "public static function build($serviceAddress, $restConfigPath, array $config = [])\n {\n $config += [\n 'httpHandler' => null,\n ];\n list($baseUri, $port) = self::normalizeServiceAddress($serviceAddress);\n $requestBuilder = new RequestBuilder(\"$baseUri:$port\", $restConfigPath);\n $httpHandler = $config['httpHandler'] ?: self::buildHttpHandlerAsync();\n return new RestTransport($requestBuilder, $httpHandler);\n }",
385
- "code_tokens": ["public", "static", "function", "build", "(", "$", "serviceAddress", ",", "$", "restConfigPath", ",", "array", "$", "config", "=", "[", "]", ")", "{", "$", "config", "+=", "[", "'httpHandler'", "=>", "null", ",", "]", ";", "list", "(", "$", "baseUri", ",", "$", "port", ")", "=", "self", "::", "normalizeServiceAddress", "(", "$", "serviceAddress", ")", ";", "$", "requestBuilder", "=", "new", "RequestBuilder", "(", "\"$baseUri:$port\"", ",", "$", "restConfigPath", ")", ";", "$", "httpHandler", "=", "$", "config", "[", "'httpHandler'", "]", "?", ":", "self", "::", "buildHttpHandlerAsync", "(", ")", ";", "return", "new", "RestTransport", "(", "$", "requestBuilder", ",", "$", "httpHandler", ")", ";", "}"],
386
- "docstring": "Builds a RestTransport.\n\n@param string $serviceAddress\nThe address of the API remote host, for example \"example.googleapis.com\".\n@param string $restConfigPath\nPath to rest config file.\n@param array $config {\nConfig options used to construct the gRPC transport.\n\n@type callable $httpHandler A handler used to deliver PSR-7 requests.\n}\n@return RestTransport\n@throws ValidationException",
387
- "docstring_tokens": ["Builds", "a", "RestTransport", "."],
388
- "func_name": "RestTransport.build",
389
- "id": 0,
390
- "language": "php",
391
- "original_string": "public static function build($serviceAddress, $restConfigPath, array $config = [])\n {\n $config += [\n 'httpHandler' => null,\n ];\n list($baseUri, $port) = self::normalizeServiceAddress($serviceAddress);\n $requestBuilder = new RequestBuilder(\"$baseUri:$port\", $restConfigPath);\n $httpHandler = $config['httpHandler'] ?: self::buildHttpHandlerAsync();\n return new RestTransport($requestBuilder, $httpHandler);\n }",
392
- "path": "src/Transport/RestTransport.php",
393
- "repo": "googleapis/gax-php",
394
- "sha": "48387fb818c6882296710a2302a0aa973b99afb2",
395
- "url": "https://github.com/googleapis/gax-php/blob/48387fb818c6882296710a2302a0aa973b99afb2/src/Transport/RestTransport.php#L85-L94"
396
- }
397
- ```
398
-
399
- #### python
400
-
401
- An example from the 'validation' split looks as follows.
402
- ```
403
- {
404
- "code": "def save_act(self, path=None):\n \"\"\"Save model to a pickle located at `path`\"\"\"\n if path is None:\n path = os.path.join(logger.get_dir(), \"model.pkl\")\n\n with tempfile.TemporaryDirectory() as td:\n save_variables(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n cloudpickle.dump((model_data, self._act_params), f)",
405
- "code_tokens": ["def", "save_act", "(", "self", ",", "path", "=", "None", ")", ":", "if", "path", "is", "None", ":", "path", "=", "os", ".", "path", ".", "join", "(", "logger", ".", "get_dir", "(", ")", ",", "\"model.pkl\"", ")", "with", "tempfile", ".", "TemporaryDirectory", "(", ")", "as", "td", ":", "save_variables", "(", "os", ".", "path", ".", "join", "(", "td", ",", "\"model\"", ")", ")", "arc_name", "=", "os", ".", "path", ".", "join", "(", "td", ",", "\"packed.zip\"", ")", "with", "zipfile", ".", "ZipFile", "(", "arc_name", ",", "'w'", ")", "as", "zipf", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "td", ")", ":", "for", "fname", "in", "files", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "fname", ")", "if", "file_path", "!=", "arc_name", ":", "zipf", ".", "write", "(", "file_path", ",", "os", ".", "path", ".", "relpath", "(", "file_path", ",", "td", ")", ")", "with", "open", "(", "arc_name", ",", "\"rb\"", ")", "as", "f", ":", "model_data", "=", "f", ".", "read", "(", ")", "with", "open", "(", "path", ",", "\"wb\"", ")", "as", "f", ":", "cloudpickle", ".", "dump", "(", "(", "model_data", ",", "self", ".", "_act_params", ")", ",", "f", ")"],
406
- "docstring": "Save model to a pickle located at `path`",
407
- "docstring_tokens": ["Save", "model", "to", "a", "pickle", "located", "at", "path"],
408
- "func_name": "ActWrapper.save_act",
409
- "id": 0,
410
- "language": "python",
411
- "original_string": "def save_act(self, path=None):\n \"\"\"Save model to a pickle located at `path`\"\"\"\n if path is None:\n path = os.path.join(logger.get_dir(), \"model.pkl\")\n\n with tempfile.TemporaryDirectory() as td:\n save_variables(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n cloudpickle.dump((model_data, self._act_params), f)",
412
- "path": "baselines/deepq/deepq.py",
413
- "repo": "openai/baselines",
414
- "sha": "3301089b48c42b87b396e246ea3f56fa4bfc9678",
415
- "url": "https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/deepq.py#L55-L72"
416
- }
417
- ```
418
-
419
- #### ruby
420
-
421
- An example from the 'train' split looks as follows.
422
- ```
423
- {
424
- "code": "def render_body(context, options)\n if options.key?(:partial)\n [render_partial(context, options)]\n else\n StreamingTemplateRenderer.new(@lookup_context).render(context, options)\n end\n end",
425
- "code_tokens": ["def", "render_body", "(", "context", ",", "options", ")", "if", "options", ".", "key?", "(", ":partial", ")", "[", "render_partial", "(", "context", ",", "options", ")", "]", "else", "StreamingTemplateRenderer", ".", "new", "(", "@lookup_context", ")", ".", "render", "(", "context", ",", "options", ")", "end", "end"],
426
- "docstring": "Render but returns a valid Rack body. If fibers are defined, we return\n a streaming body that renders the template piece by piece.\n\n Note that partials are not supported to be rendered with streaming,\n so in such cases, we just wrap them in an array.",
427
- "docstring_tokens": ["Render", "but", "returns", "a", "valid", "Rack", "body", ".", "If", "fibers", "are", "defined", "we", "return", "a", "streaming", "body", "that", "renders", "the", "template", "piece", "by", "piece", "."],
428
- "func_name": "ActionView.Renderer.render_body",
429
- "id": 0,
430
- "language": "ruby",
431
- "original_string": "def render_body(context, options)\n if options.key?(:partial)\n [render_partial(context, options)]\n else\n StreamingTemplateRenderer.new(@lookup_context).render(context, options)\n end\n end",
432
- "path": "actionview/lib/action_view/renderer/renderer.rb",
433
- "repo": "rails/rails",
434
- "sha": "85a8bc644be69908f05740a5886ec19cd3679df5",
435
- "url": "https://github.com/rails/rails/blob/85a8bc644be69908f05740a5886ec19cd3679df5/actionview/lib/action_view/renderer/renderer.rb#L38-L44"
436
- }
437
- ```
438
-
439
- ### Data Fields
440
-
441
- Each data field is described below. The fields are identical for every config and across all splits.
442
-
443
- #### go, java, javascript, php, python, ruby
444
-
445
- | field name | type | description |
446
- |----------------|----------------|-----------------------------------------------------------------------------------|
447
- |id |int32 | Index of the sample |
448
- |repo |string | repo: the owner/repo |
449
- |path |string | path: the full path to the original file |
450
- |func_name |string | func_name: the function or method name |
451
- |original_string |string | original_string: the raw string before tokenization or parsing |
452
- |language |string | language: the programming language name |
453
- |code |string | code/function: the part of the original_string that is code |
454
- |code_tokens |Sequence[string]| code_tokens/function_tokens: tokenized version of code |
455
- |docstring |string | docstring: the top-level comment or docstring, if it exists in the original string|
456
- |docstring_tokens|Sequence[string]| docstring_tokens: tokenized version of docstring |
457
- |sha |string | sha of the file |
458
- |url |string | url of the file |
459
-
460
- ### Data Splits
461
-
462
- | name |train |validation|test |
463
- |----------|-----:|---------:|----:|
464
- |go |167288| 7325| 8122|
465
- |java |164923| 5183|10955|
466
- |javascript| 58025| 3885| 3291|
467
- |php |241241| 12982|14014|
468
- |python |251820| 13914|14918|
469
- |ruby | 24927| 1400| 1261|
470
-
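A short sketch, again assuming the `datasets` library, that cross-checks the split sizes above and inspects the fields from the Data Fields table, using the smallest config (`ruby`):

```
from datasets import load_dataset

ds = load_dataset("code_x_glue_ct_code_to_text", "ruby")

# Expected per the table above: train 24927, validation 1400, test 1261.
print({split: ds[split].num_rows for split in ds})

row = ds["test"][0]
# Each row carries the fields listed in the Data Fields table.
print(row["repo"], row["path"], row["func_name"])
print(" ".join(row["docstring_tokens"]))
```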
471
- ## Dataset Creation
472
-
473
- ### Curation Rationale
474
-
475
- [More Information Needed]
476
-
477
- ### Source Data
478
-
479
- #### Initial Data Collection and Normalization
480
-
481
- The data comes from the CodeSearchNet Challenge dataset.
482
- [More Information Needed]
483
-
484
- #### Who are the source language producers?
485
-
486
- Software developers.
487
-
488
- ### Annotations
489
-
490
- #### Annotation process
491
-
492
- [More Information Needed]
493
-
494
- #### Who are the annotators?
495
-
496
- [More Information Needed]
497
-
498
- ### Personal and Sensitive Information
499
-
500
- [More Information Needed]
501
-
502
- ## Considerations for Using the Data
503
-
504
- ### Social Impact of Dataset
505
-
506
- [More Information Needed]
507
-
508
- ### Discussion of Biases
509
-
510
- [More Information Needed]
511
-
512
- ### Other Known Limitations
513
-
514
- [More Information Needed]
515
-
516
- ## Additional Information
517
-
518
- ### Dataset Curators
519
-
520
- https://github.com/microsoft, https://github.com/madlag
521
-
522
- ### Licensing Information
523
-
524
- Computational Use of Data Agreement (C-UDA) License.
525
-
526
- ### Citation Information
527
-
528
- ```
529
- @article{husain2019codesearchnet,
530
- title={Codesearchnet challenge: Evaluating the state of semantic code search},
531
- author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},
532
- journal={arXiv preprint arXiv:1909.09436},
533
- year={2019}
534
- }
535
- ```
536
-
537
- ### Contributions
538
-
539
- Thanks to @madlag (and partly also @ncoop57) for adding this dataset.
 
code_x_glue_ct_code_to_text.py DELETED
@@ -1,155 +0,0 @@
1
- import json
2
- import os
3
- import os.path
4
- from typing import List
5
-
6
- import datasets
7
-
8
- from .common import TrainValidTestChild
9
- from .generated_definitions import DEFINITIONS
10
-
11
-
12
- _DESCRIPTION = """The dataset we use comes from CodeSearchNet and we filter the dataset as the following:
13
- - Remove examples that codes cannot be parsed into an abstract syntax tree.
14
- - Remove examples that #tokens of documents is < 3 or >256
15
- - Remove examples that documents contain special tokens (e.g. <img ...> or https:...)
16
- - Remove examples that documents are not English.
17
- """
18
- _CITATION = """@article{husain2019codesearchnet,
19
- title={Codesearchnet challenge: Evaluating the state of semantic code search},
20
- author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},
21
- journal={arXiv preprint arXiv:1909.09436},
22
- year={2019}
23
- }"""
24
-
25
-
26
- class CodeXGlueCtCodeToTextBaseImpl(TrainValidTestChild):
27
- _DESCRIPTION = _DESCRIPTION
28
- _CITATION = _CITATION
29
-
30
- # For each file, each line in the uncompressed file represents one function.
31
- _FEATURES = {
32
- "id": datasets.Value("int32"), # Index of the sample
33
- "repo": datasets.Value("string"), # repo: the owner/repo
34
- "path": datasets.Value("string"), # path: the full path to the original file
35
- "func_name": datasets.Value("string"), # func_name: the function or method name
36
- "original_string": datasets.Value("string"), # original_string: the raw string before tokenization or parsing
37
- "language": datasets.Value("string"), # language: the programming language name
38
- "code": datasets.Value("string"), # code/function: the part of the original_string that is code
39
- "code_tokens": datasets.features.Sequence(
40
- datasets.Value("string")
41
- ), # code_tokens/function_tokens: tokenized version of code
42
- "docstring": datasets.Value(
43
- "string"
44
- ), # docstring: the top-level comment or docstring, if it exists in the original string
45
- "docstring_tokens": datasets.features.Sequence(
46
- datasets.Value("string")
47
- ), # docstring_tokens: tokenized version of docstring
48
- "sha": datasets.Value("string"), # sha of the file
49
- "url": datasets.Value("string"), # url of the file
50
- }
51
-
52
- _SUPERVISED_KEYS = ["docstring", "docstring_tokens"]
53
-
54
- def generate_urls(self, split_name, language):
55
- yield "language", f"https://s3.amazonaws.com/code-search-net/CodeSearchNet/v2/{language}.zip"
56
- yield "dataset", "dataset.zip"
57
-
58
- def get_data_files(self, split_name, file_paths, language):
59
- language_specific_path = file_paths["language"]
60
- final_path = os.path.join(language_specific_path, language, "final")
61
- # Make some cleanup to save space
62
- for path in os.listdir(final_path):
63
- if path.endswith(".pkl"):
64
- os.unlink(os.path.join(final_path, path))
65
-
66
- data_files = []
67
- for root, dirs, files in os.walk(final_path):
68
- for file in files:
69
- temp = os.path.join(root, file)
70
- if ".jsonl" in temp:
71
- if split_name in temp:
72
- data_files.append(temp)
73
- return data_files
74
-
75
- def post_process(self, split_name, language, js):
76
- return js
77
-
78
- def _generate_examples(self, split_name, file_paths, language):
79
- import gzip
80
-
81
- data_set_path = file_paths["dataset"]
82
-
83
- data_files = self.get_data_files(split_name, file_paths, language)
84
-
85
- urls = {}
86
- f1_path_parts = [data_set_path, "dataset", language, f"{split_name}.txt"]
87
- if self.SINGLE_LANGUAGE:
88
- del f1_path_parts[2]
89
-
90
- f1_path = os.path.join(*f1_path_parts)
91
- with open(f1_path, encoding="utf-8") as f1:
92
- for line in f1:
93
- line = line.strip()
94
- urls[line] = True
95
-
96
- idx = 0
97
- for file in data_files:
98
- if ".gz" in file:
99
- f = gzip.open(file)
100
- else:
101
- f = open(file, encoding="utf-8")
102
-
103
- for line in f:
104
- line = line.strip()
105
- js = json.loads(line)
106
- if js["url"] in urls:
107
- js["id"] = idx
108
- js = self.post_process(split_name, language, js)
109
- if "partition" in js:
110
- del js["partition"]
111
- yield idx, js
112
- idx += 1
113
- f.close()
114
-
115
-
116
- class CodeXGlueCtCodeToTextImpl(CodeXGlueCtCodeToTextBaseImpl):
117
- SINGLE_LANGUAGE = False
118
-
119
- def generate_urls(self, split_name):
120
- language = self.info["parameters"]["language"]
121
- for e in super().generate_urls(split_name, language):
122
- yield e
123
-
124
- def _generate_examples(self, split_name, file_paths):
125
- language = self.info["parameters"]["language"]
126
- for e in super()._generate_examples(split_name, file_paths, language):
127
- yield e
128
-
129
-
130
- CLASS_MAPPING = {
131
- "CodeXGlueCtCodeToText": CodeXGlueCtCodeToTextImpl,
132
- }
133
-
134
-
135
- class CodeXGlueCtCodeToText(datasets.GeneratorBasedBuilder):
136
- BUILDER_CONFIG_CLASS = datasets.BuilderConfig
137
- BUILDER_CONFIGS = [
138
- datasets.BuilderConfig(name=name, description=info["description"]) for name, info in DEFINITIONS.items()
139
- ]
140
-
141
- def _info(self):
142
- name = self.config.name
143
- info = DEFINITIONS[name]
144
- if info["class_name"] in CLASS_MAPPING:
145
- self.child = CLASS_MAPPING[info["class_name"]](info)
146
- else:
147
- raise RuntimeError(f"Unknown python class for dataset configuration {name}")
148
- ret = self.child._info()
149
- return ret
150
-
151
- def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
152
- return self.child._split_generators(dl_manager=dl_manager)
153
-
154
- def _generate_examples(self, split_name, file_paths):
155
- return self.child._generate_examples(split_name, file_paths)
 
common.py DELETED
@@ -1,75 +0,0 @@
1
- from typing import List
2
-
3
- import datasets
4
-
5
-
6
- # Citation, taken from https://github.com/microsoft/CodeXGLUE
7
- _DEFAULT_CITATION = """@article{CodeXGLUE,
8
- title={CodeXGLUE: A Benchmark Dataset and Open Challenge for Code Intelligence},
9
- year={2020},}"""
10
-
11
-
12
- class Child:
13
- _DESCRIPTION = None
14
- _FEATURES = None
15
- _CITATION = None
16
- SPLITS = {"train": datasets.Split.TRAIN}
17
- _SUPERVISED_KEYS = None
18
-
19
- def __init__(self, info):
20
- self.info = info
21
-
22
- def homepage(self):
23
- return self.info["project_url"]
24
-
25
- def _info(self):
26
- # This is the description that will appear on the datasets page.
27
- return datasets.DatasetInfo(
28
- description=self.info["description"] + "\n\n" + self._DESCRIPTION,
29
- features=datasets.Features(self._FEATURES),
30
- homepage=self.homepage(),
31
- citation=self._CITATION or _DEFAULT_CITATION,
32
- supervised_keys=self._SUPERVISED_KEYS,
33
- )
34
-
35
- def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
36
- SPLITS = self.SPLITS
37
- _URL = self.info["raw_url"]
38
- urls_to_download = {}
39
- for split in SPLITS:
40
- if split not in urls_to_download:
41
- urls_to_download[split] = {}
42
-
43
- for key, url in self.generate_urls(split):
44
- if not url.startswith("http"):
45
- url = _URL + "/" + url
46
- urls_to_download[split][key] = url
47
-
48
- downloaded_files = {}
49
- for k, v in urls_to_download.items():
50
- downloaded_files[k] = dl_manager.download_and_extract(v)
51
-
52
- return [
53
- datasets.SplitGenerator(
54
- name=SPLITS[k],
55
- gen_kwargs={"split_name": k, "file_paths": downloaded_files[k]},
56
- )
57
- for k in SPLITS
58
- ]
59
-
60
- def check_empty(self, entries):
61
- all_empty = all([v == "" for v in entries.values()])
62
- all_non_empty = all([v != "" for v in entries.values()])
63
-
64
- if not all_non_empty and not all_empty:
65
- raise RuntimeError("Parallel data files should have the same number of lines.")
66
-
67
- return all_empty
68
-
69
-
70
- class TrainValidTestChild(Child):
71
- SPLITS = {
72
- "train": datasets.Split.TRAIN,
73
- "valid": datasets.Split.VALIDATION,
74
- "test": datasets.Split.TEST,
75
- }
 
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"go": {"description": "CodeXGLUE code-to-text dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text\n\nThe dataset we use comes from CodeSearchNet and we filter the dataset as the following:\n- Remove examples that codes cannot be parsed into an abstract syntax tree.\n- Remove examples that #tokens of documents is < 3 or >256\n- Remove examples that documents contain special tokens (e.g. <img ...> or https:...)\n- Remove examples that documents are not English.\n", "citation": "@article{husain2019codesearchnet,\ntitle={Codesearchnet challenge: Evaluating the state of semantic code search},\nauthor={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},\njournal={arXiv preprint arXiv:1909.09436},\nyear={2019}\n}", "homepage": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Text/code-to-text", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "repo": {"dtype": "string", "id": null, "_type": "Value"}, "path": {"dtype": "string", "id": null, "_type": "Value"}, "func_name": {"dtype": "string", "id": null, "_type": "Value"}, "original_string": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "code": {"dtype": "string", "id": null, "_type": "Value"}, "code_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "docstring": {"dtype": "string", "id": null, "_type": "Value"}, "docstring_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "sha": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "docstring", "output": "docstring_tokens"}, "task_templates": null, "builder_name": "code_x_glue_ct_code_to_text", "config_name": "go", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 342244027, "num_examples": 167288, "dataset_name": "code_x_glue_ct_code_to_text"}, "validation": {"name": "validation", "num_bytes": 13721912, "num_examples": 7325, "dataset_name": "code_x_glue_ct_code_to_text"}, "test": {"name": "test", "num_bytes": 16328458, "num_examples": 8122, "dataset_name": "code_x_glue_ct_code_to_text"}}, "download_checksums": {"https://s3.amazonaws.com/code-search-net/CodeSearchNet/v2/go.zip": {"num_bytes": 487525935, "checksum": "15d23f01dc2796447e1736263e6830079289d5ef41f09988011afdcf8da6b6e5"}, "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Text/code-to-text/dataset.zip": {"num_bytes": 12396864, "checksum": "31ec750805302ecd71b278a492d23d2ac916269f7ec645bba4f23b6f7c4bf217"}}, "download_size": 499922799, "post_processing_size": null, "dataset_size": 372294397, "size_in_bytes": 872217196}, "java": {"description": "CodeXGLUE code-to-text dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text\n\nThe dataset we use comes from CodeSearchNet and we filter the dataset as the following:\n- Remove examples that codes cannot be parsed into an abstract syntax tree.\n- Remove examples that #tokens of documents is < 3 or >256\n- Remove examples that documents contain special tokens (e.g. 
<img ...> or https:...)\n- Remove examples that documents are not English.\n", "citation": "@article{husain2019codesearchnet,\ntitle={Codesearchnet challenge: Evaluating the state of semantic code search},\nauthor={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},\njournal={arXiv preprint arXiv:1909.09436},\nyear={2019}\n}", "homepage": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Text/code-to-text", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "repo": {"dtype": "string", "id": null, "_type": "Value"}, "path": {"dtype": "string", "id": null, "_type": "Value"}, "func_name": {"dtype": "string", "id": null, "_type": "Value"}, "original_string": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "code": {"dtype": "string", "id": null, "_type": "Value"}, "code_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "docstring": {"dtype": "string", "id": null, "_type": "Value"}, "docstring_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "sha": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "docstring", "output": "docstring_tokens"}, "task_templates": null, "builder_name": "code_x_glue_ct_code_to_text", "config_name": "java", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 452554719, "num_examples": 164923, "dataset_name": "code_x_glue_ct_code_to_text"}, "validation": {"name": "validation", "num_bytes": 13366396, "num_examples": 5183, "dataset_name": "code_x_glue_ct_code_to_text"}, "test": {"name": "test", "num_bytes": 29080857, "num_examples": 10955, "dataset_name": "code_x_glue_ct_code_to_text"}}, "download_checksums": {"https://s3.amazonaws.com/code-search-net/CodeSearchNet/v2/java.zip": {"num_bytes": 1060569153, "checksum": "05f9204b1808413fab30f0e69229e298f6de4ad468279d53a2aa5797e3a78c17"}, "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Text/code-to-text/dataset.zip": {"num_bytes": 12396864, "checksum": "31ec750805302ecd71b278a492d23d2ac916269f7ec645bba4f23b6f7c4bf217"}}, "download_size": 1072966017, "post_processing_size": null, "dataset_size": 495001972, "size_in_bytes": 1567967989}, "javascript": {"description": "CodeXGLUE code-to-text dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text\n\nThe dataset we use comes from CodeSearchNet and we filter the dataset as the following:\n- Remove examples that codes cannot be parsed into an abstract syntax tree.\n- Remove examples that #tokens of documents is < 3 or >256\n- Remove examples that documents contain special tokens (e.g. 
<img ...> or https:...)\n- Remove examples that documents are not English.\n", "citation": "@article{husain2019codesearchnet,\ntitle={Codesearchnet challenge: Evaluating the state of semantic code search},\nauthor={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},\njournal={arXiv preprint arXiv:1909.09436},\nyear={2019}\n}", "homepage": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Text/code-to-text", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "repo": {"dtype": "string", "id": null, "_type": "Value"}, "path": {"dtype": "string", "id": null, "_type": "Value"}, "func_name": {"dtype": "string", "id": null, "_type": "Value"}, "original_string": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "code": {"dtype": "string", "id": null, "_type": "Value"}, "code_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "docstring": {"dtype": "string", "id": null, "_type": "Value"}, "docstring_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "sha": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "docstring", "output": "docstring_tokens"}, "task_templates": null, "builder_name": "code_x_glue_ct_code_to_text", "config_name": "javascript", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 160860743, "num_examples": 58025, "dataset_name": "code_x_glue_ct_code_to_text"}, "validation": {"name": "validation", "num_bytes": 10337396, "num_examples": 3885, "dataset_name": "code_x_glue_ct_code_to_text"}, "test": {"name": "test", "num_bytes": 10190765, "num_examples": 3291, "dataset_name": "code_x_glue_ct_code_to_text"}}, "download_checksums": {"https://s3.amazonaws.com/code-search-net/CodeSearchNet/v2/javascript.zip": {"num_bytes": 1664713350, "checksum": "fdc743f5af27f90c77584a2d29e2b7f8cecdd00c37b433c385b888ee062936dd"}, "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Text/code-to-text/dataset.zip": {"num_bytes": 12396864, "checksum": "31ec750805302ecd71b278a492d23d2ac916269f7ec645bba4f23b6f7c4bf217"}}, "download_size": 1677110214, "post_processing_size": null, "dataset_size": 181388904, "size_in_bytes": 1858499118}, "php": {"description": "CodeXGLUE code-to-text dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text\n\nThe dataset we use comes from CodeSearchNet and we filter the dataset as the following:\n- Remove examples that codes cannot be parsed into an abstract syntax tree.\n- Remove examples that #tokens of documents is < 3 or >256\n- Remove examples that documents contain special tokens (e.g. 
<img ...> or https:...)\n- Remove examples that documents are not English.\n", "citation": "@article{husain2019codesearchnet,\ntitle={Codesearchnet challenge: Evaluating the state of semantic code search},\nauthor={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},\njournal={arXiv preprint arXiv:1909.09436},\nyear={2019}\n}", "homepage": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Text/code-to-text", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "repo": {"dtype": "string", "id": null, "_type": "Value"}, "path": {"dtype": "string", "id": null, "_type": "Value"}, "func_name": {"dtype": "string", "id": null, "_type": "Value"}, "original_string": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "code": {"dtype": "string", "id": null, "_type": "Value"}, "code_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "docstring": {"dtype": "string", "id": null, "_type": "Value"}, "docstring_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "sha": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "docstring", "output": "docstring_tokens"}, "task_templates": null, "builder_name": "code_x_glue_ct_code_to_text", "config_name": "php", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 614655799, "num_examples": 241241, "dataset_name": "code_x_glue_ct_code_to_text"}, "validation": {"name": "validation", "num_bytes": 33283149, "num_examples": 12982, "dataset_name": "code_x_glue_ct_code_to_text"}, "test": {"name": "test", "num_bytes": 35375097, "num_examples": 14014, "dataset_name": "code_x_glue_ct_code_to_text"}}, "download_checksums": {"https://s3.amazonaws.com/code-search-net/CodeSearchNet/v2/php.zip": {"num_bytes": 851894048, "checksum": "c3bbf0d1b10010f88b058faea876f1f5471758399e30d58c11f78ff53660ce00"}, "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Text/code-to-text/dataset.zip": {"num_bytes": 12396864, "checksum": "31ec750805302ecd71b278a492d23d2ac916269f7ec645bba4f23b6f7c4bf217"}}, "download_size": 864290912, "post_processing_size": null, "dataset_size": 683314045, "size_in_bytes": 1547604957}, "python": {"description": "CodeXGLUE code-to-text dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text\n\nThe dataset we use comes from CodeSearchNet and we filter the dataset as the following:\n- Remove examples that codes cannot be parsed into an abstract syntax tree.\n- Remove examples that #tokens of documents is < 3 or >256\n- Remove examples that documents contain special tokens (e.g. 
<img ...> or https:...)\n- Remove examples that documents are not English.\n", "citation": "@article{husain2019codesearchnet,\ntitle={Codesearchnet challenge: Evaluating the state of semantic code search},\nauthor={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},\njournal={arXiv preprint arXiv:1909.09436},\nyear={2019}\n}", "homepage": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Text/code-to-text", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "repo": {"dtype": "string", "id": null, "_type": "Value"}, "path": {"dtype": "string", "id": null, "_type": "Value"}, "func_name": {"dtype": "string", "id": null, "_type": "Value"}, "original_string": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "code": {"dtype": "string", "id": null, "_type": "Value"}, "code_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "docstring": {"dtype": "string", "id": null, "_type": "Value"}, "docstring_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "sha": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "docstring", "output": "docstring_tokens"}, "task_templates": null, "builder_name": "code_x_glue_ct_code_to_text", "config_name": "python", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 813664500, "num_examples": 251820, "dataset_name": "code_x_glue_ct_code_to_text"}, "validation": {"name": "validation", "num_bytes": 46888668, "num_examples": 13914, "dataset_name": "code_x_glue_ct_code_to_text"}, "test": {"name": "test", "num_bytes": 50659792, "num_examples": 14918, "dataset_name": "code_x_glue_ct_code_to_text"}}, "download_checksums": {"https://s3.amazonaws.com/code-search-net/CodeSearchNet/v2/python.zip": {"num_bytes": 940909997, "checksum": "7223c6460bebfa85697b586da91e47bc5d64790a4d60bba5917106458ab6b40e"}, "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Text/code-to-text/dataset.zip": {"num_bytes": 12396864, "checksum": "31ec750805302ecd71b278a492d23d2ac916269f7ec645bba4f23b6f7c4bf217"}}, "download_size": 953306861, "post_processing_size": null, "dataset_size": 911212960, "size_in_bytes": 1864519821}, "ruby": {"description": "CodeXGLUE code-to-text dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text\n\nThe dataset we use comes from CodeSearchNet and we filter the dataset as the following:\n- Remove examples that codes cannot be parsed into an abstract syntax tree.\n- Remove examples that #tokens of documents is < 3 or >256\n- Remove examples that documents contain special tokens (e.g. 
<img ...> or https:...)\n- Remove examples that documents are not English.\n", "citation": "@article{husain2019codesearchnet,\ntitle={Codesearchnet challenge: Evaluating the state of semantic code search},\nauthor={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},\njournal={arXiv preprint arXiv:1909.09436},\nyear={2019}\n}", "homepage": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Text/code-to-text", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "repo": {"dtype": "string", "id": null, "_type": "Value"}, "path": {"dtype": "string", "id": null, "_type": "Value"}, "func_name": {"dtype": "string", "id": null, "_type": "Value"}, "original_string": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "code": {"dtype": "string", "id": null, "_type": "Value"}, "code_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "docstring": {"dtype": "string", "id": null, "_type": "Value"}, "docstring_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "sha": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "docstring", "output": "docstring_tokens"}, "task_templates": null, "builder_name": "code_x_glue_ct_code_to_text", "config_name": "ruby", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 51956595, "num_examples": 24927, "dataset_name": "code_x_glue_ct_code_to_text"}, "validation": {"name": "validation", "num_bytes": 2821089, "num_examples": 1400, "dataset_name": "code_x_glue_ct_code_to_text"}, "test": {"name": "test", "num_bytes": 2671603, "num_examples": 1261, "dataset_name": "code_x_glue_ct_code_to_text"}}, "download_checksums": {"https://s3.amazonaws.com/code-search-net/CodeSearchNet/v2/ruby.zip": {"num_bytes": 111758028, "checksum": "67aee5812d0f994df745c771c7791483f2b060561495747d424e307af4b342e6"}, "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Text/code-to-text/dataset.zip": {"num_bytes": 12396864, "checksum": "31ec750805302ecd71b278a492d23d2ac916269f7ec645bba4f23b6f7c4bf217"}}, "download_size": 124154892, "post_processing_size": null, "dataset_size": 57449287, "size_in_bytes": 181604179}}
 
 
generated_definitions.py DELETED
@@ -1,68 +0,0 @@
1
- DEFINITIONS = {
2
- "go": {
3
- "class_name": "CodeXGlueCtCodeToText",
4
- "dataset_type": "Code-Text",
5
- "description": "CodeXGLUE code-to-text dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text",
6
- "dir_name": "code-to-text",
7
- "name": "go",
8
- "parameters": {"language": "go"},
9
- "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Text/code-to-text",
10
- "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Text/code-to-text",
11
- "sizes": {"test": 8122, "train": 167288, "validation": 7325},
12
- },
13
- "java": {
14
- "class_name": "CodeXGlueCtCodeToText",
15
- "dataset_type": "Code-Text",
16
- "description": "CodeXGLUE code-to-text dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text",
17
- "dir_name": "code-to-text",
18
- "name": "java",
19
- "parameters": {"language": "java"},
20
- "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Text/code-to-text",
21
- "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Text/code-to-text",
22
- "sizes": {"test": 10955, "train": 164923, "validation": 5183},
23
- },
24
- "javascript": {
25
- "class_name": "CodeXGlueCtCodeToText",
26
- "dataset_type": "Code-Text",
27
- "description": "CodeXGLUE code-to-text dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text",
28
- "dir_name": "code-to-text",
29
- "name": "javascript",
30
- "parameters": {"language": "javascript"},
31
- "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Text/code-to-text",
32
- "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Text/code-to-text",
33
- "sizes": {"test": 3291, "train": 58025, "validation": 3885},
34
- },
35
- "php": {
36
- "class_name": "CodeXGlueCtCodeToText",
37
- "dataset_type": "Code-Text",
38
- "description": "CodeXGLUE code-to-text dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text",
39
- "dir_name": "code-to-text",
40
- "name": "php",
41
- "parameters": {"language": "php"},
42
- "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Text/code-to-text",
43
- "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Text/code-to-text",
44
- "sizes": {"test": 14014, "train": 241241, "validation": 12982},
45
- },
46
- "python": {
47
- "class_name": "CodeXGlueCtCodeToText",
48
- "dataset_type": "Code-Text",
49
- "description": "CodeXGLUE code-to-text dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text",
50
- "dir_name": "code-to-text",
51
- "name": "python",
52
- "parameters": {"language": "python"},
53
- "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Text/code-to-text",
54
- "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Text/code-to-text",
55
- "sizes": {"test": 14918, "train": 251820, "validation": 13914},
56
- },
57
- "ruby": {
58
- "class_name": "CodeXGlueCtCodeToText",
59
- "dataset_type": "Code-Text",
60
- "description": "CodeXGLUE code-to-text dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text",
61
- "dir_name": "code-to-text",
62
- "name": "ruby",
63
- "parameters": {"language": "ruby"},
64
- "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Text/code-to-text",
65
- "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Text/code-to-text",
66
- "sizes": {"test": 1261, "train": 24927, "validation": 1400},
67
- },
68
- }
 
go/code_x_glue_ct_code_to_text-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d17b12c70a8a7bca333496d446f31bc4cfa55ef13f000d2c855bfe3c4747fb3
3
+ size 5432556
go/code_x_glue_ct_code_to_text-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6b8204f34aa537fdc52ef9f7a2658dd96443f59fb142ceb503a95822e1dc2f5
3
+ size 111584934
go/code_x_glue_ct_code_to_text-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:21a10db4f185aadf43049616d6a32f66d2153e1f799ac04db52add329f1b8b87
3
+ size 4294909
java/code_x_glue_ct_code_to_text-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:668222f47724c5763438c263649601cae86e0a182238d5c264152e6b30d63e46
3
+ size 9378592
java/code_x_glue_ct_code_to_text-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b22fdfbbc5fdfcb8dca411d2f8c8bc243211faee9ed77c0a056c763e539be734
3
+ size 141184597
java/code_x_glue_ct_code_to_text-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d0751ba1ae16fcde58093abc7f6432be4b0763b12aecdcd72a3bf86faf94669
3
+ size 4245862
javascript/code_x_glue_ct_code_to_text-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9df8006129c8e2c77e59e8012626c1d9eba5c3c7feb37cbb51c86675d4427b28
3
+ size 3593406
javascript/code_x_glue_ct_code_to_text-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7fe2ca6a84660304cb72e917e9c1f88e60be8af9671b6834245866360cbd8d8f
3
+ size 58427419
javascript/code_x_glue_ct_code_to_text-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39915a1f0b806ab42c72d33dd9ea49332309a7e5f6bd050971720ab3e0c4c4c4
3
+ size 3779362
php/code_x_glue_ct_code_to_text-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c426b9e5a88edd7b42ce7358dbcd98eabcff93628b6ebf9cc8dd2879cd72a024
3
+ size 11160468
php/code_x_glue_ct_code_to_text-train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b811acf59ed6999a936302786170c1333a3d4c349cb63aecafcc00a36e67be90
3
+ size 161531061
php/code_x_glue_ct_code_to_text-train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:deacbf48cae3c20f259e142e3a5246654e2abe87bc455d29e0b6029358631851
3
+ size 36436409
php/code_x_glue_ct_code_to_text-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:010fce19d571125dbc3055d357d38b0813ee8e99c40266ebd1190263a8946c45
3
+ size 10534828
python/code_x_glue_ct_code_to_text-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de0e5e8b160029d549a51b21f5d4a9312943049aaf2ea30ef62d4607350f1758
3
+ size 17972582
python/code_x_glue_ct_code_to_text-train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89038d09c98d72c1953be34b745bbb91ca544288ca255bc6094357f9bef1cb87
3
+ size 179026743
python/code_x_glue_ct_code_to_text-train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f8dddd41fdd02b0487101688a926e278e9358a02a130a79802dc32cb28cd44f
3
+ size 111876241
python/code_x_glue_ct_code_to_text-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d106d2c7c8fd2a812fadb43ced8cbe3c8bef09d41cdc9b0a6ec0121dbf2dea6f
3
+ size 16650840
ruby/code_x_glue_ct_code_to_text-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f5a1a3866b016dd4c3d4fb64aea672d6de58b7856ba35c37e4bfb91c53c729e
3
+ size 1030531
ruby/code_x_glue_ct_code_to_text-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f67e65c74523ebed1037d393312842d991310ef493633b2a9b84932646d83d5
3
+ size 19861673
ruby/code_x_glue_ct_code_to_text-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:81a7e62f94d6a3726449362f3def05638d684be482c51d2afb96085a67c6cba8
3
+ size 1059105
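The entries above are Git LFS pointers rather than the parquet payloads themselves. A minimal reading sketch, assuming the repository has been cloned, `git lfs pull` has fetched the real files, and pandas with a parquet engine (pyarrow) is installed:

```
import pandas as pd

# Read one shard added in this commit; the path matches the file listing above.
df = pd.read_parquet("go/code_x_glue_ct_code_to_text-test.parquet")
print(len(df))                # should match the go test split size (8122)
print(df.columns.tolist())    # id, repo, path, func_name, ..., sha, url
print(df.loc[0, "func_name"])
```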