3v324v23 committed on
Commit
9cbb988
1 Parent(s): 534eb1a

adding the corpus

Product-Search-Corpus-v0.1.py ADDED
@@ -0,0 +1,82 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """TREC Product Search dataset."""
+
+ import json
+
+ import datasets
+
+ _CITATION = """
+
+ """
+
+ _DESCRIPTION = "dataset load script for TREC Product Search Corpus"
+
+ _DATASET_URLS = {
+     'train': "https://huggingface.co/datasets/trec-product-search/Product-Search-Corpus-v0.1/resolve/main/corpus.jsonl.gz",
+ }
+
+
+ class TRECProductCorpus(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("0.0.1")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(version=VERSION,
+                                description="TREC Product Search Corpus"),
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {'docid': datasets.Value('string'), 'title': datasets.Value('string'), 'text': datasets.Value('string')}
+         )
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the two configurations
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="",
+             # License for the dataset if available
+             license="",
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         if self.config.data_files:
+             downloaded_files = self.config.data_files
+         else:
+             downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
+         splits = [
+             datasets.SplitGenerator(
+                 name=split,
+                 gen_kwargs={
+                     "files": [downloaded_files[split]] if isinstance(downloaded_files[split], str) else downloaded_files[split],
+                 },
+             ) for split in downloaded_files
+         ]
+         return splits
+
+     def _generate_examples(self, files):
+         """Yields examples."""
+         for filepath in files:
+             with open(filepath, encoding="utf-8") as f:
+                 for line in f:
+                     data = json.loads(line)
+                     yield data['docid'], data
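
A minimal usage sketch for the script above, assuming it is published under the trec-product-search/Product-Search-Corpus-v0.1 repository referenced in _DATASET_URLS; depending on the installed datasets version, script-based datasets may additionally require trust_remote_code=True.

from datasets import load_dataset

# Load the 'train' split declared in _DATASET_URLS; the repo id is assumed from the URL above.
corpus = load_dataset("trec-product-search/Product-Search-Corpus-v0.1", split="train")

# Each record carries the docid, title and text fields declared in _info().
print(corpus[0]["docid"], corpus[0]["title"])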
corpus-simple.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1aa3e9dc9e4b555db8c588c6bc2c71d254808950b3e204b67df25ecfe5d0be7f
+ size 570693517
corpus.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc226c6b4620695da43d902ef1a4e46ca9d93911e8e9e5b530b0b4d5c3aada27
+ size 1103769132
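
The pointers above track gzipped JSONL files via Git LFS. A standalone sketch for inspecting corpus.jsonl.gz directly, assuming each line is a JSON object with the docid, title and text fields consumed by _generate_examples:

import gzip
import json

# Read the gzipped JSONL corpus line by line and show the first document.
with gzip.open("corpus.jsonl.gz", "rt", encoding="utf-8") as f:
    for line in f:
        doc = json.loads(line)
        print(doc["docid"], doc["title"])
        break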