Convert dataset to Parquet

#3
opened by albertvillanova (HF Staff)
Files changed (3)
  1. README.md +10 -4
  2. hard.py +0 -105
  3. plain_text/train-00000-of-00001.parquet +3 -0
README.md CHANGED
@@ -20,6 +20,7 @@ task_ids:
 paperswithcode_id: hard
 pretty_name: Hotel Arabic-Reviews Dataset
 dataset_info:
+  config_name: plain_text
   features:
   - name: text
     dtype: string
@@ -32,13 +33,18 @@ dataset_info:
           '2': '3'
           '3': '4'
           '4': '5'
-  config_name: plain_text
   splits:
   - name: train
-    num_bytes: 27507085
+    num_bytes: 27507041
     num_examples: 105698
-  download_size: 8508677
-  dataset_size: 27507085
+  download_size: 13607961
+  dataset_size: 27507041
+configs:
+- config_name: plain_text
+  data_files:
+  - split: train
+    path: plain_text/train-*
+  default: true
 ---
 
 # Dataset Card for Hard
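With this change the `plain_text` config is served straight from the Parquet shard declared in the new `configs` block, so no loading script has to run. A minimal usage sketch, assuming the dataset is published on the Hub under the `hard` repository id (taken here from the card's `paperswithcode_id`):

```python
from datasets import load_dataset

# The README's `configs` entry maps the plain_text config to plain_text/train-*,
# so the Hub resolves the Parquet file directly; "hard" as the repo id is an assumption.
ds = load_dataset("hard", "plain_text", split="train")

print(ds.num_rows)                 # 105698, matching dataset_info above
print(ds.features["label"].names)  # ['1', '2', '3', '4', '5']
print(ds[0])                       # {'text': ..., 'label': ...}
```

Since the config is marked `default: true`, `load_dataset("hard", split="train")` resolves to the same data.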
hard.py DELETED
@@ -1,105 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""Hotel Reviews in Arabic language"""
-
-
-import os
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-_DESCRIPTION = """\
-This dataset contains 93700 hotel reviews in Arabic language.\
-The hotel reviews were collected from Booking.com website during June/July 2016.\
-The reviews are expressed in Modern Standard Arabic as well as dialectal Arabic.\
-The following table summarize some tatistics on the HARD Dataset.
-"""
-
-_CITATION = """\
-@incollection{elnagar2018hotel,
-  title={Hotel Arabic-reviews dataset construction for sentiment analysis applications},
-  author={Elnagar, Ashraf and Khalifa, Yasmin S and Einea, Anas},
-  booktitle={Intelligent Natural Language Processing: Trends and Applications},
-  pages={35--52},
-  year={2018},
-  publisher={Springer}
-}
-"""
-
-_DOWNLOAD_URL = "https://raw.githubusercontent.com/elnagara/HARD-Arabic-Dataset/master/data/balanced-reviews.zip"
-
-
-class HardConfig(datasets.BuilderConfig):
-    """BuilderConfig for Hard."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for Hard.
-
-        Args:
-          **kwargs: keyword arguments forwarded to super.
-        """
-        super(HardConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
-
-
-class Hard(datasets.GeneratorBasedBuilder):
-    """Hard dataset."""
-
-    BUILDER_CONFIGS = [
-        HardConfig(
-            name="plain_text",
-            description="Plain text",
-        )
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "text": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(
-                        names=[
-                            "1",
-                            "2",
-                            "3",
-                            "4",
-                            "5",
-                        ]
-                    ),
-                }
-            ),
-            supervised_keys=None,
-            homepage="https://github.com/elnagara/HARD-Arabic-Dataset",
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="text", label_column="label")],
-        )
-
-    def _split_generators(self, dl_manager):
-        data_dir = dl_manager.download_and_extract(_DOWNLOAD_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN, gen_kwargs={"directory": os.path.join(data_dir, "balanced-reviews.txt")}
-            ),
-        ]
-
-    def _generate_examples(self, directory):
-        """Generate examples."""
-        with open(directory, mode="r", encoding="utf-16") as file:
-            for id_, line in enumerate(file.read().splitlines()[1:]):
-                _, _, rating, _, _, _, review_text = line.split("\t")
-                yield str(id_), {"text": review_text, "label": rating}
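For reference, the row parsing that the deleted `_generate_examples` performed can be reproduced standalone; a minimal sketch, assuming a local copy of `balanced-reviews.txt` with the layout the script expected (UTF-16 encoding, a header row, rating in the third tab-separated field, review text in the seventh):

```python
# Standalone version of the deleted script's parsing loop; the Parquet shard added
# in this PR replaces this step entirely. The local file path is an assumption.
def iter_reviews(path="balanced-reviews.txt"):
    with open(path, mode="r", encoding="utf-16") as file:
        for id_, line in enumerate(file.read().splitlines()[1:]):  # skip header row
            _, _, rating, _, _, _, review_text = line.split("\t")
            yield str(id_), {"text": review_text, "label": rating}
```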
plain_text/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc1abe8fadfb59f6dcc2f933565a23f08af89814bf7f7946019ea5cc35245114
+size 13607961
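The three lines above are only the Git LFS pointer; the actual ~13.6 MB Parquet shard lives in LFS. Once checked out, it can be inspected without the `datasets` library at all; a small sketch using pandas, with the local path taken from the file added in this PR:

```python
import pandas as pd

# Read the converted shard directly (requires pyarrow or fastparquet installed).
df = pd.read_parquet("plain_text/train-00000-of-00001.parquet")

print(len(df))           # expected 105698 rows, matching the README's num_examples
print(list(df.columns))  # ['text', 'label'] per the dataset_info features
```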