parquet-converter committed
Commit 63b35d8
1 Parent(s): b3f5895

Update parquet files

.gitattributes DELETED
@@ -1,51 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
README.md DELETED
@@ -1,37 +0,0 @@
- ---
- language:
- - en
- license:
- - mit
- source_datasets:
- - original
- task_categories:
- - image-segmentation
- - object-detection
- task_ids: []
- tags:
- - optical-character-recognition
- - text-detection
- - ocr
- ---
-
-
- # School Notebooks Dataset
-
- This dataset contains images of school notebooks with handwritten notes in English.
-
- The dataset annotation contains end-to-end markup for training detection and OCR models, as well as an end-to-end model for reading text from pages.
-
- ## Annotation format
-
- The annotation is in COCO format. The `annotation.json` should have the following dictionaries:
-
- - `annotation["categories"]` - a list of dicts with category info (category names and indexes).
- - `annotation["images"]` - a list of dictionaries describing the images; each dictionary must contain the fields:
-   - `file_name` - the name of the image file.
-   - `id` - the image id.
- - `annotation["annotations"]` - a list of dictionaries with markup information. Each dictionary stores a description for one polygon from the dataset and must contain the following fields:
-   - `image_id` - the index of the image on which the polygon is located.
-   - `category_id` - the polygon's category index.
-   - `attributes` - a dict with additional annotation information; the `translation` subdict contains the text translation for the line.
-   - `segmentation` - the coordinates of the polygon: a flat list of numbers forming x, y coordinate pairs.
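Putting the field descriptions above together, here is a minimal sketch of walking one of the annotation files; the file name and the exact shape of `attributes["translation"]` are assumptions based on the README text, not checked against the data:

```python
import json

# Load one split's COCO-style annotation (file name from this repo's layout).
with open("annotations_train.json") as f:
    ann = json.load(f)

# Map image id -> file name, per annotation["images"].
id_to_name = {img["id"]: img["file_name"] for img in ann["images"]}

for a in ann["annotations"]:
    file_name = id_to_name[a["image_id"]]
    polygon = a["segmentation"]                # flat list: x1, y1, x2, y2, ...
    text = a["attributes"].get("translation")  # per-line text, per the README
```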
annotations_test.json DELETED
The diff for this file is too large to render. See raw diff
 
annotations_train.json DELETED
The diff for this file is too large to render. See raw diff
 
annotations_val.json DELETED
The diff for this file is too large to render. See raw diff
 
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"default": {"description": "", "citation": "", "homepage": "", "license": "", "features": {"image": {"decode": true, "id": null, "_type": "Image"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "school_notebooks_en", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 11957, "num_examples": 70, "dataset_name": "school_notebooks_en"}, "test": {"name": "test", "num_bytes": 1692, "num_examples": 10, "dataset_name": "school_notebooks_en"}, "validation": {"name": "validation", "num_bytes": 1726, "num_examples": 10, "dataset_name": "school_notebooks_en"}}, "download_checksums": {"images.zip": {"num_bytes": 356400252, "checksum": "295ab37246a1ec976f50019ab77b3bb35543b0cdcbd2b825a7949c4959448a75"}, "annotations_train.json": {"num_bytes": 8644229, "checksum": "2b52b1876e47c6970e2ba97f57fe2a431a2bca0c5b9450360c4bf481643c0b5f"}, "annotations_test.json": {"num_bytes": 687288, "checksum": "c241e966d26d10cd7fa19730489cac104ebebfd9931d0acd0c4fb7369a958672"}, "annotations_val.json": {"num_bytes": 1768738, "checksum": "8a6b80a2c73ca25a2b2b23d4620c4c34492b728ed094a824e82f49427712c271"}}, "download_size": 367500507, "post_processing_size": null, "dataset_size": 15375, "size_in_bytes": 367515882}}
 
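The `download_checksums` block above can be used to verify local copies of the pre-conversion files; a minimal sketch, assuming the files sit in the current directory:

```python
import hashlib
import json

with open("dataset_infos.json") as f:
    info = json.load(f)["default"]

# Compare a local file's sha256 against the recorded checksum.
name = "annotations_train.json"
expected = info["download_checksums"][name]["checksum"]
with open(name, "rb") as f:
    actual = hashlib.sha256(f.read()).hexdigest()
assert actual == expected, f"{name}: checksum mismatch"
```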
 
images.zip → default/school_notebooks_en-test.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:295ab37246a1ec976f50019ab77b3bb35543b0cdcbd2b825a7949c4959448a75
- size 356400252
+ oid sha256:e9540b9ff87ca6bbbfb132d5dc512e0ef74d6cf1ae5c7a90b4286efc8d65609d
+ size 43642579
default/school_notebooks_en-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:302bd5e9d45e785202daacfa375d42552382b116b29d0caac54841cd417e3709
+ size 283075804
default/school_notebooks_en-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61826673706dab8a96601b84922266899c1b0401db05de129a072f5974082fe0
+ size 40900202
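The LFS pointers above replace `images.zip` with one Parquet file per split. A minimal sketch for inspecting one of them, assuming the files are downloaded locally and `pyarrow` is installed (the single `image` column follows from the feature schema in `dataset_infos.json`):

```python
import pyarrow.parquet as pq

# Read the test split written by the parquet-converter bot.
table = pq.read_table("default/school_notebooks_en-test.parquet")
print(table.num_rows)  # expected: 10, per the split metadata above
print(table.schema)    # a single "image" column holding encoded image data
```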
school_notebooks_EN.py DELETED
@@ -1,63 +0,0 @@
- import os
- import json
- import datasets
-
-
- class SchoolNotebooks(datasets.GeneratorBasedBuilder):
-     def _info(self):
-         return datasets.DatasetInfo(
-             features=datasets.Features(
-                 {
-                     "image": datasets.Image(),
-                 }
-             )
-         )
-
-     def _split_generators(self, dl_manager):
-         _URLS = {
-             "images": "images.zip",
-             "train_data": "annotations_train.json",
-             "test_data": "annotations_test.json",
-             "val_data": "annotations_val.json"
-         }
-         data_files = dl_manager.download_and_extract(_URLS)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "image_paths": dl_manager.iter_files(data_files["images"]),
-                     "annotation_path": data_files["train_data"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "image_paths": dl_manager.iter_files(data_files["images"]),
-                     "annotation_path": data_files["test_data"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "image_paths": dl_manager.iter_files(data_files["images"]),
-                     "annotation_path": data_files["val_data"],
-                 },
-             )
-         ]
-
-     def _generate_examples(self, image_paths, annotation_path):
-         """Generate examples."""
-         with open(annotation_path, 'r') as f:
-             data = json.load(f)
-
-         image_names = set()
-         for image_data in data['images']:
-             image_names.add(image_data['file_name'])
-
-         for idx, image_path in enumerate(image_paths):
-             if os.path.basename(image_path) in image_names:
-                 example = {
-                     "image": image_path,
-                 }
-                 yield idx, example
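With the loading script deleted, the dataset is consumed through the `datasets` library, which now reads the Parquet files directly. A hedged usage sketch; `user/school_notebooks_EN` is a placeholder repo id, since the Hub namespace is not shown in this diff:

```python
from datasets import load_dataset

# Placeholder repo id; substitute the dataset's actual Hub path.
ds = load_dataset("user/school_notebooks_EN", split="train")
print(len(ds))         # 70 examples, per the split metadata
print(ds[0]["image"])  # decoded PIL image
```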