File size: 5,308 Bytes
c8ee2c3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
"""OAB Exams dataset"""

import datasets
import pandas as pd
import re
from collections import defaultdict
import os
import json

_CITATION = """@misc{almeida2023bluex,
      title={BLUEX: A benchmark based on Brazilian Leading Universities Entrance eXams}, 
      author={Thales Sales Almeida and Thiago Laitz and Giovana K. Bonás and Rodrigo Nogueira},
      year={2023},
      eprint={2307.05410},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """
Despite Portuguese being the fifth most widely spoken language, there is a lack of freely available resources for evaluating language models in Portuguese. This repository contains a multimodal dataset consisting of the two leading university entrance exams conducted in Brazil: Convest (Unicamp) and Fuvest (USP), spanning from 2018 to 2024. The dataset comprises a total of 1260 questions, of which 724 do not have accompanying images.
"""

_HOMEPAGE="https://github.com/Portuguese-Benchmark-Datasets/BLUEX"

_URL = "portuguese-benchmark-datasets/BLUEX"
_URL = "https://raw.githubusercontent.com/Portuguese-Benchmark-Datasets/BLUEX/main/data/bluex_dataset.zip"

class BLUEX_without_images(datasets.GeneratorBasedBuilder):
    """Text-only subset of the BLUEX entrance-exam dataset.

    Questions that depend on images (``IU`` flag, associated images, or
    non-string alternatives) are skipped during example generation, so every
    yielded example is answerable from text alone.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the ``DatasetInfo`` describing the schema of each example."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question_number": datasets.Value("int32"),
                    "exam_id": datasets.Value("string"),
                    "exam_year": datasets.Value("string"),
                    "university": datasets.Value("string"),
                    "question_type": datasets.Sequence(datasets.Value("string")),
                    "nullified": datasets.Value("bool"),
                    "question": datasets.Value("string"),
                    "choices": datasets.Sequence(
                        feature={
                            "text": datasets.Value("string"),
                            "label": datasets.Value("string"),
                        }
                    ),
                    "answerKey": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive; define the single TRAIN split.

        Image filtering is done per-question in ``_generate_examples``.
        """
        filedir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filedir": os.path.join(filedir, "questions")},
            )
        ]

    def _generate_examples(self, filedir):
        """Yield ``(doc_id, example)`` pairs from the extracted question tree.

        Expected layout: ``<filedir>/<university>/<year>[/<dayN>]/<qNN>.json``
        (the per-day level is optional — TODO confirm against the archive).
        """
        for university in os.listdir(filedir):
            years = sorted(os.listdir(os.path.join(filedir, university)))
            for year in years:
                year_dir = os.path.join(filedir, university, year)
                days = sorted(
                    d
                    for d in os.listdir(year_dir)
                    if os.path.isdir(os.path.join(year_dir, d))
                )
                if not days:
                    # Exams without a per-day subdirectory: treat the year
                    # directory itself as the (single) exam directory.
                    days = ['']
                for day in days:
                    path = year_dir if day == '' else os.path.join(year_dir, day)
                    exam_id = (
                        f"{university}_{year}"
                        if day == ''
                        else f"{university}_{year}_{day.replace('day', '')}"
                    )
                    # Keep only question JSON files, then sort numerically by
                    # the first number in the filename so question order is
                    # stable.  Filtering BEFORE sorting avoids an IndexError
                    # when a stray digitless/non-JSON entry is present (the
                    # original sorted every entry first).
                    filenames = sorted(
                        (f for f in os.listdir(path) if f.endswith('.json')),
                        key=lambda x: int(re.findall(r'\d+', x)[0]),
                    )
                    for filename in filenames:
                        # Explicit encoding: the questions are Portuguese text.
                        with open(os.path.join(path, filename), 'r', encoding='utf-8') as f:
                            example = json.load(f)

                        # Skip questions that require images or whose
                        # alternatives are not plain strings.
                        if (
                            example['IU']
                            or example['alternatives_type'] != 'string'
                            or example['has_associated_images']
                        ):
                            continue

                        choices = {
                            "text": [],
                            "label": ["A", "B", "C", "D", "E"],
                        }
                        # Alternatives carry a 3-character prefix (e.g. "A) ")
                        # which is stripped here — TODO confirm prefix format.
                        for alternative in example['alternatives']:
                            choices['text'].append(alternative[3:].strip())
                        # Trim labels to the actual number of alternatives.
                        choices['label'] = choices['label'][:len(choices['text'])]

                        doc_id = f"{exam_id}_{example['number']}"
                        yield doc_id, {
                            "id": doc_id,
                            "question_number": example['number'],
                            "exam_id": exam_id,
                            "exam_year": year,
                            "university": university,
                            "question_type": example['subject'],
                            # Not provided by the source JSON files.
                            "nullified": None,
                            "question": example['question'],
                            "choices": choices,
                            "answerKey": example['answer'],
                        }