---
license: cc-by-sa-4.0
configs:
  - config_name: bbh_logical_deduction_three_objects
    data_files:
      - split: test
        path: bbh_logical_deduction_three_objects/test-*
  - config_name: bbh_navigate
    data_files:
      - split: test
        path: bbh_navigate/test-*
  - config_name: bbh_object_counting
    data_files:
      - split: test
        path: bbh_object_counting/test-*
  - config_name: drop
    data_files:
      - split: test
        path: drop/test-*
  - config_name: gsm8k
    data_files:
      - split: test
        path: gsm8k/test-*
  - config_name: hotpotqa
    data_files:
      - split: test
        path: hotpotqa/test-*
  - config_name: mmlu_math
    data_files:
      - split: test
        path: mmlu_math/test-*
  - config_name: multiarith
    data_files:
      - split: test
        path: multiarith/test-*
  - config_name: singleop
    data_files:
      - split: test
        path: singleop/test-*
  - config_name: singleq
    data_files:
      - split: test
        path: singleq/test-*
  - config_name: squad
    data_files:
      - split: test
        path: squad/test-*
  - config_name: svamp
    data_files:
      - split: test
        path: svamp/test-*
  - config_name: tab_fact
    data_files:
      - split: test
        path: tab_fact/test-*
  - config_name: vqa
    data_files:
      - split: test
        path: vqa/test-*
  - config_name: winograd_wsc
    data_files:
      - split: test
        path: winograd_wsc/test-*
dataset_info:
  - config_name: bbh_logical_deduction_three_objects
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: input
        dtype: string
      - name: target
        dtype: string
    splits:
      - name: test
        num_bytes: 305159
        num_examples: 200
    download_size: 60084
    dataset_size: 305159
  - config_name: bbh_navigate
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: input
        dtype: string
      - name: target
        dtype: string
    splits:
      - name: test
        num_bytes: 166521
        num_examples: 200
    download_size: 29525
    dataset_size: 166521
  - config_name: bbh_object_counting
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: input
        dtype: string
      - name: target
        dtype: string
    splits:
      - name: test
        num_bytes: 128297
        num_examples: 200
    download_size: 31178
    dataset_size: 128297
  - config_name: drop
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: section_id
        dtype: string
      - name: query_id
        dtype: string
      - name: passage
        dtype: string
      - name: question
        dtype: string
      - name: answers_spans
        struct:
          - name: spans
            sequence: string
          - name: types
            sequence: string
    splits:
      - name: test
        num_bytes: 957462
        num_examples: 250
    download_size: 469964
    dataset_size: 957462
  - config_name: gsm8k
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: question
        dtype: string
      - name: answer
        dtype: string
    splits:
      - name: test
        num_bytes: 411558
        num_examples: 300
    download_size: 200727
    dataset_size: 411558
  - config_name: hotpotqa
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: id
        dtype: string
      - name: question
        dtype: string
      - name: answer
        dtype: string
      - name: type
        dtype: string
      - name: level
        dtype: string
      - name: supporting_facts
        struct:
          - name: sent_id
            sequence: int64
          - name: title
            sequence: string
      - name: context
        struct:
          - name: sentences
            sequence:
              sequence: string
          - name: title
            sequence: string
    splits:
      - name: test
        num_bytes: 2164891
        num_examples: 250
    download_size: 1288500
    dataset_size: 2164891
  - config_name: mmlu_math
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: question
        dtype: string
      - name: subject
        dtype: string
      - name: choices
        sequence: string
      - name: answer
        dtype: int64
    splits:
      - name: test
        num_bytes: 287212
        num_examples: 270
    download_size: 113723
    dataset_size: 287212
  - config_name: multiarith
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: input
        dtype: string
      - name: output_program
        dtype: string
      - name: output_answer
        dtype: string
      - name: split
        dtype: string
      - name: dataset
        dtype: string
    splits:
      - name: test
        num_bytes: 157371
        num_examples: 174
    download_size: 54214
    dataset_size: 157371
  - config_name: singleop
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: input
        dtype: string
      - name: output_program
        dtype: string
      - name: output_answer
        dtype: string
      - name: split
        dtype: string
      - name: dataset
        dtype: string
    splits:
      - name: test
        num_bytes: 118922
        num_examples: 159
    download_size: 45006
    dataset_size: 118922
  - config_name: singleq
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: input
        dtype: string
      - name: output_program
        dtype: string
      - name: output_answer
        dtype: string
      - name: split
        dtype: string
      - name: dataset
        dtype: string
    splits:
      - name: test
        num_bytes: 96097
        num_examples: 109
    download_size: 39915
    dataset_size: 96097
  - config_name: squad
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: id
        dtype: string
      - name: title
        dtype: string
      - name: context
        dtype: string
      - name: question
        dtype: string
      - name: answers
        struct:
          - name: answer_start
            sequence: int64
          - name: text
            sequence: string
    splits:
      - name: test
        num_bytes: 865087
        num_examples: 250
    download_size: 466955
    dataset_size: 865087
  - config_name: svamp
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: ID
        dtype: string
      - name: Body
        dtype: string
      - name: Question
        dtype: string
      - name: Equation
        dtype: string
      - name: Answer
        dtype: string
      - name: Type
        dtype: string
      - name: question_concat
        dtype: string
    splits:
      - name: test
        num_bytes: 322658
        num_examples: 300
    download_size: 116772
    dataset_size: 322658
  - config_name: tab_fact
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: id
        dtype: int64
      - name: table_id
        dtype: string
      - name: table_text
        dtype: string
      - name: table_caption
        dtype: string
      - name: statement
        dtype: string
      - name: label
        dtype: int64
    splits:
      - name: test
        num_bytes: 1137151
        num_examples: 200
    download_size: 475063
    dataset_size: 1137151
  - config_name: vqa
    features:
      - name: cleaning_status
        dtype: string
      - name: image_path
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: 'null'
      - name: platinum_parsing_stratagy
        dtype: string
      - name: question_type
        dtype: string
      - name: multiple_choice_answer
        dtype: string
      - name: answers
        list:
          - name: answer
            dtype: string
          - name: answer_confidence
            dtype: string
          - name: answer_id
            dtype: int64
      - name: image_id
        dtype: int64
      - name: answer_type
        dtype: string
      - name: question_id
        dtype: int64
      - name: question
        dtype: string
    splits:
      - name: test
        num_bytes: 122801
        num_examples: 242
    download_size: 26070
    dataset_size: 122801
  - config_name: winograd_wsc
    features:
      - name: cleaning_status
        dtype: string
      - name: platinum_prompt
        dtype: string
      - name: platinum_prompt_no_cot
        dtype: string
      - name: platinum_target
        sequence: string
      - name: original_target
        sequence: string
      - name: platinum_parsing_strategy
        dtype: string
      - name: text
        dtype: string
      - name: pronoun
        dtype: string
      - name: pronoun_loc
        dtype: int64
      - name: quote
        dtype: string
      - name: quote_loc
        dtype: int64
      - name: options
        sequence: string
      - name: label
        dtype: int64
      - name: source
        dtype: string
    splits:
      - name: test
        num_bytes: 198631
        num_examples: 200
    download_size: 54931
    dataset_size: 198631
task_categories:
  - question-answering
language:
  - en
---

Dataset Card for PlatinumBench

🏆 Leaderboard  |  🖥️ Code  |  📖 Paper

Dataset Description

Dataset Summary

Platinum Benchmarks are benchmarks that are carefully curated to minimize label errors and ambiguity, allowing us to measure the reliability of models.

This dataset contains fifteen platinum benchmarks created by manually revising questions from existing datasets (see the GitHub repo for details on accessing our revised subset of VQA). To revise each benchmark, we ran a variety of frontier models on individual examples and manually re-annotated any example for which at least one model made an error. See the paper for further details on the revision process.

Load the Dataset

To load the dataset using Hugging Face datasets, you first need to pip install datasets, then run the following code:

from datasets import load_dataset

ds = load_dataset("madrylab/platinum-bench", name="gsm8k", split="test") # or another subset
ds = ds.filter(lambda x: x['cleaning_status'] != 'rejected') # filter out rejected questions
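Each subset is exposed as a separate configuration whose name matches a config_name entry in the metadata above. As a minimal sketch, the standard datasets API can list the available configurations programmatically:

from datasets import get_dataset_config_names

configs = get_dataset_config_names("madrylab/platinum-bench")  # e.g. ['bbh_navigate', 'gsm8k', 'squad', ...]
print(configs)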

Dataset Structure

Data Instances

We accessed each of the fourteen original natural-language benchmarks that we revised from their respective Hugging Face repositories, and each benchmark has its own per-instance data fields/columns. We have standardized these benchmarks by providing pre-constructed prompts for each dataset (under 'platinum_prompt'). Each prompt template automatically formats the relevant dataset columns into a consistent structure. You can use these standardized prompts directly, but we also include the original dataset columns for those interested in doing their own prompting, or for seamlessly substituting our revised benchmarks for the original versions.
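For example, continuing from the loading snippet above (gsm8k subset), each row carries both the standardized prompt and the untouched original columns:

row = ds[0]
print(row["platinum_prompt"])   # standardized, ready-to-use prompt
print(row["question"])          # original GSM8K question column, kept for custom prompting
print(row["platinum_target"])   # list of accepted answers, e.g. ['3']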

For VQA, we source images and annotations from the official VQA website, and reference images by their path within the original downloaded directory structure (see our GitHub repository for additional details).
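As an illustration (not part of the dataset itself), once the official VQA val2014 images have been downloaded locally, the image for an example can be opened by joining image_path with the download directory; the root path below is a placeholder:

from pathlib import Path
from datasets import load_dataset
from PIL import Image

VQA_IMAGE_ROOT = Path("/path/to/vqa_images")  # placeholder: wherever the official VQA images were downloaded

vqa = load_dataset("madrylab/platinum-bench", name="vqa", split="test")
example = vqa[0]
image = Image.open(VQA_IMAGE_ROOT / example["image_path"])  # e.g. val2014/COCO_val2014_000000304481.jpg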

An example from the PlatinumBench GSM8K subset looks as follows:

{'cleaning_status': 'consensus',
 'platinum_prompt': 'Solve the following math word problem.\n\nA robe takes 2 bolts of blue fiber and half that much white fiber.  How many bolts in total does it take?\n\nThink step-by-step. Then, provide the final answer as a single integer in the format "Answer: XXX" with no extra formatting.',
 'platinum_prompt_no_cot': 'Solve the following math word problem.\n\nA robe takes 2 bolts of blue fiber and half that much white fiber.  How many bolts in total does it take?\n\nThen, provide the final answer as a single integer in the format "Answer: XXX" with no extra formatting.',
 'platinum_target': ['3'],
 'platinum_parsing_strategy': 'math',
 'original_target': ['3'],
 'question': 'A robe takes 2 bolts of blue fiber and half that much white fiber.  How many bolts in total does it take?',
 'answer': 'It takes 2/2=<<2/2=1>>1 bolt of white fiber\nSo the total amount of fabric is 2+1=<<2+1=3>>3 bolts of fabric\n#### 3'}

Data Fields

  • cleaning_status (str): One of:
    1. consensus: all LLMs agreed with the label, so the example was not manually reviewed (platinum_target == original_target by default).
    2. verified: the original target was manually verified to be correct (platinum_target == original_target).
    3. revised: the label is updated from the original label (platinum_target != original_target).
    4. rejected: the example is removed due to issues such as ambiguity.
  • platinum_prompt (str): A chain-of-thought question prompt that can be given directly to a language model. This is constructed from fields in the original dataset.
  • platinum_prompt_no_cot (str): The same prompt, but without explicit chain-of-thought instructions. This is used for models like o1 that don't need chain-of-thought prompting.
  • platinum_target (List[str]): The list of all correct answers for the question. In most cases there is just one correct answer.
  • original_target (List[str]): The original target provided in the dataset. This can differ from the platinum target if the original label was incorrect.
  • platinum_parsing_strategy (str): The parser that should be used to parse the LLM answer. Refer to the provided code.
  • image_path (str): Only included for VQA. The image path from which to source the relevant image, such as 'val2014/COCO_val2014_000000304481.jpg'.
  • We also include all of the original dataset columns after these ones.

This Hugging Face dataset includes rejected questions that are not used for evaluation. To use only the questions that we include in our platinum benchmarks, make sure to filter these out:

ds = ds.filter(lambda x: x['cleaning_status'] != 'rejected')
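As an optional sanity check, you can tally the cleaning statuses in a subset (reusing the ds object loaded above, before filtering out rejected questions):

from collections import Counter

print(Counter(ds["cleaning_status"]))  # counts of 'consensus', 'verified', 'revised', 'rejected'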

Prompt Example

Here is an example of the standardized prompt we provide for a question from MultiArith:

Solve the following math word problem.

At the schools book fair Sam bought 13 adventure books and 17 mystery books. If 15 of the books were used, how many new books did he buy?

Think step-by-step. Then, provide the final answer as a single number in the format "Answer: XXX" with no extra formatting.

The specific prompt template and parsing strategy depend on the dataset, although many of them are shared across datasets.
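The parsers we use live in the linked code repository. Purely as an illustrative sketch (not the benchmark's official parser), an answer in the "Answer: XXX" format above could be extracted and checked against platinum_target like this:

import re

def parse_answer(completion: str):
    # Grab whatever follows "Answer:" in the model's completion.
    match = re.search(r"Answer:\s*(.+)", completion)
    return match.group(1).strip() if match else None

def is_correct(completion: str, example: dict) -> bool:
    parsed = parse_answer(completion)
    # platinum_target is a list of all accepted answers for the question.
    return parsed is not None and parsed in example["platinum_target"]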

Dataset Creation

Curation Rationale

Many current LLM benchmarks are riddled with label noise, such as mislabeled or ambiguous questions. Due to this label noise, progress on these benchmarks often stalls before models actually achieve reliable performance on them. As a result, the community often considers these benchmarks to be "saturated" and discards them too early, discouraging machine learning practitioners from ever striving to achieve proper reliability. As a first step towards addressing this gap in benchmarking practices, we revise samples from fifteen "saturated" benchmarks to minimize label noise.

Source Data and Attribution

Each of the fifteen benchmarks that we revise was sourced from the following locations:

| Dataset | Type | URL | Subset | Split | License |
|---|---|---|---|---|---|
| SingleOp | Math | https://huggingface.co/datasets/allenai/lila | singleop | test | CC BY 4.0 |
| SingleEq | Math | https://huggingface.co/datasets/allenai/lila | singleeq | test | CC BY 4.0 |
| MultiArith | Math | https://huggingface.co/datasets/allenai/lila | multiarith | test | CC BY 4.0 |
| SVAMP | Math | https://huggingface.co/datasets/ChilleD/svamp | default | test | MIT |
| GSM8K | Math | https://huggingface.co/datasets/openai/gsm8k | main | test | MIT |
| MMLU High-School Math | Math | https://huggingface.co/datasets/cais/mmlu | high_school_mathematics | test | MIT |
| Logic. Ded. 3-Obj | Logic | https://huggingface.co/datasets/maveriq/bigbenchhard | logical_deduction_three_objects | train | MIT |
| Object Counting | Logic | https://huggingface.co/datasets/maveriq/bigbenchhard | object_counting | train | MIT |
| Navigate | Logic | https://huggingface.co/datasets/maveriq/bigbenchhard | navigate | train | MIT |
| TabFact | Table Understanding | https://huggingface.co/datasets/wenhu/tab_fact | tab_fact | test | CC BY 4.0 |
| HotPotQA | Reading Comp. | https://huggingface.co/datasets/hotpotqa/hotpot_qa | distractor | validation | CC BY-SA 4.0 |
| SQuAD2.0 | Reading Comp. | https://huggingface.co/datasets/rajpurkar/squad_v2 | squad_v2 | validation | CC BY-SA 4.0 |
| DROP | Reading Comp. | https://huggingface.co/datasets/ucinlp/drop | default | validation | CC BY-SA 4.0 |
| Winograd WSC | Commonsense | https://huggingface.co/datasets/ErnestSDavis/winograd_wsc | wsc285 | test | CC BY 4.0 |
| VQA | Vision | https://visualqa.org/download.html | N/A | validation | CC BY 4.0 |

Please refer to the dataset cards of these benchmarks for further details on their collection and annotation processes.

Additional Information

Licensing Information

See the table above for the licensing information of the original datasets upon which our work is based. The further annotations we provide are licensed under the CC BY-SA 4.0 license.

Citation Information

Cite this dataset and the source datasets (see sources.bib).

@misc{vendrow2025largelanguagemodelbenchmarks,
      title={Do Large Language Model Benchmarks Test Reliability?}, 
      author={Joshua Vendrow and Edward Vendrow and Sara Beery and Aleksander Madry},
      year={2025},
      eprint={2502.03461},
      archivePrefix={arXiv},
      primaryClass={cs.LG},
      url={https://arxiv.org/abs/2502.03461}, 
}