{
  "results": {
    "veritatis-velit_lsat-rc_base": {
      "acc,none": 0.37174721189591076,
      "acc_stderr,none": 0.029520497706913982,
      "alias": "veritatis-velit_lsat-rc_base"
    },
    "veritatis-velit_lsat-lr_base": {
      "acc,none": 0.25098039215686274,
      "acc_stderr,none": 0.0192179738903761,
      "alias": "veritatis-velit_lsat-lr_base"
    },
    "veritatis-velit_lsat-ar_base": {
      "acc,none": 0.2217391304347826,
      "acc_stderr,none": 0.02745149660405891,
      "alias": "veritatis-velit_lsat-ar_base"
    },
    "veritatis-velit_logiqa_base": {
      "acc,none": 0.29073482428115016,
      "acc_stderr,none": 0.018164056209177798,
      "alias": "veritatis-velit_logiqa_base"
    },
    "veritatis-velit_logiqa2_base": {
      "acc,none": 0.36704834605597964,
      "acc_stderr,none": 0.01216070666440455,
      "alias": "veritatis-velit_logiqa2_base"
    },
    "saepe-fuga_lsat-rc_base": {
      "acc,none": 0.34944237918215615,
      "acc_stderr,none": 0.02912482161970039,
      "alias": "saepe-fuga_lsat-rc_base"
    },
    "saepe-fuga_lsat-lr_base": {
      "acc,none": 0.2823529411764706,
      "acc_stderr,none": 0.01995228875819785,
      "alias": "saepe-fuga_lsat-lr_base"
    },
    "saepe-fuga_lsat-ar_base": {
      "acc,none": 0.17391304347826086,
      "acc_stderr,none": 0.025047317386049713,
      "alias": "saepe-fuga_lsat-ar_base"
    },
    "saepe-fuga_logiqa_base": {
      "acc,none": 0.30670926517571884,
      "acc_stderr,none": 0.01844510522956535,
      "alias": "saepe-fuga_logiqa_base"
    },
    "saepe-fuga_logiqa2_base": {
      "acc,none": 0.36323155216284986,
      "acc_stderr,none": 0.012133733683836157,
      "alias": "saepe-fuga_logiqa2_base"
    },
    "nisi-sunt_lsat-rc_base": {
      "acc,none": 0.31226765799256506,
      "acc_stderr,none": 0.028307781204694345,
      "alias": "nisi-sunt_lsat-rc_base"
    },
    "nisi-sunt_lsat-lr_base": {
      "acc,none": 0.26862745098039215,
      "acc_stderr,none": 0.019646519888599705,
      "alias": "nisi-sunt_lsat-lr_base"
    },
    "nisi-sunt_lsat-ar_base": {
      "acc,none": 0.20869565217391303,
      "acc_stderr,none": 0.02685410826543969,
      "alias": "nisi-sunt_lsat-ar_base"
    },
    "nisi-sunt_logiqa_base": {
      "acc,none": 0.3003194888178914,
      "acc_stderr,none": 0.018335874932123606,
      "alias": "nisi-sunt_logiqa_base"
    },
    "nisi-sunt_logiqa2_base": {
      "acc,none": 0.37659033078880405,
      "acc_stderr,none": 0.01222456057756536,
      "alias": "nisi-sunt_logiqa2_base"
    },
    "laboriosam-molestiae_lsat-rc_base": {
      "acc,none": 0.3308550185873606,
      "acc_stderr,none": 0.02874164221560224,
      "alias": "laboriosam-molestiae_lsat-rc_base"
    },
    "laboriosam-molestiae_lsat-lr_base": {
      "acc,none": 0.28627450980392155,
      "acc_stderr,none": 0.020035401617079118,
      "alias": "laboriosam-molestiae_lsat-lr_base"
    },
    "laboriosam-molestiae_lsat-ar_base": {
      "acc,none": 0.18695652173913044,
      "acc_stderr,none": 0.025763772398512335,
      "alias": "laboriosam-molestiae_lsat-ar_base"
    },
    "laboriosam-molestiae_logiqa_base": {
      "acc,none": 0.3146964856230032,
      "acc_stderr,none": 0.018575795328740336,
      "alias": "laboriosam-molestiae_logiqa_base"
    },
    "laboriosam-molestiae_logiqa2_base": {
      "acc,none": 0.3746819338422392,
      "acc_stderr,none": 0.012212196173823686,
      "alias": "laboriosam-molestiae_logiqa2_base"
    },
    "iste-molestias_lsat-rc_base": {
      "acc,none": 0.36059479553903345,
      "acc_stderr,none": 0.029331239329958934,
      "alias": "iste-molestias_lsat-rc_base"
    },
    "iste-molestias_lsat-lr_base": {
      "acc,none": 0.2549019607843137,
      "acc_stderr,none": 0.019316765480532974,
      "alias": "iste-molestias_lsat-lr_base"
    },
    "iste-molestias_lsat-ar_base": {
      "acc,none": 0.20869565217391303,
      "acc_stderr,none": 0.026854108265439654,
      "alias": "iste-molestias_lsat-ar_base"
    },
    "iste-molestias_logiqa_base": {
      "acc,none": 0.30670926517571884,
      "acc_stderr,none": 0.018445105229565353,
      "alias": "iste-molestias_logiqa_base"
    },
    "iste-molestias_logiqa2_base": {
      "acc,none": 0.38549618320610685,
      "acc_stderr,none": 0.01227960059074116,
      "alias": "iste-molestias_logiqa2_base"
    },
    "eum-saepe_lsat-rc_base": {
      "acc,none": 0.3643122676579926,
      "acc_stderr,none": 0.029396215063241374,
      "alias": "eum-saepe_lsat-rc_base"
    },
    "eum-saepe_lsat-lr_base": {
      "acc,none": 0.24313725490196078,
      "acc_stderr,none": 0.01901408485181097,
      "alias": "eum-saepe_lsat-lr_base"
    },
    "eum-saepe_lsat-ar_base": {
      "acc,none": 0.19130434782608696,
      "acc_stderr,none": 0.025991852462828487,
      "alias": "eum-saepe_lsat-ar_base"
    },
    "eum-saepe_logiqa_base": {
      "acc,none": 0.3035143769968051,
      "acc_stderr,none": 0.01839101519560228,
      "alias": "eum-saepe_logiqa_base"
    },
    "eum-saepe_logiqa2_base": {
      "acc,none": 0.3651399491094148,
      "acc_stderr,none": 0.01214732308367413,
      "alias": "eum-saepe_logiqa2_base"
    }
  },
  "configs": {
    "eum-saepe_logiqa2_base": {
      "task": "eum-saepe_logiqa2_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "eum-saepe-logiqa2/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "eum-saepe_logiqa_base": {
      "task": "eum-saepe_logiqa_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "eum-saepe-logiqa/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "eum-saepe_lsat-ar_base": {
      "task": "eum-saepe_lsat-ar_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "eum-saepe-lsat-ar/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "eum-saepe_lsat-lr_base": {
      "task": "eum-saepe_lsat-lr_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "eum-saepe-lsat-lr/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "eum-saepe_lsat-rc_base": {
      "task": "eum-saepe_lsat-rc_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "eum-saepe-lsat-rc/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "iste-molestias_logiqa2_base": {
      "task": "iste-molestias_logiqa2_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "iste-molestias-logiqa2/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "iste-molestias_logiqa_base": {
      "task": "iste-molestias_logiqa_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "iste-molestias-logiqa/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "iste-molestias_lsat-ar_base": {
      "task": "iste-molestias_lsat-ar_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "iste-molestias-lsat-ar/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "iste-molestias_lsat-lr_base": {
      "task": "iste-molestias_lsat-lr_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "iste-molestias-lsat-lr/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "iste-molestias_lsat-rc_base": {
      "task": "iste-molestias_lsat-rc_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "iste-molestias-lsat-rc/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "laboriosam-molestiae_logiqa2_base": {
      "task": "laboriosam-molestiae_logiqa2_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "laboriosam-molestiae-logiqa2/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "laboriosam-molestiae_logiqa_base": {
      "task": "laboriosam-molestiae_logiqa_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "laboriosam-molestiae-logiqa/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "laboriosam-molestiae_lsat-ar_base": {
      "task": "laboriosam-molestiae_lsat-ar_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "laboriosam-molestiae-lsat-ar/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "laboriosam-molestiae_lsat-lr_base": {
      "task": "laboriosam-molestiae_lsat-lr_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "laboriosam-molestiae-lsat-lr/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "laboriosam-molestiae_lsat-rc_base": {
      "task": "laboriosam-molestiae_lsat-rc_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "laboriosam-molestiae-lsat-rc/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "nisi-sunt_logiqa2_base": {
      "task": "nisi-sunt_logiqa2_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "nisi-sunt-logiqa2/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "nisi-sunt_logiqa_base": {
      "task": "nisi-sunt_logiqa_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "nisi-sunt-logiqa/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "nisi-sunt_lsat-ar_base": {
      "task": "nisi-sunt_lsat-ar_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "nisi-sunt-lsat-ar/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "nisi-sunt_lsat-lr_base": {
      "task": "nisi-sunt_lsat-lr_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "nisi-sunt-lsat-lr/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "nisi-sunt_lsat-rc_base": {
      "task": "nisi-sunt_lsat-rc_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "nisi-sunt-lsat-rc/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "saepe-fuga_logiqa2_base": {
      "task": "saepe-fuga_logiqa2_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "saepe-fuga-logiqa2/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "saepe-fuga_logiqa_base": {
      "task": "saepe-fuga_logiqa_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "saepe-fuga-logiqa/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "saepe-fuga_lsat-ar_base": {
      "task": "saepe-fuga_lsat-ar_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "saepe-fuga-lsat-ar/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "saepe-fuga_lsat-lr_base": {
      "task": "saepe-fuga_lsat-lr_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "saepe-fuga-lsat-lr/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "saepe-fuga_lsat-rc_base": {
      "task": "saepe-fuga_lsat-rc_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "saepe-fuga-lsat-rc/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "veritatis-velit_logiqa2_base": {
      "task": "veritatis-velit_logiqa2_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "veritatis-velit-logiqa2/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "veritatis-velit_logiqa_base": {
      "task": "veritatis-velit_logiqa_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "veritatis-velit-logiqa/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "veritatis-velit_lsat-ar_base": {
      "task": "veritatis-velit_lsat-ar_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "veritatis-velit-lsat-ar/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "veritatis-velit_lsat-lr_base": {
      "task": "veritatis-velit_lsat-lr_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "veritatis-velit-lsat-lr/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "veritatis-velit_lsat-rc_base": {
      "task": "veritatis-velit_lsat-rc_base",
      "group": "logikon-bench",
      "dataset_path": "logikon/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "veritatis-velit-lsat-rc/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage.\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n        \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    }
  },
  "versions": {
    "eum-saepe_logiqa2_base": 0.0,
    "eum-saepe_logiqa_base": 0.0,
    "eum-saepe_lsat-ar_base": 0.0,
    "eum-saepe_lsat-lr_base": 0.0,
    "eum-saepe_lsat-rc_base": 0.0,
    "iste-molestias_logiqa2_base": 0.0,
    "iste-molestias_logiqa_base": 0.0,
    "iste-molestias_lsat-ar_base": 0.0,
    "iste-molestias_lsat-lr_base": 0.0,
    "iste-molestias_lsat-rc_base": 0.0,
    "laboriosam-molestiae_logiqa2_base": 0.0,
    "laboriosam-molestiae_logiqa_base": 0.0,
    "laboriosam-molestiae_lsat-ar_base": 0.0,
    "laboriosam-molestiae_lsat-lr_base": 0.0,
    "laboriosam-molestiae_lsat-rc_base": 0.0,
    "nisi-sunt_logiqa2_base": 0.0,
    "nisi-sunt_logiqa_base": 0.0,
    "nisi-sunt_lsat-ar_base": 0.0,
    "nisi-sunt_lsat-lr_base": 0.0,
    "nisi-sunt_lsat-rc_base": 0.0,
    "saepe-fuga_logiqa2_base": 0.0,
    "saepe-fuga_logiqa_base": 0.0,
    "saepe-fuga_lsat-ar_base": 0.0,
    "saepe-fuga_lsat-lr_base": 0.0,
    "saepe-fuga_lsat-rc_base": 0.0,
    "veritatis-velit_logiqa2_base": 0.0,
    "veritatis-velit_logiqa_base": 0.0,
    "veritatis-velit_lsat-ar_base": 0.0,
    "veritatis-velit_lsat-lr_base": 0.0,
    "veritatis-velit_lsat-rc_base": 0.0
  },
  "n-shot": {
    "eum-saepe_logiqa2_base": 0,
    "eum-saepe_logiqa_base": 0,
    "eum-saepe_lsat-ar_base": 0,
    "eum-saepe_lsat-lr_base": 0,
    "eum-saepe_lsat-rc_base": 0,
    "iste-molestias_logiqa2_base": 0,
    "iste-molestias_logiqa_base": 0,
    "iste-molestias_lsat-ar_base": 0,
    "iste-molestias_lsat-lr_base": 0,
    "iste-molestias_lsat-rc_base": 0,
    "laboriosam-molestiae_logiqa2_base": 0,
    "laboriosam-molestiae_logiqa_base": 0,
    "laboriosam-molestiae_lsat-ar_base": 0,
    "laboriosam-molestiae_lsat-lr_base": 0,
    "laboriosam-molestiae_lsat-rc_base": 0,
    "nisi-sunt_logiqa2_base": 0,
    "nisi-sunt_logiqa_base": 0,
    "nisi-sunt_lsat-ar_base": 0,
    "nisi-sunt_lsat-lr_base": 0,
    "nisi-sunt_lsat-rc_base": 0,
    "saepe-fuga_logiqa2_base": 0,
    "saepe-fuga_logiqa_base": 0,
    "saepe-fuga_lsat-ar_base": 0,
    "saepe-fuga_lsat-lr_base": 0,
    "saepe-fuga_lsat-rc_base": 0,
    "veritatis-velit_logiqa2_base": 0,
    "veritatis-velit_logiqa_base": 0,
    "veritatis-velit_lsat-ar_base": 0,
    "veritatis-velit_lsat-lr_base": 0,
    "veritatis-velit_lsat-rc_base": 0
  },
  "config": {
    "model": "vllm",
    "model_args": "pretrained=Deci/DeciLM-7B,revision=main,dtype=auto,tensor_parallel_size=1,gpu_memory_utilization=0.9,trust_remote_code=true,max_length=4096",
    "batch_size": "auto",
    "batch_sizes": [],
    "device": null,
    "use_cache": null,
    "limit": null,
    "bootstrap_iters": 100000,
    "gen_kwargs": null
  },
  "git_hash": "5044cf9"
}