def doc_to_text(doc, connector):
    # Drop the premise's final punctuation and append the language-specific connector.
    conn = connector[doc['question']]
    return doc['premise'].strip()[:-1] + f' {conn}'


def doc_to_choice(doc):
    return [convert_choice(doc['choice1']), convert_choice(doc['choice2'])]


doc_to_text_et = partial(doc_to_text, connector={'cause': 'sest', 'effect': 'seetõttu'})
doc_to_text_ht = partial(doc_to_text, connector={'cause': 'poukisa', 'effect': 'donk sa'})
doc_to_text_it = partial(doc_to_text, connector={'cause': 'perché', 'effect': 'quindi'})
doc_to_text_id = partial(doc_to_text, connector={'cause': 'karena', 'effect': 'maka'})
doc_to_text_qu = partial(doc_to_text, connector={'cause': 'imataq', 'effect': 'chaymi'})
doc_to_text_sw = partial(doc_to_text, connector={'cause': 'kwa sababu', 'effect': 'kwa hiyo'})
doc_to_text_zh = partial(doc_to_text, connector={'cause': '因为', 'effect': '所以'})
doc_to_text_ta = partial(doc_to_text, connector={'cause': 'காரணமாக', 'effect': 'எனவே'})
doc_to_text_th = partial(doc_to_text, connector={'cause': 'เพราะ', 'effect': 'ดังนั้น'})
doc_to_text_tr = partial(doc_to_text, connector={'cause': 'çünkü', 'effect': 'bu yüzden'})
doc_to_text_vi = partial(doc_to_text, connector={'cause': 'bởi vì', 'effect': 'vì vậy'})
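
# Illustrative usage (not part of the harness source): a minimal sketch of how the XCOPA
# helpers above are applied to one document. The sample dict below is hand-written, not a
# real dataset row, and `convert_choice` is defined earlier in this file.
def _demo_xcopa_prompt() -> None:  # hypothetical helper, for illustration only
    sample = {
        'premise': "L'uomo aprì il rubinetto.",
        'question': 'effect',
        'choice1': 'Il lavandino si riempì.',
        'choice2': 'Il lavandino si svuotò.',
    }
    # Drops the premise's final period and appends the Italian "effect" connector.
    print(doc_to_text_it(sample))  # -> "L'uomo aprì il rubinetto quindi"
    # Each choice is passed through convert_choice so it reads as a continuation.
    print(doc_to_choice(sample))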
# File: lm-evaluation-harness-main/lm_eval/tasks/xnli/utils.py
import argparse

import yaml

LANGUAGES = {
    'ar': {'QUESTION_WORD': 'صحيح', 'ENTAILMENT_LABEL': 'نعم', 'NEUTRAL_LABEL': 'لذا', 'CONTRADICTION_LABEL': 'رقم'},
    'bg': {'QUESTION_WORD': 'правилно', 'ENTAILMENT_LABEL': 'да', 'NEUTRAL_LABEL': 'така', 'CONTRADICTION_LABEL': 'не'},
    'de': {'QUESTION_WORD': 'richtig', 'ENTAILMENT_LABEL': 'Ja', 'NEUTRAL_LABEL': 'Auch', 'CONTRADICTION_LABEL': 'Nein'},
    'el': {'QUESTION_WORD': 'σωστός', 'ENTAILMENT_LABEL': 'Ναί', 'NEUTRAL_LABEL': 'Έτσι', 'CONTRADICTION_LABEL': 'όχι'},
    'en': {'QUESTION_WORD': 'right', 'ENTAILMENT_LABEL': 'Yes', 'NEUTRAL_LABEL': 'Also', 'CONTRADICTION_LABEL': 'No'},
    'es': {'QUESTION_WORD': 'correcto', 'ENTAILMENT_LABEL': 'Sí', 'NEUTRAL_LABEL': 'Asi que', 'CONTRADICTION_LABEL': 'No'},
    'fr': {'QUESTION_WORD': 'correct', 'ENTAILMENT_LABEL': 'Oui', 'NEUTRAL_LABEL': 'Aussi', 'CONTRADICTION_LABEL': 'Non'},
    'hi': {'QUESTION_WORD': 'सही', 'ENTAILMENT_LABEL': 'हाँ', 'NEUTRAL_LABEL': 'इसलिए', 'CONTRADICTION_LABEL': 'नहीं'},
    'ru': {'QUESTION_WORD': 'правильно', 'ENTAILMENT_LABEL': 'Да', 'NEUTRAL_LABEL': 'Так', 'CONTRADICTION_LABEL': 'Нет'},
    'sw': {'QUESTION_WORD': 'sahihi', 'ENTAILMENT_LABEL': 'Ndiyo', 'NEUTRAL_LABEL': 'Hivyo', 'CONTRADICTION_LABEL': 'Hapana'},
    'th': {'QUESTION_WORD': 'ถูกต้อง', 'ENTAILMENT_LABEL': 'ใช่', 'NEUTRAL_LABEL': 'ดังนั้น', 'CONTRADICTION_LABEL': 'ไม่'},
    'tr': {'QUESTION_WORD': 'doğru', 'ENTAILMENT_LABEL': 'Evet', 'NEUTRAL_LABEL': 'Böylece', 'CONTRADICTION_LABEL': 'Hayır'},
    'ur': {'QUESTION_WORD': 'صحیح', 'ENTAILMENT_LABEL': 'جی ہاں', 'NEUTRAL_LABEL': 'اس لئے', 'CONTRADICTION_LABEL': 'نہیں'},
    'vi': {'QUESTION_WORD': 'đúng', 'ENTAILMENT_LABEL': 'Vâng', 'NEUTRAL_LABEL': 'Vì vậy', 'CONTRADICTION_LABEL': 'Không'},
    'zh': {'QUESTION_WORD': '正确', 'ENTAILMENT_LABEL': '是的', 'NEUTRAL_LABEL': '所以', 'CONTRADICTION_LABEL': '不是的'},
}
def gen_lang_yamls(output_dir: str, overwrite: bool) -> None:
    """Generate one XNLI YAML config per language, failing on existing files unless overwrite is set."""
    err = []
    for lang in LANGUAGES.keys():
        file_name = f'xnli_{lang}.yaml'
        try:
            QUESTION_WORD = LANGUAGES[lang]['QUESTION_WORD']
            ENTAILMENT_LABEL = LANGUAGES[lang]['ENTAILMENT_LABEL']
            NEUTRAL_LABEL = LANGUAGES[lang]['NEUTRAL_LABEL']
            CONTRADICTION_LABEL = LANGUAGES[lang]['CONTRADICTION_LABEL']
            with open(f'{output_dir}/{file_name}', 'w' if overwrite else 'x', encoding='utf8') as f:
                f.write('# Generated by utils.py\n')
                yaml.dump(
                    {
                        'include': 'xnli_common_yaml',
                        'dataset_name': lang,
                        'task': f'xnli_{lang}',
                        'doc_to_text': '',
                        'doc_to_choice': (
                            f'{{{{['
                            f'premise+", {QUESTION_WORD}? {ENTAILMENT_LABEL}, "+hypothesis,'
                            f'premise+", {QUESTION_WORD}? {NEUTRAL_LABEL}, "+hypothesis,'
                            f'premise+", {QUESTION_WORD}? {CONTRADICTION_LABEL}, "+hypothesis'
                            f']}}}}'
                        ),
                    },
                    f,
                    allow_unicode=True,
                )
        except FileExistsError:
            err.append(file_name)
    if len(err) > 0:
        raise FileExistsError(f"Files were not created because they already exist (use --overwrite flag): {', '.join(err)}")


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('--overwrite', default=False, action='store_true', help='Overwrite files if they already exist')
    parser.add_argument('--output-dir', default='.', help='Directory to write yaml files to')
    args = parser.parse_args()
    gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite)


if __name__ == '__main__':
    main()
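
# Illustrative usage (not part of the harness source): a minimal sketch that renders the
# configs into a temporary directory and prints the English one, to show the shape of the
# generated YAML. `_demo_print_english_yaml` is a hypothetical name, not part of the repo.
def _demo_print_english_yaml() -> None:
    import tempfile

    out_dir = tempfile.mkdtemp()
    gen_lang_yamls(output_dir=out_dir, overwrite=True)
    with open(f'{out_dir}/xnli_en.yaml', encoding='utf8') as f:
        # For English the dumped mapping carries include/dataset_name/task/doc_to_text plus
        # a doc_to_choice Jinja template along the lines of:
        #   {{[premise+", right? Yes, "+hypothesis, premise+", right? Also, "+hypothesis,
        #     premise+", right? No, "+hypothesis]}}
        print(f.read())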
# File: lm-evaluation-harness-main/lm_eval/tasks/xwinograd/utils.py
import argparse
from typing import Dict, List

import yaml

LANGUAGES = ['en', 'fr', 'jp', 'pt', 'ru', 'zh']


def doc_to_text(doc: Dict) -> int:
    # Map the gold answer ("1" or "2") to a 0-based option index.
    answer_to_num = {'1': 0, '2': 1}
    return answer_to_num[doc['answer']]


def doc_to_target(doc: Dict) -> str:
    # Everything after the "_" placeholder is the shared continuation.
    idx = doc['sentence'].index('_') + 1
    return doc['sentence'][idx:].strip()


def doc_to_choice(doc: Dict) -> List[str]:
    # Splice each option onto the sentence prefix that precedes the "_" placeholder.
    idx = doc['sentence'].index('_')
    options = [doc['option1'], doc['option2']]
    return [doc['sentence'][:idx] + opt for opt in options]
def gen_lang_yamls(output_dir: str, overwrite: bool) -> None:
    """Generate one xwinograd YAML config per language, failing on existing files unless overwrite is set."""
    err = []
    for lang in LANGUAGES:
        file_name = f'xwinograd_{lang}.yaml'
        try:
            with open(f'{output_dir}/{file_name}', 'w' if overwrite else 'x', encoding='utf-8') as f:
                f.write('# Generated by utils.py\n')
                yaml.dump({'include': 'xwinograd_common_yaml', 'dataset_name': lang, 'task': f'xwinograd_{lang}'}, f)
        except FileExistsError:
            err.append(file_name)
    if len(err) > 0:
        raise FileExistsError(f"Files were not created because they already exist (use --overwrite flag): {', '.join(err)}")


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('--overwrite', default=False, action='store_true', help='Overwrite files if they already exist')
    parser.add_argument('--output-dir', default='.', help='Directory to write yaml files to')
    args = parser.parse_args()
    gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite)


if __name__ == '__main__':
    main()
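
# Illustrative usage (not part of the harness source): a minimal sketch of how the
# xwinograd helpers split a sentence at its "_" placeholder. The sample dict is
# hand-written, not a real dataset row.
def _demo_xwinograd_split() -> None:  # hypothetical helper, for illustration only
    sample = {
        'sentence': 'The trophy did not fit in the suitcase because _ was too big.',
        'option1': 'the trophy',
        'option2': 'the suitcase',
        'answer': '1',
    }
    print(doc_to_text(sample))    # -> 0, the 0-based index of the gold option
    print(doc_to_target(sample))  # -> 'was too big.', the continuation after "_"
    print(doc_to_choice(sample))  # -> the prefix before "_" with each option spliced in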
# File: lm-evaluation-harness-main/lm_eval/utils.py
import collections
import fnmatch
import functools
import hashlib
import importlib.util
import inspect
import json
import logging
import os