import collections
import fnmatch
import hashlib
import logging
import re
from dataclasses import asdict, is_dataclass
from itertools import islice
from typing import Any, Callable, List
import numpy as np
import yaml
from jinja2 import BaseLoader, Environment, StrictUndefined
logging.basicConfig(format='%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d:%H:%M:%S', level=logging.INFO) |
eval_logger = logging.getLogger('lm-eval') |
SPACING = ' ' * 47 |
HIGHER_IS_BETTER_SYMBOLS = {True: '↑', False: '↓'} |
def hash_string(string: str) -> str:
    return hashlib.sha256(string.encode('utf-8')).hexdigest()
def escaped_split(text, sep_char, maxsplit=-1):
    assert len(sep_char) == 1, 'separation string must be a single character for escaped splitting'
    if maxsplit == 0:
        return text
    maxsplit = max(0, maxsplit)
    return re.split('(?<!\\\\)' + sep_char, text, maxsplit)
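# Illustrative example (not part of the original module): the separator only
# splits when it is not preceded by a backslash escape.
#   escaped_split("a,b\\,c,d", ",")  ->  ['a', 'b\\,c', 'd']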
def handle_arg_string(arg):
    if arg.lower() == 'true':
        return True
    elif arg.lower() == 'false':
        return False
    elif arg.isnumeric():
        return int(arg)
    try:
        return float(arg)
    except ValueError:
        return arg
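# Illustrative examples (assumed usage, not from the original source): strings
# are coerced to bool, int, or float where possible, otherwise left as-is.
#   handle_arg_string("True")  -> True
#   handle_arg_string("8")     -> 8
#   handle_arg_string("1e-4")  -> 0.0001
#   handle_arg_string("gpt2")  -> 'gpt2'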
def handle_non_serializable(o):
    if isinstance(o, np.int64) or isinstance(o, np.int32):
        return int(o)
    elif isinstance(o, set):
        return list(o)
    else:
        return str(o)
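# Assumed usage sketch (an assumption, not stated in this excerpt): intended as
# the ``default`` hook for json.dumps so numpy scalars and sets do not raise
# TypeError, e.g.
#   json.dumps({"n": np.int64(3), "tags": {"mmlu"}}, default=handle_non_serializable)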
def sanitize_list(sub):
    if isinstance(sub, list):
        return [sanitize_list(item) for item in sub]
    if isinstance(sub, tuple):
        return tuple(sanitize_list(item) for item in sub)
    else:
        return str(sub)
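# Illustrative example: leaf values are recursively stringified while list and
# tuple nesting is preserved.
#   sanitize_list([1, (2, 3), [4]])  ->  ['1', ('2', '3'), ['4']]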
def simple_parse_args_string(args_string):
    args_string = args_string.strip()
    if not args_string:
        return {}
    arg_list = [arg for arg in args_string.split(',') if arg]
    args_dict = {k: handle_arg_string(v) for (k, v) in [arg.split('=') for arg in arg_list]}
    return args_dict
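# Illustrative example (hypothetical argument string): comma-separated
# "key=value" pairs become a dict, with values coerced by handle_arg_string.
#   simple_parse_args_string("pretrained=gpt2,dtype=float32,batch_size=8")
#   ->  {'pretrained': 'gpt2', 'dtype': 'float32', 'batch_size': 8}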
def join_iters(iters):
    for iter in iters:
        yield from iter
def group(arr, fn):
    res = collections.defaultdict(list)
    for ob in arr:
        res[fn(ob)].append(ob)
    return list(res.values())
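# Illustrative example: items are bucketed by the key function, with buckets
# returned in first-seen key order (CPython dict ordering).
#   group([1, 2, 3, 4], lambda x: x % 2)  ->  [[1, 3], [2, 4]]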
def pattern_match(patterns, source_list):
    if isinstance(patterns, str):
        patterns = [patterns]
    task_names = set()
    for pattern in patterns:
        for matching in fnmatch.filter(source_list, pattern):
            task_names.add(matching)
    return sorted(list(task_names))
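# Illustrative example (hypothetical task names): shell-style wildcards are
# expanded against the available names, returning a sorted, de-duplicated list.
#   pattern_match("hellaswag*", ["hellaswag", "hellaswag_de", "arc_easy"])
#   ->  ['hellaswag', 'hellaswag_de']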
def softmax(x):
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum()
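# Note: subtracting the max before exponentiating is the standard numerical-
# stability trick; the result is unchanged because softmax is shift-invariant.
#   softmax(np.array([0.0, 0.0]))  ->  array([0.5, 0.5])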
def general_detokenize(string):
    string = string.replace(" n't", "n't")
    string = string.replace(' )', ')')
    string = string.replace('( ', '(')
    string = string.replace('" ', '"')
    string = string.replace(' "', '"')
    string = re.sub(" (['.,])", '\\1', string)
    return string
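# Illustrative example: undoes common tokenizer spacing around punctuation,
# quotes, and contractions.
#   general_detokenize("Do n't stop ( now ) .")  ->  "Don't stop (now)."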
def get_file_task_name(filename: str) -> str:
    return filename[filename.find('_') + 1:filename.rfind('_')]
def get_file_datetime(filename: str) -> str:
    return filename[filename.rfind('_') + 1:].replace('.jsonl', '')
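# Assumed filename layout (an assumption, not stated in this excerpt): sample
# files named "<prefix>_<task_name>_<datetime>.jsonl", e.g.
#   get_file_task_name("samples_arc_easy_2024-05-01T12-00-00.jsonl")  ->  'arc_easy'
#   get_file_datetime("samples_arc_easy_2024-05-01T12-00-00.jsonl")   ->  '2024-05-01T12-00-00'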
def sanitize_model_name(model_name: str) -> str:
    return re.sub('[\\"<>:/\\|\\\\?\\*\\[\\]]+', '__', model_name)
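# Illustrative example: filesystem-unsafe characters are collapsed to "__" so a
# model name can be used as a directory name.
#   sanitize_model_name("EleutherAI/pythia-70m")  ->  'EleutherAI__pythia-70m'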
def sanitize_task_name(task_name: str) -> str: |