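"""Sampling utilities for language model decoding: a numerically stable softmax,
temperature scaling, top-p (nucleus) filtering, and per-token logit biasing."""
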
import numpy as np
from typing import Dict, Optional

# https://stackoverflow.com/a/50425683
def softmax(x: np.ndarray, axis: int) -> np.ndarray:
    # Subtract the max for numerical stability. Copy rather than mutate:
    # `x -= ...` would modify the caller's array in place.
    x = x - x.max(axis=axis, keepdims=True)
    e: np.ndarray = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def sample_logits(out, temperature: float = 1.0, top_p: float = 0.8, logit_bias: Optional[Dict[int, float]] = None) -> int:
    # Accept either a NumPy array or a PyTorch tensor; tensors are detached,
    # moved to the CPU and converted.
    if hasattr(out, '__module__') and out.__module__ == 'torch':
        out = out.detach().cpu().numpy()

    probs: np.ndarray = softmax(out, axis=-1)

    return sample_probs(probs, temperature, top_p, logit_bias)

def sample_probs(probs: np.ndarray, temperature: float = 1.0, top_p: float = 0.8, logit_bias: Optional[Dict[int, float]] = None) -> int:
    if not (0.0 <= temperature):
        raise ValueError('temperature must be >= 0.0')
    if not (0.0 <= top_p <= 1.0):
        raise ValueError('top_p must be within [0.0, 1.0]')

    # top_p == 0.0 is treated as "disabled", i.e. the same as 1.0 (no nucleus filtering)
    if top_p == 0.0:
        top_p = 1.0

    if logit_bias is not None and len(logit_bias) > 0:
        # Apply the per-token biases in log space, then convert back to probabilities
        logits: np.ndarray = np.log(probs)

        ids, values = zip(*logit_bias.items())
        logits[list(ids)] += values

        # Makes the calculation more numerically stable; does not change the result
        logits -= logits.max(axis=-1, keepdims=True)

        e = np.exp(logits)
        probs = e / e.sum()

    if temperature == 0.0:
        # Greedy decoding: always return the highest-probability token
        return np.argmax(probs).item()

    if top_p < 1.0:
        # Nucleus (top-p) filtering: keep the smallest set of tokens whose cumulative
        # probability exceeds top_p; np.where avoids mutating the caller's array
        sorted_probs = np.sort(probs)[::-1]
        cumulative_probs = np.cumsum(sorted_probs)
        cutoff = float(sorted_probs[np.argmax(cumulative_probs > top_p)])
        probs = np.where(probs < cutoff, 0.0, probs)

    if temperature != 1.0:
        # Temperature scaling in probability space: p ** (1/T) with the re-normalization
        # below is equivalent to dividing the logits by T before the softmax
        probs = np.power(probs, 1.0 / temperature)

    # Re-normalize and sample; cast so the return value matches the declared int type
    probs = probs / np.sum(probs)

    return int(np.random.choice(a=len(probs), p=probs))
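
# A minimal usage sketch (illustrative, not part of the original file): the logits
# below are made-up values standing in for a model's output over a 4-token
# vocabulary, and the bias on token id 3 is likewise hypothetical.
if __name__ == '__main__':
    example_logits = np.array([2.0, 1.0, 0.5, -1.0])
    token_id = sample_logits(example_logits, temperature=0.9, top_p=0.8, logit_bias={3: 2.0})
    print(f'sampled token id: {token_id}')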