Elijahbodden committed
Update app.py
app.py CHANGED
@@ -61,9 +61,10 @@ presets = {
 }
 
 
-def custom_lp_logits_processor(ids, logits):
-
-
+def custom_lp_logits_processor(ids, logits, lp_start, lp_decay):
+    if (len(ids) > lp_start):
+        logits[tokenizer.eos_token_id] *= pow(lp_decay, len(ids)-lp_start)
+    return logits
 
 def respond(
     message,
@@ -103,8 +104,7 @@ def respond(
         max_tokens=128,
         frequency_penalty=frequency_penalty,
         presence_penalty=presence_penalty,
-        logits_processor=custom_lp_logits_processor
-        # lambda ids, logits: ExponentialDecayLengthPenalty((lp_start, lp_decay), tokenizer.eos_token_id, len(convo))(ids, torch.from_numpy(logits))
+        logits_processor=lambda ids, logits: custom_lp_logits_processor(ids, logits, lp_start, lp_decay)
     ):
         token = message["choices"][0]["text"]
 
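For context, the new processor applies an exponential length penalty to the end-of-sequence logit: once more than lp_start tokens have been generated, the EOS logit is scaled by lp_decay raised to the overshoot, and the lambda passed to logits_processor closes over the lp_start and lp_decay values so the helper fits the two-argument (ids, logits) callback shape. The snippet below is a minimal standalone sketch of that behaviour only; the EOS_TOKEN_ID, VOCAB_SIZE, and lp_start/lp_decay values are illustrative stand-ins rather than the Space's real configuration, and a plain NumPy array replaces the live model's logits.

import numpy as np

# Illustrative stand-ins (the Space gets these from its tokenizer and UI settings).
EOS_TOKEN_ID = 2
VOCAB_SIZE = 32000
lp_start, lp_decay = 10, 1.15

def custom_lp_logits_processor(ids, logits, lp_start, lp_decay):
    # Same logic as the committed function: past lp_start generated tokens,
    # scale the EOS logit by lp_decay ** (tokens beyond lp_start).
    if (len(ids) > lp_start):
        logits[EOS_TOKEN_ID] *= pow(lp_decay, len(ids) - lp_start)
    return logits

# Show how the EOS logit grows as the generation runs past lp_start.
base = np.zeros(VOCAB_SIZE, dtype=np.float32)
base[EOS_TOKEN_ID] = 2.0  # pretend the model assigns EOS a positive logit

for n_tokens in (5, 12, 20, 30):
    fake_ids = list(range(n_tokens))  # stand-in for the generated token ids
    out = custom_lp_logits_processor(fake_ids, base.copy(), lp_start, lp_decay)
    print(f"{n_tokens:>2} tokens -> EOS logit {out[EOS_TOKEN_ID]:.3f}")

With a positive starting logit, the scaled value grows the further generation runs past lp_start, nudging sampling toward ending the response sooner.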