Tuchuanhuhuhu committed 2342c7b (parent: 33cbbdb)
Commit message: Support advanced parameter tuning
modules/models/StableLM.py CHANGED
@@ -39,6 +39,9 @@ class StableLM_Client(BaseLLMModel):
 - StableAssistant is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
 - StableAssistant is more than just an information source, StableAssistant is also able to write poetry, short stories, and make jokes.
 - StableAssistant will refuse to participate in anything that could harm a human."""
+        self.max_generation_token = 1024
+        self.top_p = 0.95
+        self.temperature = 1.0
 
     def _get_stablelm_style_input(self):
         history = self.history + [{"role": "assistant", "content": ""}]
@@ -50,8 +53,8 @@ class StableLM_Client(BaseLLMModel):
 
     def _generate(self, text, bad_text=None):
         stop = StopOnTokens()
-        result = self.generator(text, max_new_tokens=1024, num_return_sequences=1, num_beams=1, do_sample=True,
-                                temperature=1.0, top_p=0.95, top_k=1000, stopping_criteria=StoppingCriteriaList([stop]))
+        result = self.generator(text, max_new_tokens=self.max_generation_token, num_return_sequences=1, num_beams=1, do_sample=True,
+                                temperature=self.temperature, top_p=self.top_p, top_k=1000, stopping_criteria=StoppingCriteriaList([stop]))
         return result[0]["generated_text"].replace(text, "")
 
     def get_answer_at_once(self):
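For context, self.generator is invoked here like a Hugging Face transformers text-generation pipeline. A minimal standalone sketch of the same call pattern, with the commit's default values written inline (the checkpoint name and the pipeline construction are assumptions, not part of this commit):

    from transformers import pipeline

    # Hypothetical setup; the commit only changes how the sampling parameters
    # are supplied, not how the generator is built.
    generator = pipeline("text-generation", model="stabilityai/stablelm-tuned-alpha-7b")
    result = generator(
        "Hello,",
        max_new_tokens=1024,     # now self.max_generation_token
        num_return_sequences=1,
        num_beams=1,
        do_sample=True,
        temperature=1.0,         # now self.temperature
        top_p=0.95,              # now self.top_p
        top_k=1000,
    )
    print(result[0]["generated_text"])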
@@ -68,11 +71,11 @@ class StableLM_Client(BaseLLMModel):
         generate_kwargs = dict(
             model_inputs,
             streamer=streamer,
-            max_new_tokens=1024,
+            max_new_tokens=self.max_generation_token,
             do_sample=True,
-            top_p=0.95,
+            top_p=self.top_p,
             top_k=1000,
-            temperature=1.0,
+            temperature=self.temperature,
             num_beams=1,
             stopping_criteria=StoppingCriteriaList([stop])
         )
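The generate_kwargs dict above feeds the streaming path, where generation runs in a background thread while a streamer yields tokens. A minimal sketch of how such kwargs are typically consumed with transformers' TextIteratorStreamer (the checkpoint, prompt, and surrounding setup are assumptions; the module's custom StopOnTokens stopping criterion is omitted here):

    from threading import Thread
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-tuned-alpha-7b")
    model = AutoModelForCausalLM.from_pretrained("stabilityai/stablelm-tuned-alpha-7b")
    model_inputs = tokenizer("Hello,", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

    generate_kwargs = dict(
        model_inputs,            # BatchEncoding unpacks into input_ids etc.
        streamer=streamer,
        max_new_tokens=1024,     # now self.max_generation_token
        do_sample=True,
        top_p=0.95,              # now self.top_p
        top_k=1000,
        temperature=1.0,         # now self.temperature
        num_beams=1,
    )
    # generate() runs in a worker thread; the streamer yields text as it arrives.
    Thread(target=model.generate, kwargs=generate_kwargs).start()
    for new_text in streamer:
        print(new_text, end="")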
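Net effect of the commit: the three sampling knobs that were previously hardcoded in two places are now instance attributes with the old values as defaults, so they can be tuned per client at runtime. A hypothetical usage sketch (the constructor argument is assumed; only the attribute names come from this diff):

    # Hypothetical usage; attribute names are from the diff above.
    client = StableLM_Client("StableLM")
    client.max_generation_token = 512  # default 1024
    client.top_p = 0.9                 # default 0.95
    client.temperature = 0.7           # default 1.0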