Update app.py
app.py CHANGED

@@ -4,6 +4,7 @@ import base64
 from io import BytesIO
 from PIL import Image
 import requests
+import random
 import bs4
 import lxml
 # Define the list of models
@@ -34,7 +35,15 @@ def generate_prompt(company_name, company_html, company_descp):
         {'role':'system','content':system_prompt},
         {'role':'user','content':prompt},
     ]
-
+    generate_kwargs = dict(
+        temperature=0.99,
+        max_new_tokens=512,  # total tokens - input tokens
+        top_p=0.99,
+        repetition_penalty=1.0,
+        do_sample=True,
+        seed=random.randint(1,100000000000000),
+    )
+    stream = client.text_generation(prompt_in, **generate_kwargs, stream=True, details=True, return_full_text=True)
     for response in stream:
         output += response.token.text
     return output
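
For context, here is a minimal, self-contained sketch of how the streamed call added in this commit behaves. It assumes `client` is a `huggingface_hub.InferenceClient`; the model id and prompt below are hypothetical placeholders, and only the `generate_kwargs`/`text_generation` pattern comes from the diff itself:

import random
from huggingface_hub import InferenceClient

# Hypothetical model id; the Space's actual model list is not shown in this diff.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

generate_kwargs = dict(
    temperature=0.99,        # near-maximal sampling temperature
    max_new_tokens=512,      # output budget: total tokens minus input tokens
    top_p=0.99,
    repetition_penalty=1.0,
    do_sample=True,
    seed=random.randint(1, 100000000000000),  # fresh seed each call
)

# stream=True yields one event per generated token; details=True attaches
# token-level metadata, which is why the loop can read response.token.text.
output = ""
stream = client.text_generation("Say hello.", **generate_kwargs,
                                stream=True, details=True, return_full_text=True)
for response in stream:
    output += response.token.text
print(output)

Randomizing the seed on every call means two identical prompts can produce different completions even with otherwise fixed sampling settings, which is presumably why `import random` was added alongside `generate_kwargs`.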