Spaces:
Runtime error
Runtime error
Commit
·
88a5ed3
1
Parent(s):
de5737a
Add application file
Browse files- .gitignore +160 -0
- app.py +211 -0
- requirements.txt +7 -0
.gitignore
ADDED
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Byte-compiled / optimized / DLL files
|
2 |
+
__pycache__/
|
3 |
+
*.py[cod]
|
4 |
+
*$py.class
|
5 |
+
|
6 |
+
# C extensions
|
7 |
+
*.so
|
8 |
+
|
9 |
+
# Distribution / packaging
|
10 |
+
.Python
|
11 |
+
build/
|
12 |
+
develop-eggs/
|
13 |
+
dist/
|
14 |
+
downloads/
|
15 |
+
eggs/
|
16 |
+
.eggs/
|
17 |
+
lib/
|
18 |
+
lib64/
|
19 |
+
parts/
|
20 |
+
sdist/
|
21 |
+
var/
|
22 |
+
wheels/
|
23 |
+
share/python-wheels/
|
24 |
+
*.egg-info/
|
25 |
+
.installed.cfg
|
26 |
+
*.egg
|
27 |
+
MANIFEST
|
28 |
+
|
29 |
+
# PyInstaller
|
30 |
+
# Usually these files are written by a python script from a template
|
31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
32 |
+
*.manifest
|
33 |
+
*.spec
|
34 |
+
|
35 |
+
# Installer logs
|
36 |
+
pip-log.txt
|
37 |
+
pip-delete-this-directory.txt
|
38 |
+
|
39 |
+
# Unit test / coverage reports
|
40 |
+
htmlcov/
|
41 |
+
.tox/
|
42 |
+
.nox/
|
43 |
+
.coverage
|
44 |
+
.coverage.*
|
45 |
+
.cache
|
46 |
+
nosetests.xml
|
47 |
+
coverage.xml
|
48 |
+
*.cover
|
49 |
+
*.py,cover
|
50 |
+
.hypothesis/
|
51 |
+
.pytest_cache/
|
52 |
+
cover/
|
53 |
+
|
54 |
+
# Translations
|
55 |
+
*.mo
|
56 |
+
*.pot
|
57 |
+
|
58 |
+
# Django stuff:
|
59 |
+
*.log
|
60 |
+
local_settings.py
|
61 |
+
db.sqlite3
|
62 |
+
db.sqlite3-journal
|
63 |
+
|
64 |
+
# Flask stuff:
|
65 |
+
instance/
|
66 |
+
.webassets-cache
|
67 |
+
|
68 |
+
# Scrapy stuff:
|
69 |
+
.scrapy
|
70 |
+
|
71 |
+
# Sphinx documentation
|
72 |
+
docs/_build/
|
73 |
+
|
74 |
+
# PyBuilder
|
75 |
+
.pybuilder/
|
76 |
+
target/
|
77 |
+
|
78 |
+
# Jupyter Notebook
|
79 |
+
.ipynb_checkpoints
|
80 |
+
|
81 |
+
# IPython
|
82 |
+
profile_default/
|
83 |
+
ipython_config.py
|
84 |
+
|
85 |
+
# pyenv
|
86 |
+
# For a library or package, you might want to ignore these files since the code is
|
87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
88 |
+
# .python-version
|
89 |
+
|
90 |
+
# pipenv
|
91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
94 |
+
# install all needed dependencies.
|
95 |
+
#Pipfile.lock
|
96 |
+
|
97 |
+
# poetry
|
98 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
99 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
100 |
+
# commonly ignored for libraries.
|
101 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
102 |
+
#poetry.lock
|
103 |
+
|
104 |
+
# pdm
|
105 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
106 |
+
#pdm.lock
|
107 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
108 |
+
# in version control.
|
109 |
+
# https://pdm.fming.dev/#use-with-ide
|
110 |
+
.pdm.toml
|
111 |
+
|
112 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
113 |
+
__pypackages__/
|
114 |
+
|
115 |
+
# Celery stuff
|
116 |
+
celerybeat-schedule
|
117 |
+
celerybeat.pid
|
118 |
+
|
119 |
+
# SageMath parsed files
|
120 |
+
*.sage.py
|
121 |
+
|
122 |
+
# Environments
|
123 |
+
.env
|
124 |
+
.venv
|
125 |
+
env/
|
126 |
+
venv/
|
127 |
+
ENV/
|
128 |
+
env.bak/
|
129 |
+
venv.bak/
|
130 |
+
|
131 |
+
# Spyder project settings
|
132 |
+
.spyderproject
|
133 |
+
.spyproject
|
134 |
+
|
135 |
+
# Rope project settings
|
136 |
+
.ropeproject
|
137 |
+
|
138 |
+
# mkdocs documentation
|
139 |
+
/site
|
140 |
+
|
141 |
+
# mypy
|
142 |
+
.mypy_cache/
|
143 |
+
.dmypy.json
|
144 |
+
dmypy.json
|
145 |
+
|
146 |
+
# Pyre type checker
|
147 |
+
.pyre/
|
148 |
+
|
149 |
+
# pytype static type analyzer
|
150 |
+
.pytype/
|
151 |
+
|
152 |
+
# Cython debug symbols
|
153 |
+
cython_debug/
|
154 |
+
|
155 |
+
# PyCharm
|
156 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
157 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
158 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
159 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
160 |
+
#.idea/
|
app.py
ADDED
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
import shutil
|
4 |
+
|
5 |
+
import gradio as gr
|
6 |
+
from huggingface_hub import Repository
|
7 |
+
|
8 |
+
import openai
|
9 |
+
|
# Token used to push collected prompts to the Hub; note it is read from
# TRL_TOKEN (not HF_TOKEN) — data collection is disabled when unset.
HF_TOKEN = os.environ.get("TRL_TOKEN", None)
# NOTE(review): API_URL is read but never used in this file — confirm with
# callers before removing.
API_URL = os.environ.get("API_URL")

# Shared Gradio theme for the demo UI below.
theme = gr.themes.Monochrome(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
    radius_size=gr.themes.sizes.radius_sm,
    font=[
        gr.themes.GoogleFont("Open Sans"),
        "ui-sans-serif",
        "system-ui",
        "sans-serif",
    ],
)
if HF_TOKEN:
    # Start from a clean checkout. ignore_errors covers the first run where
    # ./data/ does not exist yet (replaces a bare `try/except: pass`, which
    # also hid real permission/IO failures).
    shutil.rmtree("./data/", ignore_errors=True)

    # Dataset repo where collected prompts are pushed by save_inputs_and_outputs.
    repo = Repository(
        local_dir="./data/",
        clone_from="trl-lib/stack-llama-prompts",
        use_auth_token=HF_TOKEN,
        repo_type="dataset",
    )
    repo.git_pull()


# Template for wrapping a raw prompt.
# NOTE(review): unused in this file — confirm no external importer before removing.
PROMPT_TEMPLATE = """Question: {prompt}\n\nAnswer:"""
def save_inputs_and_outputs(inputs, outputs, generate_kwargs):
    """Append one prompt/response record to data/prompts.jsonl and push it.

    Requires the module-level ``repo`` created when HF_TOKEN is set.
    Returns the commit URL from the push (previously computed and discarded).
    """
    # Explicit UTF-8 so the JSONL file is portable across platforms.
    with open(os.path.join("data", "prompts.jsonl"), "a", encoding="utf-8") as f:
        json.dump(
            {"inputs": inputs, "outputs": outputs, "generate_kwargs": generate_kwargs},
            f,
            ensure_ascii=False,
        )
        f.write("\n")
    # Push after the `with` block so the record is flushed and the file closed
    # before it is committed (the original pushed while the file was open).
    return repo.push_to_hub()
def generate(
    instruction,
    temperature=0.9,
    max_new_tokens=256,
    top_p=0.95,
    repetition_penalty=1.0,
    do_save=True,
):
    """Placeholder generator: echo the prompt plus the temperature, streamed.

    Yields the cumulative prefix after every character to mimic token
    streaming; ``max_new_tokens``, ``top_p``, ``repetition_penalty`` and
    ``do_save`` are currently unused. Returns the full echoed string.
    """
    echoed = instruction + str(temperature)
    for end in range(1, len(echoed) + 1):
        yield echoed[:end]
    return echoed
# Canonical example inputs for the Examples widget. The paired `examples`
# list is built from the two single-item lists so the long prompt strings
# are defined exactly once.
example_system_prompt = [
    "You are a helpful and precise assistant for checking the quality of the answer."
]
example_your_prompt = [
    "[Question]\n{question}\n\n[The Start of Assistant's Answer]\n{answer}\n[The End of of Assistant's Answer]\n\nWe would like to request your feedback on the performance of the AI assistant in response to the user question displayed above.\nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only the value indicating the scores for the Assistant. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."
]
examples = [
    [example_system_prompt[0], example_your_prompt[0]],
]
def process_example(args):
    """Drain the generate() stream for an example and return the final value.

    The original left the loop variable unbound (NameError) if the generator
    yielded nothing; initialize it so an empty stream returns None instead.
    """
    result = None
    for result in generate(args):
        pass
    return result
def gpt_eval(system_prompt, prompt, question, answer, openai_key):
    """Stream a GPT-based evaluation of an LLM answer.

    Fills ``prompt`` with ``question``/``answer``, yields the formatted input
    once, then re-yields the input plus the growing evaluation text as chunks
    arrive from the OpenAI streaming API.

    SECURITY: the original contained a commented-out hard-coded API key; it
    has been removed and the leaked key must be revoked.
    """
    # Cheap sanity check before spending an API call; real keys are far longer.
    if openai_key is None or len(openai_key) <= 10:
        yield "Please enter a valid openai API key"
        return
    origin_input = prompt.format(question=question, answer=answer)
    input_str = system_prompt + "\n" + origin_input + "\n\n---\n"
    # Show the user exactly what is being sent while the API call is in flight.
    yield input_str
    openai.api_key = openai_key
    res = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": origin_input},
        ],
        stream=True,
    )
    output = ""
    for chunk in res:
        # Streaming deltas may omit "content" (e.g. the role-only first chunk).
        content = chunk["choices"][0].get("delta", {}).get("content")
        if content is not None:
            output += content
            yield input_str + output
# Hide elements carrying the "generating" class while streaming.
css = ".generating {visibility: hidden}"  # + share_btn_css


# Input widgets, created up front and .render()-ed inside the layout below.

# System message forwarded verbatim to the OpenAI chat API.
system_prompt = gr.Textbox(
    value="You are a helpful and precise assistant for checking the quality of the answer.",
    interactive=True,
    label="System Prompt",
)
# Evaluation prompt template; must contain {question} and {answer}
# placeholders, filled in by gpt_eval via str.format.
your_prompt = gr.Textbox(
    placeholder="Enter your prompt here",
    value="[Question]\n{question}\n\n[The Start of Assistant's Answer]\n{answer}\n[The End of of Assistant's Answer]\n\nWe would like to request your feedback on the performance of the AI assistant in response to the user question displayed above.\nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only the value indicating the scores for the Assistant. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment.",
    label="Your Prompt",
    interactive=True,
)
# The user question whose answer is being judged.
llm_question = gr.Textbox(
    placeholder="Enter your question here",
    value="What is the meaning of life?",
    label="Your Question",
    elem_id="q-input",
)
# The model-produced answer to evaluate.
llm_answer = gr.Textbox(
    placeholder="Enter your answer here",
    label="Your LLM's Answer",
    value="C'est la vie!",
    elem_id="q-tmp-output",
)
# Layout: left column holds the prompt inputs and the streamed evaluation,
# right column holds the API-key/model settings and examples.
with gr.Blocks(theme=theme, analytics_enabled=False, css=css) as demo:
    with gr.Column():
        gr.Markdown(
            """
Type in the box below and click the button to generate answers to your most pressing questions!
⚠️ **Data Collection**: by default, we are collecting the prompts entered in this app to further improve and evaluate the model. Do not share any personal or sensitive information while using the app! You can opt out of this data collection by removing the checkbox below:
"""
        )

        with gr.Row():
            with gr.Column(scale=3):
                do_save = gr.Checkbox(
                    value=True,
                    label="Store data",
                    info="You agree to the storage of your prompt and generated text for research and development purposes:",
                )
                system_prompt.render()
                your_prompt.render()
                llm_question.render()
                llm_answer.render()

                with gr.Box():
                    gr.Markdown("**Evaluation by GPT**")
                    evaluations = gr.Markdown(elem_id="q-output")
                submit = gr.Button("Generate", variant="primary")

            with gr.Column(scale=1):
                openai_key = gr.Textbox(
                    placeholder="This will not be saved or shared.",
                    label="OpenAI API",
                    type="password",
                )

                # NOTE(review): gpt_eval currently hard-codes gpt-3.5-turbo and
                # ignores this widget — wire it up when more models are added.
                # Label typo fixed ("More opions coming soon) )").
                openai_model = gr.Textbox(
                    value="gpt-3.5-turbo",
                    label="Model (more options coming soon)",
                )

                example_box = gr.Examples(
                    examples=examples,
                    inputs=[system_prompt, your_prompt],
                    cache_examples=False,
                )

    # Stream gpt_eval output into the evaluations Markdown pane.
    submit.click(
        gpt_eval,
        inputs=[system_prompt, your_prompt, llm_question, llm_answer, openai_key],
        outputs=[evaluations],
    )

demo.queue().launch(debug=True)
requirements.txt
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
gradio
openai
torch
transformers
datasets
evaluate
scikit-learn