add inference parameters for sampling generation task
app.py
CHANGED
@@ -149,14 +149,6 @@ def display_tree(self: Tree, prefix='value: '):
         self.right.display_tree()
 
     """
-
-def docgen_func(function_code):
-    req_data = {"inputs": function_code}
-    output = query(req_data)
-    if type(output) is list:
-        return f'"""\n{output[0]["generated_text"]}\n"""'
-    else:
-        return str(output)
 
 def pygen_func(nl_code_intent):
     pass # TODO: generate code PL from intent NL + search in corpus
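The docgen_func removed above, and its replacement in the next hunk, both call a query helper that this diff does not show. Below is a minimal sketch of what it presumably looks like, assuming the usual requests-based POST to the Hugging Face Inference API; the model-id placeholder and token handling are assumptions, not part of this commit:

import os
import requests

# Assumed endpoint; the actual model id is not visible in this diff.
API_URL = "https://api-inference.huggingface.co/models/<model-id>"
headers = {"Authorization": f"Bearer {os.environ.get('HF_API_TOKEN', '')}"}

def query(req_data):
    # POST the JSON payload ("inputs" plus optional "parameters"/"options") to the API.
    response = requests.post(API_URL, headers=headers, json=req_data)
    # Success: a list like [{"generated_text": "..."}]; failure: a dict such as {"error": "..."}.
    return response.json()

This matches the two shapes docgen_func checks for: a list on success, and anything else stringified and returned as-is.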
@@ -168,12 +160,41 @@ def pygen_func(nl_code_intent):
 # return str(answer)
 # CT5_URL = "https://api-inference.huggingface.co/models/nielsr/codet5-small-code-summarization-ruby"
 
+def docgen_func(function_code, temp):
+    req_data = {
+        "inputs": function_code,
+        "parameters": {
+            "min_length": 50,  # (Default: None). Integer to define the minimum length in tokens of the output summary.
+            "max_length": 500,  # (Default: None). Integer to define the maximum length in tokens of the output summary.
+            "top_k": 3,  # (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
+            "top_p": 0.8,  # (Default: None). Float to define the tokens that are within the sample operation of text generation.
+            # Tokens are added to the sample from most probable to least probable until the sum of the probabilities is greater than top_p.
+            "temperature": temp,  # (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation.
+            # 1 means regular sampling, 0 means top_k=1, and 100.0 gets closer to uniform probability.
+            "repetition_penalty": 50.0,  # (Default: None). Float (0.0-100.0). The more a token is used within generation,
+            # the more it is penalized to not be picked in successive generation passes.
+            "max_time": 80,  # (Default: None). Float (0-120.0). The maximum amount of time in seconds that the query should take.
+            # Network can cause some overhead, so this is a soft limit.
+        },
+        "options": {
+            "use_gpu": False,  # (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least).
+            "use_cache": True,  # (Default: true). Boolean. There is a cache layer on the Inference API to speed up requests we have already seen. Most models can use those results as is, since models are deterministic (the results will be the same anyway). For a non-deterministic model, set this to False to prevent the caching mechanism from being used, resulting in a real new query.
+            "wait_for_model": False,  # (Default: false). Boolean. If the model is not ready, wait for it instead of receiving a 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to True after receiving a 503 error, as it will limit hanging in your application to known places.
+        }
+    }
+    output = query(req_data)
+    if type(output) is list:
+        return f'"""\n{output[0]["generated_text"]}\n"""'
+    else:
+        return str(output)
+
 iface = gr.Interface(
     # pygen_func,
     docgen_func,
     [
         # gr.inputs.Textbox(lines=7, label="Code Intent (NL)", default=task_code),
-        gr.inputs.Textbox(lines=10, label="Enter Task + Code in Python (PL)", default=task_code),
+        gr.inputs.Textbox(lines=10, label="Enter Task + Code in Python (PL)", default=task_code),
+        gr.inputs.Slider(0, 100, label="Temperature"),
     ],
     # gr.outputs.Textbox(label="Code Generated PL"))
     gr.outputs.Textbox(label="Docstring Generated (NL)"),