meg-huggingface committed
Commit: 798ff9d
Parent(s): 668284b
parallel processing handling
src/backend/inference_endpoint.py
CHANGED
@@ -32,6 +32,7 @@ def create_endpoint(endpoint_name, repository, framework='pytorch',
     except huggingface_hub.utils._errors.HfHubHTTPError as e:
         # Workload with the same name already exists error.
         # Use it again, just make sure it has the right settings.
+        # TODO(mm): Is this error even catching?
         logger.debug("Hit error:")
         logger.debug(e)
         logger.debug("Attempting to update with the given parameters.")
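The new TODO asks whether this `except` clause actually fires. A minimal sketch of the pattern, assuming the endpoint is created with `huggingface_hub.create_inference_endpoint` (the wrapper name, hardware values, and the update-on-conflict fallback below are illustrative, not the repo's exact code). Note that `HfHubHTTPError` is also importable from the public `huggingface_hub.utils` path, which avoids relying on the private `_errors` module:

```python
import logging

import huggingface_hub
from huggingface_hub.utils import HfHubHTTPError  # public path for the class caught above

logger = logging.getLogger(__name__)


def create_or_reuse_endpoint(endpoint_name, repository):
    """Hypothetical wrapper; the repo's create_endpoint takes more parameters."""
    try:
        return huggingface_hub.create_inference_endpoint(
            endpoint_name,
            repository=repository,
            framework="pytorch",
            accelerator="cpu",        # placeholder hardware settings
            instance_size="x4",
            instance_type="intel-icl",
            region="us-east-1",
            vendor="aws",
        )
    except HfHubHTTPError as e:
        # A name collision ("workload already exists") surfaces as an HTTP error.
        # Logging the status code makes it obvious in debug output that this
        # branch actually ran, which is what the TODO is asking about.
        logger.debug("Hit error (status %s):", getattr(e.response, "status_code", None))
        logger.debug(e)
        logger.debug("Attempting to update with the given parameters.")
        endpoint = huggingface_hub.get_inference_endpoint(endpoint_name)
        return endpoint.update(repository=repository)
```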
src/backend/run_toxicity_eval.py
CHANGED
@@ -169,7 +169,7 @@ def main(endpoint_url, eval_request):
     prompts = [row['text'] for row in ds['train']['prompt']]
     # All the generated responses from the endpoint
     with Pool() as pool:
-        generated_responses = pool.map(
+        generated_responses = pool.map(get_generation(endpoint_url, x) for x in prompts[:DATASET_CUTOFF])
     att_scores_out = score_generations(prompts, generated_responses)
     logger.debug("Scores are:")
     logger.debug(att_scores_out)
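One note on the added line: `multiprocessing.Pool.map` expects a callable followed by an iterable, so passing only a generator expression (as in the diff) raises a `TypeError` before any work is dispatched to the workers. A minimal sketch of the intended fan-out, assuming `get_generation(endpoint_url, prompt)` has the signature implied by the call above (the placeholder body and `DATASET_CUTOFF` value below are illustrative, not the repo's implementation):

```python
from functools import partial
from multiprocessing import Pool

DATASET_CUTOFF = 1000  # illustrative value; the real constant lives in run_toxicity_eval.py


def get_generation(endpoint_url, prompt):
    # Placeholder for the repo's get_generation: it would send `prompt` to the
    # inference endpoint at `endpoint_url` and return the generated text.
    return f"<generation for {prompt!r}>"


def generate_all(endpoint_url, prompts):
    # partial() pins endpoint_url so Pool.map can hand each worker just a prompt.
    with Pool() as pool:
        return pool.map(partial(get_generation, endpoint_url), prompts[:DATASET_CUTOFF])


if __name__ == "__main__":
    print(generate_all("https://example-endpoint.invalid", ["prompt one", "prompt two"]))
```

Equivalently, `pool.starmap(get_generation, [(endpoint_url, p) for p in prompts[:DATASET_CUTOFF]])` produces the same result without `partial`.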