openai error: BadRequestError: Invalid URL: missing field `name`
#100
by
hchtao
- opened
When using the openai example or huggingface_hub.InferenceClient
with base_url
, the error below occurred. The issue is not limited to this particular model; other models (such as google/gemma-2-2b-it
) fail as well.
BadRequestError Traceback (most recent call last)
Cell In[16], line 26
3 client = OpenAI(
4 base_url="https://api-inference.huggingface.co/v1/",
5 api_key=os.getenv("hf_token")
6 )
8 messages = [
9 {
10 "role": "user",
(...)
23 }
24 ]
---> 26 completion = client.chat.completions.create(
27 model="meta-llama/Llama-3.2-11B-Vision-Instruct",
28 messages=messages,
29 max_tokens=500
30 )
32 print(completion.choices[0].message)
File ~/anaconda3/envs/agent/lib/python3.12/site-packages/openai/_utils/_utils.py:279, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
277 msg = f"Missing required argument: {quote(missing[0])}"
278 raise TypeError(msg)
--> 279 return func(*args, **kwargs)
File ~/anaconda3/envs/agent/lib/python3.12/site-packages/openai/resources/chat/completions.py:859, in Completions.create(self, messages, model, audio, frequency_penalty, function_call, functions, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, modalities, n, parallel_tool_calls, prediction, presence_penalty, reasoning_effort, response_format, seed, service_tier, stop, store, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
817 @required_args(["messages", "model"], ["messages", "model", "stream"])
818 def create(
819 self,
(...)
856 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
857 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
858 validate_response_format(response_format)
--> 859 return self._post(
860 "/chat/completions",
861 body=maybe_transform(
862 {
863 "messages": messages,
864 "model": model,
865 "audio": audio,
866 "frequency_penalty": frequency_penalty,
867 "function_call": function_call,
868 "functions": functions,
869 "logit_bias": logit_bias,
870 "logprobs": logprobs,
871 "max_completion_tokens": max_completion_tokens,
872 "max_tokens": max_tokens,
873 "metadata": metadata,
874 "modalities": modalities,
875 "n": n,
876 "parallel_tool_calls": parallel_tool_calls,
877 "prediction": prediction,
878 "presence_penalty": presence_penalty,
879 "reasoning_effort": reasoning_effort,
880 "response_format": response_format,
881 "seed": seed,
882 "service_tier": service_tier,
883 "stop": stop,
884 "store": store,
885 "stream": stream,
886 "stream_options": stream_options,
887 "temperature": temperature,
888 "tool_choice": tool_choice,
889 "tools": tools,
890 "top_logprobs": top_logprobs,
891 "top_p": top_p,
892 "user": user,
893 },
894 completion_create_params.CompletionCreateParams,
895 ),
896 options=make_request_options(
897 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
898 ),
899 cast_to=ChatCompletion,
900 stream=stream or False,
901 stream_cls=Stream[ChatCompletionChunk],
902 )
File ~/anaconda3/envs/agent/lib/python3.12/site-packages/openai/_base_client.py:1283, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
1269 def post(
1270 self,
1271 path: str,
(...)
1278 stream_cls: type[_StreamT] | None = None,
1279 ) -> ResponseT | _StreamT:
1280 opts = FinalRequestOptions.construct(
1281 method="post", url=path, json_data=body, files=to_httpx_files(files), **options
1282 )
-> 1283 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
File ~/anaconda3/envs/agent/lib/python3.12/site-packages/openai/_base_client.py:960, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
957 else:
958 retries_taken = 0
--> 960 return self._request(
961 cast_to=cast_to,
962 options=options,
963 stream=stream,
964 stream_cls=stream_cls,
965 retries_taken=retries_taken,
966 )
File ~/anaconda3/envs/agent/lib/python3.12/site-packages/openai/_base_client.py:1064, in SyncAPIClient._request(self, cast_to, options, retries_taken, stream, stream_cls)
1061 err.response.read()
1063 log.debug("Re-raising status error")
-> 1064 raise self._make_status_error_from_response(err.response) from None
1066 return self._process_response(
1067 cast_to=cast_to,
1068 options=options,
(...)
1072 retries_taken=retries_taken,
1073 )
BadRequestError: Invalid URL: missing field `name`
When using huggingface_hub.InferenceClient
without base_url
, no error occurs.