Update: Fixed in #182
import os

from chatlas import ChatGithub
from dotenv import load_dotenv

load_dotenv(".env")

chat = ChatGithub(
    model="gpt-4.1",
    system_prompt="You are a terse assistant.",
    api_key=os.getenv("GITHUB_PAT"),
)
chat.chat("What is the capital of the moon?")
Calling chat.chat() returns an AuthenticationError with this traceback:
AuthenticationError: Error code: 401 - {'error': {'message': 'Incorrect API key provided: ghp_gKmz****************************7aub. You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}
Cell In[16], line 1
----> 1 chat.chat("What is the capital of the moon?")
File ~/git/rstudio/shiny-py/.venv/lib/python3.12/site-packages/chatlas/_chat.py:858, in Chat.chat(self, echo, stream, kwargs, *args)
847 response = ChatResponse(
848 self._chat_impl(
849 turn,
(...)
854 )
855 )
857 with display:
--> 858 for _ in response:
859 pass
861 return response
File ~/git/rstudio/shiny-py/.venv/lib/python3.12/site-packages/chatlas/_chat.py:2549, in ChatResponse.__next__(self)
2548 def __next__(self) -> str:
-> 2549 chunk = next(self._generator)
2550 self.content += chunk # Keep track of accumulated content
2551 return chunk
File ~/git/rstudio/shiny-py/.venv/lib/python3.12/site-packages/chatlas/_chat.py:2063, in Chat._chat_impl(self, user_turn, echo, content, stream, kwargs)
2061 user_turn_result: Turn | None = user_turn
2062 while user_turn_result is not None:
-> 2063 for chunk in self._submit_turns(
2064 user_turn_result,
2065 echo=echo,
2066 stream=stream,
2067 kwargs=kwargs,
2068 ):
2069 yield chunk
2071 turn = self.get_last_turn(role="assistant")
File ~/git/rstudio/shiny-py/.venv/lib/python3.12/site-packages/chatlas/_chat.py:2193, in Chat._submit_turns(self, user_turn, echo, stream, data_model, kwargs)
2190 all_kwargs.update(kwargs)
2192 if stream:
-> 2193 response = self.provider.chat_perform(
2194 stream=True,
2195 turns=[*self._turns, user_turn],
2196 tools=self._tools,
2197 data_model=data_model,
2198 kwargs=all_kwargs,
2199 )
2201 result = None
2202 for chunk in response:
File ~/git/rstudio/shiny-py/.venv/lib/python3.12/site-packages/chatlas/_provider_openai.py:282, in OpenAIProvider.chat_perform(self, stream, turns, tools, data_model, kwargs)
272 def chat_perform(
273 self,
274 *,
(...)
279 kwargs: Optional["SubmitInputArgs"] = None,
280 ):
281 kwargs = self._chat_perform_args(stream, turns, tools, data_model, kwargs)
--> 282 return self._client.chat.completions.create(**kwargs)
File ~/git/rstudio/shiny-py/.venv/lib/python3.12/site-packages/openai/_utils/_utils.py:286, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
284 msg = f"Missing required argument: {quote(missing[0])}"
285 raise TypeError(msg)
--> 286 return func(*args, **kwargs)
File ~/git/rstudio/shiny-py/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/completions.py:1147, in Completions.create(self, messages, model, audio, frequency_penalty, function_call, functions, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, modalities, n, parallel_tool_calls, prediction, presence_penalty, prompt_cache_key, reasoning_effort, response_format, safety_identifier, seed, service_tier, stop, store, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, verbosity, web_search_options, extra_headers, extra_query, extra_body, timeout)
1101 @required_args(["messages", "model"], ["messages", "model", "stream"])
1102 def create(
1103 self,
(...)
1144 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
1145 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
1146 validate_response_format(response_format)
-> 1147 return self._post(
1148 "/chat/completions",
1149 body=maybe_transform(
1150 {
1151 "messages": messages,
1152 "model": model,
1153 "audio": audio,
1154 "frequency_penalty": frequency_penalty,
1155 "function_call": function_call,
1156 "functions": functions,
1157 "logit_bias": logit_bias,
1158 "logprobs": logprobs,
1159 "max_completion_tokens": max_completion_tokens,
1160 "max_tokens": max_tokens,
1161 "metadata": metadata,
1162 "modalities": modalities,
1163 "n": n,
1164 "parallel_tool_calls": parallel_tool_calls,
1165 "prediction": prediction,
1166 "presence_penalty": presence_penalty,
1167 "prompt_cache_key": prompt_cache_key,
1168 "reasoning_effort": reasoning_effort,
1169 "response_format": response_format,
1170 "safety_identifier": safety_identifier,
1171 "seed": seed,
1172 "service_tier": service_tier,
1173 "stop": stop,
1174 "store": store,
1175 "stream": stream,
1176 "stream_options": stream_options,
1177 "temperature": temperature,
1178 "tool_choice": tool_choice,
1179 "tools": tools,
1180 "top_logprobs": top_logprobs,
1181 "top_p": top_p,
1182 "user": user,
1183 "verbosity": verbosity,
1184 "web_search_options": web_search_options,
1185 },
1186 completion_create_params.CompletionCreateParamsStreaming
1187 if stream
1188 else completion_create_params.CompletionCreateParamsNonStreaming,
1189 ),
1190 options=make_request_options(
1191 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
1192 ),
1193 cast_to=ChatCompletion,
1194 stream=stream or False,
1195 stream_cls=Stream[ChatCompletionChunk],
1196 )
File ~/git/rstudio/shiny-py/.venv/lib/python3.12/site-packages/openai/_base_client.py:1259, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
1245 def post(
1246 self,
1247 path: str,
(...)
1254 stream_cls: type[_StreamT] | None = None,
1255 ) -> ResponseT | _StreamT:
1256 opts = FinalRequestOptions.construct(
1257 method="post", url=path, json_data=body, files=to_httpx_files(files), **options
1258 )
-> 1259 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
File ~/git/rstudio/shiny-py/.venv/lib/python3.12/site-packages/openai/_base_client.py:1047, in SyncAPIClient.request(self, cast_to, options, stream, stream_cls)
1044 err.response.read()
1046 log.debug("Re-raising status error")
-> 1047 raise self._make_status_error_from_response(err.response) from None
1049 break
1051 assert response is not None, "could not resolve response (should never happen)"
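For what it's worth, the traceback shows the request going out through OpenAIProvider, and the 401 comes back from OpenAI's API (the error message points at platform.openai.com), so the GitHub PAT appears to be sent to the wrong endpoint. Until #182 lands, a possible workaround is pinning the GitHub Models endpoint explicitly. This is only a sketch: it assumes ChatGithub accepts a base_url argument and that the URL below is the right one for GitHub Models; neither is verified here.

import os

from chatlas import ChatGithub
from dotenv import load_dotenv

load_dotenv(".env")

# Assumed workaround: pass the GitHub Models endpoint explicitly so the
# request is not routed to api.openai.com. Both the base_url parameter and
# the URL value are assumptions, not verified against this chatlas version.
chat = ChatGithub(
    model="gpt-4.1",
    system_prompt="You are a terse assistant.",
    api_key=os.getenv("GITHUB_PAT"),
    base_url="https://models.inference.ai.azure.com/",
)
chat.chat("What is the capital of the moon?")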