Enhance functions Documentation and Improve Error Messages #350

Merged: 5 commits, May 21, 2024
17 changes: 9 additions & 8 deletions google/generativeai/answer.py
@@ -92,7 +92,7 @@ def _make_grounding_passages(source: GroundingPassagesOptions) -> glm.GroundingP
 
     if not isinstance(source, Iterable):
         raise TypeError(
-            f"The 'source' argument must be an instance of 'GroundingPassagesOptions', but got a '{type(source).__name__}' object instead."
+            f"Invalid input: The 'source' argument must be an instance of 'GroundingPassagesOptions'. Received a '{type(source).__name__}' object instead."
         )
 
     passages = []
@@ -156,9 +156,9 @@ def _make_semantic_retriever_config(
         source["source"] = _maybe_get_source_name(source["source"])
     else:
         raise TypeError(
-            "Could create a `glm.SemanticRetrieverConfig` from:\n"
-            f" type: {type(source)}\n"
-            f" value: {source}"
+            f"Invalid input: Failed to create a 'glm.SemanticRetrieverConfig' from the provided source. "
+            f"Received type: {type(source).__name__}, "
+            f"Received value: {source}"
         )
 
     if source["query"] is None:
@@ -208,15 +208,17 @@ def _make_generate_answer_request(
 
     if inline_passages is not None and semantic_retriever is not None:
         raise ValueError(
-            "Either `inline_passages` or `semantic_retriever_config` must be set, not both."
+            f"Invalid configuration: Please set either 'inline_passages' or 'semantic_retriever_config', but not both. "
+            f"Received for inline_passages: {inline_passages}, and for semantic_retriever: {semantic_retriever}."
         )
     elif inline_passages is not None:
         inline_passages = _make_grounding_passages(inline_passages)
     elif semantic_retriever is not None:
         semantic_retriever = _make_semantic_retriever_config(semantic_retriever, contents[-1])
     else:
         raise TypeError(
-            f"The source must be either an `inline_passages` xor `semantic_retriever_config`, but both are `None`"
+            f"Invalid configuration: Either 'inline_passages' or 'semantic_retriever_config' must be provided, but currently both are 'None'. "
+            f"Received for inline_passages: {inline_passages}, and for semantic_retriever: {semantic_retriever}."
         )
 
     if answer_style:
@@ -245,8 +247,7 @@ def generate_answer(
     client: glm.GenerativeServiceClient | None = None,
     request_options: helper_types.RequestOptionsType | None = None,
 ):
-    """
-    Calls the GenerateAnswer API and returns a `types.Answer` containing the response.
+    """Calls the GenerateAnswer API and returns a `types.Answer` containing the response.
 
     You can pass a literal list of text chunks:
 
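A quick sketch of how the reworded validation above surfaces to callers. This is illustrative only: it assumes `genai.configure()` has already been run with a valid API key, and the model name and question are placeholder values, not part of this PR.

    from google.generativeai.answer import generate_answer

    try:
        # Neither `inline_passages` nor `semantic_retriever` is supplied,
        # so request construction fails before any answer is generated.
        generate_answer(
            model="models/aqa",  # placeholder AQA-style model name
            contents=["What color is the sky?"],
        )
    except TypeError as e:
        print(e)  # "Invalid configuration: Either 'inline_passages' or ..."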
14 changes: 10 additions & 4 deletions google/generativeai/client.py
@@ -38,7 +38,9 @@ def __init__(self, *args, **kwargs):
     def _setup_discovery_api(self):
         api_key = self._client_options.api_key
         if api_key is None:
-            raise ValueError("Uploading to the File API requires an API key.")
+            raise ValueError(
+                "Invalid operation: Uploading to the File API requires an API key. Please provide a valid API key."
+            )
 
         request = googleapiclient.http.HttpRequest(
             http=httplib2.Http(),
@@ -81,7 +83,9 @@ def create_file(
 
 class FileServiceAsyncClient(glm.FileServiceAsyncClient):
     async def create_file(self, *args, **kwargs):
-        raise NotImplementedError("`create_file` is not yet implemented for the async client.")
+        raise NotImplementedError(
+            "The `create_file` method is currently not supported for the asynchronous client."
+        )
 
 
 @dataclasses.dataclass
@@ -109,7 +113,7 @@ def configure(
     client_info: gapic_v1.client_info.ClientInfo | None = None,
     default_metadata: Sequence[tuple[str, str]] = (),
 ) -> None:
-    """Captures default client configuration.
+    """Initializes default client configurations using specified parameters or environment variables.
 
     If no API key has been provided (either directly, or on `client_options`) and the
     `GOOGLE_API_KEY` environment variable is set, it will be used as the API key.
@@ -135,7 +139,9 @@
 
     if had_api_key_value:
         if api_key is not None:
-            raise ValueError("You can't set both `api_key` and `client_options['api_key']`.")
+            raise ValueError(
+                "Invalid configuration: Please set either `api_key` or `client_options['api_key']`, but not both."
+            )
     else:
         if api_key is None:
             # If no key is provided explicitly, attempt to load one from the
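A minimal sketch of the configure() guard above: supplying an API key both directly and via client_options now fails fast with the clearer message. The key strings are placeholders.

    import google.generativeai as genai

    try:
        # Two competing keys are passed on purpose to trip the guard.
        genai.configure(api_key="KEY_A", client_options={"api_key": "KEY_B"})
    except ValueError as e:
        print(e)  # "Invalid configuration: Please set either `api_key` or ..."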
35 changes: 21 additions & 14 deletions google/generativeai/discuss.py
@@ -69,15 +69,19 @@ def _make_messages(
     elif len(even_authors) == 1:
         even_author = even_authors.pop()
     else:
-        raise discuss_types.AuthorError("Authors are not strictly alternating")
+        raise discuss_types.AuthorError(
+            "Invalid sequence: Authors in the discussion must alternate strictly."
+        )
 
     odd_authors = set(msg.author for msg in messages[1::2] if msg.author)
     if not odd_authors:
         odd_author = "1"
     elif len(odd_authors) == 1:
         odd_author = odd_authors.pop()
     else:
-        raise discuss_types.AuthorError("Authors are not strictly alternating")
+        raise discuss_types.AuthorError(
+            "Invalid sequence: Authors in the discussion must alternate strictly."
+        )
 
     if all(msg.author for msg in messages):
         return messages
@@ -130,8 +134,8 @@ def _make_examples_from_flat(
         raise ValueError(
             textwrap.dedent(
                 f"""\
-                You must pass `Primer` objects, pairs of messages, or an *even* number of messages, got:
-                {len(examples)} messages"""
+                Invalid input: You must pass either `Primer` objects, pairs of messages, or an even number of messages.
+                Currently, {len(examples)} messages were provided, which is an odd number."""
             )
         )
     result = []
@@ -186,7 +190,7 @@ def _make_examples(
     else:
         if not ("input" in first and "output" in first):
             raise TypeError(
-                "To create an `Example` from a dict you must supply both `input` and an `output` keys"
+                "Invalid dictionary format: To create an `Example` instance, the dictionary must contain both `input` and `output` keys."
             )
     else:
         if isinstance(first, discuss_types.MESSAGE_OPTIONS):
@@ -232,8 +236,7 @@ def _make_message_prompt_dict(
     flat_prompt = (context is not None) or (examples is not None) or (messages is not None)
     if flat_prompt:
         raise ValueError(
-            "You can't set `prompt`, and its fields `(context, examples, messages)`"
-            " at the same time"
+            "Invalid configuration: Either `prompt` or its fields `(context, examples, messages)` should be set, but not both simultaneously."
         )
     if isinstance(prompt, glm.MessagePrompt):
         return prompt
@@ -245,7 +248,7 @@
     keys = set(prompt.keys())
     if not keys.issubset(discuss_types.MESSAGE_PROMPT_KEYS):
         raise KeyError(
-            f"Found extra entries in the prompt dictionary: {keys - discuss_types.MESSAGE_PROMPT_KEYS}"
+            f"Invalid prompt dictionary: Extra entries found that are not recognized: {keys - discuss_types.MESSAGE_PROMPT_KEYS}. Please check the keys."
         )
 
     examples = prompt.get("examples", None)
@@ -319,7 +322,7 @@ def chat(
     client: glm.DiscussServiceClient | None = None,
     request_options: helper_types.RequestOptionsType | None = None,
 ) -> discuss_types.ChatResponse:
-    """Calls the API and returns a `types.ChatResponse` containing the response.
+    """Calls the API to initiate a chat with a model using provided parameters
 
     Args:
         model: Which model to call, as a string or a `types.Model`.
@@ -419,6 +422,7 @@ async def chat_async(
     client: glm.DiscussServiceAsyncClient | None = None,
     request_options: helper_types.RequestOptionsType | None = None,
 ) -> discuss_types.ChatResponse:
+    """Calls the API asynchronously to initiate a chat with a model using provided parameters"""
     request = _make_generate_message_request(
         model=model,
         context=context,
@@ -473,12 +477,13 @@ def reply(
         request_options: helper_types.RequestOptionsType | None = None,
     ) -> discuss_types.ChatResponse:
         if isinstance(self._client, glm.DiscussServiceAsyncClient):
-            raise TypeError(f"reply can't be called on an async client, use reply_async instead.")
+            raise TypeError(
+                "Invalid operation: The 'reply' method cannot be called on an asynchronous client. Please use the 'reply_async' method instead."
+            )
         if self.last is None:
             raise ValueError(
-                "The last response from the model did not return any candidates.\n"
-                "Check the `.filters` attribute to see why the responses were filtered:\n"
-                f"{self.filters}"
+                f"Invalid operation: No candidates returned from the model's last response. "
+                f"Please inspect the '.filters' attribute to understand why responses were filtered out. Current filters: {self.filters}"
             )
 
         request = self.to_dict()
@@ -497,7 +502,7 @@
     ) -> discuss_types.ChatResponse:
         if isinstance(self._client, glm.DiscussServiceClient):
             raise TypeError(
-                f"reply_async can't be called on a non-async client, use reply instead."
+                "Invalid method call: `reply_async` is not supported on a non-async client. Please use the `reply` method instead."
             )
         request = self.to_dict()
         request.pop("candidates")
@@ -577,6 +582,8 @@ def count_message_tokens(
     client: glm.DiscussServiceAsyncClient | None = None,
     request_options: helper_types.RequestOptionsType | None = None,
 ) -> discuss_types.TokenCount:
+    """Calls the API to calculate the number of tokens used in the prompt."""
+
     model = model_types.make_model_name(model)
     prompt = _make_message_prompt(prompt, context=context, examples=examples, messages=messages)
 
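A short sketch of the stricter author validation in discuss.py. The legacy discuss (PaLM) API expects authors to alternate; here two different authors occupy even positions, so message validation fails before any request is sent. Author names and contents are placeholders, and a configured API key is assumed.

    from google.generativeai.discuss import chat
    from google.generativeai.types import discuss_types

    messages = [
        {"author": "user_a", "content": "Hi there."},
        {"author": "model", "content": "Hello!"},
        {"author": "user_b", "content": "How are you?"},  # even slot, different author
    ]

    try:
        chat(messages=messages)
    except discuss_types.AuthorError as e:
        print(e)  # "Invalid sequence: Authors in the discussion must alternate strictly."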
20 changes: 13 additions & 7 deletions google/generativeai/embedding.py
@@ -82,7 +82,9 @@ def to_task_type(x: EmbeddingTaskTypeOptions) -> EmbeddingTaskType:
 
 def _batched(iterable: Iterable[T], n: int) -> Iterable[list[T]]:
     if n < 1:
-        raise ValueError(f"Batch size `n` must be >0, got: {n}")
+        raise ValueError(
+            f"Invalid input: The batch size 'n' must be a positive integer. You entered: {n}. Please enter a number greater than 0."
+        )
     batch = []
     for item in iterable:
         batch.append(item)
@@ -167,11 +169,13 @@ def embed_content(
 
     if title and to_task_type(task_type) is not EmbeddingTaskType.RETRIEVAL_DOCUMENT:
         raise ValueError(
-            "If a title is specified, the task must be a retrieval document type task."
+            f"Invalid task type: When a title is specified, the task must be of a 'retrieval document' type. Received task type: {task_type} and title: {title}."
         )
 
     if output_dimensionality and output_dimensionality < 0:
-        raise ValueError("`output_dimensionality` must be a non-negative integer.")
+        raise ValueError(
+            f"Invalid value: `output_dimensionality` must be a non-negative integer. Received: {output_dimensionality}."
+        )
 
     if task_type:
         task_type = to_task_type(task_type)
@@ -247,7 +251,8 @@ async def embed_content_async(
     client: glm.GenerativeServiceAsyncClient = None,
     request_options: helper_types.RequestOptionsType | None = None,
 ) -> text_types.EmbeddingDict | text_types.BatchEmbeddingDict:
-    """The async version of `genai.embed_content`."""
+    """Calls the API to create async embeddings for content passed in."""
+
     model = model_types.make_model_name(model)
 
     if request_options is None:
@@ -258,11 +263,12 @@
 
     if title and to_task_type(task_type) is not EmbeddingTaskType.RETRIEVAL_DOCUMENT:
         raise ValueError(
-            "If a title is specified, the task must be a retrieval document type task."
+            f"Invalid task type: When a title is specified, the task must be of a 'retrieval document' type. Received task type: {task_type} and title: {title}."
         )
 
     if output_dimensionality and output_dimensionality < 0:
-        raise ValueError("`output_dimensionality` must be a non-negative integer.")
+        raise ValueError(
+            f"Invalid value: `output_dimensionality` must be a non-negative integer. Received: {output_dimensionality}."
+        )
 
     if task_type:
         task_type = to_task_type(task_type)
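A minimal sketch of the embed_content() checks above: a title is only allowed for retrieval-document embeddings, so any other task_type now raises the more detailed ValueError before a request is made. The model name and text are placeholders, and a configured API key is assumed.

    import google.generativeai as genai

    try:
        genai.embed_content(
            model="models/text-embedding-004",  # placeholder embedding model
            content="The quick brown fox.",
            task_type="semantic_similarity",  # not a retrieval-document task
            title="A fox fact",
        )
    except ValueError as e:
        print(e)  # "Invalid task type: When a title is specified, ..."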
5 changes: 4 additions & 1 deletion google/generativeai/files.py
@@ -37,7 +37,7 @@ def upload_file(
     display_name: str | None = None,
     resumable: bool = True,
 ) -> file_types.File:
-    """Uploads a file using a supported file service.
+    """Calls the API to upload a file using a supported file service.
 
     Args:
         path: The path to the file to be uploaded.
@@ -73,6 +73,7 @@
 
 
 def list_files(page_size=100) -> Iterable[file_types.File]:
+    """Calls the API to list files using a supported file service."""
     client = get_default_file_client()
 
     response = client.list_files(glm.ListFilesRequest(page_size=page_size))
@@ -81,11 +82,13 @@ def list_files(page_size=100) -> Iterable[file_types.File]:
 
 
 def get_file(name) -> file_types.File:
+    """Calls the API to retrieve a specified file using a supported file service."""
     client = get_default_file_client()
     return file_types.File(client.get_file(name=name))
 
 
 def delete_file(name):
+    """Calls the API to permanently delete a specified file using a supported file service."""
     if isinstance(name, (file_types.File, glm.File)):
         name = name.name
     request = glm.DeleteFileRequest(name=name)
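A brief usage sketch tying the File API helpers together, matching the docstrings added above. It assumes genai.configure() has been run with a valid API key; the file path and display name are placeholders.

    import google.generativeai as genai

    uploaded = genai.upload_file(path="notes.txt", display_name="My notes")
    print(uploaded.name)  # server-assigned resource name, e.g. "files/..."

    for f in genai.list_files(page_size=10):
        print(f.name)

    retrieved = genai.get_file(uploaded.name)  # look the file up again by name
    genai.delete_file(uploaded.name)  # permanent, as the new docstring notes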