Add genai.protos
Change-Id: I21cfada033c6ffbed7a20e117e61582fde925f61
MarkDaoust committed May 21, 2024
1 parent 88f7ab3 commit c9a955e
Showing 42 changed files with 1,375 additions and 1,446 deletions.
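At its core, the commit swaps a direct dependency on the low-level `google.ai.generativelanguage` client library (aliased `glm`) for a new `google.generativeai.protos` module that re-exports the same generated classes. A minimal sketch of the rename as it affects user code (the enum access and the equality check are assumptions based on the types touched in this diff):

```
# Before this commit: proto types came from the standalone low-level client library.
import google.ai.generativelanguage as glm

# After this commit: the same generated classes are re-exported by the public package.
from google.generativeai import protos

# Either path should name the same proto class (assumption based on this diff):
style = protos.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
assert style == glm.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
```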
82 changes: 4 additions & 78 deletions docs/build_docs.py
@@ -44,77 +44,13 @@
# For showing the conditional imports and types in `content_types.py`
# grpc must be imported first.
typing.TYPE_CHECKING = True
-from google import generativeai as palm
+from google import generativeai as genai


from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api

import yaml

-glm.__doc__ = """\
-This package, `google.ai.generativelanguage`, is a low-level auto-generated client library for the PaLM API.
-```posix-terminal
-pip install google.ai.generativelanguage
-```
-It is built using the same tooling as Google Cloud client libraries, and will be quite familiar if you've used
-those before.
-While we encourage Python users to access the PaLM API using the `google.generativeai` package (aka `palm`),
-this lower level package is also available.
-Each method in the PaLM API is connected to one of the client classes. Pass your API-key to the class' `client_options`
-when initializing a client:
-```
-from google.ai import generativelanguage as glm
-client = glm.DiscussServiceClient(
-    client_options={'api_key':'YOUR_API_KEY'})
-```
-To call the api, pass an appropriate request-proto-object. For the `DiscussServiceClient.generate_message` pass
-a `generativelanguage.GenerateMessageRequest` instance:
-```
-request = glm.GenerateMessageRequest(
-    model='models/chat-bison-001',
-    prompt=glm.MessagePrompt(
-        messages=[glm.Message(content='Hello!')]))
-client.generate_message(request)
-```
-```
-candidates {
-  author: "1"
-  content: "Hello! How can I help you today?"
-}
-...
-```
-For simplicity:
-* The API methods also accept key-word arguments.
-* Anywhere you might pass a proto-object, the library will also accept simple python structures.
-So the following is equivalent to the previous example:
-```
-client.generate_message(
-    model='models/chat-bison-001',
-    prompt={'messages':[{'content':'Hello!'}]})
-```
-```
-candidates {
-  author: "1"
-  content: "Hello! How can I help you today?"
-}
-...
-```
-"""

HERE = pathlib.Path(__file__).parent

PROJECT_SHORT_NAME = "genai"
@@ -143,21 +79,11 @@ class MyFilter:
    def __init__(self, base_dirs):
        self.filter_base_dirs = public_api.FilterBaseDirs(base_dirs)

-    def drop_staticmethods(self, parent, children):
-        parent = dict(parent.__dict__)
-        for name, value in children:
-            if not isinstance(parent.get(name, None), staticmethod):
-                yield name, value

    def __call__(self, path, parent, children):
        if any("generativelanguage" in part for part in path) or "generativeai" in path:
            children = self.filter_base_dirs(path, parent, children)
            children = public_api.explicit_package_contents_filter(path, parent, children)

-        if any("generativelanguage" in part for part in path):
-            if "ServiceClient" in path[-1] or "ServiceAsyncClient" in path[-1]:
-                children = list(self.drop_staticmethods(parent, children))

        return children

@@ -188,11 +114,11 @@ def gen_api_docs():
"""
)

doc_generator = MyDocGenerator(
doc_generator = generate_lib.DocGenerator(
root_title=PROJECT_FULL_NAME,
py_modules=[("google", google)],
py_modules=[("google.generativeai", genai)],
base_dir=(
pathlib.Path(palm.__file__).parent,
pathlib.Path(genai.__file__).parent,
pathlib.Path(glm.__file__).parent.parent,
),
code_url_prefix=(
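For orientation, a hedged sketch of how the `MyFilter` class above and this `DocGenerator` call fit together. The `callbacks` parameter and the `build` call follow the usual `tensorflow_docs` convention rather than anything visible in this diff, and the title and URL are placeholders:

```
import pathlib

from tensorflow_docs.api_generator import generate_lib

import google.generativeai as genai

base_dirs = (pathlib.Path(genai.__file__).parent,)

doc_generator = generate_lib.DocGenerator(
    root_title="Google Generative AI",  # placeholder for PROJECT_FULL_NAME
    py_modules=[("google.generativeai", genai)],
    base_dir=base_dirs,
    # Placeholder URL; the real value is collapsed out of this diff.
    code_url_prefix=("https://github.com/google-gemini/generative-ai-python/blob/main/google/generativeai",),
    callbacks=[MyFilter(base_dirs)],  # run the filter over every visited API page
)
doc_generator.build(pathlib.Path("/tmp/genai_docs"))
```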
66 changes: 33 additions & 33 deletions google/generativeai/answer.py
@@ -20,7 +20,7 @@
from typing import Any, Iterable, Union, Mapping, Optional
from typing_extensions import TypedDict

-import google.ai.generativelanguage as glm
+from google.generativeai import protos

from google.generativeai.client import (
    get_default_generative_client,
@@ -35,7 +35,7 @@

DEFAULT_ANSWER_MODEL = "models/aqa"

-AnswerStyle = glm.GenerateAnswerRequest.AnswerStyle
+AnswerStyle = protos.GenerateAnswerRequest.AnswerStyle

AnswerStyleOptions = Union[int, str, AnswerStyle]
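Since `AnswerStyleOptions = Union[int, str, AnswerStyle]`, callers may spell a style three ways; a small sketch of the equivalence (that `ABSTRACTIVE` has value 1 is an assumption about the proto enum's numbering, not something shown in this diff):

```
from google.generativeai import protos

AnswerStyle = protos.GenerateAnswerRequest.AnswerStyle

print(AnswerStyle.ABSTRACTIVE)     # the enum member itself
print(AnswerStyle["ABSTRACTIVE"])  # looked up by name
print(AnswerStyle(1))              # looked up by value (assumed to be 1)
```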

@@ -66,28 +66,28 @@ def to_answer_style(x: AnswerStyleOptions) -> AnswerStyle:


GroundingPassageOptions = (
-    Union[glm.GroundingPassage, tuple[str, content_types.ContentType], content_types.ContentType],
+    Union[protos.GroundingPassage, tuple[str, content_types.ContentType], content_types.ContentType],
)

GroundingPassagesOptions = Union[
-    glm.GroundingPassages,
+    protos.GroundingPassages,
    Iterable[GroundingPassageOptions],
    Mapping[str, content_types.ContentType],
]


-def _make_grounding_passages(source: GroundingPassagesOptions) -> glm.GroundingPassages:
+def _make_grounding_passages(source: GroundingPassagesOptions) -> protos.GroundingPassages:
    """
-    Converts the `source` into a `glm.GroundingPassage`. A `GroundingPassages` contains a list of
-    `glm.GroundingPassage` objects, which each contain a `glm.Contant` and a string `id`.
+    Converts the `source` into a `protos.GroundingPassages`. A `GroundingPassages` contains a list of
+    `protos.GroundingPassage` objects, each of which contains a `protos.Content` and a string `id`.
    Args:
-        source: `Content` or a `GroundingPassagesOptions` that will be converted to glm.GroundingPassages.
+        source: `Content` or a `GroundingPassagesOptions` that will be converted to protos.GroundingPassages.
    Return:
-        `glm.GroundingPassages` to be passed into `glm.GenerateAnswer`.
+        `protos.GroundingPassages` to be passed into a `protos.GenerateAnswerRequest`.
    """
-    if isinstance(source, glm.GroundingPassages):
+    if isinstance(source, protos.GroundingPassages):
        return source

    if not isinstance(source, Iterable):
@@ -100,19 +100,19 @@ def _make_grounding_passages(source: GroundingPassagesOptions) -> glm.GroundingP
        source = source.items()

    for n, data in enumerate(source):
-        if isinstance(data, glm.GroundingPassage):
+        if isinstance(data, protos.GroundingPassage):
            passages.append(data)
        elif isinstance(data, tuple):
            id, content = data  # tuple must have exactly 2 items.
            passages.append({"id": id, "content": content_types.to_content(content)})
        else:
            passages.append({"id": str(n), "content": content_types.to_content(data)})

-    return glm.GroundingPassages(passages=passages)
+    return protos.GroundingPassages(passages=passages)


SourceNameType = Union[
-    str, retriever_types.Corpus, glm.Corpus, retriever_types.Document, glm.Document
+    str, retriever_types.Corpus, protos.Corpus, retriever_types.Document, protos.Document
]
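The `GroundingPassagesOptions` union above means `_make_grounding_passages` normalizes several equivalent inputs; a hedged sketch of the accepted shapes (passage ids and text are invented for illustration):

```
from google.generativeai import protos

# An explicit proto; proto-plus also accepts nested dicts for each field.
explicit = protos.GroundingPassages(passages=[
    {"id": "p0", "content": {"parts": [{"text": "Water boils at 100 C at sea level."}]}},
])

# A mapping of id -> content,
as_mapping = {"p0": "Water boils at 100 C at sea level."}

# or a bare iterable of contents; string ids "0", "1", ... are assigned in order.
as_iterable = ["Water boils at 100 C at sea level."]
```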


@@ -127,15 +127,15 @@ class SemanticRetrieverConfigDict(TypedDict):
SemanticRetrieverConfigOptions = Union[
    SourceNameType,
    SemanticRetrieverConfigDict,
-    glm.SemanticRetrieverConfig,
+    protos.SemanticRetrieverConfig,
]


def _maybe_get_source_name(source) -> str | None:
    if isinstance(source, str):
        return source
    elif isinstance(
-        source, (retriever_types.Corpus, glm.Corpus, retriever_types.Document, glm.Document)
+        source, (retriever_types.Corpus, protos.Corpus, retriever_types.Document, protos.Document)
    ):
        return source.name
    else:
@@ -145,8 +145,8 @@ def _maybe_get_source_name(source) -> str | None:
def _make_semantic_retriever_config(
    source: SemanticRetrieverConfigOptions,
    query: content_types.ContentsType,
-) -> glm.SemanticRetrieverConfig:
-    if isinstance(source, glm.SemanticRetrieverConfig):
+) -> protos.SemanticRetrieverConfig:
+    if isinstance(source, protos.SemanticRetrieverConfig):
        return source

    name = _maybe_get_source_name(source)
@@ -156,7 +156,7 @@ def _make_semantic_retriever_config(
source["source"] = _maybe_get_source_name(source["source"])
else:
raise TypeError(
"Could create a `glm.SemanticRetrieverConfig` from:\n"
"Could create a `protos.SemanticRetrieverConfig` from:\n"
f" type: {type(source)}\n"
f" value: {source}"
)
@@ -166,7 +166,7 @@ def _make_semantic_retriever_config(
elif isinstance(source["query"], str):
source["query"] = content_types.to_content(source["query"])

return glm.SemanticRetrieverConfig(source)
return protos.SemanticRetrieverConfig(source)


def _make_generate_answer_request(
@@ -178,26 +178,26 @@ def _make_generate_answer_request(
    answer_style: AnswerStyle | None = None,
    safety_settings: safety_types.SafetySettingOptions | None = None,
    temperature: float | None = None,
-) -> glm.GenerateAnswerRequest:
+) -> protos.GenerateAnswerRequest:
    """
-    constructs a glm.GenerateAnswerRequest object by organizing the input parameters for the API call to generate a grounded answer from the model.
+    Constructs a protos.GenerateAnswerRequest object by organizing the input parameters for the API call to generate a grounded answer from the model.
    Args:
        model: Name of the model used to generate the grounded response.
        contents: Content of the current conversation with the model. For single-turn query, this is a
            single question to answer. For multi-turn queries, this is a repeated field that contains
            conversation history and the last `Content` in the list containing the question.
        inline_passages: Grounding passages (a list of `Content`-like objects or `(id, content)` pairs,
-            or a `glm.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retreiver`,
+            or a `protos.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retriever`,
            one must be set, but not both.
-        semantic_retriever: A Corpus, Document, or `glm.SemanticRetrieverConfig` to use for grounding. Exclusive with
+        semantic_retriever: A Corpus, Document, or `protos.SemanticRetrieverConfig` to use for grounding. Exclusive with
            `inline_passages`, one must be set, but not both.
        answer_style: Style for grounded answers.
        safety_settings: Safety settings for generated output.
        temperature: The temperature for randomness in the output.
    Returns:
-        Call for glm.GenerateAnswerRequest().
+        A `protos.GenerateAnswerRequest` to pass to the API.
    """
    model = model_types.make_model_name(model)

@@ -222,7 +222,7 @@ def _make_generate_answer_request(
    if answer_style:
        answer_style = to_answer_style(answer_style)

-    return glm.GenerateAnswerRequest(
+    return protos.GenerateAnswerRequest(
        model=model,
        contents=contents,
        inline_passages=inline_passages,
@@ -242,7 +242,7 @@ def generate_answer(
    answer_style: AnswerStyle | None = None,
    safety_settings: safety_types.SafetySettingOptions | None = None,
    temperature: float | None = None,
-    client: glm.GenerativeServiceClient | None = None,
+    client: protos.GenerativeServiceClient | None = None,
    request_options: helper_types.RequestOptionsType | None = None,
):
    """
@@ -272,14 +272,14 @@
        contents: The question to be answered by the model, grounded in the
            provided source.
        inline_passages: Grounding passages (a list of `Content`-like objects or (id, content) pairs,
-            or a `glm.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retreiver`,
+            or a `protos.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retriever`,
            one must be set, but not both.
-        semantic_retriever: A Corpus, Document, or `glm.SemanticRetrieverConfig` to use for grounding. Exclusive with
+        semantic_retriever: A Corpus, Document, or `protos.SemanticRetrieverConfig` to use for grounding. Exclusive with
            `inline_passages`, one must be set, but not both.
        answer_style: Style in which the grounded answer should be returned.
        safety_settings: Safety settings for generated output. Defaults to None.
        temperature: Controls the randomness of the output.
-        client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead.
+        client: If you're not relying on the default client, pass a `protos.GenerativeServiceClient` instead.
        request_options: Options for the request.
    Returns:
@@ -315,7 +315,7 @@ async def generate_answer_async(
    answer_style: AnswerStyle | None = None,
    safety_settings: safety_types.SafetySettingOptions | None = None,
    temperature: float | None = None,
-    client: glm.GenerativeServiceClient | None = None,
+    client: protos.GenerativeServiceClient | None = None,
    request_options: helper_types.RequestOptionsType | None = None,
):
    """
@@ -326,14 +326,14 @@ async def generate_answer_async(
        contents: The question to be answered by the model, grounded in the
            provided source.
        inline_passages: Grounding passages (a list of `Content`-like objects or (id, content) pairs,
-            or a `glm.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retreiver`,
+            or a `protos.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retriever`,
            one must be set, but not both.
-        semantic_retriever: A Corpus, Document, or `glm.SemanticRetrieverConfig` to use for grounding. Exclusive with
+        semantic_retriever: A Corpus, Document, or `protos.SemanticRetrieverConfig` to use for grounding. Exclusive with
            `inline_passages`, one must be set, but not both.
        answer_style: Style in which the grounded answer should be returned.
        safety_settings: Safety settings for generated output. Defaults to None.
        temperature: Controls the randomness of the output.
-        client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead.
+        client: If you're not relying on the default client, pass a `protos.GenerativeServiceClient` instead.
    Returns:
        A `types.Answer` containing the model's text answer response.
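Taken together, a hedged usage sketch for the function documented above; it assumes `generate_answer` mirrors the `model`/`contents` parameters of `_make_generate_answer_request` and that the API key has access to the AQA model:

```
import google.generativeai as genai
from google.generativeai.answer import generate_answer

genai.configure(api_key="YOUR_API_KEY")  # placeholder key

answer = generate_answer(
    model="models/aqa",  # DEFAULT_ANSWER_MODEL in this module
    contents="At what temperature does water boil at sea level?",
    inline_passages={"p0": "At sea level, water boils at 100 degrees Celsius."},
    answer_style="ABSTRACTIVE",
)
print(answer)
```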