Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We'll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Python: Removes the _async suffix where it is not needed #4735

Merged
8 changes: 8 additions & 0 deletions python/DEV_SETUP.md
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,14 @@ You can also run all the tests together under the [tests](tests/) folder.

## Tools and scripts

## Implementation Decisions

### Asynchronous programming

It's important to note that most of this library is written with asynchronous programming in mind. The
developer should always assume everything is asynchronous. One can use the function signature
with either `async def` or `def` to understand if something is asynchronous or not.

## Pydantic and Serialization

[Pydantic Documentation](https://docs.pydantic.dev/1.10/)
Expand Down
2 changes: 1 addition & 1 deletion python/samples/kernel-syntax-examples/action_planner.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ async def main():
ask = "What is the sum of 110 and 990?"

# ask the action planner to identify a suitable function from the list of functions available.
plan = await planner.create_plan_async(goal=ask)
plan = await planner.create_plan(goal=ask)

# ask the action planner to execute the identified function.
result = await plan.invoke_async()
Expand Down
4 changes: 2 additions & 2 deletions python/samples/kernel-syntax-examples/azure_chat_gpt_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,13 +77,13 @@ async def chat() -> bool:

stream = True
if stream:
answer = kernel.run_stream_async(chat_function, input_vars=context_vars)
answer = kernel.run_stream(chat_function, input_vars=context_vars)
print("Mosscap:> ", end="")
async for message in answer:
print(message, end="")
print("\n")
return True
answer = await kernel.run_async(chat_function, input_vars=context_vars)
answer = await kernel.run(chat_function, input_vars=context_vars)
print(f"Mosscap:> {answer}")
return True

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ async def chat() -> bool:
# answer = await kernel.run_async(chat_function, input_vars=context_vars)
# print(f"Assistant:> {answer}")

answer = kernel.run_stream_async(chat_function, input_vars=context_vars, input_context=context)
answer = kernel.run_stream(chat_function, input_vars=context_vars, input_context=context)
print("Assistant:> ", end="")
async for message in answer:
print(message, end="")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ async def chat() -> bool:
# answer = await kernel.run_async(chat_function, input_vars=context_vars)
# print(f"Assistant:> {answer}")

answer = kernel.run_stream_async(chat_function, input_vars=context_vars)
answer = kernel.run_stream(chat_function, input_vars=context_vars)
print("Assistant:> ", end="")
async for message in answer:
print(message, end="")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ async def chat() -> bool:
# answer = await kernel.run_async(chat_function, input_vars=context_vars)
# print(f"Assistant:> {answer}")

answer = kernel.run_stream_async(chat_function, input_vars=context_vars, input_context=context)
answer = kernel.run_stream(chat_function, input_vars=context_vars, input_context=context)
print("Assistant:> ", end="")
async for message in answer:
print(message, end="")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,17 +18,15 @@

async def populate_memory(kernel: sk.Kernel) -> None:
# Add some documents to the ACS semantic memory
await kernel.memory.save_information_async(COLLECTION_NAME, id="info1", text="My name is Andrea")
await kernel.memory.save_information_async(COLLECTION_NAME, id="info2", text="I currently work as a tour guide")
await kernel.memory.save_information_async(
COLLECTION_NAME, id="info3", text="I've been living in Seattle since 2005"
)
await kernel.memory.save_information_async(
await kernel.memory.save_information(COLLECTION_NAME, id="info1", text="My name is Andrea")
await kernel.memory.save_information(COLLECTION_NAME, id="info2", text="I currently work as a tour guide")
await kernel.memory.save_information(COLLECTION_NAME, id="info3", text="I've been living in Seattle since 2005")
await kernel.memory.save_information(
COLLECTION_NAME,
id="info4",
text="I visited France and Italy five times since 2015",
)
await kernel.memory.save_information_async(COLLECTION_NAME, id="info5", text="My family is from New York")
await kernel.memory.save_information(COLLECTION_NAME, id="info5", text="My family is from New York")


async def search_acs_memory_questions(kernel: sk.Kernel) -> None:
Expand All @@ -42,7 +40,7 @@ async def search_acs_memory_questions(kernel: sk.Kernel) -> None:

for question in questions:
print(f"Question: {question}")
result = await kernel.memory.search_async(COLLECTION_NAME, question)
result = await kernel.memory.search(COLLECTION_NAME, question)
print(f"Answer: {result[0].text}\n")


Expand Down Expand Up @@ -88,7 +86,7 @@ async def main() -> None:
print("Asking questions... (manually)")
await search_acs_memory_questions(kernel)

await connector.close_async()
await connector.close()


if __name__ == "__main__":
Expand Down
4 changes: 2 additions & 2 deletions python/samples/kernel-syntax-examples/bing_search_plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ async def main():
web_plugin = kernel.import_plugin(WebSearchEnginePlugin(connector), "WebSearch")

prompt = "Who is Leonardo DiCaprio's current girlfriend?"
search_async = web_plugin["searchAsync"]
result = await search_async.invoke_async(prompt)
search = web_plugin["searchAsync"]
result = await search.invoke_async(prompt)
juliomenendez marked this conversation as resolved.
Show resolved Hide resolved
print(result)

"""
Expand Down
2 changes: 1 addition & 1 deletion python/samples/kernel-syntax-examples/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ async def chat(context_vars: sk.ContextVariables) -> bool:
print("\n\nExiting chat...")
return False

answer = await kernel.run_async(chat_function, input_vars=context_vars)
answer = await kernel.run(chat_function, input_vars=context_vars)
context_vars["chat_history"] += f"\nUser:> {user_input}\nChatBot:> {answer}\n"

print(f"ChatBot:> {answer}")
Expand Down
2 changes: 1 addition & 1 deletion python/samples/kernel-syntax-examples/chat_gpt_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ async def chat() -> bool:
print("\n\nExiting chat...")
return False

answer = await kernel.run_async(chat_function, input_vars=context_vars)
answer = await kernel.run(chat_function, input_vars=context_vars)
print(f"Mosscap:> {answer}")
return True

Expand Down
4 changes: 2 additions & 2 deletions python/samples/kernel-syntax-examples/google_palm_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,11 @@ async def chat_request_example(api_key):
chat_messages = list()
user_mssg = "I'm planning a vacation. Which are some must-visit places in Europe?"
chat_messages.append(("user", user_mssg))
answer = await palm_chat_completion.complete_chat_async(chat_messages, settings)
answer = await palm_chat_completion.complete_chat(chat_messages, settings)
chat_messages.append(("assistant", str(answer)))
user_mssg = "Where should I go in France?"
chat_messages.append(("user", user_mssg))
answer = await palm_chat_completion.complete_chat_async(chat_messages, settings)
answer = await palm_chat_completion.complete_chat(chat_messages, settings)
chat_messages.append(("assistant", str(answer)))

context_vars = sk.ContextVariables()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,11 @@

async def populate_memory(kernel: sk.Kernel) -> None:
# Add some documents to the semantic memory
await kernel.memory.save_information_async("aboutMe", id="info1", text="My name is Andrea")
await kernel.memory.save_information_async("aboutMe", id="info2", text="I currently work as a tour guide")
await kernel.memory.save_information_async("aboutMe", id="info3", text="My favorite hobby is hiking")
await kernel.memory.save_information_async("aboutMe", id="info4", text="I visitied Iceland last year.")
await kernel.memory.save_information_async("aboutMe", id="info5", text="My family is from New York")
await kernel.memory.save_information("aboutMe", id="info1", text="My name is Andrea")
await kernel.memory.save_information("aboutMe", id="info2", text="I currently work as a tour guide")
await kernel.memory.save_information("aboutMe", id="info3", text="My favorite hobby is hiking")
await kernel.memory.save_information("aboutMe", id="info4", text="I visitied Iceland last year.")
await kernel.memory.save_information("aboutMe", id="info5", text="My family is from New York")


async def search_memory_examples(kernel: sk.Kernel) -> None:
Expand All @@ -36,7 +36,7 @@ async def search_memory_examples(kernel: sk.Kernel) -> None:

for question in questions:
print(f"Question: {question}")
result = await kernel.memory.search_async("aboutMe", question)
result = await kernel.memory.search("aboutMe", question)
print(f"Answer: {result}\n")


Expand Down Expand Up @@ -105,7 +105,7 @@ async def chat(kernel: sk.Kernel, chat_func: sk.KernelFunctionBase, context: sk.
print("\n\nExiting chat...")
return False

answer = await kernel.run_async(chat_func, input_vars=context.variables)
answer = await kernel.run(chat_func, input_vars=context.variables)
context["chat_history"] += f"\nUser:> {user_input}\nChatBot:> {answer}\n"

print(f"ChatBot:> {answer}")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ async def chat() -> bool:
print("\n\nExiting chat...")
return False

answer = await kernel.run_async(chat_function, input_vars=context_vars)
answer = await kernel.run(chat_function, input_vars=context_vars)
print(f"Blackbeard:> {answer}")
return True

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,13 @@
)


async def text_completion_example_complete_async(kernel, api_key, user_mssg, settings):
async def text_completion_example_complete(kernel, api_key, user_mssg, settings):
"""
Complete a text prompt using the Google PaLM model and print the results.
"""
palm_text_completion = sk_gp.GooglePalmTextCompletion("models/text-bison-001", api_key)
kernel.add_text_completion_service("models/text-bison-001", palm_text_completion)
answer = await palm_text_completion.complete_async(user_mssg, settings)
answer = await palm_text_completion.complete(user_mssg, settings)
return answer


Expand All @@ -31,13 +31,13 @@ async def main() -> None:
"boxes have 98 coins in total. How many coins are there in each box? "
"Think about it step by step, and show your work."
)
response = await text_completion_example_complete_async(kernel, apikey, user_mssg1, settings)
response = await text_completion_example_complete(kernel, apikey, user_mssg1, settings)
print(f"User:> {user_mssg1}\n\nChatBot:> {response}\n")
# Use temperature to influence the variance of the responses
settings.number_of_responses = 3
settings.temperature = 1
user_mssg2 = "I need a concise answer. A common method for traversing a binary tree is"
response = await text_completion_example_complete_async(kernel, apikey, user_mssg2, settings)
response = await text_completion_example_complete(kernel, apikey, user_mssg2, settings)
print(f"User:> {user_mssg2}\n\nChatBot:> {response}")
return

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,10 +38,10 @@ async def main():

# The search query
prompt = "Who is Leonardo DiCaprio's current girlfriend?"
search_async = web_plugin["searchAsync"]
search = web_plugin["searchAsync"]

# By default, only one search result is provided
result = await search_async.invoke_async(prompt)
result = await search.invoke_async(prompt)
juliomenendez marked this conversation as resolved.
Show resolved Hide resolved
print(result)

"""
Expand Down
16 changes: 7 additions & 9 deletions python/samples/kernel-syntax-examples/memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,11 @@

async def populate_memory(kernel: sk.Kernel) -> None:
# Add some documents to the semantic memory
await kernel.memory.save_information_async("aboutMe", id="info1", text="My name is Andrea")
await kernel.memory.save_information_async("aboutMe", id="info2", text="I currently work as a tour guide")
await kernel.memory.save_information_async("aboutMe", id="info3", text="I've been living in Seattle since 2005")
await kernel.memory.save_information_async(
"aboutMe", id="info4", text="I visited France and Italy five times since 2015"
)
await kernel.memory.save_information_async("aboutMe", id="info5", text="My family is from New York")
await kernel.memory.save_information("aboutMe", id="info1", text="My name is Andrea")
await kernel.memory.save_information("aboutMe", id="info2", text="I currently work as a tour guide")
await kernel.memory.save_information("aboutMe", id="info3", text="I've been living in Seattle since 2005")
await kernel.memory.save_information("aboutMe", id="info4", text="I visited France and Italy five times since 2015")
await kernel.memory.save_information("aboutMe", id="info5", text="My family is from New York")


async def search_memory_examples(kernel: sk.Kernel) -> None:
Expand All @@ -29,7 +27,7 @@ async def search_memory_examples(kernel: sk.Kernel) -> None:

for question in questions:
print(f"Question: {question}")
result = await kernel.memory.search_async("aboutMe", question)
result = await kernel.memory.search("aboutMe", question)
print(f"Answer: {result[0].text}\n")


Expand Down Expand Up @@ -85,7 +83,7 @@ async def chat(kernel: sk.Kernel, chat_func: sk.KernelFunctionBase, context: sk.
print("\n\nExiting chat...")
return False

answer = await kernel.run_async(chat_func, input_vars=context.variables)
answer = await kernel.run(chat_func, input_vars=context.variables)
context["chat_history"] += f"\nUser:> {user_input}\nChatBot:> {answer}\n"

print(f"ChatBot:> {answer}")
Expand Down
6 changes: 3 additions & 3 deletions python/samples/kernel-syntax-examples/openai_logit_bias.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,13 +82,13 @@ async def chat_request_example(kernel, api_key, org_id):
messages = [{"role": "user", "content": user_mssg}]

chat_messages.append(("user", user_mssg))
answer = await openai_chat_completion.complete_chat_async(messages=messages, settings=settings)
answer = await openai_chat_completion.complete_chat(messages=messages, settings=settings)
chat_messages.append(("assistant", str(answer[0])))

user_mssg = "What are his best all-time stats?"
messages = [{"role": "user", "content": user_mssg}]
chat_messages.append(("user", user_mssg))
answer = await openai_chat_completion.complete_chat_async(messages=messages, settings=settings)
answer = await openai_chat_completion.complete_chat(messages=messages, settings=settings)
chat_messages.append(("assistant", str(answer[0])))

context_vars = sk.ContextVariables()
Expand Down Expand Up @@ -154,7 +154,7 @@ async def text_complete_request_example(kernel, api_key, org_id):
settings = _config_ban_tokens(settings, keys)

user_mssg = "The best pie flavor to have in autumn is"
answer = await openai_text_completion.complete_async(user_mssg, settings)
answer = await openai_text_completion.complete(user_mssg, settings)

context_vars = sk.ContextVariables()
context_vars["chat_history"] = f"User:> {user_mssg}\nChatBot:> {answer}\n"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,5 +30,5 @@
plugins_directory = os.path.join(__file__, "../../../../samples/plugins")
plugin = kernel.import_semantic_plugin_from_directory(plugins_directory, "FunPlugin")

result = asyncio.run(kernel.run_async(plugin["Joke"], input_str="time travel to dinosaur age"))
result = asyncio.run(kernel.run(plugin["Joke"], input_str="time travel to dinosaur age"))
print(result)
22 changes: 10 additions & 12 deletions python/samples/kernel-syntax-examples/self-critique_rag.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,17 +20,15 @@

async def populate_memory(kernel: sk.Kernel) -> None:
# Add some documents to the ACS semantic memory
await kernel.memory.save_information_async(COLLECTION_NAME, id="info1", text="My name is Andrea")
await kernel.memory.save_information_async(COLLECTION_NAME, id="info2", text="I currently work as a tour guide")
await kernel.memory.save_information_async(
COLLECTION_NAME, id="info3", text="I've been living in Seattle since 2005"
)
await kernel.memory.save_information_async(
await kernel.memory.save_information(COLLECTION_NAME, id="info1", text="My name is Andrea")
await kernel.memory.save_information(COLLECTION_NAME, id="info2", text="I currently work as a tour guide")
await kernel.memory.save_information(COLLECTION_NAME, id="info3", text="I've been living in Seattle since 2005")
await kernel.memory.save_information(
COLLECTION_NAME,
id="info4",
text="I visited France and Italy five times since 2015",
)
await kernel.memory.save_information_async(COLLECTION_NAME, id="info5", text="My family is from New York")
await kernel.memory.save_information(COLLECTION_NAME, id="info5", text="My family is from New York")


async def main() -> None:
Expand Down Expand Up @@ -99,7 +97,7 @@ async def main() -> None:
chat_func = kernel.create_semantic_function(sk_prompt_rag, max_tokens=1000, temperature=0.5)
self_critique_func = kernel.create_semantic_function(sk_prompt_rag_sc, max_tokens=4, temperature=0.0)

answer = await kernel.run_async(
answer = await kernel.run(
chat_func,
input_vars=ContextVariables(
variables={
Expand All @@ -110,24 +108,24 @@ async def main() -> None:
),
)
print(f"Answer: {str(answer).strip()}")
check = await kernel.run_async(self_critique_func, input_context=answer)
check = await kernel.run(self_critique_func, input_context=answer)
print(f"The answer was {str(check).strip()}")

print("-" * 50)
print(" Let's pretend the answer was wrong...")
answer.variables.variables["input"] = "Yes, you live in New York City."
print(f"Answer: {str(answer).strip()}")
check = await kernel.run_async(self_critique_func, input_context=answer)
check = await kernel.run(self_critique_func, input_context=answer)
print(f"The answer was {str(check).strip()}")

print("-" * 50)
print(" Let's pretend the answer is not related...")
answer.variables.variables["input"] = "Yes, the earth is not flat."
print(f"Answer: {str(answer).strip()}")
check = await kernel.run_async(self_critique_func, input_context=answer)
check = await kernel.run(self_critique_func, input_context=answer)
print(f"The answer was {str(check).strip()}")

await connector.close_async()
await connector.close()


if __name__ == "__main__":
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ async def main():
ask = "What day of the week is today, all uppercase?"

# ask the sequential planner to identify a suitable function from the list of functions available.
plan = await planner.create_plan_async(goal=ask)
plan = await planner.create_plan(goal=ask)

# ask the sequential planner to execute the identified function.
result = await plan.invoke_async()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@

class ChatCompletionClientBase(ABC):
@abstractmethod
async def complete_chat_async(
async def complete_chat(
self,
messages: List["ChatMessage"],
settings: "AIRequestSettings",
Expand All @@ -31,7 +31,7 @@ async def complete_chat_async(
pass

@abstractmethod
async def complete_chat_stream_async(
async def complete_chat_stream(
self,
messages: List["ChatMessage"],
settings: "AIRequestSettings",
Expand Down
Loading
Loading