
Commit

Merge branch 'add-batch-timeout-override' of https://github.com/diondrapeck/promptflow into add-batch-timeout-override
diondrapeck committed Jun 26, 2024
2 parents d7ee8d7 + 201369a commit 9b08075
Showing 89 changed files with 3,281 additions and 932 deletions.
13 changes: 8 additions & 5 deletions .github/workflows/promptflow-evals-unit-test.yml
@@ -54,22 +54,25 @@ jobs:
         working-directory: ${{ env.WORKING_DIRECTORY }}
       - name: install promptflow packages in editable mode
         run: |
+          export TIME_LIMIT=5
+          export start_tm=`date +%s`
           poetry run pip install -e ../promptflow
           poetry run pip install -e ../promptflow-core
           poetry run pip install -e ../promptflow-devkit
           poetry run pip install -e ../promptflow-tracing
           poetry run pip install -e ../promptflow-tools
           poetry run pip install -e ../promptflow-azure
-        working-directory: ${{ env.WORKING_DIRECTORY }}
-      - name: install promptflow-evals from wheel
-        # wildcard expansion (*) does not work in Windows, so leverage python to find and install
-        run: poetry run pip install --pre $(python -c "import glob; print(glob.glob('promptflow_evals-*.whl')[0])")
+          poetry run pip install -e ../promptflow-evals
+          export install_time=$(((`date +%s` - ${start_tm})/60))
+          echo "The installation took ${install_time} minutes."
+          echo "The time limit for installation is ${TIME_LIMIT}"
+          test ${install_time} -le $TIME_LIMIT || echo "::warning file=pyproject.toml,line=40,col=0::The installation took ${install_time} minutes, the limit is ${TIME_LIMIT}."
         working-directory: ${{ env.WORKING_DIRECTORY }}
       - name: install recording
         run: poetry run pip install -e ../promptflow-recording
         working-directory: ${{ env.WORKING_DIRECTORY }}
       - name: run unit tests
-        run: poetry run pytest -m unittest --cov=promptflow --cov-config=pyproject.toml --cov-report=term --cov-report=html --cov-report=xml
+        run: poetry run pytest -m unittest --cov=promptflow --cov-config=pyproject.toml --cov-report=term --cov-report=html --cov-report=xml --cov-fail-under=63
         working-directory: ${{ env.WORKING_DIRECTORY }}
       - name: upload coverage report
         uses: actions/upload-artifact@v4
@@ -82,7 +82,8 @@ pf run create --flow "path.to.module:function_name" --data "./data.jsonl"
 ```python
 
 from path.to.module import my_flow
-pf.run(flow=my_flow, data="./data.json;")
+# Note: directly running a function in `pf.run` is only supported in the local PFClient for now
+pf.run(flow=my_flow, data="./data.jsonl")
 
 # user can also directly use entry in `flow` param for batch run
 pf.run(flow="path.to.module:function_name", data="./data.jsonl")
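The docs snippet above assumes a `pf` client is already in scope. A self-contained sketch of the local callable-entry batch run follows; the `my_flow` function, the data file, and the column mapping are illustrative assumptions rather than part of this change:

```python
# Minimal local batch run over a callable entry (local PFClient only).
from promptflow.client import PFClient

def my_flow(question: str) -> str:
    # stand-in for a real flow function
    return f"echo: {question}"

pf = PFClient()
run = pf.run(
    flow=my_flow,
    data="./data.jsonl",  # one JSON object per line, e.g. {"question": "..."}
    column_mapping={"question": "${data.question}"},
)
pf.stream(run)  # wait for completion, streaming logs
```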
1 change: 1 addition & 0 deletions examples/dev_requirements.txt
@@ -11,3 +11,4 @@ black-nb
 pypandoc # for markdown reader
 pypandoc_binary # pypandoc pandoc backend
 panflute # for pandoc filters
+numpy<2.0.0 # for compatibility
2 changes: 1 addition & 1 deletion examples/flex-flows/basic/flex-flow-quickstart-azure.ipynb
@@ -195,7 +195,7 @@
 "\n",
 "model_config = AzureOpenAIModelConfiguration(\n",
 "    connection=\"open_ai_connection\",\n",
-"    azure_deployment=\"gpt-35-turbo\",\n",
+"    azure_deployment=\"gpt-4o\",\n",
 ")"
 ]
 },
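Most of the example updates below make the same `gpt-35-turbo` → `gpt-4o` swap. If you are adapting these notebooks, a hedged sketch of keeping the deployment name configurable — the `AOAI_DEPLOYMENT` variable is this sketch's assumption, not something the commit adds:

```python
# Read the Azure OpenAI deployment name from the environment,
# falling back to the repo's new default.
import os

from promptflow.core import AzureOpenAIModelConfiguration

deployment = os.environ.get("AOAI_DEPLOYMENT", "gpt-4o")
model_config = AzureOpenAIModelConfiguration(
    connection="open_ai_connection",
    azure_deployment=deployment,
)
```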
2 changes: 1 addition & 1 deletion examples/flex-flows/basic/flex-flow-quickstart.ipynb
@@ -68,7 +68,7 @@
 "outputs": [],
 "source": [
 "# control the AOAI deployment (model) used in this example\n",
-"deployment_name = \"gpt-35-turbo\""
+"deployment_name = \"gpt-4o\""
 ]
 },
 {
2 changes: 1 addition & 1 deletion examples/flex-flows/basic/llm.py
@@ -59,6 +59,6 @@ def my_llm_tool(
 if __name__ == "__main__":
     result = my_llm_tool(
         prompt="Write a simple Hello, world! program that displays the greeting message.",
-        deployment_name="gpt-35-turbo",
+        deployment_name="gpt-4o",
     )
     print(result)
4 changes: 2 additions & 2 deletions examples/flex-flows/basic/programmer.py
@@ -25,7 +25,7 @@ def load_prompt(jinja2_template: str, text: str) -> str:
 
 @trace
 def write_simple_program(
-    text: str = "Hello World!", deployment_name="gpt-35-turbo"
+    text: str = "Hello World!", deployment_name="gpt-4o"
 ) -> Result:
     """Ask LLM to write a simple program."""
     prompt = load_prompt("hello.jinja2", text)
@@ -37,5 +37,5 @@ def write_simple_program(
     from promptflow.tracing import start_trace
 
     start_trace()
-    result = write_simple_program("Hello, world!", "gpt-35-turbo")
+    result = write_simple_program("Hello, world!", "gpt-4o")
     print(result)
@@ -141,7 +141,7 @@
 "\n",
 "# create a chatFlow obj with connection\n",
 "config = AzureOpenAIModelConfiguration(\n",
-"    connection=\"open_ai_connection\", azure_deployment=\"gpt-35-turbo\"\n",
+"    connection=\"open_ai_connection\", azure_deployment=\"gpt-4o\"\n",
 ")\n",
 "chat_flow = ChatFlow(config)\n",
 "\n",
2 changes: 1 addition & 1 deletion examples/flex-flows/chat-async-stream/chat.prompty
@@ -5,7 +5,7 @@ model:
   api: chat
   configuration:
     type: azure_openai
-    azure_deployment: gpt-35-turbo
+    azure_deployment: gpt-4o
   parameters:
     temperature: 0.2
     stream: true
2 changes: 1 addition & 1 deletion examples/flex-flows/chat-async-stream/flow.py
@@ -56,7 +56,7 @@ async def __call__(
 
     start_trace()
     config = AzureOpenAIModelConfiguration(
-        connection="open_ai_connection", azure_deployment="gpt-35-turbo"
+        connection="open_ai_connection", azure_deployment="gpt-4o"
     )
     flow = ChatFlow(model_config=config)
     result = flow("What's Azure Machine Learning?", [])
2 changes: 1 addition & 1 deletion examples/flex-flows/chat-async-stream/init.json
@@ -1,6 +1,6 @@
 {
     "model_config": {
         "connection": "open_ai_connection",
-        "azure_deployment": "gpt-35-turbo"
+        "azure_deployment": "gpt-4o"
     }
 }
@@ -130,7 +130,7 @@
 "\n",
 "# create the model config to be used in below flow calls\n",
 "config = AzureOpenAIModelConfiguration(\n",
-"    connection=\"open_ai_connection\", azure_deployment=\"gpt-35-turbo\"\n",
+"    connection=\"open_ai_connection\", azure_deployment=\"gpt-4o\"\n",
 ")"
 ]
 },
@@ -199,7 +199,7 @@
 "source": [
 "eval_flow = \"../eval-checklist/flow.flex.yaml\"\n",
 "config = AzureOpenAIModelConfiguration(\n",
-"    connection=\"open_ai_connection\", azure_deployment=\"gpt-35-turbo\"\n",
+"    connection=\"open_ai_connection\", azure_deployment=\"gpt-4o\"\n",
 ")\n",
 "eval_run = pf.run(\n",
 "    flow=eval_flow,\n",
@@ -109,7 +109,7 @@
 "\n",
 "# create the model config to be used in below flow calls\n",
 "config = AzureOpenAIModelConfiguration(\n",
-"    connection=\"open_ai_connection\", azure_deployment=\"gpt-35-turbo\"\n",
+"    connection=\"open_ai_connection\", azure_deployment=\"gpt-4o\"\n",
 ")"
 ]
 },
2 changes: 1 addition & 1 deletion examples/flex-flows/chat-basic/chat.prompty
@@ -4,7 +4,7 @@ model:
   api: chat
   configuration:
     type: azure_openai
-    azure_deployment: gpt-35-turbo
+    azure_deployment: gpt-4o
   parameters:
     temperature: 0.2
     max_tokens: 1024
2 changes: 1 addition & 1 deletion examples/flex-flows/chat-basic/flow.flex.yaml
@@ -6,7 +6,7 @@ sample:
 init:
   model_config:
     connection: open_ai_connection
-    azure_deployment: gpt-35-turbo
+    azure_deployment: gpt-4o
   max_total_token: 1024
 environment:
   # image: mcr.microsoft.com/azureml/promptflow/promptflow-python
2 changes: 1 addition & 1 deletion examples/flex-flows/chat-basic/flow.py
@@ -59,7 +59,7 @@ def __call__(
 
     start_trace()
     config = AzureOpenAIModelConfiguration(
-        connection="open_ai_connection", azure_deployment="gpt-35-turbo"
+        connection="open_ai_connection", azure_deployment="gpt-4o"
     )
     flow = ChatFlow(config)
     result = flow("What's Azure Machine Learning?", [])
2 changes: 1 addition & 1 deletion examples/flex-flows/chat-basic/init.json
@@ -1,7 +1,7 @@
 {
     "model_config": {
         "connection": "open_ai_connection",
-        "azure_deployment": "gpt-35-turbo"
+        "azure_deployment": "gpt-4o"
     },
     "max_total_token": 2048
 }
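The `init.json` files in these examples supply constructor arguments for the flex flow's entry class. A hedged sketch of consuming one locally — whether your installed devkit's `pf.test` accepts an `init` argument is an assumption worth checking, and the input is illustrative:

```python
# Sketch: feed init.json values to a flex flow's __init__ during a local test.
import json

from promptflow.client import PFClient

pf = PFClient()
with open("init.json") as f:
    init = json.load(f)  # {"model_config": {...}, "max_total_token": 2048}

# "." is the flow directory containing flow.flex.yaml
result = pf.test(flow=".", inputs={"question": "What is ChatGPT?"}, init=init)
print(result)
```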
2 changes: 1 addition & 1 deletion examples/flex-flows/chat-basic/run.yml
@@ -4,6 +4,6 @@ data: data.jsonl
 init:
   model_config:
     connection: open_ai_connection
-    azure_deployment: gpt-35-turbo
+    azure_deployment: gpt-4o
 column_mapping:
   question: ${data.question}
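The run.yml above can equally be submitted from Python; a sketch assuming the same flow directory and data file, and that `pf.run` accepts `init` for flex flows as the run.yml schema suggests:

```python
# Python equivalent of submitting run.yml for a batch run.
from promptflow.client import PFClient
from promptflow.core import AzureOpenAIModelConfiguration

pf = PFClient()
run = pf.run(
    flow=".",  # directory containing flow.flex.yaml
    data="data.jsonl",
    init={
        "model_config": AzureOpenAIModelConfiguration(
            connection="open_ai_connection", azure_deployment="gpt-4o"
        )
    },
    column_mapping={"question": "${data.question}"},
)
```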
2 changes: 1 addition & 1 deletion examples/flex-flows/chat-minimal/chat.prompty
@@ -4,7 +4,7 @@ model:
   api: chat
   configuration:
     type: azure_openai
-    azure_deployment: gpt-35-turbo
+    azure_deployment: gpt-4o
   parameters:
     temperature: 0.2
     max_tokens: 1024
@@ -141,7 +141,7 @@
 "\n",
 "# create a chatFlow obj with connection\n",
 "config = AzureOpenAIModelConfiguration(\n",
-"    connection=\"open_ai_connection\", azure_deployment=\"gpt-35-turbo\"\n",
+"    connection=\"open_ai_connection\", azure_deployment=\"gpt-4o\"\n",
 ")\n",
 "chat_flow = ChatFlow(config)\n",
 "\n",
2 changes: 1 addition & 1 deletion examples/flex-flows/chat-stream/chat.prompty
@@ -5,7 +5,7 @@ model:
   api: chat
   configuration:
     type: azure_openai
-    azure_deployment: gpt-35-turbo
+    azure_deployment: gpt-4o
   parameters:
     temperature: 0.2
     stream: true
2 changes: 1 addition & 1 deletion examples/flex-flows/chat-stream/flow.py
@@ -57,7 +57,7 @@ def __call__(
 
     start_trace()
     config = AzureOpenAIModelConfiguration(
-        connection="open_ai_connection", azure_deployment="gpt-35-turbo"
+        connection="open_ai_connection", azure_deployment="gpt-4o"
     )
     flow = ChatFlow(model_config=config)
     result = flow("What's Azure Machine Learning?", [])
2 changes: 1 addition & 1 deletion examples/flex-flows/chat-stream/init.json
@@ -1,6 +1,6 @@
 {
     "model_config": {
         "connection": "open_ai_connection",
-        "azure_deployment": "gpt-35-turbo"
+        "azure_deployment": "gpt-4o"
     }
 }
@@ -5,7 +5,7 @@ model:
   api: chat
   configuration:
     type: azure_openai
-    azure_deployment: gpt-35-turbo
+    azure_deployment: gpt-4o
   parameters:
     max_tokens: 128
     temperature: 0.2
2 changes: 1 addition & 1 deletion examples/flex-flows/eval-checklist/check_list.py
@@ -75,7 +75,7 @@ def __aggregate__(self, line_results: list) -> dict:
     }
 
     config = AzureOpenAIModelConfiguration(
-        connection="open_ai_connection", azure_deployment="gpt-35-turbo"
+        connection="open_ai_connection", azure_deployment="gpt-4o"
     )
     flow = EvalFlow(config)
 
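The `__aggregate__` hook seen in the hunk above is how flex-flow evaluators reduce per-line results into run-level metrics. A minimal, hedged sketch of the shape — the scoring logic is invented, whereas the real example asks the model to grade answers against a checklist:

```python
# Skeleton of a flex-flow evaluator: __call__ scores one line,
# __aggregate__ reduces all line results into run metrics.
class EvalFlow:
    def __init__(self, model_config):
        self.model_config = model_config  # e.g. AzureOpenAIModelConfiguration

    def __call__(self, answer: str) -> dict:
        # placeholder per-line score; the real flow prompts an LLM here
        return {"score": 1.0 if answer.strip() else 0.0}

    def __aggregate__(self, line_results: list) -> dict:
        scores = [r["score"] for r in line_results]
        return {"average_score": sum(scores) / len(scores) if scores else 0.0}
```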
2 changes: 1 addition & 1 deletion examples/flex-flows/eval-checklist/eval.prompty
@@ -6,7 +6,7 @@ model:
   api: chat
   configuration:
     type: azure_openai
-    azure_deployment: gpt-35-turbo
+    azure_deployment: gpt-4o
   parameters:
     max_tokens: 256
     temperature: 0.7
2 changes: 1 addition & 1 deletion examples/flex-flows/eval-checklist/init.json
@@ -1,6 +1,6 @@
 {
     "model_config": {
         "connection": "open_ai_connection",
-        "azure_deployment": "gpt-35-turbo-0125"
+        "azure_deployment": "gpt-4o"
     }
 }
2 changes: 1 addition & 1 deletion examples/flex-flows/eval-code-quality/code_quality.py
@@ -60,7 +60,7 @@ def __aggregate__(self, line_results: list) -> dict:
     start_trace()
     model_config = AzureOpenAIModelConfiguration(
         connection="open_ai_connection",
-        azure_deployment="gpt-35-turbo",
+        azure_deployment="gpt-4o",
     )
     evaluator = CodeEvaluator(model_config)
     result = evaluator('print("Hello, world!")')
@@ -5,7 +5,7 @@ model:
   api: chat
   configuration:
     type: azure_openai
-    azure_deployment: gpt-35-turbo
+    azure_deployment: gpt-4o
   parameters:
     temperature: 0.2
 inputs:
2 changes: 1 addition & 1 deletion examples/flex-flows/eval-code-quality/init.json
@@ -1,6 +1,6 @@
 {
     "model_config": {
         "connection": "open_ai_connection",
-        "azure_deployment": "gpt-35-turbo"
+        "azure_deployment": "gpt-4o"
     }
 }
@@ -27,7 +27,7 @@ def __init__(self, custom_connection: CustomConnection):
             )
         elif "openai_api_key" in self.custom_connection.secrets:
             self.llm = AzureChatOpenAI(
-                deployment_name="gpt-35-turbo",
+                deployment_name="gpt-4o",
                 openai_api_key=self.custom_connection.secrets["openai_api_key"],
                 azure_endpoint=self.custom_connection.configs["azure_endpoint"],
                 openai_api_type="azure",
@@ -22,7 +22,7 @@ nodes:
     path: hello.py
   inputs:
     connection: basic_custom_connection
-    deployment_name: text-davinci-003
+    deployment_name: gpt-35-turbo-instruct
     max_tokens: "120"
     prompt: ${hello_prompt.output}
 environment:
2 changes: 1 addition & 1 deletion examples/flows/standard/basic/flow.dag.yaml
@@ -24,5 +24,5 @@ nodes:
     path: hello.py
   inputs:
     prompt: ${hello_prompt.output}
-    deployment_name: text-davinci-003
+    deployment_name: gpt-35-turbo-instruct
     max_tokens: "120"
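These two DAG flows use the legacy completions API, so the retired `text-davinci-003` is swapped for `gpt-35-turbo-instruct` rather than a chat model. A hedged sketch of the equivalent raw call, assuming the standard Azure OpenAI environment variables are set and the deployment name matches:

```python
# Direct completions-API call with the replacement deployment.
from openai import AzureOpenAI

client = AzureOpenAI()  # reads AZURE_OPENAI_ENDPOINT / AZURE_OPENAI_API_KEY / OPENAI_API_VERSION
response = client.completions.create(
    model="gpt-35-turbo-instruct",  # deployment name, per the updated flows
    prompt="Write a simple Hello, world! program that displays the greeting message.",
    max_tokens=120,
)
print(response.choices[0].text)
```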
2 changes: 1 addition & 1 deletion examples/prompty/basic/basic.prompty
@@ -5,7 +5,7 @@ model:
   api: chat
   configuration:
     type: azure_openai
-    azure_deployment: gpt-35-turbo
+    azure_deployment: gpt-4o
   parameters:
     max_tokens: 128
     temperature: 0.2
4 changes: 2 additions & 2 deletions examples/prompty/basic/prompty-quickstart.ipynb
@@ -112,7 +112,7 @@
 "configuration = AzureOpenAIModelConfiguration(\n",
 "    # azure_endpoint=\"${env:AZURE_OPENAI_ENDPOINT}\", # Use ${env:<ENV_NAME>} to surround the environment variable name.\n",
 "    # api_key=\"${env:AZURE_OPENAI_API_KEY}\",\n",
-"    azure_deployment=\"gpt-35-turbo-0125\",\n",
+"    azure_deployment=\"gpt-4o\",\n",
 ")\n",
 "\n",
 "# override configuration with OpenAIModelConfiguration\n",
@@ -177,7 +177,7 @@
 "source": [
 "### Eval the result \n",
 "\n",
-"Note: the eval flow returns a `json_object`. You need a new model version like [gpt-35-turbo (0125)](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-35-models) to use the `json_object` response_format feature."
+"Note: the eval flow returns a `json_object`."
 ]
 },
 {
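The dropped caveat reflects that newer deployments such as `gpt-4o` support the `json_object` response format natively. A hedged sketch of the underlying feature, independent of promptflow; it assumes the standard Azure OpenAI environment variables and an illustrative prompt:

```python
# Ask a chat deployment for a guaranteed-JSON reply and parse it.
import json

from openai import AzureOpenAI

client = AzureOpenAI()  # reads AZURE_OPENAI_* / OPENAI_API_VERSION env vars
response = client.chat.completions.create(
    model="gpt-4o",  # deployment name
    response_format={"type": "json_object"},
    messages=[
        {"role": "system", "content": 'Answer with a JSON object like {"score": <int 1-5>}.'},
        {"role": "user", "content": "Rate the apology: 'Sorry about that.'"},
    ],
)
print(json.loads(response.choices[0].message.content))
```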
6 changes: 3 additions & 3 deletions examples/prompty/chat-basic/chat-with-prompty.ipynb
@@ -146,7 +146,7 @@
 "\n",
 "# override configuration with created connection in AzureOpenAIModelConfiguration\n",
 "configuration = AzureOpenAIModelConfiguration(\n",
-"    connection=\"open_ai_connection\", azure_deployment=\"gpt-35-turbo-0125\"\n",
+"    connection=\"open_ai_connection\", azure_deployment=\"gpt-4o\"\n",
 ")\n",
 "\n",
 "# override openai connection with OpenAIModelConfiguration\n",
@@ -229,7 +229,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"Note: the eval flow returns a `json_object`. You need a new model version like [gpt-35-turbo (0125)](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-35-models) to use the `json_object` response_format feature."
+"Note: the eval flow returns a `json_object`."
 ]
 },
 {
@@ -369,7 +369,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.17"
 },
 "resources": "examples/requirements.txt, examples/prompty/chat-basic, examples/prompty/eval-apology"
 },
2 changes: 1 addition & 1 deletion examples/prompty/chat-basic/chat.prompty
@@ -6,7 +6,7 @@ model:
   configuration:
     type: azure_openai
     connection: open_ai_connection
-    azure_deployment: gpt-35-turbo
+    azure_deployment: gpt-4o
   parameters:
     max_tokens: 256
     temperature: 0.2
(Diff truncated; the remaining changed files of the 89 are not rendered here.)