Merge branch 'main' into users/ninhu/perf_improve

GitHub Actions / Executor E2E Test Result (branch: users/ninhu/perf_improve, https://github.com/microsoft/promptflow/actions/workflows/promptflow-executor-e2e-test.yml?query=branch:users/ninhu/perf_improve++) failed Jun 17, 2024 in 0s

2 failed, 6 skipped, 238 passed in 5m 4s

246 tests  ±0    238 ✅ passed  ±0    5m 4s ⏱️ ±0s
  1 suite  ±0      6 💤 skipped ±0
  1 file   ±0      2 ❌ failed  ±0

Results for commit 5ddefa3. ± Comparison against earlier commit 6b4e44e.

Annotations

Check warning on line 0 in tests.executor.e2etests.test_traces.TestExecutorTraces

test_executor_openai_api_flow[openai_completion_api_flow-inputs2] (tests.executor.e2etests.test_traces.TestExecutorTraces) failed

artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
promptflow._core._errors.ToolExecutionError: Execution failure in 'completion': (APIStatusError) Error code: 410 - {'error': {'code': 'ModelDeprecated', 'message': 'The model associated with the deployment is deprecated and no longer available for use. Please refer to the Azure OpenAI service documentation for more information.'}}
self = <promptflow._core.flow_execution_context.FlowExecutionContext object at 0x7f15fd2f7610>
node = Node(name='completion', tool=None, inputs={'prompt': FlowInputAssignment(value='prompt', value_type=<InputValueType.FL...alse, source=ToolSource(type='code', tool=None, path='completion.py'), type=<ToolType.PYTHON: 'python'>, activate=None)
f = <function completion at 0x7f15fd4089d0>
kwargs = {'connection': <promptflow.core._connection.AzureOpenAIConnection object at 0x7f15fd2f71c0>, 'prompt': 'What is the capital of the United States of America?', 'stream': False}

    def _invoke_tool_inner(self, node: Node, f: Callable, kwargs):
        module = f.func.__module__ if isinstance(f, functools.partial) else f.__module__
        node_name = node.name
        try:
            if (
                interval_seconds := try_get_long_running_logging_interval(flow_logger, DEFAULT_LOGGING_INTERVAL)
            ) is None:
>               return f(**kwargs)

/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/_core/flow_execution_context.py:182: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/tracing/_trace.py:556: in wrapped
    output = func(*args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/openai_completion_api_flow/completion.py:41: in completion
    completion = get_client(connection).completions.create(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/tracing/_integrations/_openai_injector.py:88: in wrapper
    return f(*args, **kwargs)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/tracing/_trace.py:556: in wrapped
    output = func(*args, **kwargs)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/openai/_utils/_utils.py:277: in wrapper
    return func(*args, **kwargs)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/openai/resources/completions.py:528: in create
    return self._post(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/openai/_base_client.py:1240: in post
    return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/openai/_base_client.py:921: in request
    return self._request(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <openai.lib.azure.AzureOpenAI object at 0x7f15fd3e0820>

    def _request(
        self,
        *,
        cast_to: Type[ResponseT],
        options: FinalRequestOptions,
        remaining_retries: int | None,
        stream: bool,
        stream_cls: type[_StreamT] | None,
    ) -> ResponseT | _StreamT:
        cast_to = self._maybe_override_cast_to(cast_to, options)
        self._prepare_options(options)
    
        retries = self._remaining_retries(remaining_retries, options)
        request = self._build_request(options)
        self._prepare_request(request)
    
        kwargs: HttpxSendArgs = {}
        if self.custom_auth is not None:
            kwargs["auth"] = self.custom_auth
    
        log.debug("Sending HTTP Request: %s %s", request.method, request.url)
    
        try:
            response = self._client.send(
                request,
                stream=stream or self._should_stream_response_body(request=request),
                **kwargs,
            )
        except httpx.TimeoutException as err:
            log.debug("Encountered httpx.TimeoutException", exc_info=True)
    
            if retries > 0:
                return self._retry_request(
                    options,
                    cast_to,
                    retries,
                    stream=stream,
                    stream_cls=stream_cls,
                    response_headers=None,
                )
    
            log.debug("Raising timeout error")
            raise APITimeoutError(request=request) from err
        except Exception as err:
            log.debug("Encountered Exception", exc_info=True)
    
            if retries > 0:
                return self._retry_request(
                    options,
                    cast_to,
                    retries,
                    stream=stream,
                    stream_cls=stream_cls,
                    response_headers=None,
                )
    
            log.debug("Raising connection error")
            raise APIConnectionError(request=request) from err
    
        log.debug(
            'HTTP Response: %s %s "%i %s" %s',
            request.method,
            request.url,
            response.status_code,
            response.reason_phrase,
            response.headers,
        )
        log.debug("request_id: %s", response.headers.get("x-request-id"))
    
        try:
            response.raise_for_status()
        except httpx.HTTPStatusError as err:  # thrown on 4xx and 5xx status code
            log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
    
            if retries > 0 and self._should_retry(err.response):
                err.response.close()
                return self._retry_request(
                    options,
                    cast_to,
                    retries,
                    err.response.headers,
                    stream=stream,
                    stream_cls=stream_cls,
                )
    
            # If the response is streamed then we need to explicitly read the response
            # to completion before attempting to access the response text.
            if not err.response.is_closed:
                err.response.read()
    
            log.debug("Re-raising status error")
>           raise self._make_status_error_from_response(err.response) from None
E           openai.APIStatusError: Error code: 410 - {'error': {'code': 'ModelDeprecated', 'message': 'The model associated with the deployment is deprecated and no longer available for use. Please refer to the Azure OpenAI service documentation for more information.'}}

/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/openai/_base_client.py:1020: APIStatusError

The above exception was the direct cause of the following exception:

self = <executor.e2etests.test_traces.TestExecutorTraces object at 0x7f15fd4e1d30>
flow_folder = 'openai_completion_api_flow'
inputs = {'prompt': 'What is the capital of the United States of America?', 'stream': False}
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'name': 'aoai_assistant_connection', 'type': 'Azure...ai.azure.com/', 'api_key': 'c2881c848bf048e9b3198a2a64464ef3', 'api_type': 'azure', 'api_version': '2024-02-01'}}, ...}

    @pytest.mark.parametrize(
        "flow_folder, inputs",
        [
            ("openai_chat_api_flow", get_chat_input(False)),
            ("openai_chat_api_flow", get_chat_input(True)),
            ("openai_completion_api_flow", get_completion_input(False)),
            ("openai_completion_api_flow", get_completion_input(True)),
            ("llm_tool", {"topic": "Hello", "stream": False}),
            ("llm_tool", {"topic": "Hello", "stream": True}),
        ],
    )
    def test_executor_openai_api_flow(self, flow_folder, inputs, dev_connections):
        executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
>       flow_result = executor.exec_line(inputs)

/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:203: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/flow_executor.py:739: in exec_line
    line_result = self._exec(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/flow_executor.py:1034: in _exec
    output, aggregation_inputs = self._exec_inner_with_trace(
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/flow_executor.py:937: in _exec_inner_with_trace
    output, nodes_outputs = self._traverse_nodes(inputs, context)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/flow_executor.py:1215: in _traverse_nodes
    nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context, inputs, batch_nodes)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/flow_executor.py:1270: in _submit_to_scheduler
    return scheduler.execute(self._line_timeout_sec)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/_flow_nodes_scheduler.py:131: in execute
    raise e
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/_flow_nodes_scheduler.py:113: in execute
    self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/_flow_nodes_scheduler.py:160: in _collect_outputs
    each_node_result = each_future.result()
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/concurrent/futures/_base.py:439: in result
    return self.__get_result()
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/concurrent/futures/_base.py:391: in __get_result
    raise self._exception
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/concurrent/futures/thread.py:58: in run
    result = self.fn(*self.args, **self.kwargs)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/executor/_flow_nodes_scheduler.py:181: in _exec_single_node_in_thread
    result = context.invoke_tool(node, f, kwargs=kwargs)
/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/_core/flow_execution_context.py:90: in invoke_tool
    result = self._invoke_tool_inner(node, f, kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <promptflow._core.flow_execution_context.FlowExecutionContext object at 0x7f15fd2f7610>
node = Node(name='completion', tool=None, inputs={'prompt': FlowInputAssignment(value='prompt', value_type=<InputValueType.FL...alse, source=ToolSource(type='code', tool=None, path='completion.py'), type=<ToolType.PYTHON: 'python'>, activate=None)
f = <function completion at 0x7f15fd4089d0>
kwargs = {'connection': <promptflow.core._connection.AzureOpenAIConnection object at 0x7f15fd2f71c0>, 'prompt': 'What is the capital of the United States of America?', 'stream': False}

    def _invoke_tool_inner(self, node: Node, f: Callable, kwargs):
        module = f.func.__module__ if isinstance(f, functools.partial) else f.__module__
        node_name = node.name
        try:
            if (
                interval_seconds := try_get_long_running_logging_interval(flow_logger, DEFAULT_LOGGING_INTERVAL)
            ) is None:
                return f(**kwargs)
            logging_name = node_name
            if self._line_number is not None:
                logging_name = f"{node_name} in line {self._line_number}"
            start_time = time.perf_counter()
            thread_id = threading.current_thread().ident
            with RepeatLogTimer(
                interval_seconds=interval_seconds,
                logger=logger,
                level=WARNING,
                log_message_function=generate_elapsed_time_messages,
                args=(logging_name, start_time, interval_seconds, thread_id),
            ):
                return f(**kwargs)
        except PromptflowException as e:
            # All the exceptions from built-in tools are PromptflowException.
            # For these cases, raise the exception directly.
            if module is not None:
                e.module = module
            raise e
        except Exception as e:
            # Otherwise, we assume the error comes from user's tool.
            # For these cases, raise ToolExecutionError, which is classified as UserError
            # and shows stack trace in the error message to make it easy for user to troubleshoot.
>           raise ToolExecutionError(node_name=node_name, module=module) from e
E           promptflow._core._errors.ToolExecutionError: Execution failure in 'completion': (APIStatusError) Error code: 410 - {'error': {'code': 'ModelDeprecated', 'message': 'The model associated with the deployment is deprecated and no longer available for use. Please refer to the Azure OpenAI service documentation for more information.'}}

/opt/hostedtoolcache/Python/3.9.19/x64/lib/python3.9/site-packages/promptflow/_core/flow_execution_context.py:206: ToolExecutionError
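Both failures share one root cause: the Azure OpenAI deployment used by the openai_completion_api_flow test flow is backed by a retired model, so the service answers 410 ModelDeprecated before any flow logic runs. Note the client does not retry here: per the _should_retry logic in the same openai/_base_client.py module, only 408, 409, 429, and 5xx responses (or an explicit x-should-retry header) are retried, so the 410 surfaces immediately. Below is a minimal sketch, assuming the openai v1 Python SDK, of probing a deployment once up front and failing with an actionable message; the endpoint, key, and deployment name are placeholders, and check_deployment is a hypothetical helper, not part of promptflow:

from openai import AzureOpenAI, APIStatusError

client = AzureOpenAI(
    azure_endpoint="https://<resource>.openai.azure.com/",  # placeholder
    api_key="<api-key>",                                    # placeholder
    api_version="2024-02-01",
)

def check_deployment(deployment: str) -> None:
    # Probe the deployment with a one-token request; fail fast if the
    # underlying model was retired (HTTP 410, error code 'ModelDeprecated').
    try:
        client.completions.create(model=deployment, prompt="ping", max_tokens=1)
    except APIStatusError as e:
        # Depending on the SDK version, e.body may be the full payload or just
        # the inner 'error' object; handle both shapes.
        body = e.body if isinstance(e.body, dict) else {}
        code = body.get("code") or (body.get("error") or {}).get("code")
        if e.status_code == 410 and code == "ModelDeprecated":
            raise RuntimeError(
                f"Deployment '{deployment}' is backed by a retired model; "
                "repoint it at a supported model before running the e2e tests."
            ) from e
        raise

Running such a probe per connection at session setup would turn these two failures into a single, clearly labeled configuration error.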

Check warning on line 0 in tests.executor.e2etests.test_traces.TestExecutorTraces

test_executor_openai_api_flow[openai_completion_api_flow-inputs3] (tests.executor.e2etests.test_traces.TestExecutorTraces) failed

artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
promptflow._core._errors.ToolExecutionError: Execution failure in 'completion': (APIStatusError) Error code: 410 - {'error': {'code': 'ModelDeprecated', 'message': 'The model associated with the deployment is deprecated and no longer available for use. Please refer to the Azure OpenAI service documentation for more information.'}}
The full traceback is identical to the first failure above except for the parametrization and object addresses: this case (inputs3) invokes the same flow with kwargs {'connection': <AzureOpenAIConnection>, 'prompt': 'What is the capital of the United States of America?', 'stream': True}. As before, openai/_base_client.py:1020 raises openai.APIStatusError: Error code: 410 (ModelDeprecated), and promptflow/_core/flow_execution_context.py:206 re-raises it from _invoke_tool_inner as ToolExecutionError.
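For reference, the failing tool at completion.py:41 most likely follows the standard promptflow tool pattern sketched below. This is a hedged reconstruction inferred from the traceback frames and kwargs; the @tool decorator, the get_client body, and the deployment name are assumptions, and only the completions.create(...) call is actually visible in the log. Whatever deployment name the real file passes as model= is what must move to a supported model:

from openai import AzureOpenAI
from promptflow.core import tool
from promptflow.connections import AzureOpenAIConnection

def get_client(connection: AzureOpenAIConnection) -> AzureOpenAI:
    # Build an openai v1 client from the promptflow connection (assumed shape).
    return AzureOpenAI(
        azure_endpoint=connection.api_base,
        api_key=connection.api_key,
        api_version=connection.api_version,
    )

@tool
def completion(connection: AzureOpenAIConnection, prompt: str, stream: bool = False) -> str:
    # The create() call below is the frame shown at completion.py:41 in both
    # tracebacks; it is where the 410 ModelDeprecated response is raised.
    response = get_client(connection).completions.create(
        model="text-davinci-003",  # assumption: a retired completions model, hence the 410
        prompt=prompt,
        stream=stream,
    )
    if stream:
        # inputs3 runs with stream=True: the SDK returns an iterator of chunks.
        return "".join(c.choices[0].text or "" for c in response if c.choices)
    return response.choices[0].text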

Check notice on line 0 in .github

6 skipped tests found

There are 6 skipped tests, see "Raw output" for the full list of skipped tests.
Raw output
tests.executor.e2etests.test_assistant.TestAssistant ‑ test_assistant_package_tool_with_conn[assistant-with-package-tool]
tests.executor.e2etests.test_assistant.TestAssistant ‑ test_assistant_tool_with_connection[assistant-tool-with-connection-line_input0]
tests.executor.e2etests.test_assistant.TestAssistant ‑ test_assistant_with_image[food-calorie-assistant-line_input0]
tests.executor.e2etests.test_execution_server.TestExecutionServer ‑ test_execution_flow_with_nan_inf
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_line[package_tools]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_node[package_tools-search_by_text-flow_inputs5-None]

Check notice on line 0 in .github

246 tests found

There are 246 tests, see "Raw output" for the full list of tests.
Raw output
tests.executor.e2etests.test_activate.TestExecutorActivate ‑ test_aggregate_bypassed_nodes
tests.executor.e2etests.test_activate.TestExecutorActivate ‑ test_all_nodes_bypassed
tests.executor.e2etests.test_activate.TestExecutorActivate ‑ test_batch_run_activate
tests.executor.e2etests.test_activate.TestExecutorActivate ‑ test_flow_run_activate[activate_condition_always_met]
tests.executor.e2etests.test_activate.TestExecutorActivate ‑ test_flow_run_activate[activate_with_no_inputs]
tests.executor.e2etests.test_activate.TestExecutorActivate ‑ test_flow_run_activate[all_depedencies_bypassed_with_activate_met]
tests.executor.e2etests.test_activate.TestExecutorActivate ‑ test_flow_run_activate[conditional_flow_with_activate]
tests.executor.e2etests.test_activate.TestExecutorActivate ‑ test_invalid_activate_config
tests.executor.e2etests.test_assistant.TestAssistant ‑ test_assistant_package_tool_with_conn[assistant-with-package-tool]
tests.executor.e2etests.test_assistant.TestAssistant ‑ test_assistant_tool_with_connection[assistant-tool-with-connection-line_input0]
tests.executor.e2etests.test_assistant.TestAssistant ‑ test_assistant_with_image[food-calorie-assistant-line_input0]
tests.executor.e2etests.test_async.TestAsync ‑ test_exec_line_async[async_tools-expected_result0]
tests.executor.e2etests.test_async.TestAsync ‑ test_exec_line_async[async_tools_with_sync_tools-expected_result1]
tests.executor.e2etests.test_async.TestAsync ‑ test_executor_node_concurrency[async_tools-concurrency_levels0-expected_concurrency0]
tests.executor.e2etests.test_async.TestAsync ‑ test_executor_node_concurrency[async_tools_with_sync_tools-concurrency_levels1-expected_concurrency1]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_resume[web_classification-web_classification_default_20240207_165606_643000]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_resume_aggregation[classification_accuracy_evaluation-classification_accuracy_evaluation_default_20240208_152402_694000]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_resume_aggregation_with_image[eval_flow_with_image_resume-eval_flow_with_image_resume_default_20240305_111258_103000]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_run[prompt_tools-inputs_mapping1]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_run[sample_flow_with_functions-inputs_mapping3]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_run[script_with___file__-inputs_mapping2]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_run[web_classification_no_variants-inputs_mapping0]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_run_failure[connection_as_input-input_mapping0-InputNotFound-The input for flow cannot be empty in batch mode. Please review your flow and provide valid inputs.]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_run_failure[script_with___file__-input_mapping1-EmptyInputsData-Couldn't find any inputs data at the given input paths. Please review the provided path and consider resubmitting.]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_run_in_existing_loop
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_run_line_result[simple_aggregation-batch_input0-str]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_run_line_result[simple_aggregation-batch_input1-str]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_run_line_result[simple_aggregation-batch_input2-str]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_run_then_eval
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_run_with_aggregation_failure
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_storage
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_with_default_input
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_with_line_number
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_with_metrics
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_with_openai_metrics
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_batch_with_partial_failure
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_chat_group_batch_run[chat_group/cloud_batch_runs/chat_group_simulation-chat_group/cloud_batch_runs/chat_group_copilot-5-inputs.json]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_chat_group_batch_run[chat_group/cloud_batch_runs/chat_group_simulation-chat_group/cloud_batch_runs/chat_group_copilot-5-inputs_using_default_value.json]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_chat_group_batch_run_early_stop[chat_group/cloud_batch_runs/chat_group_copilot-chat_group/cloud_batch_runs/chat_group_simulation_error-5-inputs.json]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_chat_group_batch_run_early_stop[chat_group/cloud_batch_runs/chat_group_simulation_error-chat_group/cloud_batch_runs/chat_group_copilot-5-inputs.json]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_chat_group_batch_run_multi_inputs[chat_group/cloud_batch_runs/chat_group_simulation-chat_group/cloud_batch_runs/chat_group_copilot-5-simulation_input.json-copilot_input.json]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_chat_group_batch_run_stop_signal[chat_group/cloud_batch_runs/chat_group_simulation_stop_signal-chat_group/cloud_batch_runs/chat_group_copilot-5-inputs.json]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_forkserver_mode_batch_run[prompt_tools-inputs_mapping1]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_forkserver_mode_batch_run[sample_flow_with_functions-inputs_mapping3]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_forkserver_mode_batch_run[script_with___file__-inputs_mapping2]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_forkserver_mode_batch_run[web_classification_no_variants-inputs_mapping0]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_spawn_mode_batch_run[prompt_tools-inputs_mapping1]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_spawn_mode_batch_run[sample_flow_with_functions-inputs_mapping3]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_spawn_mode_batch_run[script_with___file__-inputs_mapping2]
tests.executor.e2etests.test_batch_engine.TestBatch ‑ test_spawn_mode_batch_run[web_classification_no_variants-inputs_mapping0]
tests.executor.e2etests.test_batch_server.TestBatchServer ‑ test_batch_run_with_basic_flow
tests.executor.e2etests.test_batch_server.TestBatchServer ‑ test_batch_run_with_image_flow
tests.executor.e2etests.test_batch_timeout.TestBatchTimeout ‑ test_batch_timeout[one_line_of_bulktest_timeout-3-600-Line 2 execution timeout for exceeding 3 seconds-Status.Completed]
tests.executor.e2etests.test_batch_timeout.TestBatchTimeout ‑ test_batch_timeout[one_line_of_bulktest_timeout-600-5-Line 2 execution timeout for exceeding-Status.Failed]
tests.executor.e2etests.test_batch_timeout.TestBatchTimeout ‑ test_batch_with_line_timeout[one_line_of_bulktest_timeout]
tests.executor.e2etests.test_batch_timeout.TestBatchTimeout ‑ test_batch_with_one_line_timeout[one_line_of_bulktest_timeout]
tests.executor.e2etests.test_concurent_execution.TestConcurrentExecution ‑ test_concurrent_run
tests.executor.e2etests.test_concurent_execution.TestConcurrentExecution ‑ test_concurrent_run_with_exception
tests.executor.e2etests.test_concurent_execution.TestConcurrentExecution ‑ test_linear_run
tests.executor.e2etests.test_csharp_executor_proxy.TestCSharpExecutorProxy ‑ test_batch
tests.executor.e2etests.test_csharp_executor_proxy.TestCSharpExecutorProxy ‑ test_batch_cancel
tests.executor.e2etests.test_csharp_executor_proxy.TestCSharpExecutorProxy ‑ test_batch_execution_error
tests.executor.e2etests.test_csharp_executor_proxy.TestCSharpExecutorProxy ‑ test_batch_validation_error
tests.executor.e2etests.test_eager_flow.TestEagerFlow ‑ test_batch_run[basic_callable_class-inputs_mapping2-<lambda>-init_kwargs2]
tests.executor.e2etests.test_eager_flow.TestEagerFlow ‑ test_batch_run[callable_class_with_primitive-inputs_mapping3-<lambda>-init_kwargs3]
tests.executor.e2etests.test_eager_flow.TestEagerFlow ‑ test_batch_run[dummy_flow_with_trace-inputs_mapping0-<lambda>-None]
tests.executor.e2etests.test_eager_flow.TestEagerFlow ‑ test_batch_run[flow_with_dataclass_output-inputs_mapping1-<lambda>-None]
tests.executor.e2etests.test_eager_flow.TestEagerFlow ‑ test_batch_run_with_callable_entry
tests.executor.e2etests.test_eager_flow.TestEagerFlow ‑ test_batch_run_with_init_multiple_workers[1-<lambda>]
tests.executor.e2etests.test_eager_flow.TestEagerFlow ‑ test_batch_run_with_init_multiple_workers[2-<lambda>]
tests.executor.e2etests.test_eager_flow.TestEagerFlow ‑ test_batch_run_with_invalid_case
tests.executor.e2etests.test_eager_flow.TestEagerFlow ‑ test_batch_run_with_openai
tests.executor.e2etests.test_execution_server.TestExecutionServer ‑ test_execution_flow_with_nan_inf
tests.executor.e2etests.test_executor_execution_failures.TestExecutorFailures ‑ test_executor_exec_line_fail[async_tools_failures-async_fail-In tool raise_an_exception_async: dummy_input]
tests.executor.e2etests.test_executor_execution_failures.TestExecutorFailures ‑ test_executor_exec_line_fail[sync_tools_failures-sync_fail-In tool raise_an_exception: dummy_input]
tests.executor.e2etests.test_executor_execution_failures.TestExecutorFailures ‑ test_executor_exec_line_fail_with_exception[async_tools_failures-async_fail-In tool raise_an_exception_async: dummy_input]
tests.executor.e2etests.test_executor_execution_failures.TestExecutorFailures ‑ test_executor_exec_line_fail_with_exception[sync_tools_failures-sync_fail-In tool raise_an_exception: dummy_input]
tests.executor.e2etests.test_executor_execution_failures.TestExecutorFailures ‑ test_executor_exec_node_fail[async_tools_failures-async_fail-In tool raise_an_exception_async: dummy_input]
tests.executor.e2etests.test_executor_execution_failures.TestExecutorFailures ‑ test_executor_exec_node_fail[sync_tools_failures-sync_fail-In tool raise_an_exception: dummy_input]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_chat_flow_stream_mode
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_convert_flow_input_types[simple_flow_with_python_tool]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_execute_flow[output-intermediate-True-2]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_execute_flow[output_1-intermediate_1-False-1]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_creation_with_default_input
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_creation_with_default_variants[web_classification]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_line[async_tools]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_line[async_tools_with_sync_tools]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_line[connection_as_input]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_line[package_tools]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_line[prompt_tools]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_line[script_with___file__]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_line[script_with_import]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_line[tool_with_assistant_definition]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_line[web_classification_no_variants]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_node[connection_as_input-conn_node-None-None]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_node[package_tools-search_by_text-flow_inputs5-None]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_node[prompt_tools-summarize_text_content_prompt-flow_inputs1-dependency_nodes_outputs1]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_node[script_with___file__-node1-flow_inputs2-None]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_node[script_with___file__-node2-None-dependency_nodes_outputs3]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_node[script_with___file__-node3-None-None]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_node[script_with_import-node1-flow_inputs8-None]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_node[simple_aggregation-accuracy-flow_inputs7-dependency_nodes_outputs7]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_node[web_classification_no_variants-summarize_text_content-flow_inputs0-dependency_nodes_outputs0]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_exec_node_with_llm_node
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_for_script_tool_with_init
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_executor_node_overrides
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_flow_with_no_inputs_and_output[no_inputs_outputs]
tests.executor.e2etests.test_executor_happypath.TestExecutor ‑ test_long_running_log
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_batch_run_input_type_invalid[simple_flow_with_python_tool-inputs_mapping0-The input for flow is incorrect. The value for flow input 'num' in line 0 of input data does not match the expected type 'int'. Please change flow input type or adjust the input value in your input data.-InputTypeError]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_batch_run_raise_on_line_failure[simple_flow_with_python_tool-batch_input0-True-Exception]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_batch_run_raise_on_line_failure[simple_flow_with_python_tool-batch_input1-False-InputTypeError]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_batch_run_raise_on_line_failure[simple_flow_with_python_tool-batch_input2-True-None]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_batch_run_raise_on_line_failure[simple_flow_with_python_tool-batch_input3-False-None]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_executor_create_failure_type[source_file_missing-flow.dag.python.yaml-ResolveToolError-InvalidSource]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_executor_create_failure_type_and_message[flow_input_reference_invalid-flow.dag.yaml-InputReferenceNotFound-None-Invalid node definitions found in the flow graph. Node 'divide_num' references flow input 'num_1' which is not defined in your flow. To resolve this issue, please review your flow, ensuring that you either add the missing flow inputs or adjust node reference to the correct flow input.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_executor_create_failure_type_and_message[flow_llm_with_wrong_conn-flow.dag.yaml-ResolveToolError-InvalidConnectionType-Tool load failed in 'wrong_llm': (InvalidConnectionType) Connection type CustomConnection is not supported for LLM.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_executor_create_failure_type_and_message[flow_output_reference_invalid-flow.dag.yaml-EmptyOutputReference-None-The output 'content' for flow is incorrect. The reference is not specified for the output 'content' in the flow. To rectify this, ensure that you accurately specify the reference in the flow.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_executor_create_failure_type_and_message[node_circular_dependency-flow.dag.yaml-NodeCircularDependency-None-Invalid node definitions found in the flow graph. Node circular dependency has been detected among the nodes in your flow. Kindly review the reference relationships for the nodes ['divide_num', 'divide_num_1', 'divide_num_2'] and resolve the circular reference issue in the flow.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_executor_create_failure_type_and_message[node_reference_not_found-flow.dag.yaml-NodeReferenceNotFound-None-Invalid node definitions found in the flow graph. Node 'divide_num_2' references a non-existent node 'divide_num_3' in your flow. Please review your flow to ensure that the node name is accurately specified.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_executor_create_failure_type_and_message[nodes_names_duplicated-flow.dag.yaml-DuplicateNodeName-None-Invalid node definitions found in the flow graph. Node with name 'stringify_num' appears more than once in the node definitions in your flow, which is not allowed. To address this issue, please review your flow and either rename or remove nodes with identical names.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_executor_create_failure_type_and_message[outputs_reference_not_valid-flow.dag.yaml-OutputReferenceNotFound-None-The output 'content' for flow is incorrect. The output 'content' references non-existent node 'another_stringify_num' in your flow. To resolve this issue, please carefully review your flow and correct the reference definition for the output in question.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_executor_create_failure_type_and_message[outputs_with_invalid_flow_inputs_ref-flow.dag.yaml-OutputReferenceNotFound-None-The output 'num' for flow is incorrect. The output 'num' references non-existent flow input 'num11' in your flow. Please carefully review your flow and correct the reference definition for the output in question.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_executor_create_failure_type_and_message[source_file_missing-flow.dag.jinja.yaml-ResolveToolError-InvalidSource-Tool load failed in 'summarize_text_content': (InvalidSource) Node source path 'summarize_text_content__variant_1.jinja2' is invalid on node 'summarize_text_content'.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_flow_run_execution_errors[flow_output_unserializable-line_input0-FlowOutputUnserializable-The output 'content' for flow is incorrect. The output value is not JSON serializable. JSON dump failed: (TypeError) Object of type UnserializableClass is not JSON serializable. Please verify your flow output and make sure the value serializable.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_flow_run_input_type_invalid[python_tool_with_simple_image_without_default-line_input2-InputNotFound]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_flow_run_input_type_invalid[simple_flow_with_python_tool-line_input0-InputNotFound]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_flow_run_input_type_invalid[simple_flow_with_python_tool-line_input1-InputTypeError]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_flow_run_with_duplicated_inputs[llm_tool_with_duplicated_inputs-Invalid inputs {'prompt'} in prompt template of node llm_tool_with_duplicated_inputs. These inputs are duplicated with the parameters of AzureOpenAI.completion.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_flow_run_with_duplicated_inputs[prompt_tool_with_duplicated_inputs-Invalid inputs {'template'} in prompt template of node prompt_tool_with_duplicated_inputs. These inputs are duplicated with the reserved parameters of prompt tool.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_invalid_flow_dag[invalid_connection-ResolveToolError-GetConnectionError]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_invalid_flow_dag[tool_type_missing-ResolveToolError-NotImplementedError]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_invalid_flow_dag[wrong_api-ResolveToolError-APINotFound]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_invalid_flow_dag[wrong_module-FailedToImportModule-None]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_invalid_flow_run_inputs_should_not_saved_to_run_info
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_node_topology_in_order[web_classification_no_variants-web_classification_no_variants_unordered]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_single_node_input_type_invalid[path_root0-simple_flow_with_python_tool-divide_num-line_input0-InputNotFound-The input for node is incorrect. Node input 'num' is not found in input data for node 'divide_num'. Please verify the inputs data for the node.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_single_node_input_type_invalid[path_root1-simple_flow_with_python_tool-divide_num-line_input1-InputTypeError-The input for node is incorrect. Value for input 'num' of node 'divide_num' is not type 'int'. Please review and rectify the input data.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_single_node_input_type_invalid[path_root2-flow_input_reference_invalid-divide_num-line_input2-InputNotFound-The input for node is incorrect. Node input 'num_1' is not found from flow inputs of node 'divide_num'. Please review the node definition in your flow.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_single_node_input_type_invalid[path_root3-simple_flow_with_python_tool-bad_node_name-line_input3-SingleNodeValidationError-Validation failed when attempting to execute the node. Node 'bad_node_name' is not found in flow 'flow.dag.yaml'. Please change node name or correct the flow file.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_single_node_input_type_invalid[path_root4-node_missing_type_or_source-divide_num-line_input4-SingleNodeValidationError-Validation failed when attempting to execute the node. Properties 'source' or 'type' are not specified for Node 'divide_num' in flow 'flow.dag.yaml'. Please make sure these properties are in place and try again.]
tests.executor.e2etests.test_executor_validation.TestValidation ‑ test_valid_flow_run_inpust_should_saved_to_run_info
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_batch_engine_with_image[chat_flow_with_image-input_dirs3-inputs_mapping3-answer-2-False]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_batch_engine_with_image[eval_flow_with_composite_image-input_dirs5-inputs_mapping5-output-2-True]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_batch_engine_with_image[eval_flow_with_simple_image-input_dirs4-inputs_mapping4-output-2-True]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_batch_engine_with_image[python_tool_with_composite_image-input_dirs2-inputs_mapping2-output-2-False]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_batch_engine_with_image[python_tool_with_simple_image-input_dirs0-inputs_mapping0-output-4-False]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_batch_engine_with_image[python_tool_with_simple_image_with_default-input_dirs1-inputs_mapping1-output-4-False]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_batch_run_then_eval_with_image
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_aggregation_with_image[eval_flow_with_composite_image-inputs6]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_aggregation_with_image[eval_flow_with_composite_image-inputs7]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_aggregation_with_image[eval_flow_with_composite_image-inputs8]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_aggregation_with_image[eval_flow_with_simple_image-inputs0]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_aggregation_with_image[eval_flow_with_simple_image-inputs1]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_aggregation_with_image[eval_flow_with_simple_image-inputs2]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_aggregation_with_image[eval_flow_with_simple_image-inputs3]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_aggregation_with_image[eval_flow_with_simple_image-inputs4]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_aggregation_with_image[eval_flow_with_simple_image-inputs5]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_line_with_image[chat_flow_with_image-inputs9]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_line_with_image[python_tool_with_composite_image-inputs6]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_line_with_image[python_tool_with_composite_image-inputs7]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_line_with_image[python_tool_with_composite_image-inputs8]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_line_with_image[python_tool_with_image_nested_api_calls-inputs10]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_line_with_image[python_tool_with_simple_image-inputs0]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_line_with_image[python_tool_with_simple_image-inputs1]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_line_with_image[python_tool_with_simple_image-inputs2]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_line_with_image[python_tool_with_simple_image-inputs3]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_line_with_image[python_tool_with_simple_image-inputs4]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_line_with_image[python_tool_with_simple_image-inputs5]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_node_with_image[python_tool_with_composite_image-python_node-flow_inputs2-None]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_node_with_image[python_tool_with_composite_image-python_node_2-flow_inputs3-None]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_node_with_image[python_tool_with_composite_image-python_node_3-flow_inputs4-dependency_nodes_outputs4]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_node_with_image[python_tool_with_simple_image-python_node-flow_inputs0-None]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_node_with_image[python_tool_with_simple_image-python_node_2-flow_inputs1-dependency_nodes_outputs1]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_node_with_image_storage_and_path[None-False-.]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_node_with_image_storage_and_path[None-True-test_storage]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_node_with_image_storage_and_path[test_path-False-test_path]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_node_with_image_storage_and_path[test_path-True-test_storage]
tests.executor.e2etests.test_image.TestExecutorWithImage ‑ test_executor_exec_node_with_invalid_default_value[python_tool_with_invalid_default_value-python_node_2-flow_inputs0-dependency_nodes_outputs0]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_batch_engine_with_image[chat_flow_with_openai_vision_image-input_dirs1-inputs_mapping1-answer-2]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_batch_engine_with_image[python_tool_with_openai_vision_image-input_dirs0-inputs_mapping0-output-4]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_executor_exec_line_with_image[chat_flow_with_openai_vision_image-inputs6]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_executor_exec_line_with_image[python_tool_with_openai_vision_image-inputs0]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_executor_exec_line_with_image[python_tool_with_openai_vision_image-inputs1]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_executor_exec_line_with_image[python_tool_with_openai_vision_image-inputs2]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_executor_exec_line_with_image[python_tool_with_openai_vision_image-inputs3]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_executor_exec_line_with_image[python_tool_with_openai_vision_image-inputs4]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_executor_exec_line_with_image[python_tool_with_openai_vision_image-inputs5]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_executor_exec_node_with_image[python_tool_with_openai_vision_image-python_node-flow_inputs0-None]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_executor_exec_node_with_image[python_tool_with_openai_vision_image-python_node_2-flow_inputs1-dependency_nodes_outputs1]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_executor_exec_node_with_image_storage_and_path[None-False-.]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_executor_exec_node_with_image_storage_and_path[None-True-test_storage]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_executor_exec_node_with_image_storage_and_path[test_path-False-test_path]
tests.executor.e2etests.test_image.TestExecutorWithOpenaiVisionImage ‑ test_executor_exec_node_with_image_storage_and_path[test_path-True-test_storage]
tests.executor.e2etests.test_langchain.TestLangchain ‑ test_batch_with_langchain[flow_with_langchain_traces-inputs_mapping0]
tests.executor.e2etests.test_langchain.TestLangchain ‑ test_batch_with_langchain[openai_chat_api_flow-inputs_mapping1]
tests.executor.e2etests.test_langchain.TestLangchain ‑ test_batch_with_langchain[openai_completion_api_flow-inputs_mapping2]
tests.executor.e2etests.test_logs.TestExecutorLogs ‑ test_activate_config_log
tests.executor.e2etests.test_logs.TestExecutorLogs ‑ test_async_log_in_worker_thread
tests.executor.e2etests.test_logs.TestExecutorLogs ‑ test_batch_run_flow_logs[flow_root_dir0-print_input_flow-8]
tests.executor.e2etests.test_logs.TestExecutorLogs ‑ test_batch_run_flow_logs[flow_root_dir1-print_input_flex-2]
tests.executor.e2etests.test_logs.TestExecutorLogs ‑ test_change_log_format
tests.executor.e2etests.test_logs.TestExecutorLogs ‑ test_executor_logs[print_input_flow]
tests.executor.e2etests.test_logs.TestExecutorLogs ‑ test_log_progress[simple_flow_with_ten_inputs-inputs_mapping0]
tests.executor.e2etests.test_logs.TestExecutorLogs ‑ test_long_run_log
tests.executor.e2etests.test_logs.TestExecutorLogs ‑ test_node_logs[print_input_flow]
tests.executor.e2etests.test_logs.TestExecutorLogs ‑ test_node_logs_in_executor_logs[print_input_flow]
tests.executor.e2etests.test_package_tool.TestPackageTool ‑ test_custom_llm_tool_with_duplicated_inputs
tests.executor.e2etests.test_package_tool.TestPackageTool ‑ test_executor_package_tool_with_conn
tests.executor.e2etests.test_package_tool.TestPackageTool ‑ test_executor_package_with_prompt_tool
tests.executor.e2etests.test_package_tool.TestPackageTool ‑ test_package_tool_execution[wrong_package_in_package_tools-ResolveToolError-PackageToolNotFoundError-Tool load failed in 'search_by_text': (PackageToolNotFoundError) Package tool 'promptflow.tools.serpapi11.SerpAPI.search' is not found in the current environment. All available package tools are: ['promptflow.tools.azure_content_safety.AzureContentSafety.analyze_text', 'promptflow.tools.azure_detect.AzureDetect.get_language'].]
tests.executor.e2etests.test_package_tool.TestPackageTool ‑ test_package_tool_execution[wrong_tool_in_package_tools-ResolveToolError-PackageToolNotFoundError-Tool load failed in 'search_by_text': (PackageToolNotFoundError) Package tool 'promptflow.tools.serpapi.SerpAPI.search_11' is not found in the current environment. All available package tools are: ['promptflow.tools.azure_content_safety.AzureContentSafety.analyze_text', 'promptflow.tools.azure_detect.AzureDetect.get_language'].]
tests.executor.e2etests.test_package_tool.TestPackageTool ‑ test_package_tool_load_error[tool_with_init_error-Tool load failed in 'tool_with_init_error': (ToolLoadError) Failed to load package tool 'Tool with init error': (Exception) Tool load error.]
tests.executor.e2etests.test_script_tool_generator.TestScriptToolGenerator ‑ test_generate_script_tool_meta_with_dynamic_list
tests.executor.e2etests.test_script_tool_generator.TestScriptToolGenerator ‑ test_generate_script_tool_meta_with_enabled_by_value
tests.executor.e2etests.test_script_tool_generator.TestScriptToolGenerator ‑ test_generate_script_tool_meta_with_generated_by
tests.executor.e2etests.test_script_tool_generator.TestScriptToolGenerator ‑ test_generate_script_tool_meta_with_invalid_dynamic_list
tests.executor.e2etests.test_script_tool_generator.TestScriptToolGenerator ‑ test_generate_script_tool_meta_with_invalid_enabled_by
tests.executor.e2etests.test_script_tool_generator.TestScriptToolGenerator ‑ test_generate_script_tool_meta_with_invalid_icon
tests.executor.e2etests.test_script_tool_generator.TestScriptToolGenerator ‑ test_generate_script_tool_meta_with_invalid_schema
tests.executor.e2etests.test_telemetry.TestExecutorTelemetry ‑ test_executor_openai_telemetry
tests.executor.e2etests.test_telemetry.TestExecutorTelemetry ‑ test_executor_openai_telemetry_with_batch_run
tests.executor.e2etests.test_traces.TestExecutorTraces ‑ test_executor_generator_tools
tests.executor.e2etests.test_traces.TestExecutorTraces ‑ test_executor_openai_api_flow[llm_tool-inputs4]
tests.executor.e2etests.test_traces.TestExecutorTraces ‑ test_executor_openai_api_flow[llm_tool-inputs5]
tests.executor.e2etests.test_traces.TestExecutorTraces ‑ test_executor_openai_api_flow[openai_chat_api_flow-inputs0]
tests.executor.e2etests.test_traces.TestExecutorTraces ‑ test_executor_openai_api_flow[openai_chat_api_flow-inputs1]
tests.executor.e2etests.test_traces.TestExecutorTraces ‑ test_executor_openai_api_flow[openai_completion_api_flow-inputs2]
tests.executor.e2etests.test_traces.TestExecutorTraces ‑ test_executor_openai_api_flow[openai_completion_api_flow-inputs3]
tests.executor.e2etests.test_traces.TestExecutorTraces ‑ test_flow_with_trace[flow_with_trace]
tests.executor.e2etests.test_traces.TestExecutorTraces ‑ test_flow_with_trace[flow_with_trace_async]
tests.executor.e2etests.test_traces.TestExecutorTraces ‑ test_trace_behavior_with_generator_node[False]
tests.executor.e2etests.test_traces.TestExecutorTraces ‑ test_trace_behavior_with_generator_node[True]
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_flow_with_nested_tool
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_flow_with_traced_function
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_otel_trace[flow_with_trace-inputs0-5]
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_otel_trace[flow_with_trace_async-inputs1-5]
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_otel_trace_with_batch
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_otel_trace_with_embedding[openai_embedding_api_flow-inputs0-3]
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_otel_trace_with_embedding[openai_embedding_api_flow_with_token-inputs1-3]
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_otel_trace_with_llm[flow_with_async_llm_tasks-inputs5-False-6]
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_otel_trace_with_llm[llm_tool-inputs4-False-4]
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_otel_trace_with_llm[openai_chat_api_flow-inputs0-False-3]
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_otel_trace_with_llm[openai_chat_api_flow-inputs1-True-3]
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_otel_trace_with_llm[openai_completion_api_flow-inputs2-False-3]
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_otel_trace_with_llm[openai_completion_api_flow-inputs3-True-3]
tests.executor.e2etests.test_traces.TestOTelTracer ‑ test_otel_trace_with_prompt[llm_tool-inputs0-joke.jinja2]
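
Note on the naming convention above: the bracketed suffixes (e.g. `[python_tool_with_simple_image-inputs0]`) are pytest parametrize IDs, formed by joining each parameter's ID with `-`; string parameters appear verbatim, while non-string parameters such as input dictionaries fall back to the argument name plus an index (`inputs0`, `inputs1`, ...). A minimal sketch of how such IDs arise (the flow names and input values here are illustrative placeholders, not promptflow's actual fixtures):

```python
import pytest

# Each (flow_name, inputs) pair becomes one collected test case. pytest
# derives the bracketed ID from the parameter values: the string is used
# as-is, and the dict falls back to "inputs<N>", matching the listing above.
@pytest.mark.parametrize(
    "flow_name, inputs",
    [
        # -> test_exec_line_with_image[python_tool_with_simple_image-inputs0]
        ("python_tool_with_simple_image", {"image": "logo.png"}),
        # -> test_exec_line_with_image[python_tool_with_simple_image-inputs1]
        ("python_tool_with_simple_image", {"image": "logo.jpg"}),
    ],
)
def test_exec_line_with_image(flow_name, inputs):
    # A real test would resolve `flow_name` to a flow folder, build an
    # executor, and execute one line with `inputs`; asserted here only
    # to keep the sketch self-contained and runnable.
    assert flow_name and inputs
```

A single case from the list can then be re-run by passing its full node ID to pytest, for example:

```python
# Run from the repository's test root (quoting protects the brackets):
#   pytest "tests/executor/e2etests/test_image.py::TestExecutorWithImage::test_executor_exec_line_with_image[python_tool_with_simple_image-inputs0]"
```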