[Bugfix][Executor] Fix the issue of duplicate line numbers in the chat group run scenario #9980

GitHub Actions / SDK CLI Test Result [devs/peiwen/fix_chatgroup_run](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:devs/peiwen/fix_chatgroup_run++) failed May 8, 2024 in 0s

27 fail, 13 skipped, 633 pass in 57m 33s

    4 files  ±0      4 suites  ±0   57m 33s ⏱️ +21s
  673 tests ±0    633 ✅ ±0  13 💤 ±0   27 ❌ ±0 
2 692 runs  ±0  2 538 ✅ ±0  52 💤 ±0  102 ❌ ±0 

Results for commit b685cd0. ± Comparison against earlier commit a18c491.

Annotations

All 4 runs failed: test_pf_flow_test_with_non_english_input_output (tests.sdk_cli_test.e2etests.test_cli.TestCli)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
SystemExit: 1
args = (Namespace(version=False, action='flow', flow='/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_confi... skip_open_browser=False, init=None, url_params=None, verbose=False, debug=False, user_agent=None, sub_action='test'),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            telemetry_logger = get_telemetry_logger()
            with log_activity(
                telemetry_logger,
                activity_name,
                activity_type=ActivityType.PUBLICAPI,
                custom_dimensions=custom_dimensions,
            ):
>               return func(*args, **kwargs)

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:282: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:88: in run_command
    raise ex
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:62: in run_command
    dispatch_flow_commands(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:78: in dispatch_flow_commands
    test_flow(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:464: in test_flow
    _test_flow_standard(args, pf_client, inputs, environment_variables)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:546: in _test_flow_standard
    result = pf_client.flows.test(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
    return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:164: in test
    TestSubmitter._raise_error_when_test_failed(result, show_trace=node is not None)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='c390a3be-de9f-42b1-b7e2-c8dfd31ada8f_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.013457}, result=None, message_format='basic')})
show_trace = False

    @staticmethod
    def _raise_error_when_test_failed(test_result, show_trace=False):
        from promptflow.executor._result import LineResult
    
        test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
    
        if test_status == Status.Failed:
            error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
            error_response = ErrorResponse.from_error_dict(error_dict)
            user_execution_error = error_response.get_user_execution_error_info()
            error_message = error_response.message
            stack_trace = user_execution_error.get("traceback", "")
            error_type = user_execution_error.get("type", "Exception")
            if show_trace:
                print(stack_trace)
>           raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E           promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E           values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "\u4ec0\u4e48\u662f chat gpt"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException

During handling of the above exception, another exception occurred:

self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7fe28b92bfd0>
capsys = <_pytest.capture.CaptureFixture object at 0x7fe28818f250>

    def test_pf_flow_test_with_non_english_input_output(self, capsys):
        # disable trace to not invoke prompt flow service, which will print unexpected content to stdout
        with mock.patch("promptflow._sdk._tracing.is_trace_feature_disabled", return_value=True):
            question = "什么是 chat gpt"
>           run_pf_command("flow", "test", "--flow", f"{FLOWS_DIR}/chat_flow", "--inputs", f'question="{question}"')

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:372: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:61: in run_pf_command
    main()
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:156: in main
    entry(command_args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:137: in entry
    cli_exception_and_telemetry_handler(run_command, activity_name)(args)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

args = (Namespace(version=False, action='flow', flow='/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_confi... skip_open_browser=False, init=None, url_params=None, verbose=False, debug=False, user_agent=None, sub_action='test'),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            telemetry_logger = get_telemetry_logger()
            with log_activity(
                telemetry_logger,
                activity_name,
                activity_type=ActivityType.PUBLICAPI,
                custom_dimensions=custom_dimensions,
            ):
                return func(*args, **kwargs)
        except Exception as e:
            if is_format_exception():
                # When the flag format_exception is set in command,
                # it will write a json with exception info and command to stderr.
                error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
                error_msg["command"] = " ".join(sys.argv)
                sys.stderr.write(json.dumps(error_msg))
            if isinstance(e, PromptflowException):
                print_red_error(f"{activity_name} failed with {e.__class__.__name__}: {str(e)}")
>               sys.exit(1)
E               SystemExit: 1

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:292: SystemExit
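
All of the failures in this report share one root cause: a `RecordItemMissingException` raised from the local recording file `node_cache.shelve`, meaning the recorded OpenAI responses contain no entry matching the request payload printed after `values:` in each traceback. Below is a minimal sketch for inspecting such a recording, assuming it is a plain Python `shelve` database with picklable keys and values; the key format actually used by promptflow-recording is not shown in this log, so the path and preview logic are illustrative only.

```python
import shelve
from pathlib import Path

# Hypothetical local path; adjust to your checkout. Depending on the dbm
# backend, shelve may store the data under a suffixed name such as
# node_cache.shelve.db, in which case opening this exact path can fail.
RECORDING = Path("src/promptflow-recording/recordings/local/node_cache.shelve")

# Open read-only and list a few recorded keys so a missing request payload
# can be spotted by comparing against the `values: {...}` blob in the failure.
with shelve.open(str(RECORDING), flag="r") as db:
    print(f"{len(db)} recorded items")
    for key in list(db.keys())[:10]:  # preview only
        print(key)
```

Comparing the recorded keys against the `values: {...}` payloads in the failures should show whether the cache simply lacks entries for the chat-flow requests exercised on this branch.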

All 4 runs failed: test_init_chat_flow (tests.sdk_cli_test.e2etests.test_cli.TestCli)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
SystemExit: 1
args = (Namespace(version=False, action='flow', flow='chat_flow', node=None, variant=None, interactive=False, input=None, inp... skip_open_browser=False, init=None, url_params=None, verbose=False, debug=False, user_agent=None, sub_action='test'),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            telemetry_logger = get_telemetry_logger()
            with log_activity(
                telemetry_logger,
                activity_name,
                activity_type=ActivityType.PUBLICAPI,
                custom_dimensions=custom_dimensions,
            ):
>               return func(*args, **kwargs)

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:282: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:88: in run_command
    raise ex
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:62: in run_command
    dispatch_flow_commands(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:78: in dispatch_flow_commands
    test_flow(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:464: in test_flow
    _test_flow_standard(args, pf_client, inputs, environment_variables)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:546: in _test_flow_standard
    result = pf_client.flows.test(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
    return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:164: in test
    TestSubmitter._raise_error_when_test_failed(result, show_trace=node is not None)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='9d7ce4a4-608f-4293-b0ca-ad21af9f3ba5_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.011741}, result=None, message_format='basic')})
show_trace = False

    @staticmethod
    def _raise_error_when_test_failed(test_result, show_trace=False):
        from promptflow.executor._result import LineResult
    
        test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
    
        if test_status == Status.Failed:
            error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
            error_response = ErrorResponse.from_error_dict(error_dict)
            user_execution_error = error_response.get_user_execution_error_info()
            error_message = error_response.message
            stack_trace = user_execution_error.get("traceback", "")
            error_type = user_execution_error.get("type", "Exception")
            if show_trace:
                print(stack_trace)
>           raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E           promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E           values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "hi"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException

During handling of the above exception, another exception occurred:

self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7fe28b92b250>

    def test_init_chat_flow(self):
        temp_dir = mkdtemp()
        with _change_working_dir(temp_dir):
            flow_name = "chat_flow"
            # Init standard flow
            run_pf_command(
                "flow",
                "init",
                "--flow",
                flow_name,
                "--type",
                "chat",
            )
            ignore_file_path = Path(temp_dir) / flow_name / ".gitignore"
            assert ignore_file_path.exists()
            ignore_file_path.unlink()
    
            # Only azure openai connection in test env
            with open(Path(temp_dir) / flow_name / "flow.dag.yaml", "r") as f:
                flow_dict = load_yaml(f)
            flow_dict["nodes"][0]["provider"] = "AzureOpenAI"
            flow_dict["nodes"][0]["connection"] = "azure_open_ai_connection"
            with open(Path(temp_dir) / flow_name / "flow.dag.yaml", "w") as f:
                dump_yaml(flow_dict, f)
    
>           run_pf_command("flow", "test", "--flow", flow_name, "--inputs", "question=hi")

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:774: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:61: in run_pf_command
    main()
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:156: in main
    entry(command_args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:137: in entry
    cli_exception_and_telemetry_handler(run_command, activity_name)(args)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

args = (Namespace(version=False, action='flow', flow='chat_flow', node=None, variant=None, interactive=False, input=None, inp... skip_open_browser=False, init=None, url_params=None, verbose=False, debug=False, user_agent=None, sub_action='test'),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            telemetry_logger = get_telemetry_logger()
            with log_activity(
                telemetry_logger,
                activity_name,
                activity_type=ActivityType.PUBLICAPI,
                custom_dimensions=custom_dimensions,
            ):
                return func(*args, **kwargs)
        except Exception as e:
            if is_format_exception():
                # When the flag format_exception is set in command,
                # it will write a json with exception info and command to stderr.
                error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
                error_msg["command"] = " ".join(sys.argv)
                sys.stderr.write(json.dumps(error_msg))
            if isinstance(e, PromptflowException):
                print_red_error(f"{activity_name} failed with {e.__class__.__name__}: {str(e)}")
>               sys.exit(1)
E               SystemExit: 1

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:292: SystemExit

All 4 runs failed: test_flow_chat (tests.sdk_cli_test.e2etests.test_cli.TestCli)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
SystemExit: 1
args = (Namespace(version=False, action='flow', flow='/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_confi... skip_open_browser=False, init=None, url_params=None, verbose=False, debug=False, user_agent=None, sub_action='test'),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            telemetry_logger = get_telemetry_logger()
            with log_activity(
                telemetry_logger,
                activity_name,
                activity_type=ActivityType.PUBLICAPI,
                custom_dimensions=custom_dimensions,
            ):
>               return func(*args, **kwargs)

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:282: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:88: in run_command
    raise ex
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:62: in run_command
    dispatch_flow_commands(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:78: in dispatch_flow_commands
    test_flow(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:462: in test_flow
    _test_flow_interactive(args, pf_client, inputs, environment_variables)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:534: in _test_flow_interactive
    pf_client.flows._chat(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
    return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:360: in _chat
    submitter._chat_flow(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:567: in _chat_flow
    self._raise_error_when_test_failed(flow_result, show_trace=True)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='06855f86-7de7-4529-a269-43cb9f7b7b15_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.011302}, result=None, message_format='basic')})
show_trace = True

    @staticmethod
    def _raise_error_when_test_failed(test_result, show_trace=False):
        from promptflow.executor._result import LineResult
    
        test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
    
        if test_status == Status.Failed:
            error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
            error_response = ErrorResponse.from_error_dict(error_dict)
            user_execution_error = error_response.get_user_execution_error_info()
            error_message = error_response.message
            stack_trace = user_execution_error.get("traceback", "")
            error_type = user_execution_error.get("type", "Exception")
            if show_trace:
                print(stack_trace)
>           raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E           promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E           values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException

During handling of the above exception, another exception occurred:

self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7fe28b9d4f50>
monkeypatch = <_pytest.monkeypatch.MonkeyPatch object at 0x7fe298915550>
capsys = <_pytest.capture.CaptureFixture object at 0x7fe28630b5d0>

    def test_flow_chat(self, monkeypatch, capsys):
        chat_list = ["hi", "what is chat gpt?"]
    
        def mock_input(*args, **kwargs):
            if chat_list:
                return chat_list.pop()
            else:
                raise KeyboardInterrupt()
    
        monkeypatch.setattr("builtins.input", mock_input)
>       run_pf_command(
            "flow",
            "test",
            "--flow",
            f"{FLOWS_DIR}/chat_flow",
            "--interactive",
        )

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:980: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:61: in run_pf_command
    main()
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:156: in main
    entry(command_args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:137: in entry
    cli_exception_and_telemetry_handler(run_command, activity_name)(args)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

args = (Namespace(version=False, action='flow', flow='/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_confi... skip_open_browser=False, init=None, url_params=None, verbose=False, debug=False, user_agent=None, sub_action='test'),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            telemetry_logger = get_telemetry_logger()
            with log_activity(
                telemetry_logger,
                activity_name,
                activity_type=ActivityType.PUBLICAPI,
                custom_dimensions=custom_dimensions,
            ):
                return func(*args, **kwargs)
        except Exception as e:
            if is_format_exception():
                # When the flag format_exception is set in command,
                # it will write a json with exception info and command to stderr.
                error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
                error_msg["command"] = " ".join(sys.argv)
                sys.stderr.write(json.dumps(error_msg))
            if isinstance(e, PromptflowException):
                print_red_error(f"{activity_name} failed with {e.__class__.__name__}: {str(e)}")
>               sys.exit(1)
E               SystemExit: 1

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:292: SystemExit

All 4 runs failed: test_invalid_chat_flow (tests.sdk_cli_test.e2etests.test_cli.TestCli)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert "Execution failure in 'show_answer': (Exception) mock exception" in 'Prompt flow service has started...\n===============================================\nWelcome to chat flow, chat_flow_with_exception.\nPress Enter to send your message.\nYou can quit with ctrl+C.\n===============================================\nUser: \npf.flow.test failed with UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.\nvalues: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}\n\n'
 +  where 'Prompt flow service has started...\n===============================================\nWelcome to chat flow, chat_flow_with_exception.\nPress Enter to send your message.\nYou can quit with ctrl+C.\n===============================================\nUser: \npf.flow.test failed with UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.\nvalues: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}\n\n' = CaptureResult(out='Prompt flow service has started...\n===============================================\nWelcome to chat flow, chat_flow_with_exception.\nPress Enter to send your message.\nYou can quit with ctrl+C.\n===============================================\nUser: \npf.flow.test failed with UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.\nvalues: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}\n\n', err='').out
self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7fe28b9d49d0>
monkeypatch = <_pytest.monkeypatch.MonkeyPatch object at 0x7fe2882a6ad0>
capsys = <_pytest.capture.CaptureFixture object at 0x7fe2882a4090>

    def test_invalid_chat_flow(self, monkeypatch, capsys):
        def mock_input(*args, **kwargs):
            if chat_list:
                return chat_list.pop()
            else:
                raise KeyboardInterrupt()
    
        monkeypatch.setattr("builtins.input", mock_input)
    
        chat_list = ["hi", "what is chat gpt?"]
        with pytest.raises(SystemExit):
            run_pf_command(
                "flow",
                "test",
                "--flow",
                f"{FLOWS_DIR}/chat_flow_with_exception",
                "--interactive",
            )
        outerr = capsys.readouterr()
>       assert "Execution failure in 'show_answer': (Exception) mock exception" in outerr.out
E       assert "Execution failure in 'show_answer': (Exception) mock exception" in 'Prompt flow service has started...\n===============================================\nWelcome to chat flow, chat_flow_with_exception.\nPress Enter to send your message.\nYou can quit with ctrl+C.\n===============================================\nUser: \npf.flow.test failed with UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.\nvalues: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}\n\n'
E        +  where 'Prompt flow service has started...\n===============================================\nWelcome to chat flow, chat_flow_with_exception.\nPress Enter to send your message.\nYou can quit with ctrl+C.\n===============================================\nUser: \npf.flow.test failed with UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.\nvalues: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}\n\n' = CaptureResult(out='Prompt flow service has started...\n===============================================\nWelcome to chat flow, chat_flow_with_exception.\nPress Enter to send your message.\nYou can quit with ctrl+C.\n===============================================\nUser: \npf.flow.test failed with UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.\nvalues: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}\n\n', err='').out

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:1037: AssertionError

All 4 runs failed: test_chat_with_stream_output (tests.sdk_cli_test.e2etests.test_cli.TestCli)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
SystemExit: 1
args = (Namespace(version=False, action='flow', flow='/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_confi... skip_open_browser=False, init=None, url_params=None, verbose=False, debug=False, user_agent=None, sub_action='test'),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            telemetry_logger = get_telemetry_logger()
            with log_activity(
                telemetry_logger,
                activity_name,
                activity_type=ActivityType.PUBLICAPI,
                custom_dimensions=custom_dimensions,
            ):
>               return func(*args, **kwargs)

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:282: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:88: in run_command
    raise ex
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:62: in run_command
    dispatch_flow_commands(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:78: in dispatch_flow_commands
    test_flow(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:462: in test_flow
    _test_flow_interactive(args, pf_client, inputs, environment_variables)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:534: in _test_flow_interactive
    pf_client.flows._chat(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
    return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:360: in _chat
    submitter._chat_flow(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:567: in _chat_flow
    self._raise_error_when_test_failed(flow_result, show_trace=True)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='20b4df2c-4649-4b19-a9e6-7fa2619d9a2d_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.011777}, result=None, message_format='basic')})
show_trace = True

    @staticmethod
    def _raise_error_when_test_failed(test_result, show_trace=False):
        from promptflow.executor._result import LineResult
    
        test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
    
        if test_status == Status.Failed:
            error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
            error_response = ErrorResponse.from_error_dict(error_dict)
            user_execution_error = error_response.get_user_execution_error_info()
            error_message = error_response.message
            stack_trace = user_execution_error.get("traceback", "")
            error_type = user_execution_error.get("type", "Exception")
            if show_trace:
                print(stack_trace)
>           raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E           promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E           values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": true, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException

During handling of the above exception, another exception occurred:

self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7fe28b9d5d50>
monkeypatch = <_pytest.monkeypatch.MonkeyPatch object at 0x7fe286347410>
capsys = <_pytest.capture.CaptureFixture object at 0x7fe286344250>

    def test_chat_with_stream_output(self, monkeypatch, capsys):
        chat_list = ["hi", "what is chat gpt?"]
    
        def mock_input(*args, **kwargs):
            if chat_list:
                return chat_list.pop()
            else:
                raise KeyboardInterrupt()
    
        monkeypatch.setattr("builtins.input", mock_input)
    
        # Test streaming output
        chat_list = ["hi", "what is chat gpt?"]
>       run_pf_command(
            "flow",
            "test",
            "--flow",
            f"{FLOWS_DIR}/chat_flow_with_stream_output",
            "--interactive",
        )

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:1089: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:61: in run_pf_command
    main()
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:156: in main
    entry(command_args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:137: in entry
    cli_exception_and_telemetry_handler(run_command, activity_name)(args)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

args = (Namespace(version=False, action='flow', flow='/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_confi... skip_open_browser=False, init=None, url_params=None, verbose=False, debug=False, user_agent=None, sub_action='test'),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            telemetry_logger = get_telemetry_logger()
            with log_activity(
                telemetry_logger,
                activity_name,
                activity_type=ActivityType.PUBLICAPI,
                custom_dimensions=custom_dimensions,
            ):
                return func(*args, **kwargs)
        except Exception as e:
            if is_format_exception():
                # When the flag format_exception is set in command,
                # it will write a json with exception info and command to stderr.
                error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
                error_msg["command"] = " ".join(sys.argv)
                sys.stderr.write(json.dumps(error_msg))
            if isinstance(e, PromptflowException):
                print_red_error(f"{activity_name} failed with {e.__class__.__name__}: {str(e)}")
>               sys.exit(1)
E               SystemExit: 1

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:292: SystemExit

All 4 runs failed: test_flow_test_with_default_chat_history (tests.sdk_cli_test.e2etests.test_cli.TestCli)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
SystemExit: 1
args = (Namespace(version=False, action='flow', flow='/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_confi... skip_open_browser=False, init=None, url_params=None, verbose=False, debug=False, user_agent=None, sub_action='test'),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            telemetry_logger = get_telemetry_logger()
            with log_activity(
                telemetry_logger,
                activity_name,
                activity_type=ActivityType.PUBLICAPI,
                custom_dimensions=custom_dimensions,
            ):
>               return func(*args, **kwargs)

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:282: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:88: in run_command
    raise ex
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:62: in run_command
    dispatch_flow_commands(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:78: in dispatch_flow_commands
    test_flow(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:464: in test_flow
    _test_flow_standard(args, pf_client, inputs, environment_variables)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:546: in _test_flow_standard
    result = pf_client.flows.test(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
    return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:164: in test
    TestSubmitter._raise_error_when_test_failed(result, show_trace=node is not None)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='869654b2-9302-40d0-b269-e226f82981a4_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.013883}, result=None, message_format='basic')})
show_trace = False

    @staticmethod
    def _raise_error_when_test_failed(test_result, show_trace=False):
        from promptflow.executor._result import LineResult
    
        test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
    
        if test_status == Status.Failed:
            error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
            error_response = ErrorResponse.from_error_dict(error_dict)
            user_execution_error = error_response.get_user_execution_error_info()
            error_message = error_response.message
            stack_trace = user_execution_error.get("traceback", "")
            error_type = user_execution_error.get("type", "Exception")
            if show_trace:
                print(stack_trace)
>           raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E           promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E           values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "hi"}, {"role": "assistant", "content": "hi"}, {"role": "user", "content": "who are you"}, {"role": "assistant", "content": "who are you"}, {"role": "user", "content": "What is ChatGPT?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException

During handling of the above exception, another exception occurred:

self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7fe28b928c10>

    def test_flow_test_with_default_chat_history(self):
>       run_pf_command(
            "flow",
            "test",
            "--flow",
            f"{FLOWS_DIR}/chat_flow_with_default_history",
        )

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:1123: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:61: in run_pf_command
    main()
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:156: in main
    entry(command_args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:137: in entry
    cli_exception_and_telemetry_handler(run_command, activity_name)(args)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

args = (Namespace(version=False, action='flow', flow='/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_confi... skip_open_browser=False, init=None, url_params=None, verbose=False, debug=False, user_agent=None, sub_action='test'),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            telemetry_logger = get_telemetry_logger()
            with log_activity(
                telemetry_logger,
                activity_name,
                activity_type=ActivityType.PUBLICAPI,
                custom_dimensions=custom_dimensions,
            ):
                return func(*args, **kwargs)
        except Exception as e:
            if is_format_exception():
                # When the flag format_exception is set in command,
                # it will write a json with exception info and command to stderr.
                error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
                error_msg["command"] = " ".join(sys.argv)
                sys.stderr.write(json.dumps(error_msg))
            if isinstance(e, PromptflowException):
                print_red_error(f"{activity_name} failed with {e.__class__.__name__}: {str(e)}")
>               sys.exit(1)
E               SystemExit: 1

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:292: SystemExit

All 4 runs failed: test_flow_test_with_user_defined_chat_history (tests.sdk_cli_test.e2etests.test_cli.TestCli)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
SystemExit: 1
args = (Namespace(version=False, action='flow', flow='/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_confi... skip_open_browser=False, init=None, url_params=None, verbose=False, debug=False, user_agent=None, sub_action='test'),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            telemetry_logger = get_telemetry_logger()
            with log_activity(
                telemetry_logger,
                activity_name,
                activity_type=ActivityType.PUBLICAPI,
                custom_dimensions=custom_dimensions,
            ):
>               return func(*args, **kwargs)

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:282: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:88: in run_command
    raise ex
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:62: in run_command
    dispatch_flow_commands(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:78: in dispatch_flow_commands
    test_flow(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:462: in test_flow
    _test_flow_interactive(args, pf_client, inputs, environment_variables)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:534: in _test_flow_interactive
    pf_client.flows._chat(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
    return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:360: in _chat
    submitter._chat_flow(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:567: in _chat_flow
    self._raise_error_when_test_failed(flow_result, show_trace=True)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='c7cad7c5-79f5-4a64-8952-bf003695bd88_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.011753}, result=None, message_format='basic')})
show_trace = True

    @staticmethod
    def _raise_error_when_test_failed(test_result, show_trace=False):
        from promptflow.executor._result import LineResult
    
        test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
    
        if test_status == Status.Failed:
            error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
            error_response = ErrorResponse.from_error_dict(error_dict)
            user_execution_error = error_response.get_user_execution_error_info()
            error_message = error_response.message
            stack_trace = user_execution_error.get("traceback", "")
            error_type = user_execution_error.get("type", "Exception")
            if show_trace:
                print(stack_trace)
>           raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E           promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E           values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException

During handling of the above exception, another exception occurred:

self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7fe28b9d5490>
monkeypatch = <_pytest.monkeypatch.MonkeyPatch object at 0x7fe26d40bcd0>
capsys = <_pytest.capture.CaptureFixture object at 0x7fe26d40aa90>

    def test_flow_test_with_user_defined_chat_history(self, monkeypatch, capsys):
        chat_list = ["hi", "what is chat gpt?"]
    
        def mock_input(*args, **kwargs):
            if chat_list:
                return chat_list.pop()
            else:
                raise KeyboardInterrupt()
    
        monkeypatch.setattr("builtins.input", mock_input)
>       run_pf_command(
            "flow",
            "test",
            "--flow",
            f"{FLOWS_DIR}/chat_flow_with_defined_chat_history",
            "--interactive",
        )

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:1151: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:61: in run_pf_command
    main()
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:156: in main
    entry(command_args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:137: in entry
    cli_exception_and_telemetry_handler(run_command, activity_name)(args)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

args = (Namespace(version=False, action='flow', flow='/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_confi... skip_open_browser=False, init=None, url_params=None, verbose=False, debug=False, user_agent=None, sub_action='test'),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            telemetry_logger = get_telemetry_logger()
            with log_activity(
                telemetry_logger,
                activity_name,
                activity_type=ActivityType.PUBLICAPI,
                custom_dimensions=custom_dimensions,
            ):
                return func(*args, **kwargs)
        except Exception as e:
            if is_format_exception():
                # When the flag format_exception is set in command,
                # it will write a json with exception info and command to stderr.
                error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
                error_msg["command"] = " ".join(sys.argv)
                sys.stderr.write(json.dumps(error_msg))
            if isinstance(e, PromptflowException):
                print_red_error(f"{activity_name} failed with {e.__class__.__name__}: {str(e)}")
>               sys.exit(1)
E               SystemExit: 1

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:292: SystemExit

All 4 runs failed: test_pf_test_with_streaming_output (tests.sdk_cli_test.e2etests.test_flow_test.TestFlowTest)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is ChatGPT?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": true, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}
self = <sdk_cli_test.e2etests.test_flow_test.TestFlowTest object at 0x7fe27296d190>

    def test_pf_test_with_streaming_output(self):
        flow_path = Path(f"{FLOWS_DIR}/chat_flow_with_stream_output")
>       result = _client.test(flow=flow_path)

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_test.py:115: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_pf_client.py:472: in test
    return self.flows.test(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
    return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:164: in test
    TestSubmitter._raise_error_when_test_failed(result, show_trace=node is not None)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='410c9905-df07-4d15-867e-397bc72a8bde_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.012107}, result=None, message_format='basic')})
show_trace = False

    @staticmethod
    def _raise_error_when_test_failed(test_result, show_trace=False):
        from promptflow.executor._result import LineResult
    
        test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
    
        if test_status == Status.Failed:
            error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
            error_response = ErrorResponse.from_error_dict(error_dict)
            user_execution_error = error_response.get_user_execution_error_info()
            error_message = error_response.message
            stack_trace = user_execution_error.get("traceback", "")
            error_type = user_execution_error.get("type", "Exception")
            if show_trace:
                print(stack_trace)
>           raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E           promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E           values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is ChatGPT?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": true, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException
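
The RecordItemMissingException above comes from the test recording layer: the recorded-response cache (node_cache.shelve) has no entry for this exact request payload, so the replayed OpenAI call fails instead of returning a canned response. Below is a minimal, hypothetical sketch of such a record/replay lookup keyed by the serialized request values; it only illustrates the failure mode, it is not the promptflow-recording implementation, and the names (RecordItemMissingError, replay) are invented for the example.

    # Hypothetical record/replay lookup, keyed by a hash of the serialized
    # request values. Names and structure are illustrative only; this is not
    # the promptflow-recording implementation.
    import hashlib
    import json
    import shelve


    class RecordItemMissingError(KeyError):
        """Raised when no recorded response exists for the given request values."""


    def _record_key(values: dict) -> str:
        # Stable key: serialize with sorted keys, then hash.
        return hashlib.sha256(json.dumps(values, sort_keys=True).encode("utf-8")).hexdigest()


    def replay(cache_path: str, values: dict):
        """Return the recorded response for `values`, or raise if it was never recorded."""
        with shelve.open(cache_path) as cache:
            key = _record_key(values)
            if key not in cache:
                # A request shape that was never recorded (e.g. a new
                # "stream": true parameter combination) cannot be replayed.
                raise RecordItemMissingError(
                    f"Record item not found in file {cache_path}. values: {json.dumps(values)}"
                )
            return cache[key]

Under a model like this, the usual remedy is to regenerate the recording for the new request shape rather than to change the test itself.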

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_test.TestFlowTest


All 4 runs failed: test_stream_output_with_builtin_llm (tests.sdk_cli_test.e2etests.test_flow_test.TestFlowTest)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
AssertionError: {'additionalInfo': [{'info': {'message': 'OpenAI API hits exception: RecordItemMissingException: Record item not found...ll last):
  ', 'type': 'ScriptExecutionError'}, 'innerError': {'code': 'ScriptExecutionError', 'innerError': None}, ...}
assert 'Failed' == 'Completed'
  
  - Completed
  + Failed
self = <sdk_cli_test.e2etests.test_flow_test.TestFlowTest object at 0x7fe272994590>

    def test_stream_output_with_builtin_llm(self):
        flow_path = Path(f"{EAGER_FLOWS_DIR}/builtin_llm/").absolute()
        result = _client._flows._test(
            flow=flow_path,
            inputs={"stream": True},
            environment_variables={
                "OPENAI_API_KEY": "${azure_open_ai_connection.api_key}",
                "AZURE_OPENAI_ENDPOINT": "${azure_open_ai_connection.api_base}",
            },
        )
>       assert result.run_info.status.value == "Completed", result.run_info.error
E       AssertionError: {'additionalInfo': [{'info': {'message': 'OpenAI API hits exception: RecordItemMissingException: Record item not found...ll last):
E         ', 'type': 'ScriptExecutionError'}, 'innerError': {'code': 'ScriptExecutionError', 'innerError': None}, ...}
E       assert 'Failed' == 'Completed'
E         
E         - Completed
E         + Failed

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_test.py:435: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve


All 4 runs failed: test_stream_llm_chat[text/event-stream-200-text/event-stream; charset=utf-8] (tests.sdk_cli_test.e2etests.test_flow_serve)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
 +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = 'text/event-stream', expected_status_code = 200
expected_content_type = 'text/event-stream; charset=utf-8'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = serving_client_llm_chat.post("/score", json=payload, headers=headers)
>       assert response.status_code == expected_status_code
E       assert 400 == 200
E        +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError
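
All of the test_stream_llm_chat failures share the same shape: the parametrized cases encode an Accept-header negotiation (stream when text/event-stream is acceptable, JSON otherwise, 406 when only unsupported types are offered), but the endpoint returns 400 before negotiation ever matters. As a reference for what the table expects, here is a small, hypothetical restatement of that negotiation rule; it is not the promptflow serving code, only the mapping implied by the parametrize rows.

    # Hypothetical restatement of the Accept-header negotiation encoded by the
    # parametrize table above; not the promptflow serving implementation.
    def negotiate(accept_header):
        accepted = [part.split(";")[0].strip() for part in accept_header.split(",") if part.strip()]
        if not accepted or "*/*" in accepted:
            return 200, "application/json"
        if "text/event-stream" in accepted:
            return 200, "text/event-stream; charset=utf-8"
        if "application/json" in accepted:
            return 200, "application/json"
        return 406, "application/json"


    # All seven parametrized cases above satisfy this mapping, for example:
    assert negotiate("") == (200, "application/json")
    assert negotiate("text/html") == (406, "application/json")
    assert negotiate("text/event-stream, application/json") == (200, "text/event-stream; charset=utf-8")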

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve


All 4 runs failed: test_stream_llm_chat[text/html-406-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 406
 +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = 'text/html', expected_status_code = 406
expected_content_type = 'application/json'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = serving_client_llm_chat.post("/score", json=payload, headers=headers)
>       assert response.status_code == expected_status_code
E       assert 400 == 406
E        +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve


All 4 runs failed: test_stream_llm_chat[application/json-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
 +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = 'application/json', expected_status_code = 200
expected_content_type = 'application/json'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = serving_client_llm_chat.post("/score", json=payload, headers=headers)
>       assert response.status_code == expected_status_code
E       assert 400 == 200
E        +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve


All 4 runs failed: test_stream_llm_chat[*/*-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
 +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = '*/*', expected_status_code = 200
expected_content_type = 'application/json'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = serving_client_llm_chat.post("/score", json=payload, headers=headers)
>       assert response.status_code == expected_status_code
E       assert 400 == 200
E        +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve


All 4 runs failed: test_stream_llm_chat[text/event-stream, application/json-200-text/event-stream; charset=utf-8] (tests.sdk_cli_test.e2etests.test_flow_serve)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
 +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = 'text/event-stream, application/json', expected_status_code = 200
expected_content_type = 'text/event-stream; charset=utf-8'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = serving_client_llm_chat.post("/score", json=payload, headers=headers)
>       assert response.status_code == expected_status_code
E       assert 400 == 200
E        +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve


All 4 runs failed: test_stream_llm_chat[application/json, */*-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
 +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = 'application/json, */*', expected_status_code = 200
expected_content_type = 'application/json'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = serving_client_llm_chat.post("/score", json=payload, headers=headers)
>       assert response.status_code == expected_status_code
E       assert 400 == 200
E        +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve


All 4 runs failed: test_stream_llm_chat[-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
 +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = '', expected_status_code = 200
expected_content_type = 'application/json'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = serving_client_llm_chat.post("/score", json=payload, headers=headers)
>       assert response.status_code == expected_status_code
E       assert 400 == 200
E        +  where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi


All 4 runs failed: test_stream_llm_chat[text/event-stream-200-text/event-stream; charset=utf-8] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
 +  where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7fe2878f1ad0>
accept = 'text/event-stream', expected_status_code = 200
expected_content_type = 'text/event-stream; charset=utf-8'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        fastapi_serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = fastapi_serving_client_llm_chat.post("/score", json=payload, headers=headers)
        res_content_type = response.headers.get("content-type")
>       assert response.status_code == expected_status_code
E       assert 400 == 200
E        +  where 400 = <Response [400 Bad Request]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi


All 4 runs failed: test_stream_llm_chat[text/html-406-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 406
 +  where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7fe2878375d0>
accept = 'text/html', expected_status_code = 406
expected_content_type = 'application/json'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        fastapi_serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = fastapi_serving_client_llm_chat.post("/score", json=payload, headers=headers)
        res_content_type = response.headers.get("content-type")
>       assert response.status_code == expected_status_code
E       assert 400 == 406
E        +  where 400 = <Response [400 Bad Request]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi


All 4 runs failed: test_stream_llm_chat[application/json-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
 +  where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7fe287818310>
accept = 'application/json', expected_status_code = 200
expected_content_type = 'application/json'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        fastapi_serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = fastapi_serving_client_llm_chat.post("/score", json=payload, headers=headers)
        res_content_type = response.headers.get("content-type")
>       assert response.status_code == expected_status_code
E       assert 400 == 200
E        +  where 400 = <Response [400 Bad Request]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi


All 4 runs failed: test_stream_llm_chat[*/*-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
 +  where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7fe287dd54d0>
accept = '*/*', expected_status_code = 200
expected_content_type = 'application/json'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        fastapi_serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = fastapi_serving_client_llm_chat.post("/score", json=payload, headers=headers)
        res_content_type = response.headers.get("content-type")
>       assert response.status_code == expected_status_code
E       assert 400 == 200
E        +  where 400 = <Response [400 Bad Request]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi


All 4 runs failed: test_stream_llm_chat[text/event-stream, application/json-200-text/event-stream; charset=utf-8] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
 +  where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7fe287ec2c50>
accept = 'text/event-stream, application/json', expected_status_code = 200
expected_content_type = 'text/event-stream; charset=utf-8'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        fastapi_serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = fastapi_serving_client_llm_chat.post("/score", json=payload, headers=headers)
        res_content_type = response.headers.get("content-type")
>       assert response.status_code == expected_status_code
E       assert 400 == 200
E        +  where 400 = <Response [400 Bad Request]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi


All 4 runs failed: test_stream_llm_chat[application/json, */*-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
 +  where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7fe287d21010>
accept = 'application/json, */*', expected_status_code = 200
expected_content_type = 'application/json'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        fastapi_serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = fastapi_serving_client_llm_chat.post("/score", json=payload, headers=headers)
        res_content_type = response.headers.get("content-type")
>       assert response.status_code == expected_status_code
E       assert 400 == 200
E        +  where 400 = <Response [400 Bad Request]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi


All 4 runs failed: test_stream_llm_chat[-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)

artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
 +  where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7fe287650a90>
accept = '', expected_status_code = 200
expected_content_type = 'application/json'

    @pytest.mark.usefixtures("recording_injection", "setup_local_connection")
    @pytest.mark.e2etest
    @pytest.mark.parametrize(
        "accept, expected_status_code, expected_content_type",
        [
            ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
            ("text/html", 406, "application/json"),
            ("application/json", 200, "application/json"),
            ("*/*", 200, "application/json"),
            ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
            ("application/json, */*", 200, "application/json"),
            ("", 200, "application/json"),
        ],
    )
    def test_stream_llm_chat(
        fastapi_serving_client_llm_chat,
        accept,
        expected_status_code,
        expected_content_type,
    ):
        payload = {
            "question": "What is the capital of France?",
            "chat_history": [],
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": accept,
        }
        response = fastapi_serving_client_llm_chat.post("/score", json=payload, headers=headers)
        res_content_type = response.headers.get("content-type")
>       assert response.status_code == expected_status_code
E       assert 400 == 200
E        +  where 400 = <Response [400 Bad Request]>.status_code

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_trace.TestTraceEntitiesAndOperations


1 out of 4 runs failed: test_delete_traces_three_tables (tests.sdk_cli_test.e2etests.test_trace.TestTraceEntitiesAndOperations)

artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 1s]
Raw output
sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) database is locked
[SQL: DELETE FROM events WHERE events.trace_id IN (?)]
[parameters: ('455018a7-a3a2-4a73-9b80-3e904190dc95',)]
(Background on this error at: https://sqlalche.me/e/20/e3q8)
self = <sqlalchemy.engine.base.Connection object at 0x7fe287759e10>
dialect = <sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite object at 0x7fe286300dd0>
context = <sqlalchemy.dialects.sqlite.base.SQLiteExecutionContext object at 0x7fe2863b2810>
statement = <sqlalchemy.dialects.sqlite.base.SQLiteCompiler object at 0x7fe2863116d0>
parameters = [('455018a7-a3a2-4a73-9b80-3e904190dc95',)]

    def _exec_single_context(
        self,
        dialect: Dialect,
        context: ExecutionContext,
        statement: Union[str, Compiled],
        parameters: Optional[_AnyMultiExecuteParams],
    ) -> CursorResult[Any]:
        """continue the _execute_context() method for a single DBAPI
        cursor.execute() or cursor.executemany() call.
    
        """
        if dialect.bind_typing is BindTyping.SETINPUTSIZES:
            generic_setinputsizes = context._prepare_set_input_sizes()
    
            if generic_setinputsizes:
                try:
                    dialect.do_set_input_sizes(
                        context.cursor, generic_setinputsizes, context
                    )
                except BaseException as e:
                    self._handle_dbapi_exception(
                        e, str(statement), parameters, None, context
                    )
    
        cursor, str_statement, parameters = (
            context.cursor,
            context.statement,
            context.parameters,
        )
    
        effective_parameters: Optional[_AnyExecuteParams]
    
        if not context.executemany:
            effective_parameters = parameters[0]
        else:
            effective_parameters = parameters
    
        if self._has_events or self.engine._has_events:
            for fn in self.dispatch.before_cursor_execute:
                str_statement, effective_parameters = fn(
                    self,
                    cursor,
                    str_statement,
                    effective_parameters,
                    context,
                    context.executemany,
                )
    
        if self._echo:
            self._log_info(str_statement)
    
            stats = context._get_cache_stats()
    
            if not self.engine.hide_parameters:
                self._log_info(
                    "[%s] %r",
                    stats,
                    sql_util._repr_params(
                        effective_parameters,
                        batches=10,
                        ismulti=context.executemany,
                    ),
                )
            else:
                self._log_info(
                    "[%s] [SQL parameters hidden due to hide_parameters=True]",
                    stats,
                )
    
        evt_handled: bool = False
        try:
            if context.execute_style is ExecuteStyle.EXECUTEMANY:
                effective_parameters = cast(
                    "_CoreMultiExecuteParams", effective_parameters
                )
                if self.dialect._has_events:
                    for fn in self.dialect.dispatch.do_executemany:
                        if fn(
                            cursor,
                            str_statement,
                            effective_parameters,
                            context,
                        ):
                            evt_handled = True
                            break
                if not evt_handled:
                    self.dialect.do_executemany(
                        cursor,
                        str_statement,
                        effective_parameters,
                        context,
                    )
            elif not effective_parameters and context.no_parameters:
                if self.dialect._has_events:
                    for fn in self.dialect.dispatch.do_execute_no_params:
                        if fn(cursor, str_statement, context):
                            evt_handled = True
                            break
                if not evt_handled:
                    self.dialect.do_execute_no_params(
                        cursor, str_statement, context
                    )
            else:
                effective_parameters = cast(
                    "_CoreSingleExecuteParams", effective_parameters
                )
                if self.dialect._has_events:
                    for fn in self.dialect.dispatch.do_execute:
                        if fn(
                            cursor,
                            str_statement,
                            effective_parameters,
                            context,
                        ):
                            evt_handled = True
                            break
                if not evt_handled:
>                   self.dialect.do_execute(
                        cursor, str_statement, effective_parameters, context
                    )

/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:1967: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite object at 0x7fe286300dd0>
cursor = <sqlite3.Cursor object at 0x7fe2726f5a40>
statement = 'DELETE FROM events WHERE events.trace_id IN (?)'
parameters = ('455018a7-a3a2-4a73-9b80-3e904190dc95',)
context = <sqlalchemy.dialects.sqlite.base.SQLiteExecutionContext object at 0x7fe2863b2810>

    def do_execute(self, cursor, statement, parameters, context=None):
>       cursor.execute(statement, parameters)
E       sqlite3.OperationalError: database is locked

/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/default.py:924: OperationalError

The above exception was the direct cause of the following exception:

self = <sdk_cli_test.e2etests.test_trace.TestTraceEntitiesAndOperations object at 0x7fe2725e2790>
pf = <promptflow._sdk._pf_client.PFClient object at 0x7fe2861fecd0>

    def test_delete_traces_three_tables(self, pf: PFClient) -> None:
        # trace operation does not expose API for events and spans
        # so directly use ORM class to list and assert events and spans existence and deletion
        from promptflow._sdk._orm.trace import Event as ORMEvent
        from promptflow._sdk._orm.trace import LineRun as ORMLineRun
        from promptflow._sdk._orm.trace import Span as ORMSpan
    
        mock_run = str(uuid.uuid4())
        mock_span = mock_span_for_delete_tests(run=mock_run)
        # assert events, span and line_run are persisted
        assert len(ORMEvent.list(trace_id=mock_span.trace_id, span_id=mock_span.span_id)) == 2
        assert len(ORMSpan.list(trace_ids=[mock_span.trace_id])) == 1
        assert len(ORMLineRun.list(runs=[mock_run])) == 1
        # delete traces and assert all traces are deleted
>       pf.traces.delete(run=mock_run)

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_trace.py:319: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
    return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_trace_operations.py:175: in delete
    return self._delete_within_transaction(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orm/retry.py:50: in f_retry
    return f(*args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_trace_operations.py:240: in _delete_within_transaction
    event_cnt = session.query(ORMEvent).filter(ORMEvent.trace_id.in_(trace_ids)).delete()
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/orm/query.py:3161: in delete
    result: CursorResult[Any] = self.session.execute(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/orm/session.py:2351: in execute
    return self._execute_internal(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/orm/session.py:2236: in _execute_internal
    result: Result[Any] = compile_state_cls.orm_execute_statement(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/orm/bulk_persistence.py:1953: in orm_execute_statement
    return super().orm_execute_statement(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/orm/context.py:293: in orm_execute_statement
    result = conn.execute(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:1418: in execute
    return meth(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/sql/elements.py:515: in _execute_on_connection
    return connection._execute_clauseelement(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:1640: in _execute_clauseelement
    ret = self._execute_context(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:1846: in _execute_context
    return self._exec_single_context(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:1986: in _exec_single_context
    self._handle_dbapi_exception(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:2353: in _handle_dbapi_exception
    raise sqlalchemy_exception.with_traceback(exc_info[2]) from e
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:1967: in _exec_single_context
    self.dialect.do_execute(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite object at 0x7fe286300dd0>
cursor = <sqlite3.Cursor object at 0x7fe2726f5a40>
statement = 'DELETE FROM events WHERE events.trace_id IN (?)'
parameters = ('455018a7-a3a2-4a73-9b80-3e904190dc95',)
context = <sqlalchemy.dialects.sqlite.base.SQLiteExecutionContext object at 0x7fe2863b2810>

    def do_execute(self, cursor, statement, parameters, context=None):
>       cursor.execute(statement, parameters)
E       sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) database is locked
E       [SQL: DELETE FROM events WHERE events.trace_id IN (?)]
E       [parameters: ('455018a7-a3a2-4a73-9b80-3e904190dc95',)]
E       (Background on this error at: https://sqlalche.me/e/20/e3q8)

/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/default.py:924: OperationalError
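
The trace-deletion failures are a different class of problem: sqlite3.OperationalError: database is locked means another connection held the write lock while the DELETE ran, and the retry wrapper visible in the trace (_orm/retry.py) evidently did not absorb it in this run. As a generic illustration of the usual mitigation, here is a minimal retry-with-backoff sketch for locked-database errors; it assumes plain sqlite3 with exponential backoff and is not promptflow's retry implementation.

    # Generic retry-with-backoff sketch for SQLite "database is locked" errors.
    # Illustration only, assuming plain sqlite3; this is not promptflow's
    # _orm/retry.py implementation.
    import sqlite3
    import time


    def retry_on_locked(operation, attempts=5, base_delay=0.1):
        """Run `operation()`, retrying with exponential backoff while SQLite is locked."""
        for attempt in range(attempts):
            try:
                return operation()
            except sqlite3.OperationalError as exc:
                if "database is locked" not in str(exc) or attempt == attempts - 1:
                    raise
                time.sleep(base_delay * (2 ** attempt))


    def delete_events(db_path, trace_id):
        def _delete():
            # The connection-level timeout also makes sqlite wait for the lock
            # before raising, which covers most short-lived contention.
            with sqlite3.connect(db_path, timeout=5.0) as conn:
                conn.execute("DELETE FROM events WHERE trace_id = ?", (trace_id,))

        retry_on_locked(_delete)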

Check warning on line 0 in tests.sdk_cli_test.e2etests.test_trace.TestTraceEntitiesAndOperations


1 out of 4 runs failed: test_delete_traces_with_collection_and_started_before (tests.sdk_cli_test.e2etests.test_trace.TestTraceEntitiesAndOperations)

artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 1s]
Raw output
sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) database is locked
[SQL: DELETE FROM events WHERE events.trace_id IN (?)]
[parameters: ('9d7fbc0f-5bca-4cf2-adad-e229620985fd',)]
(Background on this error at: https://sqlalche.me/e/20/e3q8)
self = <sqlalchemy.engine.base.Connection object at 0x7f3ec5e58fd0>
dialect = <sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite object at 0x7f3ec4ddc700>
context = <sqlalchemy.dialects.sqlite.base.SQLiteExecutionContext object at 0x7f3ec5e66400>
statement = <sqlalchemy.dialects.sqlite.base.SQLiteCompiler object at 0x7f3ec5eb99a0>
parameters = [('9d7fbc0f-5bca-4cf2-adad-e229620985fd',)]

    def _exec_single_context(
        self,
        dialect: Dialect,
        context: ExecutionContext,
        statement: Union[str, Compiled],
        parameters: Optional[_AnyMultiExecuteParams],
    ) -> CursorResult[Any]:
        """continue the _execute_context() method for a single DBAPI
        cursor.execute() or cursor.executemany() call.
    
        """
        if dialect.bind_typing is BindTyping.SETINPUTSIZES:
            generic_setinputsizes = context._prepare_set_input_sizes()
    
            if generic_setinputsizes:
                try:
                    dialect.do_set_input_sizes(
                        context.cursor, generic_setinputsizes, context
                    )
                except BaseException as e:
                    self._handle_dbapi_exception(
                        e, str(statement), parameters, None, context
                    )
    
        cursor, str_statement, parameters = (
            context.cursor,
            context.statement,
            context.parameters,
        )
    
        effective_parameters: Optional[_AnyExecuteParams]
    
        if not context.executemany:
            effective_parameters = parameters[0]
        else:
            effective_parameters = parameters
    
        if self._has_events or self.engine._has_events:
            for fn in self.dispatch.before_cursor_execute:
                str_statement, effective_parameters = fn(
                    self,
                    cursor,
                    str_statement,
                    effective_parameters,
                    context,
                    context.executemany,
                )
    
        if self._echo:
            self._log_info(str_statement)
    
            stats = context._get_cache_stats()
    
            if not self.engine.hide_parameters:
                self._log_info(
                    "[%s] %r",
                    stats,
                    sql_util._repr_params(
                        effective_parameters,
                        batches=10,
                        ismulti=context.executemany,
                    ),
                )
            else:
                self._log_info(
                    "[%s] [SQL parameters hidden due to hide_parameters=True]",
                    stats,
                )
    
        evt_handled: bool = False
        try:
            if context.execute_style is ExecuteStyle.EXECUTEMANY:
                effective_parameters = cast(
                    "_CoreMultiExecuteParams", effective_parameters
                )
                if self.dialect._has_events:
                    for fn in self.dialect.dispatch.do_executemany:
                        if fn(
                            cursor,
                            str_statement,
                            effective_parameters,
                            context,
                        ):
                            evt_handled = True
                            break
                if not evt_handled:
                    self.dialect.do_executemany(
                        cursor,
                        str_statement,
                        effective_parameters,
                        context,
                    )
            elif not effective_parameters and context.no_parameters:
                if self.dialect._has_events:
                    for fn in self.dialect.dispatch.do_execute_no_params:
                        if fn(cursor, str_statement, context):
                            evt_handled = True
                            break
                if not evt_handled:
                    self.dialect.do_execute_no_params(
                        cursor, str_statement, context
                    )
            else:
                effective_parameters = cast(
                    "_CoreSingleExecuteParams", effective_parameters
                )
                if self.dialect._has_events:
                    for fn in self.dialect.dispatch.do_execute:
                        if fn(
                            cursor,
                            str_statement,
                            effective_parameters,
                            context,
                        ):
                            evt_handled = True
                            break
                if not evt_handled:
>                   self.dialect.do_execute(
                        cursor, str_statement, effective_parameters, context
                    )

/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/engine/base.py:1967: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite object at 0x7f3ec4ddc700>
cursor = <sqlite3.Cursor object at 0x7f3ec5e4d8f0>
statement = 'DELETE FROM events WHERE events.trace_id IN (?)'
parameters = ('9d7fbc0f-5bca-4cf2-adad-e229620985fd',)
context = <sqlalchemy.dialects.sqlite.base.SQLiteExecutionContext object at 0x7f3ec5e66400>

    def do_execute(self, cursor, statement, parameters, context=None):
>       cursor.execute(statement, parameters)
E       sqlite3.OperationalError: database is locked

/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/engine/default.py:924: OperationalError

The above exception was the direct cause of the following exception:

self = <sdk_cli_test.e2etests.test_trace.TestTraceEntitiesAndOperations object at 0x7f3ee25a4e50>
pf = <promptflow._sdk._pf_client.PFClient object at 0x7f3ef7236fd0>

    def test_delete_traces_with_collection_and_started_before(self, pf: PFClient) -> None:
        # mock some traces that started 2 days ago, and delete those that started 1 day ago
        mock_start_time = datetime.datetime.now() - datetime.timedelta(days=2)
        collection1, collection2 = str(uuid.uuid4()), str(uuid.uuid4())
        mock_span_for_delete_tests(collection=collection1, start_time=mock_start_time)
        mock_span_for_delete_tests(collection=collection2, start_time=mock_start_time)
        assert (
            len(pf.traces.list_line_runs(collection=collection1)) == 1
            and len(pf.traces.list_line_runs(collection=collection2)) == 1
        )
        delete_query_time = datetime.datetime.now() - datetime.timedelta(days=1)
        pf.traces.delete(collection=collection1, started_before=delete_query_time.isoformat())
        # only collection1 traces are deleted
        assert (
            len(pf.traces.list_line_runs(collection=collection1)) == 0
            and len(pf.traces.list_line_runs(collection=collection2)) == 1
        )
>       pf.traces.delete(collection=collection2, started_before=delete_query_time.isoformat())

/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_trace.py:355: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
    return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_trace_operations.py:175: in delete
    return self._delete_within_transaction(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orm/retry.py:50: in f_retry
    return f(*args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_trace_operations.py:240: in _delete_within_transaction
    event_cnt = session.query(ORMEvent).filter(ORMEvent.trace_id.in_(trace_ids)).delete()
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/orm/query.py:3161: in delete
    result: CursorResult[Any] = self.session.execute(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/orm/session.py:2351: in execute
    return self._execute_internal(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/orm/session.py:2236: in _execute_internal
    result: Result[Any] = compile_state_cls.orm_execute_statement(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/orm/bulk_persistence.py:1953: in orm_execute_statement
    return super().orm_execute_statement(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/orm/context.py:293: in orm_execute_statement
    result = conn.execute(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/engine/base.py:1418: in execute
    return meth(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/sql/elements.py:515: in _execute_on_connection
    return connection._execute_clauseelement(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/engine/base.py:1640: in _execute_clauseelement
    ret = self._execute_context(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/engine/base.py:1846: in _execute_context
    return self._exec_single_context(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/engine/base.py:1986: in _exec_single_context
    self._handle_dbapi_exception(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/engine/base.py:2353: in _handle_dbapi_exception
    raise sqlalchemy_exception.with_traceback(exc_info[2]) from e
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/engine/base.py:1967: in _exec_single_context
    self.dialect.do_execute(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite object at 0x7f3ec4ddc700>
cursor = <sqlite3.Cursor object at 0x7f3ec5e4d8f0>
statement = 'DELETE FROM events WHERE events.trace_id IN (?)'
parameters = ('9d7fbc0f-5bca-4cf2-adad-e229620985fd',)
context = <sqlalchemy.dialects.sqlite.base.SQLiteExecutionContext object at 0x7f3ec5e66400>

    def do_execute(self, cursor, statement, parameters, context=None):
>       cursor.execute(statement, parameters)
E       sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) database is locked
E       [SQL: DELETE FROM events WHERE events.trace_id IN (?)]
E       [parameters: ('9d7fbc0f-5bca-4cf2-adad-e229620985fd',)]
E       (Background on this error at: https://sqlalche.me/e/20/e3q8)

/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.9/lib/python3.9/site-packages/sqlalchemy/engine/default.py:924: OperationalError
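
The failure above is SQLite reporting `database is locked` while the trace-delete test issues a `DELETE FROM events ...` through SQLAlchemy, i.e. another connection held the write lock at that moment (the stack also shows promptflow's own retry wrapper `f_retry` in `_orm/retry.py` around the delete). The snippet below is a minimal, generic sketch of two common mitigations for this class of error with SQLite under SQLAlchemy: a DBAPI busy timeout and WAL journal mode. It is not promptflow's actual engine setup; the database path, timeout value, and table layout are illustrative assumptions only.

```python
# Sketch only: generic SQLAlchemy/SQLite settings that reduce "database is locked"
# errors under concurrent writers. Not the promptflow SDK's real configuration.
import sqlite3

from sqlalchemy import create_engine, event, text

# `timeout` is passed straight to sqlite3.connect(): a writer waits up to
# 30 seconds for a lock held by another connection before raising
# OperationalError, instead of failing immediately.
engine = create_engine(
    "sqlite:///./pf_traces.sqlite",  # hypothetical database path
    connect_args={"timeout": 30},
)


@event.listens_for(engine, "connect")
def _set_sqlite_pragmas(dbapi_connection: sqlite3.Connection, connection_record) -> None:
    # WAL mode lets readers proceed alongside a single writer, which reduces
    # (but does not eliminate) lock contention between test processes.
    cursor = dbapi_connection.cursor()
    cursor.execute("PRAGMA journal_mode=WAL;")
    cursor.close()


if __name__ == "__main__":
    with engine.begin() as conn:
        # Hypothetical schema mirroring the statement seen in the traceback.
        conn.execute(text("CREATE TABLE IF NOT EXISTS events (trace_id TEXT)"))
        conn.execute(
            text("DELETE FROM events WHERE trace_id = :tid"),
            {"tid": "9d7fbc0f-5bca-4cf2-adad-e229620985fd"},
        )
```

Even with a busy timeout and WAL, concurrent CI runs sharing one SQLite file can still collide, so retrying the transaction (as the `f_retry` frame in the stack suggests the SDK already does) remains the usual complement to these settings.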