3 fail, 5 skipped, 1 044 pass in 2h 25m 56s
24 files 24 suites 2h 25m 56s ⏱️
1 052 tests 1 044 ✅ 5 💤 3 ❌
12 504 runs 12 389 ✅ 108 💤 7 ❌
Results for commit 250f62c.
Annotations
Check warning on line 0 in tests.executor.e2etests.test_logs.TestExecutorLogs
github-actions / Test Results
1 out of 12 runs failed: test_executor_logs[print_input_flow] (tests.executor.e2etests.test_logs.TestExecutorLogs)
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS windows-latest)/test-results-executor-e2e.xml [took 19s]
Raw output
assert 52 <= 50
self = <executor.e2etests.test_logs.TestExecutorLogs object at 0x00000115B3397710>
folder_name = 'print_input_flow'
@pytest.mark.parametrize(
"folder_name",
TEST_LOGS_FLOW,
)
def test_executor_logs(self, folder_name):
logs_directory = Path(mkdtemp())
flow_run_log_path = str(logs_directory / "test_flow_run.log")
bulk_run_log_path = str(logs_directory / "test_bulk_run.log")
# flow run: test exec_line
with LogContext(flow_run_log_path):
executor = FlowExecutor.create(get_yaml_file(folder_name), {})
executor.exec_line({"text": "line_text"})
log_content = load_content(flow_run_log_path)
loggers_name_list = ["execution", "execution.flow"]
missing_loggers = [logger for logger in loggers_name_list if logger not in log_content]
assert not missing_loggers, f"Missing loggers: {missing_loggers}\nLog content:\n---\n{log_content}"
line_count = count_lines(flow_run_log_path)
assert 6 == line_count, f"Expected 6 lines in log, but got {line_count}\nLog content:\n---\n{log_content}"
# bulk run: test batch_engine.run
# setting run_mode to RunMode.Batch is a requirement to use bulk_logger
with LogContext(bulk_run_log_path, run_mode=RunMode.Batch):
self.submit_bulk_run(folder_name)
log_content = load_content(bulk_run_log_path)
loggers_name_list = ["execution", "execution.bulk"]
# bulk logger will print the average execution time and estimated time
bulk_logs_keywords = ["Average execution time for completed lines", "Estimated time for incomplete lines"]
assert all(logger in log_content for logger in loggers_name_list)
assert all(keyword in log_content for keyword in bulk_logs_keywords)
# Customer facing log is really important, so we pay the effort to update
# the test when the line count changes a lot in the future.
line_count = count_lines(bulk_run_log_path)
> assert 40 <= line_count <= 50
E assert 52 <= 50
D:\a\promptflow\promptflow\src\promptflow\tests\executor\e2etests\test_logs.py:116: AssertionError
github-actions / Test Results
3 out of 12 runs failed: test_executor_exec_line_fail_with_exception[sync_tools_failures-sync_fail-In tool raise_an_exception: dummy_input] (tests.executor.e2etests.test_executor_execution_failures.TestExecutorFailures)
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS macos-13)/test-results-executor-e2e.xml [took 0s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 0s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS windows-latest)/test-results-executor-e2e.xml [took 0s]
Raw output
assert 16 == 17
+ where 16 = len(['Traceback (most recent call last):', ' File "/Users/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/sync_fail.py", line 11, in raise_an_exception', ' raise_exception(s)', ' File "/Users/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/sync_fail.py", line 5, in raise_exception', ' raise Exception(msg)', 'Exception: In raise_exception: dummy_input', ...])
+ and 17 = len(['Traceback (most recent call last):', 'sync_fail.py", line 11, in raise_an_exception', ' raise_exception(s)', 'sync_fail.py", line 5, in raise_exception', ' raise Exception(msg)', 'Exception: In raise_exception: dummy_input', ...])
self = <executor.e2etests.test_executor_execution_failures.TestExecutorFailures object at 0x10e637550>
flow_folder = 'sync_tools_failures', failed_node_name = 'sync_fail'
message = 'In tool raise_an_exception: dummy_input'
@pytest.mark.parametrize(
"flow_folder, failed_node_name, message",
[
("sync_tools_failures", "sync_fail", "In tool raise_an_exception: dummy_input"),
("async_tools_failures", "async_fail", "In tool raise_an_exception_async: dummy_input"),
],
)
def test_executor_exec_line_fail_with_exception(self, flow_folder, failed_node_name, message):
yaml_file = get_yaml_file(flow_folder)
# Here we set raise_ex to True to make sure the exception is raised and we can check the error detail.
executor = FlowExecutor.create(yaml_file, {}, raise_ex=True)
with pytest.raises(ToolExecutionError) as e:
executor.exec_line({})
ex = e.value
assert ex.error_codes == ["UserError", "ToolExecutionError"]
ex_str = str(ex)
assert ex_str.startswith(f"Execution failure in '{failed_node_name}'")
assert message in ex_str
expected_stack_trace = expected_stack_traces[flow_folder]
stacktrace = ex.tool_traceback.split("\n")
# Remove "^^^^^^^^" lines as they are not part of actual stack trace
stacktrace = [line for line in stacktrace if "^^^^^^^^" not in line]
> assert len(stacktrace) == len(expected_stack_trace)
E assert 16 == 17
E + where 16 = len(['Traceback (most recent call last):', ' File "/Users/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/sync_fail.py", line 11, in raise_an_exception', ' raise_exception(s)', ' File "/Users/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/sync_fail.py", line 5, in raise_exception', ' raise Exception(msg)', 'Exception: In raise_exception: dummy_input', ...])
E + and 17 = len(['Traceback (most recent call last):', 'sync_fail.py", line 11, in raise_an_exception', ' raise_exception(s)', 'sync_fail.py", line 5, in raise_exception', ' raise Exception(msg)', 'Exception: In raise_exception: dummy_input', ...])
/Users/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_executor_execution_failures.py:153: AssertionError
github-actions / Test Results
3 out of 12 runs failed: test_executor_exec_line_fail_with_exception[async_tools_failures-async_fail-In tool raise_an_exception_async: dummy_input] (tests.executor.e2etests.test_executor_execution_failures.TestExecutorFailures)
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS macos-13)/test-results-executor-e2e.xml [took 0s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 0s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS windows-latest)/test-results-executor-e2e.xml [took 0s]
Raw output
assert 16 == 17
+ where 16 = len(['Traceback (most recent call last):', ' File "/Users/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/async_tools_failures/async_fail.py", line 11, in raise_an_exception_async', ' await raise_exception_async(s)', ' File "/Users/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/async_tools_failures/async_fail.py", line 5, in raise_exception_async', ' raise Exception(msg)', 'Exception: In raise_exception_async: dummy_input', ...])
+ and 17 = len(['Traceback (most recent call last):', 'async_fail.py", line 11, in raise_an_exception_async', ' await raise_exception_async(s)', 'async_fail.py", line 5, in raise_exception_async', ' raise Exception(msg)', 'Exception: In raise_exception_async: dummy_input', ...])
self = <executor.e2etests.test_executor_execution_failures.TestExecutorFailures object at 0x10e636710>
flow_folder = 'async_tools_failures', failed_node_name = 'async_fail'
message = 'In tool raise_an_exception_async: dummy_input'
@pytest.mark.parametrize(
"flow_folder, failed_node_name, message",
[
("sync_tools_failures", "sync_fail", "In tool raise_an_exception: dummy_input"),
("async_tools_failures", "async_fail", "In tool raise_an_exception_async: dummy_input"),
],
)
def test_executor_exec_line_fail_with_exception(self, flow_folder, failed_node_name, message):
yaml_file = get_yaml_file(flow_folder)
# Here we set raise_ex to True to make sure the exception is raised and we can check the error detail.
executor = FlowExecutor.create(yaml_file, {}, raise_ex=True)
with pytest.raises(ToolExecutionError) as e:
executor.exec_line({})
ex = e.value
assert ex.error_codes == ["UserError", "ToolExecutionError"]
ex_str = str(ex)
assert ex_str.startswith(f"Execution failure in '{failed_node_name}'")
assert message in ex_str
expected_stack_trace = expected_stack_traces[flow_folder]
stacktrace = ex.tool_traceback.split("\n")
# Remove "^^^^^^^^" lines as they are not part of actual stack trace
stacktrace = [line for line in stacktrace if "^^^^^^^^" not in line]
> assert len(stacktrace) == len(expected_stack_trace)
E assert 16 == 17
E + where 16 = len(['Traceback (most recent call last):', ' File "/Users/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/async_tools_failures/async_fail.py", line 11, in raise_an_exception_async', ' await raise_exception_async(s)', ' File "/Users/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/async_tools_failures/async_fail.py", line 5, in raise_exception_async', ' raise Exception(msg)', 'Exception: In raise_exception_async: dummy_input', ...])
E + and 17 = len(['Traceback (most recent call last):', 'async_fail.py", line 11, in raise_an_exception_async', ' await raise_exception_async(s)', 'async_fail.py", line 5, in raise_exception_async', ' raise Exception(msg)', 'Exception: In raise_exception_async: dummy_input', ...])
/Users/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_executor_execution_failures.py:153: AssertionError