vdk-core: add config option for logging execution result
Why?

User research indicates that the execution result is too verbose
for local runs. Users don't expect much output for successful
jobs, but do expect error output for failing jobs.

What?

Introduce the LOG_EXECUTION_RESULT config option, which enables/disables
logging of the execution result when a job completes.
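
For local runs the summary is now off by default. To turn it back on
for a single run, set the option like any other vdk config value, e.g.
via an environment variable. A minimal sketch; the
VDK_LOG_EXECUTION_RESULT name assumes vdk-core's usual VDK_-prefixed
environment-variable convention, and the job path is illustrative:

    import os
    import subprocess

    # Run a job with the execution summary enabled for this run only.
    # Assumes the option is picked up from a VDK_-prefixed env var.
    env = dict(os.environ, VDK_LOG_EXECUTION_RESULT="true")
    subprocess.run(["vdk", "run", "./example-job"], env=env, check=False)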

How was this tested?

Ran locally with successful and failing jobs
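
A functional test along the following lines could also cover this.
This is only a sketch: it assumes vdk-core's existing test utilities
(CliEntryBasedTestRunner, cli_assert_equal) and the VDK_-prefixed
environment-variable convention; the job name is illustrative.

    import os
    from unittest import mock

    from vdk.plugin.test_utils.util_funcs import CliEntryBasedTestRunner
    from vdk.plugin.test_utils.util_funcs import cli_assert_equal

    def test_log_execution_result_enabled():
        # Illustrative: "example-job" stands in for a real test job dir.
        with mock.patch.dict(os.environ, {"VDK_LOG_EXECUTION_RESULT": "true"}):
            runner = CliEntryBasedTestRunner()
            result = runner.invoke(["run", "example-job"])
            cli_assert_equal(0, result)
            assert "Data Job execution summary" in result.output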

What kind of change is this?

Feature/non-breaking

Signed-off-by: Dilyan Marinov <mdilyan@vmware.com>
Dilyan Marinov committed Oct 24, 2023
1 parent dd35f2b commit d510fb0
Showing 2 changed files with 30 additions and 18 deletions.
@@ -21,6 +21,7 @@
LOG_LEVEL_VDK = "LOG_LEVEL_VDK"
LOG_LEVEL_MODULE = "LOG_LEVEL_MODULE"
LOG_STACK_TRACE_ON_EXIT = "LOG_STACK_TRACE_ON_EXIT"
LOG_EXECUTION_RESULT = "LOG_EXECUTION_RESULT"
LOG_EXCEPTION_FORMATTER = "LOG_EXCEPTION_FORMATTER"
WORKING_DIR = "WORKING_DIR"
ATTEMPT_ID = "ATTEMPT_ID"
@@ -92,6 +93,13 @@ def vdk_configure(self, config_builder: ConfigurationBuilder) -> None:
"Controls whether the full stack trace is displayed again on exit code 1. "
"False by default, shoud be set to true in production environments for more debug output. ",
)
config_builder.add(
LOG_EXECUTION_RESULT,
False,
True,
"Controls whether the job execution result is logged on job completion. "
"False by default, shoud be set to true in production environments for more debug output. ",
)
config_builder.add(
LOG_EXCEPTION_FORMATTER,
"pretty",
40 changes: 22 additions & 18 deletions projects/vdk-core/src/vdk/internal/builtin_plugins/run/cli_run.py
@@ -120,6 +120,26 @@ def __warn_on_python_version_disparity(
"""
)

def __log_exec_result(self, execution_result):
# On some platforms, if the size of a string is too large, the
# logging module starts throwing OSError: [Errno 40] Message too long,
# so it is safer if we split large strings into smaller chunks.
string_exec_result = str(execution_result)
if len(string_exec_result) > 5000:
temp_exec_result = json.loads(string_exec_result)
steps = temp_exec_result.pop("steps_list")

log.info(
f"Data Job execution summary: {json.dumps(temp_exec_result, indent=2)}"
)

chunks = math.ceil(len(string_exec_result) / 5000)
for i in self.__split_into_chunks(exec_steps=steps, chunks=chunks):
log.info(f"Execution Steps: {json.dumps(i, indent=2)}")

else:
log.info(f"Data Job execution summary: {execution_result}")

def create_and_run_data_job(
self,
context: CoreContext,
@@ -141,24 +161,8 @@ def create_and_run_data_job(
execution_result = None
try:
execution_result = job.run(args)
# On some platforms, if the size of a string is too large, the
# logging module starts throwing OSError: [Errno 40] Message too long,
# so it is safer if we split large strings into smaller chunks.
string_exec_result = str(execution_result)
if len(string_exec_result) > 5000:
temp_exec_result = json.loads(string_exec_result)
steps = temp_exec_result.pop("steps_list")

log.info(
f"Data Job execution summary: {json.dumps(temp_exec_result, indent=2)}"
)

chunks = math.ceil(len(string_exec_result) / 5000)
for i in self.__split_into_chunks(exec_steps=steps, chunks=chunks):
log.info(f"Execution Steps: {json.dumps(i, indent=2)}")

else:
log.info(f"Data Job execution summary: {execution_result}")
if context.configuration.get_value("LOG_EXECUTION_RESULT"):
self.__log_exec_result(execution_result)
except BaseException as e:
log.error(
"\n".join(
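
Note: __log_exec_result relies on a __split_into_chunks helper that
already exists in cli_run.py and is outside this diff. The idea is to
split the steps list into as many slices as there are ~5000-character
blocks in the serialized result, so no single log record grows too
large. An illustrative stand-in (not the actual helper):

    import json
    import logging
    import math
    from typing import Any, Iterator, List

    log = logging.getLogger(__name__)

    def split_into_chunks(exec_steps: List[Any], chunks: int) -> Iterator[List[Any]]:
        # Illustrative only: yield the steps in `chunks` roughly equal slices.
        size = max(1, math.ceil(len(exec_steps) / max(chunks, 1)))
        for start in range(0, len(exec_steps), size):
            yield exec_steps[start : start + size]

    def log_steps(string_exec_result: str, steps: List[Any]) -> None:
        # Same sizing rule as __log_exec_result: one chunk per ~5000
        # characters of the serialized execution result.
        chunks = math.ceil(len(string_exec_result) / 5000)
        for chunk in split_into_chunks(exec_steps=steps, chunks=chunks):
            log.info(f"Execution Steps: {json.dumps(chunk, indent=2)}")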
