From 507d8beb2cf3c954ba12497d0a57583e778fbc83 Mon Sep 17 00:00:00 2001 From: Cesar Gray Blanco Date: Wed, 10 Jul 2024 14:44:04 -0700 Subject: [PATCH] standardize string replacement in the extension --- src/acrcssc/azext_acrcssc/_validators.py | 4 +- src/acrcssc/azext_acrcssc/cssc.py | 19 ++------ .../azext_acrcssc/helper/_deployment.py | 36 ++++++--------- .../helper/_ociartifactoperations.py | 13 +++--- .../azext_acrcssc/helper/_taskoperations.py | 46 +++++++++---------- src/acrcssc/azext_acrcssc/helper/_utility.py | 6 +-- 6 files changed, 52 insertions(+), 72 deletions(-) diff --git a/src/acrcssc/azext_acrcssc/_validators.py b/src/acrcssc/azext_acrcssc/_validators.py index f0faeb5ef24..8fb6ffbaeef 100644 --- a/src/acrcssc/azext_acrcssc/_validators.py +++ b/src/acrcssc/azext_acrcssc/_validators.py @@ -53,7 +53,7 @@ def _validate_continuouspatch_json(config_path): config = json.load(f) validate(config, CONTINUOUSPATCH_CONFIG_SCHEMA_V1) except Exception as e: - logger.debug("Error validating the continuous patch config file: %s", e) + logger.debug(f"Error validating the continuous patch config file: {e}") raise InvalidArgumentValueError("File used for --config is not a valid config JSON file. Use --help to see the schema of the config file.") finally: f.close() @@ -95,7 +95,7 @@ def _check_task_exists(cmd, registry, task_name=""): try: task = acrtask_client.get(resource_group, registry.name, task_name) except Exception as exception: - logger.debug("Failed to find task %s from registry %s : %s", task_name, registry.name, exception) + logger.debug(f"Failed to find task {task_name} from registry {registry.name} : {exception}") return False if task is not None: diff --git a/src/acrcssc/azext_acrcssc/cssc.py b/src/acrcssc/azext_acrcssc/cssc.py index 840df20bcd6..807e22eb2c0 100644 --- a/src/acrcssc/azext_acrcssc/cssc.py +++ b/src/acrcssc/azext_acrcssc/cssc.py @@ -53,12 +53,7 @@ def create_acrcssc(cmd, dryrun=False, defer_immediate_run=False): '''Create a continuous patch task in the registry.''' - logger.debug("Entering create_acrcssc with parameters: %s %s %s %s %s", - registry_name, - workflow_type, - config, - cadence, - dryrun) + logger.debug(f"Entering create_acrcssc with parameters: {registry_name} {workflow_type} {config} {cadence} {dryrun}") _perform_continuous_patch_operation(cmd, resource_group_name, registry_name, @@ -78,13 +73,7 @@ def update_acrcssc(cmd, dryrun=False, defer_immediate_run=False): '''Update a continuous patch task in the registry.''' - logger.debug('Entering update_acrcssc with parameters: %s %s %s %s %s %s', - registry_name, - workflow_type, - config, - cadence, - dryrun, - defer_immediate_run) + logger.debug(f'Entering update_acrcssc with parameters: {registry_name} {workflow_type} {config} {cadence} {dryrun} {defer_immediate_run}') _perform_continuous_patch_operation(cmd, resource_group_name, registry_name, @@ -100,7 +89,7 @@ def delete_acrcssc(cmd, registry_name, workflow_type): '''Delete a continuous patch task in the registry.''' - logger.debug("Entering delete_acrcssc with parameters: %s %s %s", resource_group_name, registry_name, workflow_type) + logger.debug(f"Entering delete_acrcssc with parameters: {resource_group_name} {registry_name} {workflow_type}") validate_task_type(workflow_type) acr_client_registries = cf_acr_registries(cmd.cli_ctx, None) @@ -119,7 +108,7 @@ def show_acrcssc(cmd, registry_name, workflow_type): '''Show a continuous patch task in the registry.''' - logger.debug('Entering show_acrcssc with parameters: %s %s', registry_name, 
workflow_type)
+    logger.debug(f'Entering show_acrcssc with parameters: {registry_name} {workflow_type}')
 
     acr_client_registries = cf_acr_registries(cmd.cli_ctx, None)
     registry = acr_client_registries.get(resource_group_name, registry_name)
diff --git a/src/acrcssc/azext_acrcssc/helper/_deployment.py b/src/acrcssc/azext_acrcssc/helper/_deployment.py
index 76f214fb2ec..be357016511 100644
--- a/src/acrcssc/azext_acrcssc/helper/_deployment.py
+++ b/src/acrcssc/azext_acrcssc/helper/_deployment.py
@@ -22,7 +22,7 @@ def validate_and_deploy_template(cmd_ctx, registry, resource_group: str, deployment_name: str, template_file_name: str, parameters: dict, dryrun: Optional[bool] = False):
-    logger.debug('Working with resource group %s, registry %s template %s', resource_group, registry, template_file_name)
+    logger.debug(f'Working with resource group {resource_group}, registry {registry} template {template_file_name}')
 
     deployment_path = os.path.dirname(
         os.path.join(
@@ -44,8 +44,8 @@
         return deploy_template(cmd_ctx, resource_group, deployment_name, template)
     except Exception as exception:
-        logger.debug('Failed to validate and deploy template: %s', exception)
-        raise AzCLIError('Failed to validate and deploy template: %s' % exception)
+        logger.debug(f'Failed to validate and deploy template: {exception}')
+        raise AzCLIError(f'Failed to validate and deploy template: {exception}')
 
 
 def validate_template(cmd_ctx, resource_group, deployment_name, template):
@@ -82,29 +82,25 @@
         # Don't expect to hit this but it appeases mypy
         raise RuntimeError(f"Validation of template {template} failed.")
 
-    logger.debug("Validation Result %s", validation_res)
+    logger.debug(f"Validation Result {validation_res}")
     if validation_res.error:
         # Validation failed so don't even try to deploy
         logger.error(
             (
-                "Template for resource group %s has failed validation. The message"
-                " was: %s. See logs for additional details."
-            ),
-            resource_group,
-            validation_res.error.message,
+                f"Template for resource group {resource_group} has failed validation. The message"
+                f" was: {validation_res.error.message}. See logs for additional details."
+            )
         )
         logger.debug(
             (
-                "Template for resource group %s failed validation."
-                " Full error details: %s"
-            ),
-            resource_group,
-            validation_res.error,
+                f"Template for resource group {resource_group} failed validation."
+                f" Full error details: {validation_res.error}"
+            )
         )
         raise RuntimeError("Azure template validation failed.")
 
     # Validation succeeded so proceed with deployment
-    logger.debug("Successfully validated resources for %s", resource_group)
+    logger.debug(f"Successfully validated resources for {resource_group}")
 
 
 def deploy_template(cmd_ctx, resource_group, deployment_name, template):
@@ -134,20 +130,16 @@
         depl_props = deployment.properties
     else:
         raise RuntimeError("The deployment has no properties.\nAborting")
-    logger.debug("Deployed: %s %s %s", deployment.name, deployment.id, depl_props)
+    logger.debug(f"Deployed: {deployment.name} {deployment.id} {depl_props}")
 
     if depl_props.provisioning_state != "Succeeded":
-        logger.debug("Failed to provision: %s", depl_props)
+        logger.debug(f"Failed to provision: {depl_props}")
         raise RuntimeError(
             "Deploy of template to resource group"
             f" {resource_group} proceeded but the provisioning"
             f" state returned is {depl_props.provisioning_state}."
             "\nAborting"
         )
-    logger.debug(
-        "Provisioning state of deployment %s : %s",
-        resource_group,
-        depl_props.provisioning_state,
-    )
+    logger.debug(f"Provisioning state of deployment {resource_group} : {depl_props.provisioning_state}")
 
     return depl_props.outputs
diff --git a/src/acrcssc/azext_acrcssc/helper/_ociartifactoperations.py b/src/acrcssc/azext_acrcssc/helper/_ociartifactoperations.py
index b6b27f9101b..b0ec88a9372 100644
--- a/src/acrcssc/azext_acrcssc/helper/_ociartifactoperations.py
+++ b/src/acrcssc/azext_acrcssc/helper/_ociartifactoperations.py
@@ -27,7 +27,7 @@
 def create_oci_artifact_continuous_patch(registry, cssc_config_file, dryrun):
-    logger.debug("Entering create_oci_artifact_continuouspatching with parameters: %s %s %s", registry.name, cssc_config_file, dryrun)
+    logger.debug(f"Entering create_oci_artifact_continuouspatching with parameters: {registry.name} {cssc_config_file} {dryrun}")
     try:
         oras_client = _oras_client(registry)
@@ -63,7 +63,7 @@ def create_oci_artifact_continuous_patch(registry, cssc_config_file, dryrun):
 
 
 def delete_oci_artifact_continuous_patch(cmd, registry, dryrun):
-    logger.debug("Entering delete_oci_artifact_continuous_patch with parameters %s %s", registry, dryrun)
+    logger.debug(f"Entering delete_oci_artifact_continuous_patch with parameters {registry} {dryrun}")
 
     resourceid = parse_resource_id(registry.id)
     subscription = resourceid[SUBSCRIPTION]
@@ -78,14 +78,13 @@ def delete_oci_artifact_continuous_patch(cmd, registry, dryrun):
             cmd=cmd,
             registry_name=registry.name,
             repository=f"{CSSC_WORKFLOW_POLICY_REPOSITORY}/{CONTINUOSPATCH_OCI_ARTIFACT_CONFIG}",
-            # image=f"{CSSC_WORKFLOW_POLICY_REPOSITORY}/{CONTINUOSPATCH_OCI_ARTIFACT_CONFIG}:{CONTINUOSPATCH_OCI_ARTIFACT_CONFIG_TAG_V1}",
             username=BEARER_TOKEN_USERNAME,
             password=token,
             yes=not dryrun)
         logger.debug("Call to acr_repository_delete completed successfully")
     except Exception as exception:
-        logger.debug("%s", exception)
-        logger.error("%s/%s:%s might not exist or attempt to delete failed.", CSSC_WORKFLOW_POLICY_REPOSITORY, CONTINUOSPATCH_OCI_ARTIFACT_CONFIG, CONTINUOSPATCH_OCI_ARTIFACT_CONFIG_TAG_V1)
+        logger.debug(exception)
+        logger.error(f"{CSSC_WORKFLOW_POLICY_REPOSITORY}/{CONTINUOSPATCH_OCI_ARTIFACT_CONFIG}:{CONTINUOSPATCH_OCI_ARTIFACT_CONFIG_TAG_V1} might not exist or attempt to delete failed.")
         raise
 
 
@@ -98,14 +97,14 @@
         client = OrasClient(hostname=str.lower(registry.login_server))
         client.login(BEARER_TOKEN_USERNAME, token)
except Exception as exception: - raise AzCLIError("Failed to login to Artifact Store ACR %s: %s " % registry.name, exception) + raise AzCLIError(f"Failed to login to Artifact Store ACR {registry.name}: {exception}") return client # Need to check on this method once, if there's alternative to this def _get_acr_token(registry_name, subscription): - logger.debug("Using CLI user credentials to log into %s", registry_name) + logger.debug(f"Using CLI user credentials to log into {registry_name}") acr_login_with_token_cmd = [ str(shutil.which("az")), "acr", "login", diff --git a/src/acrcssc/azext_acrcssc/helper/_taskoperations.py b/src/acrcssc/azext_acrcssc/helper/_taskoperations.py index 4ebb8a8ddc1..4ea1a6b3182 100644 --- a/src/acrcssc/azext_acrcssc/helper/_taskoperations.py +++ b/src/acrcssc/azext_acrcssc/helper/_taskoperations.py @@ -47,12 +47,12 @@ def create_update_continuous_patch_v1(cmd, registry, cssc_config_file, cadence, dryrun, defer_immediate_run, is_create_workflow=True): - logger.debug("Entering continuousPatchV1_creation %s %s %s", cssc_config_file, dryrun, defer_immediate_run) + logger.debug(f"Entering continuousPatchV1_creation {cssc_config_file} {dryrun} {defer_immediate_run}") resource_group = parse_resource_id(registry.id)[RESOURCE_GROUP] schedule_cron_expression = None if cadence is not None: schedule_cron_expression = convert_timespan_to_cron(cadence) - logger.debug("converted cadence to cron expression: %s", schedule_cron_expression) + logger.debug(f"converted cadence to cron expression: {schedule_cron_expression}") cssc_tasks_exists = check_continuous_task_exists(cmd, registry) if is_create_workflow: if cssc_tasks_exists: @@ -65,7 +65,7 @@ def create_update_continuous_patch_v1(cmd, registry, cssc_config_file, cadence, if cssc_config_file is not None: create_oci_artifact_continuous_patch(registry, cssc_config_file, dryrun) - logger.debug("Uploading of %s completed successfully.", cssc_config_file) + logger.debug(f"Uploading of {cssc_config_file} completed successfully.") _eval_trigger_run(cmd, registry, resource_group, defer_immediate_run) @@ -92,7 +92,7 @@ def _create_cssc_workflow(cmd, registry, schedule_cron_expression, resource_grou dry_run ) - logger.warning("Deployment of %s tasks completed successfully.", CONTINUOUS_PATCHING_WORKFLOW_NAME) + logger.warning(f"Deployment of {CONTINUOUS_PATCHING_WORKFLOW_NAME} tasks completed successfully.") def _update_cssc_workflow(cmd, registry, schedule_cron_expression, resource_group, dry_run): @@ -102,7 +102,7 @@ def _update_cssc_workflow(cmd, registry, schedule_cron_expression, resource_grou def _eval_trigger_run(cmd, registry, resource_group, defer_immediate_run): if not defer_immediate_run: - logger.warning('Triggering the %s to run immediately', CONTINUOSPATCH_TASK_SCANREGISTRY_NAME) + logger.warning(f'Triggering the {CONTINUOSPATCH_TASK_SCANREGISTRY_NAME} to run immediately') # Seen Managed Identity taking time, see if there can be an alternative (one alternative is to schedule the cron expression with delay) # NEED TO SKIP THE TIME.SLEEP IN UNIT TEST CASE OR FIND AN ALTERNATIVE SOLUITION TO MI COMPLETE time.sleep(30) @@ -115,24 +115,24 @@ def delete_continuous_patch_v1(cmd, registry, dryrun): cssc_config_exists = check_continuous_task_config_exists(cmd, registry) if not dryrun and (cssc_tasks_exists or cssc_config_exists): cssc_tasks = ', '.join(CONTINUOSPATCH_ALL_TASK_NAMES) - logger.warning("All of these tasks will be deleted: %s", cssc_tasks) + logger.warning(f"All of these tasks will be deleted: {cssc_tasks}") for 
taskname in CONTINUOSPATCH_ALL_TASK_NAMES: # bug: if one of the deletion fails, the others will not be attempted, we need to attempt to delete all of them _delete_task(cmd, registry, taskname, dryrun) - logger.warning("Task %s deleted.", taskname) + logger.warning(f"Task {taskname} deleted.") - logger.warning("Deleting %s/%s:%s", CSSC_WORKFLOW_POLICY_REPOSITORY, CONTINUOSPATCH_OCI_ARTIFACT_CONFIG, CONTINUOSPATCH_OCI_ARTIFACT_CONFIG_TAG_V1) + logger.warning(f"Deleting {CSSC_WORKFLOW_POLICY_REPOSITORY}/{CONTINUOSPATCH_OCI_ARTIFACT_CONFIG}:{CONTINUOSPATCH_OCI_ARTIFACT_CONFIG_TAG_V1}") delete_oci_artifact_continuous_patch(cmd, registry, dryrun) if not cssc_tasks_exists: - logger.warning("%s workflow does not exist", CONTINUOUS_PATCHING_WORKFLOW_NAME) + logger.warning(f"{CONTINUOUS_PATCHING_WORKFLOW_NAME} workflow does not exist") def list_continuous_patch_v1(cmd, registry): logger.debug("Entering list_continuous_patch_v1") if not check_continuous_task_exists(cmd, registry): - logger.warning("%s workflow task does not exist. Run 'az acr supply-chain workflow create' to create workflow tasks", CONTINUOUS_PATCHING_WORKFLOW_NAME) + logger.warning(f"{CONTINUOUS_PATCHING_WORKFLOW_NAME} workflow task does not exist. Run 'az acr supply-chain workflow create' to create workflow tasks") return acr_task_client = cf_acr_tasks(cmd.cli_ctx) @@ -143,7 +143,7 @@ def list_continuous_patch_v1(cmd, registry): def acr_cssc_dry_run(cmd, registry, config_file_path, is_create=True): - logger.debug("Entering acr_cssc_dry_run with parameters: %s %s", registry, config_file_path) + logger.debug(f"Entering acr_cssc_dry_run with parameters: {registry} {config_file_path}") if config_file_path is None: logger.error("--config parameter is needed to perform dry-run check.") @@ -195,7 +195,7 @@ def acr_cssc_dry_run(cmd, registry, config_file_path, is_create=True): registry_name=registry.name, run_request=request)) run_id = queued.run_id - logger.warning("Performing dry-run check for filter policy using acr task run id: %s", run_id) + logger.warning(f"Performing dry-run check for filter policy using acr task run id: {run_id}") return generate_logs(cmd, acr_run_client, run_id, registry.name, resource_group_name) finally: delete_temporary_dry_run_file(tmp_folder) @@ -228,7 +228,7 @@ def _create_encoded_task(task_file): def _update_task_schedule(cmd, registry, cron_expression, resource_group_name, dryrun): - logger.debug("converted cadence to cron_expression: %s", cron_expression) + logger.debug(f"converted cadence to cron_expression: {cron_expression}") acr_task_client = cf_acr_tasks(cmd.cli_ctx) taskUpdateParameters = acr_task_client.models.TaskUpdateParameters( trigger=acr_task_client.models.TriggerUpdateParameters( @@ -262,9 +262,9 @@ def _delete_task(cmd, registry, task_name, dryrun): _delete_task_role_assignment(cmd.cli_ctx, acr_tasks_client, registry, resource_group, task_name, dryrun) if dryrun: - logger.debug("Dry run, skipping deletion of the task: %s ", task_name) + logger.debug(f"Dry run, skipping deletion of the task: {task_name}") return None - logger.debug("Deleting task %s", task_name) + logger.debug(f"Deleting task {task_name}") LongRunningOperation(cmd.cli_ctx)( acr_tasks_client.begin_delete( resource_group, @@ -272,9 +272,9 @@ def _delete_task(cmd, registry, task_name, dryrun): task_name)) except Exception as exception: - raise AzCLIError("Failed to delete task %s from registry %s : %s" % task_name, registry.name, exception) + raise AzCLIError(f"Failed to delete task {task_name} from registry {registry.name} : 
{exception}") - logger.debug("Task %s deleted successfully", task_name) + logger.debug(f"Task {task_name} deleted successfully") def _delete_task_role_assignment(cli_ctx, acrtask_client, registry, resource_group, task_name, dryrun): @@ -284,10 +284,10 @@ def _delete_task_role_assignment(cli_ctx, acrtask_client, registry, resource_gro try: task = acrtask_client.get(resource_group, registry.name, task_name) except ResourceNotFoundError: - logger.debug("Task %s does not exist in registry %s", task_name, registry.name) + logger.debug(f"Task {task_name} does not exist in registry {registry.name}") logger.debug("Continuing with deletion") return None - + identity = task.identity if identity: @@ -298,9 +298,9 @@ def _delete_task_role_assignment(cli_ctx, acrtask_client, registry, resource_gro for role in assigned_roles: if dryrun: - logger.debug("Dry run, skipping deletion of role assignments, task: %s, role name: %s", task_name, role.name) + logger.debug(f"Dry run, skipping deletion of role assignments, task: {task_name}, role name: {role.name}") return None - logger.debug("Deleting role assignments of task %s from the registry", task_name) + logger.debug(f"Deleting role assignments of task {task_name} from the registry") role_client.role_assignments.delete( scope=registry.id, role_assignment_name=role.name @@ -413,7 +413,7 @@ def generate_logs(cmd, run_id=run_id) log_file_sas = response.log_link except (AttributeError, CloudError) as e: - logger.debug("%s Exception: %s", error_msg, e) + logger.debug(f"{error_msg} Exception: {e}") raise AzCLIError(error_msg) account_name, endpoint_suffix, container_name, blob_name, sas_token = get_blob_info( @@ -426,7 +426,7 @@ def generate_logs(cmd, while _evaluate_task_run_nonterminal_state(run_status): run_status = _get_run_status(client, resource_group_name, registry_name, run_id) if _evaluate_task_run_nonterminal_state(run_status): - logger.debug("Waiting for the task run to complete. Current status: %s", run_status) + logger.debug(f"Waiting for the task run to complete. Current status: {run_status}") time.sleep(2) _download_logs(AppendBlobService( diff --git a/src/acrcssc/azext_acrcssc/helper/_utility.py b/src/acrcssc/azext_acrcssc/helper/_utility.py index 27991d87695..d82f3a9b45b 100644 --- a/src/acrcssc/azext_acrcssc/helper/_utility.py +++ b/src/acrcssc/azext_acrcssc/helper/_utility.py @@ -51,7 +51,7 @@ def create_temporary_dry_run_file(file_location, tmp_folder): os.path.dirname( os.path.abspath(__file__)), "../templates/")) - logger.debug("templates_path: %s", templates_path) + logger.debug(f"templates_path: {templates_path}") os.makedirs(tmp_folder, exist_ok=True) file_template_copy = templates_path + "/" + TMP_DRY_RUN_FILE_NAME @@ -59,9 +59,9 @@ def create_temporary_dry_run_file(file_location, tmp_folder): shutil.copy2(file_template_copy, tmp_folder) shutil.copy2(file_location, tmp_folder) folder_contents = os.listdir(tmp_folder) - logger.debug("Copied dry run file %s", folder_contents) + logger.debug(f"Copied dry run file {folder_contents}") def delete_temporary_dry_run_file(tmp_folder): - logger.debug("Deleting contents and directory %s", tmp_folder) + logger.debug(f"Deleting contents and directory {tmp_folder}") shutil.rmtree(tmp_folder)