This repository has been archived by the owner on Oct 3, 2020. It is now read-only.

Commit 9eab2b2

use f-literals
hjacobs committed Apr 10, 2020
1 parent 3d04d91 commit 9eab2b2
Showing 3 changed files with 22 additions and 95 deletions.
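For context before the diffs: the commit mechanically rewrites printf-style logging calls as f-string literals. The following is a minimal illustrative sketch of the pattern, not code taken from the repository; the variable values are made up.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Hypothetical values, for illustration only.
kind, namespace, name, replicas = "Deployment", "default", "my-app", 3

# Before: printf-style format string, values passed as separate arguments.
logger.info("Scaling down %s %s/%s to %s replicas", kind, namespace, name, replicas)

# After: an f-string literal builds the complete message at the call site.
logger.info(f"Scaling down {kind} {namespace}/{name} to {replicas} replicas")

Both calls produce the same message; the f-string form simply keeps each placeholder next to the value it formats.
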
kube_downscaler/main.py (2 changes: 1 addition & 1 deletion)
@@ -83,7 +83,7 @@ def run_loop(
                 deployment_time_annotation=deployment_time_annotation,
             )
         except Exception as e:
-            logger.exception("Failed to autoscale : %s", e)
+            logger.exception(f"Failed to autoscale: {e}")
         if run_once or handler.shutdown_now:
             return
         with handler.safe_exit():
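A side note on the hunk above, assuming only standard-library behavior: logging.Logger.exception() always logs at ERROR level and appends the current traceback, so the f-string controls only the first line of the record. A minimal sketch:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("kube_downscaler.main")

try:
    raise ValueError("boom")
except Exception as e:
    # The traceback is appended automatically; the f-string supplies
    # only the leading message line of the log record.
    logger.exception(f"Failed to autoscale: {e}")
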
kube_downscaler/scaler.py (113 changes: 20 additions & 93 deletions)
@@ -14,7 +14,6 @@
 from kube_downscaler.helper import matches_time_spec
 from kube_downscaler.resources.stack import Stack

-logger = logging.getLogger(__name__)
 ORIGINAL_REPLICAS_ANNOTATION = "downscaler/original-replicas"
 FORCE_UPTIME_ANNOTATION = "downscaler/force-uptime"
 UPSCALE_PERIOD_ANNOTATION = "downscaler/upscale-period"
@@ -34,6 +33,8 @@
     "%Y-%m-%d",
 ]

+logger = logging.getLogger(__name__)
+

 def parse_time(timestamp: str) -> datetime.datetime:
     for fmt in TIMESTAMP_FORMATS:
@@ -76,7 +77,7 @@ def pods_force_uptime(api, namespace: str):
         if pod.obj.get("status", {}).get("phase") in ("Succeeded", "Failed"):
             continue
         if pod.annotations.get(FORCE_UPTIME_ANNOTATION, "").lower() == "true":
-            logger.info("Forced uptime because of %s/%s", pod.namespace, pod.name)
+            logger.info(f"Forced uptime because of {pod.namespace}/{pod.name}")
             return True
     return False

@@ -123,36 +124,20 @@ def get_replicas(
     if resource.kind == "CronJob":
         suspended = resource.obj["spec"]["suspend"]
         replicas = 0 if suspended else 1
+        state = "suspended" if suspended else "not suspended"
+        original_state = "suspended" if original_replicas == 0 else "not suspended"
         logger.debug(
-            "%s %s/%s is %s (original: %s, uptime: %s)",
-            resource.kind,
-            resource.namespace,
-            resource.name,
-            "suspended" if suspended else "not suspended",
-            "suspended" if original_replicas == 0 else "not suspended",
-            uptime,
+            f"{resource.kind} {resource.namespace}/{resource.name} is {state} (original: {original_state}, uptime: {uptime})"
         )
     elif resource.kind == "HorizontalPodAutoscaler":
         replicas = resource.obj["spec"]["minReplicas"]
         logger.debug(
-            "%s %s/%s has %s minReplicas (original: %s, uptime: %s)",
-            resource.kind,
-            resource.namespace,
-            resource.name,
-            replicas,
-            original_replicas,
-            uptime,
+            f"{resource.kind} {resource.namespace}/{resource.name} has {replicas} minReplicas (original: {original_replicas}, uptime: {uptime})"
         )
     else:
         replicas = resource.replicas
         logger.debug(
-            "%s %s/%s has %s replicas (original: %s, uptime: %s)",
-            resource.kind,
-            resource.namespace,
-            resource.name,
-            replicas,
-            original_replicas,
-            uptime,
+            f"{resource.kind} {resource.namespace}/{resource.name} has {replicas} replicas (original: {original_replicas}, uptime: {uptime})"
         )
     return replicas

@@ -168,36 +153,17 @@ def scale_up(
         resource.obj["spec"]["suspend"] = False
         resource.obj["spec"]["startingDeadlineSeconds"] = 0
         logger.info(
-            "Unsuspending %s %s/%s (uptime: %s, downtime: %s)",
-            resource.kind,
-            resource.namespace,
-            resource.name,
-            uptime,
-            downtime,
+            f"Unsuspending {resource.kind} {resource.namespace}/{resource.name} (uptime: {uptime}, downtime: {downtime})"
         )
     elif resource.kind == "HorizontalPodAutoscaler":
         resource.obj["spec"]["minReplicas"] = original_replicas
         logger.info(
-            "Scaling up %s %s/%s from %s to %s minReplicas (uptime: %s, downtime: %s)",
-            resource.kind,
-            resource.namespace,
-            resource.name,
-            replicas,
-            original_replicas,
-            uptime,
-            downtime,
+            f"Scaling up {resource.kind} {resource.namespace}/{resource.name} from {replicas} to {original_replicas} minReplicas (uptime: {uptime}, downtime: {downtime})"
         )
     else:
         resource.replicas = original_replicas
         logger.info(
-            "Scaling up %s %s/%s from %s to %s replicas (uptime: %s, downtime: %s)",
-            resource.kind,
-            resource.namespace,
-            resource.name,
-            replicas,
-            original_replicas,
-            uptime,
-            downtime,
+            f"Scaling up {resource.kind} {resource.namespace}/{resource.name} from {replicas} to {original_replicas} replicas (uptime: {uptime}, downtime: {downtime})"
         )
     resource.annotations[ORIGINAL_REPLICAS_ANNOTATION] = None

@@ -209,36 +175,17 @@ def scale_down(
     if resource.kind == "CronJob":
         resource.obj["spec"]["suspend"] = True
         logger.info(
-            "Suspending %s %s/%s (uptime: %s, downtime: %s)",
-            resource.kind,
-            resource.namespace,
-            resource.name,
-            uptime,
-            downtime,
+            f"Suspending {resource.kind} {resource.namespace}/{resource.name} (uptime: {uptime}, downtime: {downtime})"
         )
     elif resource.kind == "HorizontalPodAutoscaler":
         resource.obj["spec"]["minReplicas"] = target_replicas
         logger.info(
-            "Scaling down %s %s/%s from %s to %s minReplicas (uptime: %s, downtime: %s)",
-            resource.kind,
-            resource.namespace,
-            resource.name,
-            replicas,
-            target_replicas,
-            uptime,
-            downtime,
+            f"Scaling down {resource.kind} {resource.namespace}/{resource.name} from {replicas} to {target_replicas} minReplicas (uptime: {uptime}, downtime: {downtime})"
         )
     else:
         resource.replicas = target_replicas
         logger.info(
-            "Scaling down %s %s/%s from %s to %s replicas (uptime: %s, downtime: %s)",
-            resource.kind,
-            resource.namespace,
-            resource.name,
-            replicas,
-            target_replicas,
-            uptime,
-            downtime,
+            f"Scaling down {resource.kind} {resource.namespace}/{resource.name} from {replicas} to {target_replicas} replicas (uptime: {uptime}, downtime: {downtime})"
         )
     resource.annotations[ORIGINAL_REPLICAS_ANNOTATION] = str(replicas)

@@ -284,10 +231,7 @@ def autoscale_resource(

         if exclude and not original_replicas:
             logger.debug(
-                "%s %s/%s was excluded",
-                resource.kind,
-                resource.namespace,
-                resource.name,
+                f"{resource.kind} {resource.namespace}/{resource.name} was excluded"
             )
         else:
             ignore = False
@@ -316,11 +260,7 @@
             else:
                 ignore = True
             logger.debug(
-                "Periods checked: upscale=%s, downscale=%s, ignore=%s, is_uptime=%s",
-                upscale_period,
-                downscale_period,
-                ignore,
-                is_uptime,
+                f"Periods checked: upscale={upscale_period}, downscale={downscale_period}, ignore={ignore}, is_uptime={is_uptime}"
             )
         else:
             uptime = resource.annotations.get(UPTIME_ANNOTATION, default_uptime)
@@ -354,32 +294,21 @@
                 resource, grace_period, now, deployment_time_annotation
             ):
                 logger.info(
-                    "%s %s/%s within grace period (%ds), not scaling down (yet)",
-                    resource.kind,
-                    resource.namespace,
-                    resource.name,
-                    grace_period,
+                    f"{resource.kind} {resource.namespace}/{resource.name} within grace period ({grace_period}s), not scaling down (yet)"
                 )
             else:
                 scale_down(resource, replicas, downtime_replicas, uptime, downtime)
                 update_needed = True
         if update_needed:
             if dry_run:
                 logger.info(
-                    "**DRY-RUN**: would update %s %s/%s",
-                    resource.kind,
-                    resource.namespace,
-                    resource.name,
+                    f"**DRY-RUN**: would update {resource.kind} {resource.namespace}/{resource.name}"
                 )
             else:
                 resource.update()
     except Exception as e:
         logger.exception(
-            "Failed to process %s %s/%s : %s",
-            resource.kind,
-            resource.namespace,
-            resource.name,
-            str(e),
+            f"Failed to process {resource.kind} {resource.namespace}/{resource.name}: {e}"
         )


@@ -403,9 +332,7 @@ def autoscale_resources(
     for resource in kind.objects(api, namespace=(namespace or pykube.all)):
         if resource.namespace in exclude_namespaces or resource.name in exclude_names:
             logger.debug(
-                "Resource %s was excluded (either resource itself or namespace %s are excluded)",
-                resource.name,
-                namespace,
+                f"Resource {resource.name} was excluded (either resource itself or namespace {resource.namespace} are excluded)"
             )
             continue

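One behavioral difference worth noting for the many logger.debug() calls rewritten above: with "%s"-style arguments the message is only interpolated if the record is actually emitted, while an f-string is evaluated before logging even checks the level. A minimal standard-library sketch; the Chatty class is invented for illustration.

import logging

logging.basicConfig(level=logging.INFO)  # DEBUG records are filtered out
logger = logging.getLogger("kube_downscaler.scaler")

class Chatty:
    def __str__(self):
        print("__str__ was called")
        return "chatty"

obj = Chatty()

# Deferred %-style formatting: __str__ never runs, because the DEBUG
# record is dropped before the message is built.
logger.debug("value is %s", obj)

# f-string: __str__ runs at the call site, even though the record is
# still dropped afterwards.
logger.debug(f"value is {obj}")

For short status messages like these the extra formatting work is negligible, so the readability of the f-string form usually wins.
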
tests/test_autoscale_resource.py (2 changes: 1 addition & 1 deletion)
@@ -43,7 +43,7 @@ def test_swallow_exception(resource, caplog):
     assert resource.replicas == 1
     resource.update.assert_not_called()
     # check that the failure was logged
-    msg = "Failed to process MockResource mock/res-1 : time data 'invalid-timestamp!' does not match any format (%Y-%m-%dT%H:%M:%SZ, %Y-%m-%dT%H:%M, %Y-%m-%d %H:%M, %Y-%m-%d)"
+    msg = "Failed to process MockResource mock/res-1: time data 'invalid-timestamp!' does not match any format (%Y-%m-%dT%H:%M:%SZ, %Y-%m-%dT%H:%M, %Y-%m-%d %H:%M, %Y-%m-%d)"
     assert caplog.record_tuples == [("kube_downscaler.scaler", logging.ERROR, msg)]

