diff --git a/lib/kubernetes-deploy/kubernetes_resource/deployment.rb b/lib/kubernetes-deploy/kubernetes_resource/deployment.rb
index 54be13008..46e03b6e9 100644
--- a/lib/kubernetes-deploy/kubernetes_resource/deployment.rb
+++ b/lib/kubernetes-deploy/kubernetes_resource/deployment.rb
@@ -8,15 +8,13 @@ def sync
 
       @found = st.success?
       if @found
-        deployment_data = JSON.parse(raw_json)
-        @desired_replicas = deployment_data["spec"]["replicas"].to_i
-        @latest_rs = find_latest_rs(deployment_data)
-
-        @rollout_data = { "replicas" => 0 }.merge(deployment_data["status"]
+        @deployment_data = JSON.parse(raw_json)
+        @desired_replicas = @deployment_data["spec"]["replicas"].to_i
+        @latest_rs = find_latest_rs
+        @rollout_data = { "replicas" => 0 }.merge(@deployment_data["status"]
           .slice("replicas", "updatedReplicas", "availableReplicas", "unavailableReplicas"))
         @status = @rollout_data.map { |state_replicas, num| "#{num} #{state_replicas.chop.pluralize(num)}" }.join(", ")
-
-        conditions = deployment_data.fetch("status", {}).fetch("conditions", [])
+        conditions = @deployment_data.fetch("status", {}).fetch("conditions", [])
         @progress_condition = conditions.find { |condition| condition['type'] == 'Progressing' }
-        @progress_deadline = deployment_data['spec']['progressDeadlineSeconds']
+        @progress_deadline = @deployment_data['spec']['progressDeadlineSeconds']
       else # reset
@@ -43,10 +41,19 @@ def fetch_logs
 
     def deploy_succeeded?
       return false unless @latest_rs.present?
-      @latest_rs.deploy_succeeded? &&
-        @latest_rs.desired_replicas == @desired_replicas && # latest RS fully scaled up
-        @rollout_data["updatedReplicas"].to_i == @desired_replicas &&
-        @rollout_data["updatedReplicas"].to_i == @rollout_data["availableReplicas"].to_i
+      if min_required_rollout.blank?
+        @latest_rs.deploy_succeeded? 
&&
+          @latest_rs.desired_replicas == @desired_replicas && # latest RS fully scaled up
+          @rollout_data["updatedReplicas"].to_i == @desired_replicas &&
+          @rollout_data["updatedReplicas"].to_i == @rollout_data["availableReplicas"].to_i
+      else
+        minimum_needed = minimum_updated_replicas_to_succeeded
+
+        running_rs.size <= 2 &&
+          @latest_rs.desired_replicas >= minimum_needed &&
+          @rollout_data["updatedReplicas"].to_i >= minimum_needed &&
+          @rollout_data["availableReplicas"].to_i >= minimum_needed
+      end
     end
 
     def deploy_failed?
@@ -98,18 +105,44 @@ def deploy_failing_to_progress?
       Time.parse(@progress_condition["lastUpdateTime"]).to_i >= (@deploy_started_at - 5.seconds).to_i
     end
 
-    def find_latest_rs(deployment_data)
-      label_string = deployment_data["spec"]["selector"]["matchLabels"].map { |k, v| "#{k}=#{v}" }.join(",")
+    def all_rs_data(matchLabels)
+      label_string = matchLabels.map { |k, v| "#{k}=#{v}" }.join(",")
       raw_json, _err, st = kubectl.run("get", "replicasets", "--output=json", "--selector=#{label_string}")
-      return unless st.success?
+      return [] unless st.success?
 
-      all_rs_data = JSON.parse(raw_json)["items"]
-      current_revision = deployment_data["metadata"]["annotations"]["deployment.kubernetes.io/revision"]
+      JSON.parse(raw_json)["items"]
+    end
 
-      latest_rs_data = all_rs_data.find do |rs|
-        rs["metadata"]["ownerReferences"].any? 
{ |ref| ref["uid"] == deployment_data["metadata"]["uid"] } && + def running_rs + all_rs_data(@deployment_data["spec"]["selector"]["matchLabels"]).select do |rs| + rs["status"]["replicas"].to_i > 0 + end + end + + def min_required_rollout + @deployment_data['metadata']['annotations']['kubernetes-deploy.shopify.io/min-required-rollout'] + end + + def minimum_updated_replicas_to_succeeded + desired = @desired_replicas + + if min_required_rollout =~ /%/ + desired *= (100 - min_required_rollout.to_i) / 100.0 + else + desired -= min_required_rollout.to_i + end + + desired.to_i + end + + def find_latest_rs + current_revision = @deployment_data["metadata"]["annotations"]["deployment.kubernetes.io/revision"] + + latest_rs_data = all_rs_data(@deployment_data["spec"]["selector"]["matchLabels"]).find do |rs| + rs["metadata"]["ownerReferences"].any? { |ref| ref["uid"] == @deployment_data["metadata"]["uid"] } && rs["metadata"]["annotations"]["deployment.kubernetes.io/revision"] == current_revision end + return unless latest_rs_data.present? rs = ReplicaSet.new( diff --git a/lib/kubernetes-deploy/kubernetes_resource/pod.rb b/lib/kubernetes-deploy/kubernetes_resource/pod.rb index 7d924ed67..fd406b3d5 100644 --- a/lib/kubernetes-deploy/kubernetes_resource/pod.rb +++ b/lib/kubernetes-deploy/kubernetes_resource/pod.rb @@ -3,11 +3,9 @@ module KubernetesDeploy class Pod < KubernetesResource TIMEOUT = 10.minutes - FAILED_PHASE_NAME = "Failed" - - def initialize(namespace:, context:, definition:, logger:, parent: nil, deploy_started_at: nil) + def initialize(namespace:, context:, definition:, logger:, parent: nil, deploy_started: nil) @parent = parent - @deploy_started_at = deploy_started_at + @deploy_started = deploy_started @containers = definition.fetch("spec", {}).fetch("containers", []).map { |c| Container.new(c) } unless @containers.present? 
logger.summary.add_paragraph("Rendered template content:\n#{definition.to_yaml}") @@ -21,7 +19,7 @@ def sync(pod_data = nil) if pod_data.blank? raw_json, _err, st = kubectl.run("get", type, @name, "-a", "--output=json") pod_data = JSON.parse(raw_json) if st.success? - raise_predates_deploy_error if pod_data.present? && unmanaged? && !deploy_started? + raise_predates_deploy_error if pod_data.present? && unmanaged? && !@deploy_started end if pod_data.present? @@ -48,7 +46,8 @@ def deploy_succeeded? end def deploy_failed? - failure_message.present? + return true if @phase == "Failed" + @containers.any?(&:doomed?) end def exists? @@ -63,23 +62,19 @@ def timeout_message end def failure_message - if @phase == FAILED_PHASE_NAME - phase_problem = "Pod status: #{@status}. " + doomed_containers = @containers.select(&:doomed?) + return unless doomed_containers.present? + container_messages = doomed_containers.map do |c| + red_name = ColorizedString.new(c.name).red + "> #{red_name}: #{c.doom_reason}" end - doomed_containers = @containers.select(&:doomed?) - if doomed_containers.present? - container_problems = if unmanaged? - "The following containers encountered errors:\n" - else - "The following containers are in a state that is unlikely to be recoverable:\n" - end - doomed_containers.each do |c| - red_name = ColorizedString.new(c.name).red - container_problems += "> #{red_name}: #{c.doom_reason}\n" - end + intro = if unmanaged? + "The following containers encountered errors:" + else + "The following containers are in a state that is unlikely to be recoverable:" end - "#{phase_problem}#{container_problems}".presence + intro + "\n" + container_messages.join("\n") + "\n" end # Returns a hash in the following format: @@ -94,7 +89,7 @@ def fetch_logs "logs", @name, "--container=#{container.name}", - "--since-time=#{@deploy_started_at.to_datetime.rfc3339}", + "--since-time=#{@deploy_started.to_datetime.rfc3339}", ] cmd << "--tail=#{LOG_LINE_COUNT}" unless unmanaged? 
out, _err, _st = kubectl.run(*cmd) diff --git a/test/integration/kubernetes_deploy_test.rb b/test/integration/kubernetes_deploy_test.rb index b41528368..47f9a3ba8 100644 --- a/test/integration/kubernetes_deploy_test.rb +++ b/test/integration/kubernetes_deploy_test.rb @@ -635,6 +635,22 @@ def test_can_deploy_deployment_with_zero_replicas ]) end + def test_can_deploy_deployment_with_partial_rollout_success + result = deploy_fixtures("hello-cloud", subset: ["configmap-data.yml", "web.yml.erb"], partial_rollout_success: 2) do |fixtures| + web = fixtures["web.yml.erb"]["Deployment"].first + web["spec"]["replicas"] = 4 + end + assert_deploy_success(result) + + pods = kubeclient.get_pods(namespace: @namespace) + assert_equal 2, pods.length, "Pods were running from zero-replica deployment" + + assert_logs_match_all([ + %r{Service/web\s+Selects 2 pods}, + %r{Deployment/web\s+4 replicas} + ]) + end + def test_deploy_aborts_immediately_if_metadata_name_missing result = deploy_fixtures("hello-cloud", subset: ["configmap-data.yml"]) do |fixtures| definition = fixtures["configmap-data.yml"]["ConfigMap"].first