This repository has been archived by the owner on May 6, 2020. It is now read-only.

fix(scheduler): only get logs for run pods when possible
Fixes #897
helgi committed Jul 25, 2016
1 parent f5b5fdc commit cf16d56
Showing 1 changed file with 11 additions and 4 deletions.
15 changes: 11 additions & 4 deletions rootfs/scheduler/__init__.py
@@ -525,6 +525,12 @@ def run(self, namespace, name, image, entrypoint, command, **kwargs):
         if waited == timeout:
             raise KubeException('Timed out (20 mins) while running')
 
+        # check if it is possible to get logs
+        state = self.pod_state(self.get_pod(namespace, name).json())
+        # States below up do not have logs
+        if state < PodState.up:
+            return exit_code, 'Could not get logs. Pod is in state {}'.format(str(state))
+
         # grab log information
         log = self._pod_log(namespace, name)
         log.encoding = 'utf-8'  # defaults to "ISO-8859-1" otherwise...
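
The new guard above only works because `state < PodState.up` is an ordered comparison between pod states. A minimal sketch of that assumption, with made-up member names and values (the real PodState enum lives elsewhere in this codebase and may differ):

# Hypothetical sketch: an ordered PodState, so "states below up" can be tested with <
from enum import IntEnum


class PodState(IntEnum):
    initializing = 1   # assumed pre-run states
    starting = 2
    up = 3             # logs are only fetched from this state onward
    terminating = 4
    down = 5
    error = 6


state = PodState.starting
if state < PodState.up:
    print('Could not get logs. Pod is in state {}'.format(str(state)))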
@@ -698,8 +704,9 @@ def pod_state(self, pod):
             'Unknown': PodState.error,
         }
 
-        # being in a Pending state can mean different things, introspecting app container first
-        if pod['status']['phase'] == 'Pending':
+        # being in a Pending/ContainerCreating state can mean different things
+        # introspecting app container first
+        if pod['status']['phase'] in ['Pending', 'ContainerCreating']:
             pod_state, _ = self._pod_pending_status(pod)
         # being in a running state can mean a pod is starting, actually running or terminating
         elif pod['status']['phase'] == 'Running':
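
The widened phase check above can be exercised in isolation. A tiny sketch with a made-up pod dict shaped like the status structure the code reads (not taken from this commit):

# Hypothetical sketch: the membership test used above, against a stub pod object
pod = {'status': {'phase': 'ContainerCreating'}}
if pod['status']['phase'] in ['Pending', 'ContainerCreating']:
    print('pod is still being scheduled or its containers are being created')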
@@ -942,7 +949,7 @@ def _wait_until_pods_are_ready(self, namespace, containers, labels, desired, tim
             pods = self.get_pods(namespace, labels=labels).json()
             for pod in pods['items']:
                 # Get more information on why a pod is pending
-                if pod['status']['phase'] == 'Pending':
+                if pod['status']['phase'] in ['Pending', 'ContainerCreating']:
                     reason, message = self._pod_pending_status(pod)
                     # If pulling an image is taking long then increase the timeout
                     timeout += self._handle_pod_long_image_pulling(pod, reason)
@@ -1723,7 +1730,7 @@ def _wait_until_deployment_is_ready(self, namespace, name, **kwargs):
             pods = self.get_pods(namespace, labels=labels).json()
             for pod in pods['items']:
                 # Get more information on why a pod is pending
-                if pod['status']['phase'] == 'Pending':
+                if pod['status']['phase'] in ['Pending', 'ContainerCreating']:
                     reason, message = self._pod_pending_status(pod)
                     # If pulling an image is taking long then increase the timeout
                     timeout += self._handle_pod_long_image_pulling(pod, reason)
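
With these changes applied, `run()` returns either the pod log or an explanatory message, depending on how far the pod got. A hypothetical caller, with all argument values invented for illustration:

# Hypothetical usage sketch; `scheduler` stands in for an instance of the patched scheduler class.
exit_code, output = scheduler.run(
    'example-namespace',     # namespace
    'example-run-pod',       # pod name
    'example/image:latest',  # image
    '/bin/sh',               # entrypoint
    'echo hello',            # command
)
# `output` is the pod log when the pod reached an "up" state; for states below up it is the
# "Could not get logs. Pod is in state ..." message added by this commit.
print(exit_code, output)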
