Refactor to use -P
This is a major refactoring to use the -P option of docker, which should
simplify parts of our deployment, such as running docker containers
on another host. In support of #372
hexylena committed Sep 23, 2015
1 parent 56ac2c0 commit 4845789
Showing 3 changed files with 86 additions and 32 deletions.
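
As the commit message above notes, -P lets Docker publish every EXPOSEd container port onto a host port of its own choosing, which is then discovered with docker inspect instead of being pinned up front with -p. A minimal standalone sketch of that discovery pattern (the container ID and port numbers here are hypothetical, not taken from this change):

import json
from subprocess import check_output

# Hypothetical ID of a container started with `docker run -d -P ...`;
# in this commit the real ID comes from the stdout captured by launch().
container_id = "f3b1c2d4e5a6"

# `docker inspect` reports the host ip/port Docker assigned to each
# EXPOSEd container port.
inspect_data = json.loads(check_output(["docker", "inspect", container_id]))
ports = inspect_data[0]["NetworkSettings"]["Ports"]

# e.g. {"8888/tcp": [{"HostIp": "0.0.0.0", "HostPort": "49153"}]}
for port_name, bindings in ports.items():
    for binding in bindings or []:  # unpublished ports map to null
        print("%s -> %s:%s" % (port_name.replace("/tcp", "").replace("/udp", ""),
                               binding["HostIp"], binding["HostPort"]))
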
@@ -33,18 +33,16 @@ if hda.datatype.__class__.__name__ != "Ipynb":
else:
shutil.copy( hda.file_name, empty_nb_path )
# Add all environment variables collected from Galaxy's IE infrastructure
ie_request.launch(env_override={
'notebook_password': PASSWORD,
})
## General IE specific
# Access URLs for the notebook from within galaxy.
notebook_access_url = ie_request.url_template('${PROXY_URL}/ipython/${PORT}/notebooks/ipython_galaxy_notebook.ipynb')
notebook_login_url = ie_request.url_template('${PROXY_URL}/ipython/${PORT}/login?next=%2Fipython%2F${PORT}%2Ftree')
# Add all environment variables collected from Galaxy's IE infrastructure
ie_request.launch(env_override={
'notebook_password': PASSWORD,
})
%>
<html>
<head>
@@ -12,14 +12,6 @@ temp_dir = ie_request.temp_dir
PASSWORD = ie_request.notebook_pw
USERNAME = "galaxy"
## General IE specific
# Access URLs for the notebook from within galaxy.
# TODO: Make this work without pointing directly to IE. Currently does not work
# through proxy.
notebook_pubkey_url = ie_request.url_template('${PROXY_URL}/rstudio/${PORT}/auth-public-key')
notebook_access_url = ie_request.url_template('${PROXY_URL}/rstudio/${PORT}/')
notebook_login_url = ie_request.url_template('${PROXY_URL}/rstudio/${PORT}/auth-do-sign-in')
# Did the user give us an RData file?
if hda.datatype.__class__.__name__ == "RData":
shutil.copy( hda.file_name, os.path.join(temp_dir, '.RData') )
@@ -29,6 +21,15 @@ ie_request.launch(env_override={
'notebook_password': PASSWORD,
'cors_origin': ie_request.attr.proxy_url,
})
## General IE specific
# Access URLs for the notebook from within galaxy.
# TODO: Make this work without pointing directly to IE. Currently does not work
# through proxy.
notebook_pubkey_url = ie_request.url_template('${PROXY_URL}/rstudio/${PORT}/auth-public-key')
notebook_access_url = ie_request.url_template('${PROXY_URL}/rstudio/${PORT}/')
notebook_login_url = ie_request.url_template('${PROXY_URL}/rstudio/${PORT}/auth-do-sign-in')
%>
<html>
<head>
91 changes: 73 additions & 18 deletions lib/galaxy/web/base/interactive_environments.py
@@ -1,10 +1,11 @@
import ConfigParser

import os
import json
import stat
import random
import tempfile
from subprocess import Popen, PIPE
from subprocess import Popen, PIPE, check_output

from galaxy.util.bunch import Bunch
from galaxy import web
@@ -40,11 +41,6 @@ def __init__(self, trans, plugin):

self.load_deploy_config()
self.attr.docker_hostname = self.attr.viz_config.get("docker", "docker_hostname")
self.attr.proxy_request = trans.app.proxy_manager.setup_proxy(
trans, host=self.attr.docker_hostname
)
self.attr.proxy_url = self.attr.proxy_request[ 'proxy_url' ]
self.attr.PORT = self.attr.proxy_request[ 'proxied_port' ]

# Generate per-request passwords the IE plugin can use to configure
# the destination container.
@@ -102,7 +98,7 @@ def get_conf_dict(self):
'history_id': self.attr.history_id,
'api_key': api_key,
'remote_host': request.remote_addr,
'docker_port': self.attr.PORT,
# DOCKER_PORT is NO LONGER AVAILABLE. All IEs must update.
'cors_origin': request.host_url,
}

@@ -144,7 +140,6 @@ def url_template(self, url_template):
There are several variables accessible to the user:
- ${PROXY_URL} will be replaced with the dynamically created proxy
- ${PORT} will be replaced with the port the docker image is attached to
"""
# Figure out our substitutions

@@ -156,18 +151,12 @@ else:
else:
protocol = 'http'

if not self.attr.APACHE_URLS:
# If they are not using apache URLs, that implies there's a port attached to the host
# string, thus we replace just the first instance of host that we see.
url_template = url_template.replace('${HOST}', '${HOST}:${PORT}', 1)

url_template = url_template.replace('${PROTO}', protocol) \
.replace('${HOST}', self.attr.HOST)

# Only the following replacements are used with Galaxy dynamic proxy
# URLs
url = url_template.replace('${PROXY_URL}', str(self.attr.proxy_url)) \
.replace('${PORT}', str(self.attr.PORT))
url = url_template.replace('${PROXY_URL}', str(self.attr.proxy_url))
return url

def volume(self, host_path, container_path, **kwds):
@@ -188,13 +177,11 @@ def docker_cmd(self, env_override={}, volumes=[]):
# Then we format in the entire docker command in place of
# {docker_args}, so as to let the admin not worry about which args are
# getting passed
command = command.format(docker_args='{command_inject} {environment} -d -p {port_ext}:{port_int} -v "{temp_dir}:/import/" {volume_str} {image}')
command = command.format(docker_args='{command_inject} {environment} -d -P -v "{temp_dir}:/import/" {volume_str} {image}')
# Once that's available, we format again with all of our arguments
command = command.format(
command_inject=self.attr.viz_config.get("docker", "command_inject"),
environment=env_str,
port_ext=self.attr.PORT,
port_int=self.attr.docker_port,
temp_dir=temp_dir,
volume_str=volume_str,
image=self.attr.viz_config.get("docker", "image")
@@ -212,5 +199,73 @@ def launch(self, raw_cmd=None, env_override={}, volumes=[]):
stdout, stderr = p.communicate()
if p.returncode != 0 or len(stderr):
log.error( "%s\n%s" % (stdout, stderr) )
return None
else:
log.debug( "Container id: %s" % stdout)
port_mappings = self.get_proxied_ports(stdout)
if len(port_mappings) > 1:
log.warning("Don't know how to handle proxies to containers with multiple exposed ports. Arbitrarily choosing first")
elif len(port_mappings) == 0:
log.warning("No exposed ports to map! Images MUST EXPOSE")
return None
# Fetch the first port_mapping
(service, host_ip, host_port) = port_mappings[0]

# Now we configure our proxy_request object, manually specifying
# the port to map to, and ensure the proxy is available.
self.attr.proxy_request = self.trans.app.proxy_manager.setup_proxy(
self.trans,
host=self.attr.docker_hostname,
port=host_port,
)
# These variables then become available for use in templating URLs
self.attr.proxy_url = self.attr.proxy_request[ 'proxy_url' ]
# Commented out, but kept here so it is documented and visible that
# this variable was moved. We would usually remove commented-out
# code, but it needs to be clear where this assignment went. Remove
# at a later time.
#
# PORT is no longer exposed internally. All requests are forced to
# go through the proxy we ship.
# self.attr.PORT = self.attr.proxy_request[ 'proxied_port' ]

def get_proxied_ports(self, container_id):
"""Run docker inspect on a container to figure out which ports were
mapped where.
:type container_id: str
:param container_id: a docker container ID
:returns: a list of triples containing (internal_port, external_ip,
external_port), of which the ports are probably the only
useful information.
Code that calls this should be refactored once containers with
multiple exposed ports are supported.
"""
command = self.attr.viz_config.get("docker", "command")
command = command.replace(
"run {docker_args}",
"inspect %s" % container_id
)
output = check_output(command.split())
inspect_data = json.loads(output)
# [{
# "NetworkSettings" : {
# "Ports" : {
# "3306/tcp" : [
# {
# "HostIp" : "127.0.0.1",
# "HostPort" : "3306"
# }
# ]
mappings = []
port_mappings = inspect_data[0]['NetworkSettings']['Ports']
for port_name in port_mappings:
for binding in port_mappings[port_name]:
mappings.append((
port_name.replace('/tcp', '').replace('/udp', ''),
binding['HostIp'],
binding['HostPort']
))
return mappings
