Drop support for single-nova-consoleauth

Remove support for single-nova-consoleauth operation; this option
managed a single instance of the nova-consoleauth process across a
clustered nova-cloud-controller application using the hacluster
charm.  This proves somewhat racy on deployment, as the OCF resource
deep-checks the operation of nova-consoleauth, including connectivity
to AMQP.  If the clustering of the service occurs before other
principal relations have completed, the resource will fail to start
and the hook execution will spin, never returning.

HA deployments should always use memcached to share tokens between
instances of the nova-consoleauth daemon; if the 'ha' relation is
detected, a memcache relation is now required for charm operation.

To support evaluation of memcache relation completeness, the
memcache-specific code in InstanceConsoleContext has been split out
into a new class, RemoteMemcacheContext.
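
The split matters because of how context completeness is evaluated. A
minimal sketch of the convention, using hypothetical stand-ins rather
than the real charmhelpers classes: a context that returns an empty
dict is treated as incomplete, and the interfaces it declares can then
be reported as missing when charm status is assessed.

```python
# Illustrative sketch only; ContextGenerator and missing_interfaces are
# hypothetical stand-ins for the charmhelpers machinery.


class ContextGenerator(object):
    """Stand-in for OSContextGenerator: declares the interfaces it needs."""
    interfaces = []

    def __call__(self):
        raise NotImplementedError


class MemcacheLikeContext(ContextGenerator):
    """Yields template data only when usable relation data is present."""
    interfaces = ['memcache']

    def __init__(self, servers):
        self.servers = servers

    def __call__(self):
        if self.servers:
            return {'memcached_servers': ','.join(self.servers)}
        return {}  # empty dict -> the 'memcache' interface is incomplete


def missing_interfaces(contexts):
    """Collect interface names from contexts that produced no data."""
    missing = []
    for ctxt in contexts:
        if not ctxt():
            missing.extend(ctxt.interfaces)
    return missing


assert missing_interfaces([MemcacheLikeContext([])]) == ['memcache']
assert missing_interfaces([MemcacheLikeContext(['10.0.0.1:11211'])]) == []
```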

Existing pacemaker resources will be deleted on upgrade; units will
move into a blocked state until a relation is added to memcached.

The nova-consoleauth service is resumed on upgrade to ensure that
instances run on all nova-cloud-controller units.

Change-Id: I2ac91b2bd92269b761befeb7563ad01cc5431151
Closes-Bug: 1781620
javacruft committed Mar 6, 2019
1 parent dd3fdda commit b6e3140
Showing 7 changed files with 42 additions and 274 deletions.
11 changes: 0 additions & 11 deletions config.yaml
@@ -235,17 +235,6 @@ options:
.
Increasing this value will increase instance density on compute nodes
with an increased risk of hypervisor storage becoming full.
single-nova-consoleauth:
type: boolean
default: true
description: |
When this configuration is set to True, a single instance of
nova-consoleauth service will be running, this allows users to always
authenticate against the same instance and avoid authentications issues
when the token used was stored in a different instance.
.
If memcached is being used to store the tokens, then it's recommended to
change this configuration to False.
action-managed-upgrade:
type: boolean
default: False
19 changes: 14 additions & 5 deletions hooks/nova_cc_context.py
@@ -387,14 +387,13 @@ def __call__(self):
return ctxt


class InstanceConsoleContext(ch_context.OSContextGenerator):
interfaces = []
class RemoteMemcacheContext(ch_context.OSContextGenerator):
interfaces = ['memcache']

def __call__(self):
ctxt = {}
servers = []
try:
for rid in hookenv.relation_ids('memcache'):
for rid in hookenv.relation_ids(self.interfaces[0]):
for rel in hookenv.relations_for_id(rid):
priv_addr = rel['private-address']
# Format it as IPv6 address if needed
@@ -406,8 +405,18 @@ def __call__(self):
level='WARNING')
servers = []

ctxt['memcached_servers'] = ','.join(servers)
if servers:
return {
'memcached_servers': ','.join(servers)
}
return {}


class InstanceConsoleContext(ch_context.OSContextGenerator):
interfaces = []

def __call__(self):
ctxt = {}
# Configure nova-novncproxy https if nova-api is using https.
if ch_cluster.https():
cn = ch_ip.resolve_address(endpoint_type=ch_ip.INTERNAL)
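Note on the address handling above: per the "Format it as IPv6 address
if needed" comment, the context brackets IPv6 addresses before a
":port" suffix is appended. A standalone sketch of that behaviour
using only the standard library (the charm itself uses a charmhelpers
helper for this); the assertions mirror the unit test expectations
further down:

```python
# Stdlib re-implementation for illustration only.
import ipaddress


def format_server(private_address, port):
    """Render one memcached server entry for the template context."""
    try:
        if isinstance(ipaddress.ip_address(private_address),
                      ipaddress.IPv6Address):
            private_address = '[%s]' % private_address
    except ValueError:
        pass  # a hostname rather than an address literal; use as-is
    return '%s:%s' % (private_address, port)


assert format_server('127.0.1.1', '11211') == '127.0.1.1:11211'
assert format_server('::1', '11211') == '[::1]:11211'
```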
83 changes: 6 additions & 77 deletions hooks/nova_cc_hooks.py
@@ -63,9 +63,6 @@ def _add_path(path):
# Note that CONFIGS is now set up via resolve_CONFIGS so that it is not a
# module load time constraint.
CONFIGS = None
COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
AGENT_CA_PARAMS = 'op monitor interval="5s"'


def deferred_config(k):
@@ -284,7 +281,6 @@ def config_changed():
for unit in hookenv.related_units(rid):
compute_changed(rid, unit)

update_nova_consoleauth_config()
ncc_utils.update_aws_compat_services()

if hookenv.config('vendor-data'):
@@ -293,6 +289,8 @@ def config_changed():
ncc_utils.set_shared_metadatasecret()
for rid in hookenv.relation_ids('ha'):
ha_joined(rid)
if not ch_utils.is_unit_paused_set():
ch_host.service_resume('nova-consoleauth')


@hooks.hook('amqp-relation-joined')
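
The resume added to config_changed() above is deliberately guarded: an
operator-paused unit must not have services restarted behind the
operator's back. A minimal sketch of why the unconditional call is
safe, assuming charmhelpers' service_pause()/service_resume()
semantics (pause stops the service and disables it at boot; resume is
the exact inverse and is idempotent):

```python
from charmhelpers.contrib.openstack import utils as ch_utils
from charmhelpers.core import host as ch_host


def restore_consoleauth():
    """Hand nova-consoleauth back from pacemaker to init-system control.

    The old single-nova-consoleauth path called service_pause(), which
    stops the service and disables it at boot; service_resume() reverses
    that, and is harmless on units that never ran the pacemaker resource.
    """
    if not ch_utils.is_unit_paused_set():
        ch_host.service_resume('nova-consoleauth')
```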
@@ -781,15 +779,10 @@ def cluster_changed():
@hooks.hook('ha-relation-joined')
def ha_joined(relation_id=None):
ha_console_settings = {}
if not hookenv.config('dns-ha'):
if (hookenv.config('single-nova-consoleauth') and
common.console_attributes('protocol')):
ha_console_settings = {
'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}

ha_console_settings['delete_resources'] = [
'vip_consoleauth',
'res_nova_consoleauth'
]
settings = ch_ha_utils.generate_ha_relation_data(
'nova',
extra_settings=ha_console_settings)
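
What actually crosses the 'ha' relation here: the only
consoleauth-specific payload left is 'delete_resources', which asks
the hacluster charm to drop the old pacemaker resources if they exist,
so the request is safe on deployments that never carried them. A
sketch of the encoding, assuming the json_-prefixed key convention of
the hacluster interface (the settings shape is taken from the diff
above; the encoder below is an illustrative stand-in for
generate_ha_relation_data):

```python
import json


def encode_ha_settings(settings):
    """JSON-encode each setting the way the hacluster interface expects."""
    return {'json_%s' % key: json.dumps(value, sort_keys=True)
            for key, value in settings.items()}


print(encode_ha_settings(
    {'delete_resources': ['vip_consoleauth', 'res_nova_consoleauth']}))
# -> {'json_delete_resources': '["vip_consoleauth", "res_nova_consoleauth"]'}
```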
@@ -811,8 +804,6 @@ def ha_changed():
for rid in hookenv.relation_ids('identity-service'):
identity_joined(rid=rid)

update_nova_consoleauth_config()


@hooks.hook('shared-db-relation-broken')
@ncc_utils.service_guard(ncc_utils.guard_map, resolve_CONFIGS,
@@ -918,7 +909,6 @@ def upgrade_charm():
leader_init_db_if_ready_allowed_units()

update_nrpe_config()
update_nova_consoleauth_config()


@hooks.hook('neutron-api-relation-joined')
@@ -999,67 +989,6 @@ def zeromq_configuration_relation_changed():
CONFIGS.write(ncc_utils.NOVA_CONF)


def update_nova_consoleauth_config():
"""
Configure nova-consoleauth pacemaker resources
"""
relids = hookenv.relation_ids('ha')
if len(relids) == 0:
hookenv.log('Related to {} ha services'.format(len(relids)),
level=hookenv.DEBUG)
ha_relid = None
data = {}
else:
ha_relid = relids[0]
data = hookenv.relation_get(rid=ha_relid) or {}

# initialize keys in case this is a new dict
data.setdefault('delete_resources', [])
for k in ['colocations', 'init_services', 'resources', 'resource_params']:
data.setdefault(k, {})

if (hookenv.config('single-nova-consoleauth') and
common.console_attributes('protocol')):
for item in ['vip_consoleauth', 'res_nova_consoleauth']:
try:
data['delete_resources'].remove(item)
except ValueError:
pass # nothing to remove, we are good

# the new pcmkr resources have to be added to the existing ones
data['colocations']['vip_consoleauth'] = COLO_CONSOLEAUTH
data['init_services']['res_nova_consoleauth'] = 'nova-consoleauth'
data['resources']['res_nova_consoleauth'] = AGENT_CONSOLEAUTH
data['resource_params']['res_nova_consoleauth'] = AGENT_CA_PARAMS

for rid in hookenv.relation_ids('ha'):
hookenv.relation_set(rid, **data)

# nova-consoleauth will be managed by pacemaker, so stop it
# and prevent it to be started again at boot. (LP: #1693629).
if hookenv.relation_ids('ha'):
ch_host.service_pause('nova-consoleauth')

elif (not hookenv.config('single-nova-consoleauth') and
common.console_attributes('protocol')):
for item in ['vip_consoleauth', 'res_nova_consoleauth']:
if item not in data['delete_resources']:
data['delete_resources'].append(item)

# remove them from the rel, so they aren't recreated when the hook
# is recreated
data['colocations'].pop('vip_consoleauth', None)
data['init_services'].pop('res_nova_consoleauth', None)
data['resources'].pop('res_nova_consoleauth', None)
data['resource_params'].pop('res_nova_consoleauth', None)

for rid in hookenv.relation_ids('ha'):
hookenv.relation_set(rid, **data)

if not ch_utils.is_unit_paused_set():
ch_host.service_resume('nova-consoleauth')


def nova_api_relation_joined(rid=None):
rel_data = {
'nova-api-ready': 'yes' if ncc_utils.is_api_ready(CONFIGS) else 'no'
11 changes: 3 additions & 8 deletions hooks/nova_cc_utils.py
@@ -174,6 +174,7 @@ def get_base_resource_map():
nova_cc_context.NovaIPv6Context(),
nova_cc_context.NeutronCCContext(),
nova_cc_context.NovaConfigContext(),
nova_cc_context.RemoteMemcacheContext(),
nova_cc_context.InstanceConsoleContext(),
nova_cc_context.ConsoleSSLContext(),
nova_cc_context.CloudComputeContext(),
@@ -271,14 +272,6 @@ def resource_map(actual_services=True):
if common.console_attributes('services'):
_resource_map[NOVA_CONF]['services'] += (
common.console_attributes('services'))
# nova-consoleauth will be managed by pacemaker, if
# single-nova-consoleauth is used, then don't monitor for the
# nova-consoleauth service to be started (LP: #1660244).
if (hookenv.config('single-nova-consoleauth') and
hookenv.relation_ids('ha')):
services = _resource_map[NOVA_CONF]['services']
if 'nova-consoleauth' in services:
services.remove('nova-consoleauth')

if (hookenv.config('enable-serial-console') and cmp_os_release >= 'juno'):
_resource_map[NOVA_CONF]['services'] += SERIAL_CONSOLE['services']
@@ -1335,6 +1328,8 @@ def get_optional_interfaces():
optional_interfaces['cinder'] = ['cinder-volume-service']
if hookenv.relation_ids('neutron-api'):
optional_interfaces['neutron-api'] = ['neutron-api']
if hookenv.relation_ids('ha'):
optional_interfaces['memcache'] = ['memcache']

return optional_interfaces
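
How the new optional interface is expected to surface: once an 'ha'
relation exists, 'memcache' joins the required interface set, and a
missing memcached relation drives the unit to 'blocked', per the
commit message. A simplified sketch of that gating (names are
illustrative, not the charm's exact assess_status plumbing):

```python
# Hypothetical required-interface map; the real one lives in the charm.
REQUIRED_INTERFACES = {
    'database': ['shared-db'],
    'messaging': ['amqp'],
    'identity': ['identity-service'],
}


def effective_required_interfaces(optional_interfaces):
    """Merge always-required interfaces with the optional ones detected."""
    required = dict(REQUIRED_INTERFACES)
    required.update(optional_interfaces)
    return required


def workload_status(required, complete):
    """Return (status, message) from required vs. complete interfaces."""
    missing = [name for name, ifaces in required.items()
               if not any(iface in complete for iface in ifaces)]
    if missing:
        return 'blocked', 'Missing relations: ' + ', '.join(sorted(missing))
    return 'active', 'Unit is ready'


required = effective_required_interfaces({'memcache': ['memcache']})
print(workload_status(required, {'shared-db', 'amqp', 'identity-service'}))
# -> ('blocked', 'Missing relations: memcache')
```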

31 changes: 15 additions & 16 deletions unit_tests/test_nova_cc_contexts.py
@@ -51,44 +51,43 @@ def setUp(self):
lambda *args, **kwargs: None)
@mock.patch.object(utils, 'os_release')
@mock.patch('charmhelpers.contrib.network.ip.log')
def test_instance_console_context_without_memcache(self, os_release, log_):
def test_remote_memcache_context_without_memcache(self, os_release, log_):
self.relation_ids.return_value = 'cache:0'
self.related_units.return_value = 'memcached/0'
instance_console = context.InstanceConsoleContext()
remote_memcache = context.RemoteMemcacheContext()
os_release.return_value = 'icehouse'
self.assertEqual({'memcached_servers': ''},
instance_console())
self.assertEqual({}, remote_memcache())

@mock.patch('charmhelpers.contrib.openstack.ip.resolve_address',
lambda *args, **kwargs: None)
@mock.patch.object(utils, 'os_release')
@mock.patch('charmhelpers.contrib.network.ip.log')
def test_instance_console_context_with_memcache(self, os_release, log_):
self.check_instance_console_context_with_memcache(os_release,
'127.0.1.1',
'127.0.1.1')
def test_remote_memcache_context_with_memcache(self, os_release, log_):
self.check_remote_memcache_context_with_memcache(os_release,
'127.0.1.1',
'127.0.1.1')

@mock.patch('charmhelpers.contrib.openstack.ip.resolve_address',
lambda *args, **kwargs: None)
@mock.patch.object(utils, 'os_release')
@mock.patch('charmhelpers.contrib.network.ip.log')
def test_instance_console_context_with_memcache_ipv6(self, os_release,
log_):
self.check_instance_console_context_with_memcache(os_release, '::1',
'[::1]')
def test_remote_memcache_context_with_memcache_ipv6(self, os_release,
log_):
self.check_remote_memcache_context_with_memcache(os_release, '::1',
'[::1]')

def check_instance_console_context_with_memcache(self, os_release, ip,
formated_ip):
def check_remote_memcache_context_with_memcache(self, os_release, ip,
formated_ip):
memcached_servers = [{'private-address': formated_ip,
'port': '11211'}]
self.relation_ids.return_value = ['cache:0']
self.relations_for_id.return_value = memcached_servers
self.related_units.return_value = 'memcached/0'
instance_console = context.InstanceConsoleContext()
remote_memcache = context.RemoteMemcacheContext()
os_release.return_value = 'icehouse'
self.maxDiff = None
self.assertEqual({'memcached_servers': "%s:11211" % (formated_ip, )},
instance_console())
remote_memcache())

@mock.patch('charmhelpers.contrib.openstack.ip.config')
@mock.patch('charmhelpers.contrib.openstack.neutron.config')
