Skip to content

Commit

Permalink
Replace hardcoded topic strings with config constants.
Browse files Browse the repository at this point in the history
Replace hardcoded topic strings like 'volume' or 'compute'
with config constants like FLAGS.volume_topic, etc. See
bug #1057831 and bug #1061628.

Change-Id: I817ecc3cbe3245b51a0c047be58d17edfec8a838
  • Loading branch information
geekinutah committed Oct 26, 2012
1 parent 5d677e2 commit 0bf28be
Show file tree
Hide file tree
Showing 16 changed files with 46 additions and 29 deletions.
3 changes: 2 additions & 1 deletion bin/nova-cert
Expand Up @@ -39,8 +39,9 @@ from nova import utils

if __name__ == '__main__':
flags.parse_args(sys.argv)
FLAGS = flags.FLAGS
logging.setup("nova")
utils.monkey_patch()
server = service.Service.create(binary='nova-cert')
server = service.Service.create(binary='nova-cert', topic=FLAGS.cert_topic)
service.serve(server)
service.wait()
4 changes: 3 additions & 1 deletion bin/nova-compute
Expand Up @@ -41,8 +41,10 @@ from nova import utils

if __name__ == '__main__':
flags.parse_args(sys.argv)
FLAGS = flags.FLAGS
logging.setup('nova')
utils.monkey_patch()
server = service.Service.create(binary='nova-compute')
server = service.Service.create(binary='nova-compute',
topic=FLAGS.compute_topic)
service.serve(server)
service.wait()
4 changes: 3 additions & 1 deletion bin/nova-console
Expand Up @@ -39,7 +39,9 @@ from nova import service

if __name__ == '__main__':
flags.parse_args(sys.argv)
FLAGS = flags.FLAGS
logging.setup("nova")
server = service.Service.create(binary='nova-console')
server = service.Service.create(binary='nova-console',
topic=FLAGS.console_topic)
service.serve(server)
service.wait()
5 changes: 3 additions & 2 deletions bin/nova-consoleauth
Expand Up @@ -39,8 +39,9 @@ from nova import service

if __name__ == "__main__":
flags.parse_args(sys.argv)
FLAGS = flags.FLAGS
logging.setup("nova")

server = service.Service.create(binary='nova-consoleauth')
server = service.Service.create(binary='nova-consoleauth',
topic=FLAGS.consoleauth_topic)
service.serve(server)
service.wait()
4 changes: 3 additions & 1 deletion bin/nova-network
Expand Up @@ -41,8 +41,10 @@ from nova import utils

if __name__ == '__main__':
flags.parse_args(sys.argv)
FLAGS = flags.FLAGS
logging.setup("nova")
utils.monkey_patch()
server = service.Service.create(binary='nova-network')
server = service.Service.create(binary='nova-network',
topic=FLAGS.network_topic)
service.serve(server)
service.wait()
4 changes: 3 additions & 1 deletion bin/nova-scheduler
Expand Up @@ -43,8 +43,10 @@ from nova import utils

if __name__ == '__main__':
flags.parse_args(sys.argv)
FLAGS = flags.FLAGS
logging.setup("nova")
utils.monkey_patch()
server = service.Service.create(binary='nova-scheduler')
server = service.Service.create(binary='nova-scheduler',
topic=FLAGS.scheduler_topic)
service.serve(server)
service.wait()
4 changes: 3 additions & 1 deletion bin/nova-volume
Expand Up @@ -41,8 +41,10 @@ from nova import utils

if __name__ == '__main__':
flags.parse_args(sys.argv)
FLAGS = flags.FLAGS
logging.setup("nova")
utils.monkey_patch()
server = service.Service.create(binary='nova-volume')
server = service.Service.create(binary='nova-volume',
topic=FLAGS.volume_topic)
service.serve(server)
service.wait()
Expand Up @@ -82,7 +82,7 @@ def _get_audit_task_logs(self, context, begin=None, end=None,
# We do this this way to include disabled compute services,
# which can have instances on them. (mdragon)
services = [svc for svc in db.service_get_all(context)
if svc['topic'] == 'compute']
if svc['topic'] == FLAGS.compute_topic]
hosts = set(serv['host'] for serv in services)
seen_hosts = set()
done_hosts = set()
Expand Down
9 changes: 5 additions & 4 deletions nova/db/sqlalchemy/api.py
Expand Up @@ -331,7 +331,8 @@ def service_destroy(context, service_id):
service_ref = service_get(context, service_id, session=session)
service_ref.delete(session=session)

if service_ref.topic == 'compute' and service_ref.compute_node:
if (service_ref.topic == FLAGS.compute_topic and
service_ref.compute_node):
for c in service_ref.compute_node:
c.delete(session=session)

Expand Down Expand Up @@ -387,7 +388,7 @@ def service_get_all_compute_by_host(context, host):
result = model_query(context, models.Service, read_deleted="no").\
options(joinedload('compute_node')).\
filter_by(host=host).\
filter_by(topic="compute").\
filter_by(topic=FLAGS.compute_topic).\
all()

if not result:
Expand Down Expand Up @@ -420,7 +421,7 @@ def service_get_all_compute_sorted(context):
# (SELECT host, SUM(instances.vcpus) AS instance_cores
# FROM instances GROUP BY host) AS inst_cores
# ON services.host = inst_cores.host
topic = 'compute'
topic = FLAGS.compute_topic
label = 'instance_cores'
subq = model_query(context, models.Instance.host,
func.sum(models.Instance.vcpus).label(label),
Expand All @@ -438,7 +439,7 @@ def service_get_all_compute_sorted(context):
def service_get_all_volume_sorted(context):
session = get_session()
with session.begin():
topic = 'volume'
topic = FLAGS.volume_topic
label = 'volume_gigabytes'
subq = model_query(context, models.Volume.host,
func.sum(models.Volume.size).label(label),
Expand Down
6 changes: 3 additions & 3 deletions nova/scheduler/chance.py
Expand Up @@ -65,8 +65,8 @@ def schedule_run_instance(self, context, request_spec,
for num, instance_uuid in enumerate(instance_uuids):
request_spec['instance_properties']['launch_index'] = num
try:
host = self._schedule(context, 'compute', request_spec,
filter_properties)
host = self._schedule(context, FLAGS.compute_topic,
request_spec, filter_properties)
updated_instance = driver.instance_update_db(context,
instance_uuid)
self.compute_rpcapi.run_instance(context,
Expand All @@ -88,7 +88,7 @@ def schedule_prep_resize(self, context, image, request_spec,
filter_properties, instance, instance_type,
reservations):
"""Select a target for resize."""
host = self._schedule(context, 'compute', request_spec,
host = self._schedule(context, FLAGS.compute_topic, request_spec,
filter_properties)
self.compute_rpcapi.prep_resize(context, image, instance,
instance_type, host, reservations)
Expand Down
8 changes: 4 additions & 4 deletions nova/scheduler/driver.py
Expand Up @@ -96,7 +96,7 @@ def cast_to_volume_host(context, host, method, **kwargs):
db.volume_update(context, volume_id,
{'host': host, 'scheduled_at': now})
rpc.cast(context,
rpc.queue_get_for(context, 'volume', host),
rpc.queue_get_for(context, FLAGS.volume_topic, host),
{"method": method, "args": kwargs})
LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())

Expand All @@ -119,7 +119,7 @@ def cast_to_compute_host(context, host, method, **kwargs):
instance_update_db(context, instance_uuid)

rpc.cast(context,
rpc.queue_get_for(context, 'compute', host),
rpc.queue_get_for(context, FLAGS.compute_topic, host),
{"method": method, "args": kwargs})
LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())

Expand All @@ -128,8 +128,8 @@ def cast_to_host(context, topic, host, method, **kwargs):
"""Generic cast to host"""

topic_mapping = {
"compute": cast_to_compute_host,
"volume": cast_to_volume_host}
FLAGS.compute_topic: cast_to_compute_host,
FLAGS.volume_topic: cast_to_volume_host}

func = topic_mapping.get(topic)
if func:
Expand Down
11 changes: 6 additions & 5 deletions nova/scheduler/filter_scheduler.py
Expand Up @@ -69,8 +69,9 @@ def schedule_run_instance(self, context, request_spec,
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.run_instance.start', notifier.INFO, payload)

weighted_hosts = self._schedule(context, "compute", request_spec,
filter_properties, instance_uuids)
weighted_hosts = self._schedule(context, FLAGS.compute_topic,
request_spec, filter_properties,
instance_uuids)

# NOTE(comstud): Make sure we do not pass this through. It
# contains an instance of RpcContext that cannot be serialized.
Expand Down Expand Up @@ -115,7 +116,7 @@ def schedule_prep_resize(self, context, image, request_spec,
the prep_resize operation to it.
"""

hosts = self._schedule(context, 'compute', request_spec,
hosts = self._schedule(context, FLAGS.compute_topic, request_spec,
filter_properties, [instance['uuid']])
if not hosts:
raise exception.NoValidHost(reason="")
Expand Down Expand Up @@ -219,7 +220,7 @@ def _schedule(self, context, topic, request_spec, filter_properties,
ordered by their fitness.
"""
elevated = context.elevated()
if topic != "compute":
if topic != FLAGS.compute_topic:
msg = _("Scheduler only understands Compute nodes (for now)")
raise NotImplementedError(msg)

Expand Down Expand Up @@ -299,7 +300,7 @@ def get_cost_functions(self, topic=None):
"""
if topic is None:
# Schedulers only support compute right now.
topic = "compute"
topic = FLAGS.compute_topic
if topic in self.cost_function_cache:
return self.cost_function_cache[topic]

Expand Down
4 changes: 3 additions & 1 deletion nova/scheduler/filters/compute_filter.py
Expand Up @@ -13,10 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.

from nova import flags
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova import utils

FLAGS = flags.FLAGS

LOG = logging.getLogger(__name__)

Expand All @@ -27,7 +29,7 @@ class ComputeFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Returns True for only active compute nodes"""
instance_type = filter_properties.get('instance_type')
if host_state.topic != 'compute' or not instance_type:
if host_state.topic != FLAGS.compute_topic or not instance_type:
return True
capabilities = host_state.capabilities
service = host_state.service
Expand Down
2 changes: 1 addition & 1 deletion nova/scheduler/filters/core_filter.py
Expand Up @@ -37,7 +37,7 @@ class CoreFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Return True if host has sufficient CPU cores."""
instance_type = filter_properties.get('instance_type')
if host_state.topic != 'compute' or not instance_type:
if host_state.topic != FLAGS.compute_topic or not instance_type:
return True

if not host_state.vcpus_total:
Expand Down
2 changes: 1 addition & 1 deletion nova/scheduler/host_manager.py
Expand Up @@ -337,7 +337,7 @@ def get_all_host_states(self, context, topic):
with the instance (in case the InstanceType changed since the
instance was created)."""

if topic != 'compute':
if topic != FLAGS.compute_topic:
raise NotImplementedError(_(
"host_manager only implemented for 'compute'"))

Expand Down
3 changes: 2 additions & 1 deletion nova/service.py
Expand Up @@ -479,7 +479,8 @@ def create(cls, host=None, binary=None, topic=None, manager=None,
if not topic:
topic = binary.rpartition('nova-')[2]
if not manager:
manager = FLAGS.get('%s_manager' % topic, None)
manager = FLAGS.get('%s_manager' %
binary.rpartition('nova-')[2], None)
if report_interval is None:
report_interval = FLAGS.report_interval
if periodic_interval is None:
Expand Down

0 comments on commit 0bf28be

Please sign in to comment.