Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

renamed xxx_node to xxx_service

  • Loading branch information...
commit 35fda702abf91792d3c4753a1bbccdb119eaf6eb 1 parent d0f3ad1
@vishvananda vishvananda authored
View
4 bin/nova-compute
@@ -22,11 +22,11 @@
"""
from nova import twistd
-from nova.compute import computenode
+from nova.compute import computeservice
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
- application = computenode.ComputeNode.create()
+ application = computeservice.ComputeService.create()
View
4 bin/nova-network
@@ -22,11 +22,11 @@
"""
from nova import twistd
-from nova.network import networknode
+from nova.network import networkservice
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
- application = networknode.NetworkNode.create()
+ application = networkservice.NetworkService.create()
View
4 bin/nova-volume
@@ -22,11 +22,11 @@
"""
from nova import twistd
-from nova.volume import volumenode
+from nova.volume import volumeservice
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
- application = volumenode.VolumeNode.create()
+ application = volumeservice.VolumeService.create()
View
17 nova/compute/computenode.py → nova/compute/computeservice.py
@@ -17,9 +17,9 @@
# under the License.
"""
-Compute Node:
+Compute Service:
- Runs on each compute node, managing the
+ Runs on each compute host, managing the
hypervisor using libvirt.
"""
@@ -32,7 +32,6 @@
import sys
from twisted.internet import defer
from twisted.internet import task
-from twisted.application import service
try:
@@ -43,14 +42,14 @@
from nova import exception
from nova import fakevirt
from nova import flags
-from nova import node
from nova import process
+from nova import service
from nova import utils
from nova.compute import disk
from nova.compute import model
from nova.compute import network
from nova.objectstore import image # for image_path flag
-from nova.volume import volumenode
+from nova.volume import volumeservice
FLAGS = flags.FLAGS
@@ -79,13 +78,13 @@ def _image_url(path):
return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path)
-class ComputeNode(node.Node):
+class ComputeService(service.Service):
"""
Manages the running instances.
"""
def __init__(self):
""" load configuration options for this node and connect to libvirt """
- super(ComputeNode, self).__init__()
+ super(ComputeService, self).__init__()
self._instances = {}
self._conn = self._get_connection()
self.instdir = model.InstanceDirectory()
@@ -222,7 +221,7 @@ def get_console_output(self, instance_id):
@exception.wrap_exception
def attach_volume(self, instance_id = None,
volume_id = None, mountpoint = None):
- volume = volumenode.get_volume(volume_id)
+ volume = volumeservice.get_volume(volume_id)
yield self._init_aoe()
yield process.simple_execute(
"sudo virsh attach-disk %s /dev/etherd/%s %s" %
@@ -243,7 +242,7 @@ def detach_volume(self, instance_id, volume_id):
""" detach a volume from an instance """
# despite the documentation, virsh detach-disk just wants the device
# name without the leading /dev/
- volume = volumenode.get_volume(volume_id)
+ volume = volumeservice.get_volume(volume_id)
target = volume['mountpoint'].rpartition('/dev/')[2]
yield process.simple_execute(
"sudo virsh detach-disk %s %s " % (instance_id, target))
View
15 nova/endpoint/cloud.py
@@ -23,7 +23,6 @@
"""
import base64
-import json
import logging
import os
import time
@@ -38,9 +37,9 @@
from nova.auth import users
from nova.compute import model
from nova.compute import network
-from nova.compute import computenode
+from nova.compute import computeservice
from nova.endpoint import images
-from nova.volume import volumenode
+from nova.volume import volumeservice
FLAGS = flags.FLAGS
@@ -76,7 +75,7 @@ def instances(self):
def volumes(self):
""" returns a list of all volumes """
for volume_id in datastore.Redis.instance().smembers("volumes"):
- volume = volumenode.get_volume(volume_id)
+ volume = volumeservice.get_volume(volume_id)
yield volume
def __str__(self):
@@ -103,7 +102,7 @@ def _get_mpi_data(self, project_id):
result = {}
for instance in self.instdir.all:
if instance['project_id'] == project_id:
- line = '%s slots=%d' % (instance['private_dns_name'], computenode.INSTANCE_TYPES[instance['instance_type']]['vcpus'])
+ line = '%s slots=%d' % (instance['private_dns_name'], computeservice.INSTANCE_TYPES[instance['instance_type']]['vcpus'])
if instance['key_name'] in result:
result[instance['key_name']].append(line)
else:
@@ -296,7 +295,7 @@ def format_volume(self, context, volume):
@rbac.allow('projectmanager', 'sysadmin')
def create_volume(self, context, size, **kwargs):
- # TODO(vish): refactor this to create the volume object here and tell volumenode to create it
+ # TODO(vish): refactor this to create the volume object here and tell volumeservice to create it
res = rpc.call(FLAGS.volume_topic, {"method": "create_volume",
"args" : {"size": size,
"user_id": context.user.id,
@@ -331,7 +330,7 @@ def _get_instance(self, context, instance_id):
raise exception.NotFound('Instance %s could not be found' % instance_id)
def _get_volume(self, context, volume_id):
- volume = volumenode.get_volume(volume_id)
+ volume = volumeservice.get_volume(volume_id)
if context.user.is_admin() or volume['project_id'] == context.project.id:
return volume
raise exception.NotFound('Volume %s could not be found' % volume_id)
@@ -578,7 +577,7 @@ def run_instances(self, context, **kwargs):
"args": {"instance_id" : inst.instance_id}})
logging.debug("Casting to node for %s's instance with IP of %s" %
(context.user.name, inst['private_dns_name']))
- # TODO: Make the NetworkComputeNode figure out the network name from ip.
+ # TODO: Make Network figure out the network name from ip.
return defer.succeed(self._format_instances(
context, reservation_id))
View
4 nova/network/networknode.py → nova/network/networkservice.py
@@ -23,12 +23,12 @@
import logging
from nova import flags
-from nova import node
+from nova import service
FLAGS = flags.FLAGS
-class NetworkNode(node.Node):
+class NetworkService(service.Service):
"""Allocates ips and sets up networks"""
def __init__(self):
View
2  nova/node.py → nova/service.py
@@ -40,7 +40,7 @@
'seconds between nodes reporting state to cloud',
lower_bound=1)
-class Node(object, service.Service):
+class Service(object, service.Service):
"""Base class for workers that run on hosts"""
@classmethod
View
8 nova/test.py
@@ -156,9 +156,9 @@ def _maybeInlineCallbacks(self, f):
Example (callback chain, ugly):
- d = self.node.terminate_instance(instance_id) # a Deferred instance
+ d = self.compute.terminate_instance(instance_id) # a Deferred instance
def _describe(_):
- d_desc = self.node.describe_instances() # another Deferred instance
+ d_desc = self.compute.describe_instances() # another Deferred instance
return d_desc
def _checkDescribe(rv):
self.assertEqual(rv, [])
@@ -169,8 +169,8 @@ def _checkDescribe(rv):
Example (inline callbacks! yay!):
- yield self.node.terminate_instance(instance_id)
- rv = yield self.node.describe_instances()
+ yield self.compute.terminate_instance(instance_id)
+ rv = yield self.compute.describe_instances()
self.assertEqual(rv, [])
If the test fits the Inline Callbacks pattern we will automatically
View
18 nova/tests/cloud_unittest.py
@@ -28,7 +28,7 @@
from nova import rpc
from nova import test
from nova.auth import users
-from nova.compute import computenode
+from nova.compute import computeservice
from nova.endpoint import api
from nova.endpoint import cloud
@@ -53,12 +53,12 @@ def setUp(self):
proxy=self.cloud)
self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))
- # set up a node
- self.node = computenode.ComputeNode()
- self.node_consumer = rpc.AdapterConsumer(connection=self.conn,
+ # set up a service
+ self.compute = computeservice.ComputeService()
+ self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.compute_topic,
- proxy=self.node)
- self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop))
+ proxy=self.compute)
+ self.injected.append(self.compute_consumer.attach_to_tornado(self.ioloop))
try:
users.UserManager.instance().create_user('admin', 'admin', 'admin')
@@ -76,11 +76,11 @@ def test_console_output(self):
logging.debug("Can't test instances without a real virtual env.")
return
instance_id = 'foo'
- inst = yield self.node.run_instance(instance_id)
+ inst = yield self.compute.run_instance(instance_id)
output = yield self.cloud.get_console_output(self.context, [instance_id])
logging.debug(output)
self.assert_(output)
- rv = yield self.node.terminate_instance(instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
def test_run_instances(self):
if FLAGS.fake_libvirt:
@@ -112,7 +112,7 @@ def test_run_instances(self):
# for instance in reservations[res_id]:
for instance in reservations[reservations.keys()[0]]:
logging.debug("Terminating instance %s" % instance['instance_id'])
- rv = yield self.node.terminate_instance(instance['instance_id'])
+ rv = yield self.compute.terminate_instance(instance['instance_id'])
def test_instance_update_state(self):
def instance(num):
View
36 nova/tests/compute_unittest.py
@@ -26,7 +26,7 @@
from nova import test
from nova import utils
from nova.compute import model
-from nova.compute import computenode
+from nova.compute import computeservice
FLAGS = flags.FLAGS
@@ -60,7 +60,7 @@ def setUp(self):
self.flags(fake_libvirt=True,
fake_storage=True,
fake_users=True)
- self.node = computenode.ComputeNode()
+ self.compute = computeservice.ComputeService()
def create_instance(self):
instdir = model.InstanceDirectory()
@@ -81,48 +81,48 @@ def create_instance(self):
def test_run_describe_terminate(self):
instance_id = self.create_instance()
- rv = yield self.node.run_instance(instance_id)
+ rv = yield self.compute.run_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
logging.info("Running instances: %s", rv)
self.assertEqual(rv[instance_id].name, instance_id)
- rv = yield self.node.terminate_instance(instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
logging.info("After terminating instances: %s", rv)
self.assertEqual(rv, {})
@defer.inlineCallbacks
def test_reboot(self):
instance_id = self.create_instance()
- rv = yield self.node.run_instance(instance_id)
+ rv = yield self.compute.run_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
- yield self.node.reboot_instance(instance_id)
+ yield self.compute.reboot_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
- rv = yield self.node.terminate_instance(instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
@defer.inlineCallbacks
def test_console_output(self):
instance_id = self.create_instance()
- rv = yield self.node.run_instance(instance_id)
+ rv = yield self.compute.run_instance(instance_id)
- console = yield self.node.get_console_output(instance_id)
+ console = yield self.compute.get_console_output(instance_id)
self.assert_(console)
- rv = yield self.node.terminate_instance(instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
@defer.inlineCallbacks
def test_run_instance_existing(self):
instance_id = self.create_instance()
- rv = yield self.node.run_instance(instance_id)
+ rv = yield self.compute.run_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
- self.assertRaises(exception.Error, self.node.run_instance, instance_id)
- rv = yield self.node.terminate_instance(instance_id)
+ self.assertRaises(exception.Error, self.compute.run_instance, instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
View
46 nova/tests/volume_unittest.py
@@ -21,8 +21,8 @@
from nova import exception
from nova import flags
from nova import test
-from nova.compute import computenode
-from nova.volume import volumenode
+from nova.compute import computeservice
+from nova.volume import volumeservice
FLAGS = flags.FLAGS
@@ -32,24 +32,24 @@ class VolumeTestCase(test.TrialTestCase):
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(VolumeTestCase, self).setUp()
- self.mynode = computenode.ComputeNode()
- self.mystorage = None
+ self.compute = computeservice.ComputeService()
+ self.volume = None
self.flags(fake_libvirt=True,
fake_storage=True)
- self.mystorage = volumenode.VolumeNode()
+ self.volume = volumeservice.VolumeService()
def test_run_create_volume(self):
vol_size = '0'
user_id = 'fake'
project_id = 'fake'
- volume_id = self.mystorage.create_volume(vol_size, user_id, project_id)
+ volume_id = self.volume.create_volume(vol_size, user_id, project_id)
# TODO(termie): get_volume returns differently than create_volume
self.assertEqual(volume_id,
- volumenode.get_volume(volume_id)['volume_id'])
+ volumeservice.get_volume(volume_id)['volume_id'])
- rv = self.mystorage.delete_volume(volume_id)
+ rv = self.volume.delete_volume(volume_id)
self.assertRaises(exception.Error,
- volumenode.get_volume,
+ volumeservice.get_volume,
volume_id)
def test_too_big_volume(self):
@@ -57,7 +57,7 @@ def test_too_big_volume(self):
user_id = 'fake'
project_id = 'fake'
self.assertRaises(TypeError,
- self.mystorage.create_volume,
+ self.volume.create_volume,
vol_size, user_id, project_id)
def test_too_many_volumes(self):
@@ -68,26 +68,26 @@ def test_too_many_volumes(self):
total_slots = FLAGS.slots_per_shelf * num_shelves
vols = []
for i in xrange(total_slots):
- vid = self.mystorage.create_volume(vol_size, user_id, project_id)
+ vid = self.volume.create_volume(vol_size, user_id, project_id)
vols.append(vid)
- self.assertRaises(volumenode.NoMoreVolumes,
- self.mystorage.create_volume,
+ self.assertRaises(volumeservice.NoMoreVolumes,
+ self.volume.create_volume,
vol_size, user_id, project_id)
for id in vols:
- self.mystorage.delete_volume(id)
+ self.volume.delete_volume(id)
def test_run_attach_detach_volume(self):
- # Create one volume and one node to test with
+ # Create one volume and one compute to test with
instance_id = "storage-test"
vol_size = "5"
user_id = "fake"
project_id = 'fake'
mountpoint = "/dev/sdf"
- volume_id = self.mystorage.create_volume(vol_size, user_id, project_id)
+ volume_id = self.volume.create_volume(vol_size, user_id, project_id)
- volume_obj = volumenode.get_volume(volume_id)
+ volume_obj = volumeservice.get_volume(volume_id)
volume_obj.start_attach(instance_id, mountpoint)
- rv = yield self.mynode.attach_volume(volume_id,
+ rv = yield self.compute.attach_volume(volume_id,
instance_id,
mountpoint)
self.assertEqual(volume_obj['status'], "in-use")
@@ -96,16 +96,16 @@ def test_run_attach_detach_volume(self):
self.assertEqual(volume_obj['mountpoint'], mountpoint)
self.assertRaises(exception.Error,
- self.mystorage.delete_volume,
+ self.volume.delete_volume,
volume_id)
- rv = yield self.mystorage.detach_volume(volume_id)
- volume_obj = volumenode.get_volume(volume_id)
+ rv = yield self.volume.detach_volume(volume_id)
+ volume_obj = volumeservice.get_volume(volume_id)
self.assertEqual(volume_obj['status'], "available")
- rv = self.mystorage.delete_volume(volume_id)
+ rv = self.volume.delete_volume(volume_id)
self.assertRaises(exception.Error,
- volumenode.get_volume,
+ volumeservice.get_volume,
volume_id)
def test_multi_node(self):
View
15 nova/volume/volumenode.py → nova/volume/volumeservice.py
@@ -29,14 +29,13 @@
import socket
import tempfile
-from twisted.application import service
from twisted.internet import defer
from nova import datastore
from nova import exception
from nova import flags
-from nova import node
from nova import process
+from nova import service
from nova import utils
from nova import validate
@@ -50,13 +49,13 @@
'Which device to export the volumes on')
flags.DEFINE_string('storage_name',
socket.gethostname(),
- 'name of this node')
+ 'name of this service')
flags.DEFINE_integer('first_shelf_id',
utils.last_octet(utils.get_my_ip()) * 10,
- 'AoE starting shelf_id for this node')
+ 'AoE starting shelf_id for this service')
flags.DEFINE_integer('last_shelf_id',
utils.last_octet(utils.get_my_ip()) * 10 + 9,
- 'AoE starting shelf_id for this node')
+ 'AoE starting shelf_id for this service')
flags.DEFINE_string('aoe_export_dir',
'/var/lib/vblade-persist/vblades',
'AoE directory where exports are created')
@@ -65,7 +64,7 @@
'Number of AoE slots per shelf')
flags.DEFINE_string('storage_availability_zone',
'nova',
- 'availability zone of this node')
+ 'availability zone of this service')
flags.DEFINE_boolean('fake_storage', False,
'Should we make real storage volumes to attach?')
@@ -82,14 +81,14 @@ def get_volume(volume_id):
return volume_class(volume_id=volume_id)
raise exception.Error("Volume does not exist")
-class VolumeNode(node.Node):
+class VolumeService(service.Service):
"""
There is one VolumeNode running on each host.
However, each VolumeNode can report on the state of
*all* volumes in the cluster.
"""
def __init__(self):
- super(VolumeNode, self).__init__()
+ super(VolumeService, self).__init__()
self.volume_class = Volume
if FLAGS.fake_storage:
FLAGS.aoe_export_dir = tempfile.mkdtemp()
Please sign in to comment.
Something went wrong with that request. Please try again.