From 8e28dd8331f99223696ab6656cd555be12c28e85 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Fri, 8 Oct 2010 17:57:13 -0700 Subject: [PATCH 001/147] Twisted pidfile and other flag parameters simply do not function on Windows. --- nova/twistd.py | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/nova/twistd.py b/nova/twistd.py index 9511c231c71e..29d75328320d 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -224,21 +224,22 @@ def serve(filename): logging.getLogger('amqplib').setLevel(logging.WARN) FLAGS.python = filename FLAGS.no_save = True - if not FLAGS.pidfile: - FLAGS.pidfile = '%s.pid' % name - elif FLAGS.pidfile.endswith('twistd.pid'): - FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name) - # NOTE(vish): if we're running nodaemon, redirect the log to stdout - if FLAGS.nodaemon and not FLAGS.logfile: - FLAGS.logfile = "-" - if not FLAGS.logfile: - FLAGS.logfile = '%s.log' % name - elif FLAGS.logfile.endswith('twistd.log'): - FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name) - if not FLAGS.prefix: - FLAGS.prefix = name - elif FLAGS.prefix.endswith('twisted'): - FLAGS.prefix = FLAGS.prefix.replace('twisted', name) + if sys.platform != 'win32': + if not FLAGS.pidfile: + FLAGS.pidfile = '%s.pid' % name + elif FLAGS.pidfile.endswith('twistd.pid'): + FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name) + # NOTE(vish): if we're running nodaemon, redirect the log to stdout + if FLAGS.nodaemon and not FLAGS.logfile: + FLAGS.logfile = "-" + if not FLAGS.logfile: + FLAGS.logfile = '%s.log' % name + elif FLAGS.logfile.endswith('twistd.log'): + FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name) + if not FLAGS.prefix: + FLAGS.prefix = name + elif FLAGS.prefix.endswith('twisted'): + FLAGS.prefix = FLAGS.prefix.replace('twisted', name) action = 'start' if len(argv) > 1: From b578047ec26ac7d0ad26ccaab8b596ba5373b278 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Fri, 8 Oct 2010 17:57:49 -0700 Subject: [PATCH 002/147] hyper-v driver created --- nova/virt/hyperv.py | 387 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 387 insertions(+) create mode 100644 nova/virt/hyperv.py diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py new file mode 100644 index 000000000000..91b86e2658df --- /dev/null +++ b/nova/virt/hyperv.py @@ -0,0 +1,387 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Cloud.com, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A connection to Hyper-V . 
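+Requires the Python 'wmi' package. The WMI monikers used by this driver
+('//./root/virtualization' and '//./root/cimv2') address the local
+machine, so it is assumed to run on the Hyper-V host itself.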
+ +""" + +import os +import logging +import wmi +import time + +from twisted.internet import defer + +from nova import flags +from nova.auth.manager import AuthManager +from nova.compute import power_state +from nova.virt import images + + +FLAGS = flags.FLAGS + + +HYPERV_POWER_STATE = { + 3 : power_state.SHUTDOWN, + 2 : power_state.RUNNING, + 32768 : power_state.PAUSED, + 32768: power_state.PAUSED, # TODO + 3 : power_state.CRASHED +} + +REQ_POWER_STATE = { + 'Enabled' : 2, + 'Disabled': 3, + 'Reboot' : 10, + 'Reset' : 11, + 'Paused' : 32768, + 'Suspended': 32769 +} + + +def get_connection(_): + return HyperVConnection() + + +class HyperVConnection(object): + def __init__(self): + self._conn = wmi.WMI(moniker = '//./root/virtualization') + self._cim_conn = wmi.WMI(moniker = '//./root/cimv2') + + def list_instances(self): + vms = [v.ElementName \ + for v in self._conn.Msvm_ComputerSystem(['ElementName'])] + return vms + + @defer.inlineCallbacks + def spawn(self, instance): + vm = yield self._lookup(instance.name) + if vm is not None: + raise Exception('Attempted to create non-unique name %s' % + instance.name) + + user = AuthManager().get_user(instance['user_id']) + project = AuthManager().get_project(instance['project_id']) + vhdfile = os.path.join(FLAGS.instances_path, instance['str_id'])+".vhd" + yield images.fetch(instance['image_id'], vhdfile, user, project) + + try: + yield self._create_vm(instance) + + yield self._create_disk(instance['name'], vhdfile) + yield self._create_nic(instance['name'], instance['mac_address']) + + logging.debug ('Starting VM %s ', instance.name) + yield self._set_vm_state(instance['name'], 'Enabled') + logging.info('Started VM %s ', instance.name) + except Exception as exn: + logging.error('spawn vm failed: %s', exn) + self.destroy(instance) + + def _create_vm(self, instance): + """Create a VM record. 
""" + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + + vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new() + vs_gs_data.ElementName = instance['name'] + (job, ret_val) = vs_man_svc.DefineVirtualSystem( + [], None, vs_gs_data.GetText_(1))[1:] + if (ret_val == 4096 ): #WMI job started + success = self._check_job_status(job) + else: + success = (ret_val == 0) + + if not success: + raise Exception('Failed to create VM %s', instance.name) + + logging.debug('Created VM %s...', instance.name) + vm = self._conn.Msvm_ComputerSystem (ElementName=instance.name)[0] + + vmsettings = vm.associators(wmi_result_class= + 'Msvm_VirtualSystemSettingData') + vmsetting = [s for s in vmsettings + if s.SettingType == 3][0] #avoid snapshots + memsetting = vmsetting.associators(wmi_result_class= + 'Msvm_MemorySettingData')[0] + #No Dynamic Memory + mem = long(str(instance['memory_mb'])) + memsetting.VirtualQuantity = mem + memsetting.Reservation = mem + memsetting.Limit = mem + + (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( + vm.path_(), [memsetting.GetText_(1)]) + + logging.debug('Set memory for vm %s...', instance.name) + procsetting = vmsetting.associators(wmi_result_class= + 'Msvm_ProcessorSettingData')[0] + vcpus = long(str(instance['vcpus'])) + #vcpus = 1 + procsetting.VirtualQuantity = vcpus + procsetting.Reservation = vcpus + procsetting.Limit = vcpus + + (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( + vm.path_(), [procsetting.GetText_(1)]) + + logging.debug('Set vcpus for vm %s...', instance.name) + + + def _create_disk(self, vm_name, vhdfile): + """Create a disk and attach it to the vm""" + logging.debug("Creating disk for %s by attaching disk file %s", \ + vm_name, vhdfile) + vms = self._conn.MSVM_ComputerSystem (ElementName=vm_name) + vm = vms[0] + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + rasds = vmsettings[0].associators( + wmi_result_class='MSVM_ResourceAllocationSettingData') + ctrller = [r for r in rasds + if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\ + and r.Address == "0" ] + diskdflt = self._conn.query( + "SELECT * FROM Msvm_ResourceAllocationSettingData \ + WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\ + AND InstanceID LIKE '%Default%'")[0] + diskdrive = self._clone_wmi_obj( + 'Msvm_ResourceAllocationSettingData', diskdflt) + diskdrive.Parent = ctrller[0].path_() + diskdrive.Address = 0 + new_resources = self._add_virt_resource(diskdrive, vm) + + if new_resources is None: + raise Exception('Failed to add diskdrive to VM %s', vm_name) + + diskdrive_path = new_resources[0] + logging.debug("New disk drive path is " + diskdrive_path) + vhddefault = self._conn.query( + "SELECT * FROM Msvm_ResourceAllocationSettingData \ + WHERE ResourceSubType LIKE 'Microsoft Virtual Hard Disk' AND \ + InstanceID LIKE '%Default%' ")[0] + + vhddisk = self._clone_wmi_obj( + 'Msvm_ResourceAllocationSettingData', vhddefault) + vhddisk.Parent = diskdrive_path + vhddisk.Connection = [vhdfile] + + new_resources = self._add_virt_resource(vhddisk, vm) + if new_resources is None: + raise Exception('Failed to add vhd file to VM %s', vm_name) + logging.info("Created disk for %s ", vm_name) + + + def _create_nic(self, vm_name, mac): + """Create a (emulated) nic and attach it to the vm""" + logging.debug("Creating nic for %s ", vm_name) + vms = self._conn.Msvm_ComputerSystem (ElementName=vm_name) + extswitch = self._find_external_network() + vm = vms[0] + switch_svc = 
self._conn.Msvm_VirtualSwitchManagementService ()[0] + #use Msvm_SyntheticEthernetPortSettingData for Windows VMs or Linux with + #Linux Integration Components installed + emulatednics_data = self._conn.Msvm_EmulatedEthernetPortSettingData() + default_nic_data = [n for n in emulatednics_data + if n.InstanceID.rfind('Default') >0 ] + new_nic_data = self._clone_wmi_obj( + 'Msvm_EmulatedEthernetPortSettingData', + default_nic_data[0]) + + (created_sw, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name, + "", extswitch.path_()) + if (ret_val != 0): + logging.debug("Failed to create a new port on the external network") + return + logging.debug("Created switch port %s on switch %s", + vm_name, extswitch.path_()) + new_nic_data.Connection = [created_sw] + new_nic_data.ElementName = vm_name + ' nic' + new_nic_data.Address = ''.join(mac.split(':')) + new_nic_data.StaticMacAddress = 'TRUE' + new_resources = self._add_virt_resource(new_nic_data, vm) + if new_resources is None: + raise Exception('Failed to add nic to VM %s', vm_name) + logging.info("Created nic for %s ", vm_name) + + + def _add_virt_resource(self, res_setting_data, target_vm): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job, new_resources, return_val) = vs_man_svc.\ + AddVirtualSystemResources([res_setting_data.GetText_(1)], + target_vm.path_()) + success = True + if (return_val == 4096 ): #WMI job started + success = self._check_job_status(job) + else: + success = (return_val == 0) + if success: + return new_resources + else: + return None + + #TODO: use the reactor to poll instead of sleep + def _check_job_status(self, jobpath): + inst_id = jobpath.split(':')[1].split('=')[1].strip('\"') + jobs = self._conn.Msvm_ConcreteJob(InstanceID=inst_id) + if (len(jobs) == 0): + return False + job = jobs[0] + while job.JobState == 4: #job started + time.sleep(0.1) + job = self._conn.Msvm_ConcreteJob(InstanceID=inst_id)[0] + + if (job.JobState != 7): #job success + logging.debug("WMI job failed: " + job.ErrorSummaryDescription) + return False + + logging.debug("WMI job succeeded: " + job.Description + ",Elapsed = " \ + + job.ElapsedTime) + + return True + + + + def _find_external_network(self): + bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE') + if (len(bound) == 0): + return None + + return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]\ + .associators(wmi_result_class='Msvm_SwitchLANEndpoint')[0]\ + .associators(wmi_result_class='Msvm_SwitchPort')[0]\ + .associators(wmi_result_class='Msvm_VirtualSwitch')[0] + + def _clone_wmi_obj(self, wmi_class, wmi_obj): + cl = self._conn.__getattr__(wmi_class) + newinst = cl.new() + for prop in wmi_obj._properties: + newinst.Properties_.Item(prop).Value =\ + wmi_obj.Properties_.Item(prop).Value + return newinst + + + @defer.inlineCallbacks + def reboot(self, instance): + vm = yield self._lookup(instance.name) + if vm is None: + raise Exception('instance not present %s' % instance.name) + self._set_vm_state(instance.name, 'Reboot') + + + @defer.inlineCallbacks + def destroy(self, instance): + logging.debug("Got request to destroy vm %s", instance.name) + vm = yield self._lookup(instance.name) + if vm is None: + defer.returnValue(None) + vm = self._conn.Msvm_ComputerSystem (ElementName=instance.name)[0] + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + self._set_vm_state(instance.name, 'Disabled') + vmsettings = vm.associators(wmi_result_class= + 'Msvm_VirtualSystemSettingData') + rasds = vmsettings[0].associators(wmi_result_class= + 
'MSVM_ResourceAllocationSettingData') + disks = [r for r in rasds \ + if r.ResourceSubType == 'Microsoft Virtual Hard Disk' ] + diskfiles = [] + for disk in disks: + diskfiles.extend([c for c in disk.Connection]) + + (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_()) + if (ret_val == 4096 ): #WMI job started + success = self._check_job_status(job) + elif (ret_val == 0): + success = True + if not success: + raise Exception('Failed to destroy vm %s' % instance.name) + for disk in diskfiles: + vhdfile = self._cim_conn.CIM_DataFile(Name=disk) + for vf in vhdfile: + vf.Delete() + logging.debug("Deleted disk %s vm %s", vhdfile, instance.name) + + + + def get_info(self, instance_id): + vm = self._lookup(instance_id) + if vm is None: + raise Exception('instance not present %s' % instance_id) + vm = self._conn.Msvm_ComputerSystem(ElementName=instance_id)[0] + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + vmsettings = vm.associators(wmi_result_class= + 'Msvm_VirtualSystemSettingData') + settings_paths = [ v.path_() for v in vmsettings] + summary_info = vs_man_svc.GetSummaryInformation( + [4,100,103,105], settings_paths)[1] + info = summary_info[0] + logging.debug("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \ + cpu_time=%s", instance_id, + str(HYPERV_POWER_STATE[info.EnabledState]), + str(info.MemoryUsage), + str(info.NumberOfProcessors), + str(info.UpTime)) + + return {'state': HYPERV_POWER_STATE[info.EnabledState], + 'max_mem': info.MemoryUsage, + 'mem': info.MemoryUsage, + 'num_cpu': info.NumberOfProcessors, + 'cpu_time': info.UpTime} + + + def _lookup(self, i): + vms = self._conn.Msvm_ComputerSystem (ElementName=i) + n = len(vms) + if n == 0: + return None + elif n > 1: + raise Exception('duplicate name found: %s' % i) + else: + return vms[0].ElementName + + def _set_vm_state(self, vm_name, req_state): + vms = self._conn.Msvm_ComputerSystem (ElementName=vm_name) + if len(vms) == 0: + return False + status = vms[0].RequestStateChange(REQ_POWER_STATE[req_state]) + job = status[0] + return_val = status[1] + if (return_val == 4096 ): #WMI job started + success = self._check_job_status(job) + elif (return_val == 0): + success = True + if success: + logging.info("Successfully changed vm state of %s to %s", + vm_name, req_state) + return True + else: + logging.debug("Failed to change vm state of %s to %s", + vm_name, req_state) + return False + + + def attach_volume(self, instance_name, device_path, mountpoint): + vm = self._lookup(instance_name) + if vm is None: + raise Exception('Attempted to attach volume to nonexistent %s vm' % + instance_name) + + def detach_volume(self, instance_name, mountpoint): + vm = self._lookup(instance_name) + if vm is None: + raise Exception('Attempted to detach volume from nonexistent %s ' % + instance_name) + From 85c890e91f493f254801edd5e5aed115d8d9c4a6 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Fri, 8 Oct 2010 17:58:01 -0700 Subject: [PATCH 003/147] Register the Hyper-V module into the list of virt modules --- nova/virt/connection.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 34e37adf7587..0f736ce39e7f 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -26,6 +26,7 @@ from nova import flags from nova.virt import fake from nova.virt import libvirt_conn from nova.virt import xenapi +from nova.virt import hyperv FLAGS = flags.FLAGS @@ -49,6 +50,8 @@ def get_connection(read_only=False): conn = libvirt_conn.get_connection(read_only) elif t == 
'xenapi': conn = xenapi.get_connection(read_only) + elif t == 'hyperv': + conn = hyperv.get_connection(read_only) else: raise Exception('Unknown connection type "%s"' % t) From 6669b46ca91f462c96b033c6e04618c06fecb31f Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Fri, 8 Oct 2010 17:58:53 -0700 Subject: [PATCH 004/147] curl not available on Windows for s3 download. also os-agnostic local copy --- nova/virt/images.py | 36 +++++++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/nova/virt/images.py b/nova/virt/images.py index dc50764d9120..90071107b38d 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -24,13 +24,15 @@ Handling of VM disk images. import os.path import time import urlparse +import shutil from nova import flags -from nova import process from nova.auth import manager from nova.auth import signer -from nova.objectstore import image +import logging +import urllib2 +import os FLAGS = flags.FLAGS flags.DEFINE_bool('use_s3', True, @@ -47,6 +49,7 @@ def fetch(image, path, user, project): def _fetch_s3_image(image, path, user, project): url = image_url(image) + logging.debug("About to retrieve %s and place it in %s", url, path) # This should probably move somewhere else, like e.g. a download_as # method on User objects and at the same time get rewritten to use @@ -61,17 +64,32 @@ def _fetch_s3_image(image, path, user, project): url_path) headers['Authorization'] = 'AWS %s:%s' % (access, signature) - cmd = ['/usr/bin/curl', '--fail', '--silent', url] - for (k,v) in headers.iteritems(): - cmd += ['-H', '%s: %s' % (k,v)] + def urlretrieve(urlfile, fpath): + chunk = 1*1024*1024 + f = open(fpath, "wb") + while 1: + data = urlfile.read(chunk) + if not data: + break + f.write(data) - cmd += ['-o', path] - return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) + request = urllib2.Request(url) + for (k, v) in headers.iteritems(): + request.add_header(k, v) + + urlopened = urllib2.urlopen(request) + + urlretrieve(urlopened, path) + + logging.debug("Finished retreving %s -- placed in %s", url, path) + + return def _fetch_local_image(image, path, user, project): - source = _image_path('%s/image' % image) - return process.simple_execute('cp %s %s' % (source, path)) + source = _image_path(os.path.join(image,'image')) + logging.debug("About to copy %s to %s", source, path) + return shutil.copy(source, path) def _image_path(path): From 202da619d383db9e0968a1fc67acdf48101235c0 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Fri, 8 Oct 2010 17:59:17 -0700 Subject: [PATCH 005/147] if using local copy (use_s3=false) we need to know where to find the image --- nova/compute/manager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 131fac406abc..8d2705da2628 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -39,6 +39,8 @@ flags.DEFINE_string('instances_path', utils.abspath('../instances'), 'where instances are stored on disk') flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for volume creation') +flags.DEFINE_string('images_path', utils.abspath('../images'), + 'path to decrypted local images if not using s3') class ComputeManager(manager.Manager): From 20aab4195baac543d638cf9c3a1484f8f9fb3d80 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Tue, 12 Oct 2010 15:04:39 -0700 Subject: [PATCH 006/147] Add design doc, docstrings, document hyper-v wmi, python wmi usage. 
Adhere to pep-8 more closely --- nova/virt/hyperv.py | 148 ++++++++++++++++++++++++++++++++------------ 1 file changed, 107 insertions(+), 41 deletions(-) diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 91b86e2658df..388e833b24f8 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -16,16 +16,58 @@ """ A connection to Hyper-V . +Uses Windows Management Instrumentation (WMI) calls to interact with Hyper-V +Hyper-V WMI usage: + http://msdn.microsoft.com/en-us/library/cc723875%28v=VS.85%29.aspx +The Hyper-V object model briefly: + The physical computer and its hosted virtual machines are each represented + by the Msvm_ComputerSystem class. + + Each virtual machine is associated with a + Msvm_VirtualSystemGlobalSettingData (vs_gs_data) instance and one or more + Msvm_VirtualSystemSettingData (vmsetting) instances. For each vmsetting + there is a series of Msvm_ResourceAllocationSettingData (rasd) objects. + The rasd objects describe the settings for each device in a virtual machine. + Together, the vs_gs_data, vmsettings and rasds describe the configuration + of the virtual machine. + + Creating new resources such as disks and nics involves cloning a default + rasd object and appropriately modifying the clone and calling the + AddVirtualSystemResources WMI method + Changing resources such as memory uses the ModifyVirtualSystemResources + WMI method + +Using the Python WMI library: + Tutorial: + http://timgolden.me.uk/python/wmi/tutorial.html + Hyper-V WMI objects can be retrieved simply by using the class name + of the WMI object and optionally specifying a column to filter the + result set. More complex filters can be formed using WQL (sql-like) + queries. + The parameters and return tuples of WMI method calls can gleaned by + examining the doc string. For example: + >>> vs_man_svc.ModifyVirtualSystemResources.__doc__ + ModifyVirtualSystemResources (ComputerSystem, ResourceSettingData[]) + => (Job, ReturnValue)' + When passing setting data (ResourceSettingData) to the WMI method, + an XML representation of the data is passed in using the GetText_(1) method. + Available methods on a service can be determined using method.keys(): + >>> vs_man_svc.methods.keys() + vmsettings and rasds for a vm can be retrieved using the 'associators' + method with the appropriate return class. + Long running WMI commands generally return a Job (an instance of + Msvm_ConcreteJob) whose state can be polled to determine when it finishes """ import os import logging -import wmi import time from twisted.internet import defer +import wmi +from nova import exception from nova import flags from nova.auth.manager import AuthManager from nova.compute import power_state @@ -39,10 +81,9 @@ HYPERV_POWER_STATE = { 3 : power_state.SHUTDOWN, 2 : power_state.RUNNING, 32768 : power_state.PAUSED, - 32768: power_state.PAUSED, # TODO - 3 : power_state.CRASHED } + REQ_POWER_STATE = { 'Enabled' : 2, 'Disabled': 3, @@ -53,6 +94,11 @@ REQ_POWER_STATE = { } +WMI_JOB_STATUS_STARTED = 4096 +WMI_JOB_STATE_RUNNING = 4 +WMI_JOB_STATE_COMPLETED = 7 + + def get_connection(_): return HyperVConnection() @@ -63,19 +109,22 @@ class HyperVConnection(object): self._cim_conn = wmi.WMI(moniker = '//./root/cimv2') def list_instances(self): + """ Return the names of all the instances known to Hyper-V. 
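+        A hypothetical session (instance names are illustrative only):
+        >>> conn = get_connection(None)
+        >>> conn.list_instances()
+        ['vm1', 'vm2']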
""" vms = [v.ElementName \ for v in self._conn.Msvm_ComputerSystem(['ElementName'])] return vms @defer.inlineCallbacks def spawn(self, instance): + """ Create a new VM and start it.""" vm = yield self._lookup(instance.name) if vm is not None: - raise Exception('Attempted to create non-unique name %s' % + raise exception.Duplicate('Attempted to create duplicate name %s' % instance.name) user = AuthManager().get_user(instance['user_id']) project = AuthManager().get_project(instance['project_id']) + #Fetch the file, assume it is a VHD file. vhdfile = os.path.join(FLAGS.instances_path, instance['str_id'])+".vhd" yield images.fetch(instance['image_id'], vhdfile, user, project) @@ -93,14 +142,14 @@ class HyperVConnection(object): self.destroy(instance) def _create_vm(self, instance): - """Create a VM record. """ + """Create a VM but don't start it. """ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new() vs_gs_data.ElementName = instance['name'] (job, ret_val) = vs_man_svc.DefineVirtualSystem( [], None, vs_gs_data.GetText_(1))[1:] - if (ret_val == 4096 ): #WMI job started + if ret_val == WMI_JOB_STATUS_STARTED: success = self._check_job_status(job) else: success = (ret_val == 0) @@ -117,7 +166,7 @@ class HyperVConnection(object): if s.SettingType == 3][0] #avoid snapshots memsetting = vmsetting.associators(wmi_result_class= 'Msvm_MemorySettingData')[0] - #No Dynamic Memory + #No Dynamic Memory, so reservation, limit and quantity are identical. mem = long(str(instance['memory_mb'])) memsetting.VirtualQuantity = mem memsetting.Reservation = mem @@ -129,8 +178,7 @@ class HyperVConnection(object): logging.debug('Set memory for vm %s...', instance.name) procsetting = vmsetting.associators(wmi_result_class= 'Msvm_ProcessorSettingData')[0] - vcpus = long(str(instance['vcpus'])) - #vcpus = 1 + vcpus = long(instance['vcpus']) procsetting.VirtualQuantity = vcpus procsetting.Reservation = vcpus procsetting.Limit = vcpus @@ -140,11 +188,11 @@ class HyperVConnection(object): logging.debug('Set vcpus for vm %s...', instance.name) - def _create_disk(self, vm_name, vhdfile): """Create a disk and attach it to the vm""" logging.debug("Creating disk for %s by attaching disk file %s", \ vm_name, vhdfile) + #Find the IDE controller for the vm. vms = self._conn.MSVM_ComputerSystem (ElementName=vm_name) vm = vms[0] vmsettings = vm.associators( @@ -154,14 +202,17 @@ class HyperVConnection(object): ctrller = [r for r in rasds if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\ and r.Address == "0" ] + #Find the default disk drive object for the vm and clone it. diskdflt = self._conn.query( "SELECT * FROM Msvm_ResourceAllocationSettingData \ WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\ AND InstanceID LIKE '%Default%'")[0] diskdrive = self._clone_wmi_obj( 'Msvm_ResourceAllocationSettingData', diskdflt) - diskdrive.Parent = ctrller[0].path_() + #Set the IDE ctrller as parent. + diskdrive.Parent = ctrller[0].path_() diskdrive.Address = 0 + #Add the cloned disk drive object to the vm. new_resources = self._add_virt_resource(diskdrive, vm) if new_resources is None: @@ -169,31 +220,36 @@ class HyperVConnection(object): diskdrive_path = new_resources[0] logging.debug("New disk drive path is " + diskdrive_path) + #Find the default VHD disk object. 
vhddefault = self._conn.query( "SELECT * FROM Msvm_ResourceAllocationSettingData \ WHERE ResourceSubType LIKE 'Microsoft Virtual Hard Disk' AND \ InstanceID LIKE '%Default%' ")[0] + #Clone the default and point it to the image file. vhddisk = self._clone_wmi_obj( 'Msvm_ResourceAllocationSettingData', vhddefault) - vhddisk.Parent = diskdrive_path + #Set the new drive as the parent. + vhddisk.Parent = diskdrive_path vhddisk.Connection = [vhdfile] + #Add the new vhd object as a virtual hard disk to the vm. new_resources = self._add_virt_resource(vhddisk, vm) if new_resources is None: raise Exception('Failed to add vhd file to VM %s', vm_name) logging.info("Created disk for %s ", vm_name) - def _create_nic(self, vm_name, mac): """Create a (emulated) nic and attach it to the vm""" logging.debug("Creating nic for %s ", vm_name) + #Find the vswitch that is connected to the physical nic. vms = self._conn.Msvm_ComputerSystem (ElementName=vm_name) extswitch = self._find_external_network() vm = vms[0] switch_svc = self._conn.Msvm_VirtualSwitchManagementService ()[0] - #use Msvm_SyntheticEthernetPortSettingData for Windows VMs or Linux with - #Linux Integration Components installed + #Find the default nic and clone it to create a new nic for the vm. + #Use Msvm_SyntheticEthernetPortSettingData for Windows VMs or Linux with + #Linux Integration Components installed. emulatednics_data = self._conn.Msvm_EmulatedEthernetPortSettingData() default_nic_data = [n for n in emulatednics_data if n.InstanceID.rfind('Default') >0 ] @@ -201,30 +257,33 @@ class HyperVConnection(object): 'Msvm_EmulatedEthernetPortSettingData', default_nic_data[0]) + #Create a port on the vswitch. (created_sw, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name, "", extswitch.path_()) - if (ret_val != 0): + if ret_val != 0: logging.debug("Failed to create a new port on the external network") return logging.debug("Created switch port %s on switch %s", vm_name, extswitch.path_()) + #Connect the new nic to the new port. new_nic_data.Connection = [created_sw] new_nic_data.ElementName = vm_name + ' nic' new_nic_data.Address = ''.join(mac.split(':')) new_nic_data.StaticMacAddress = 'TRUE' + #Add the new nic to the vm. 
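+        #(_add_virt_resource hands the XML form of the setting data,
+        # GetText_(1), to AddVirtualSystemResources and returns the new
+        # resource paths, or None if the WMI job failed.)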
new_resources = self._add_virt_resource(new_nic_data, vm) if new_resources is None: raise Exception('Failed to add nic to VM %s', vm_name) logging.info("Created nic for %s ", vm_name) - def _add_virt_resource(self, res_setting_data, target_vm): + """Add a new resource (disk/nic) to the VM""" vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] (job, new_resources, return_val) = vs_man_svc.\ AddVirtualSystemResources([res_setting_data.GetText_(1)], target_vm.path_()) success = True - if (return_val == 4096 ): #WMI job started + if return_val == WMI_JOB_STATUS_STARTED: success = self._check_job_status(job) else: success = (return_val == 0) @@ -235,16 +294,17 @@ class HyperVConnection(object): #TODO: use the reactor to poll instead of sleep def _check_job_status(self, jobpath): + """Poll WMI job state for completion""" inst_id = jobpath.split(':')[1].split('=')[1].strip('\"') jobs = self._conn.Msvm_ConcreteJob(InstanceID=inst_id) - if (len(jobs) == 0): + if len(jobs) == 0: return False job = jobs[0] - while job.JobState == 4: #job started + while job.JobState == WMI_JOB_STATE_RUNNING: time.sleep(0.1) job = self._conn.Msvm_ConcreteJob(InstanceID=inst_id)[0] - if (job.JobState != 7): #job success + if job.JobState != WMI_JOB_STATE_COMPLETED: logging.debug("WMI job failed: " + job.ErrorSummaryDescription) return False @@ -253,11 +313,13 @@ class HyperVConnection(object): return True - - def _find_external_network(self): + """Find the vswitch that is connected to the physical nic. + Assumes only one physical nic on the host + """ + #If there are no physical nics connected to networks, return. bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE') - if (len(bound) == 0): + if len(bound) == 0: return None return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]\ @@ -266,30 +328,33 @@ class HyperVConnection(object): .associators(wmi_result_class='Msvm_VirtualSwitch')[0] def _clone_wmi_obj(self, wmi_class, wmi_obj): - cl = self._conn.__getattr__(wmi_class) - newinst = cl.new() + """Clone a WMI object""" + cl = self._conn.__getattr__(wmi_class) #get the class + newinst = cl.new() + #Copy the properties from the original. for prop in wmi_obj._properties: newinst.Properties_.Item(prop).Value =\ wmi_obj.Properties_.Item(prop).Value return newinst - @defer.inlineCallbacks def reboot(self, instance): + """Reboot the specified instance.""" vm = yield self._lookup(instance.name) if vm is None: - raise Exception('instance not present %s' % instance.name) + raise exception.NotFound('instance not present %s' % instance.name) self._set_vm_state(instance.name, 'Reboot') - @defer.inlineCallbacks def destroy(self, instance): + """Destroy the VM. Also destroy the associated VHD disk files""" logging.debug("Got request to destroy vm %s", instance.name) vm = yield self._lookup(instance.name) if vm is None: defer.returnValue(None) vm = self._conn.Msvm_ComputerSystem (ElementName=instance.name)[0] vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + #Stop the VM first. self._set_vm_state(instance.name, 'Disabled') vmsettings = vm.associators(wmi_result_class= 'Msvm_VirtualSystemSettingData') @@ -298,33 +363,35 @@ class HyperVConnection(object): disks = [r for r in rasds \ if r.ResourceSubType == 'Microsoft Virtual Hard Disk' ] diskfiles = [] + #Collect disk file information before destroying the VM. for disk in disks: diskfiles.extend([c for c in disk.Connection]) - + #Nuke the VM. Does not destroy disks. 
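+        #(DestroyVirtualSystem removes only the VM definition, which is
+        # why the VHD paths were collected above; the files themselves
+        # are deleted below through CIM_DataFile.)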
(job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_()) - if (ret_val == 4096 ): #WMI job started + if ret_val == WMI_JOB_STATUS_STARTED: success = self._check_job_status(job) - elif (ret_val == 0): + elif ret_val == 0: success = True if not success: raise Exception('Failed to destroy vm %s' % instance.name) + #Delete associated vhd disk files. for disk in diskfiles: vhdfile = self._cim_conn.CIM_DataFile(Name=disk) for vf in vhdfile: vf.Delete() logging.debug("Deleted disk %s vm %s", vhdfile, instance.name) - - def get_info(self, instance_id): + """Get information about the VM""" vm = self._lookup(instance_id) if vm is None: - raise Exception('instance not present %s' % instance_id) + raise exception.NotFound('instance not present %s' % instance_id) vm = self._conn.Msvm_ComputerSystem(ElementName=instance_id)[0] vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] vmsettings = vm.associators(wmi_result_class= 'Msvm_VirtualSystemSettingData') settings_paths = [ v.path_() for v in vmsettings] + #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx summary_info = vs_man_svc.GetSummaryInformation( [4,100,103,105], settings_paths)[1] info = summary_info[0] @@ -341,7 +408,6 @@ class HyperVConnection(object): 'num_cpu': info.NumberOfProcessors, 'cpu_time': info.UpTime} - def _lookup(self, i): vms = self._conn.Msvm_ComputerSystem (ElementName=i) n = len(vms) @@ -353,15 +419,16 @@ class HyperVConnection(object): return vms[0].ElementName def _set_vm_state(self, vm_name, req_state): + """Set the desired state of the VM""" vms = self._conn.Msvm_ComputerSystem (ElementName=vm_name) if len(vms) == 0: return False status = vms[0].RequestStateChange(REQ_POWER_STATE[req_state]) job = status[0] return_val = status[1] - if (return_val == 4096 ): #WMI job started + if return_val == WMI_JOB_STATUS_STARTED: success = self._check_job_status(job) - elif (return_val == 0): + elif return_val == 0: success = True if success: logging.info("Successfully changed vm state of %s to %s", @@ -372,16 +439,15 @@ class HyperVConnection(object): vm_name, req_state) return False - def attach_volume(self, instance_name, device_path, mountpoint): vm = self._lookup(instance_name) if vm is None: - raise Exception('Attempted to attach volume to nonexistent %s vm' % + raise exception.NotFound('Cannot attach volume to missing %s vm' % instance_name) def detach_volume(self, instance_name, mountpoint): vm = self._lookup(instance_name) if vm is None: - raise Exception('Attempted to detach volume from nonexistent %s ' % + raise exception.NotFound('Cannot detach volume from missing %s ' % instance_name) From f224c0ed419f885aa85065d1a27623b22721d34c Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Tue, 12 Oct 2010 23:45:30 -0700 Subject: [PATCH 007/147] Fix typo, fix import --- nova/virt/images.py | 54 +++++++++++++++++++++++++++------------------ 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/nova/virt/images.py b/nova/virt/images.py index 90071107b38d..dad285fe0be0 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -27,12 +27,14 @@ import urlparse import shutil from nova import flags +from nova import process from nova.auth import manager from nova.auth import signer import logging import urllib2 import os +import sys FLAGS = flags.FLAGS flags.DEFINE_bool('use_s3', True, @@ -47,6 +49,24 @@ def fetch(image, path, user, project): return f(image, path, user, project) +def _fetch_image_no_curl(url, path, headers): + request = urllib2.Request(url) + for (k, v) in 
headers.iteritems(): + request.add_header(k, v) + + def urlretrieve(urlfile, fpath): + chunk = 1*1024*1024 + f = open(fpath, "wb") + while 1: + data = urlfile.read(chunk) + if not data: + break + f.write(data) + + urlopened = urllib2.urlopen(request) + urlretrieve(urlopened, path) + logging.debug("Finished retreving %s -- placed in %s", url, path) + def _fetch_s3_image(image, path, user, project): url = image_url(image) logging.debug("About to retrieve %s and place it in %s", url, path) @@ -64,32 +84,22 @@ def _fetch_s3_image(image, path, user, project): url_path) headers['Authorization'] = 'AWS %s:%s' % (access, signature) - def urlretrieve(urlfile, fpath): - chunk = 1*1024*1024 - f = open(fpath, "wb") - while 1: - data = urlfile.read(chunk) - if not data: - break - f.write(data) - - request = urllib2.Request(url) - for (k, v) in headers.iteritems(): - request.add_header(k, v) - - urlopened = urllib2.urlopen(request) - - urlretrieve(urlopened, path) - - logging.debug("Finished retreving %s -- placed in %s", url, path) - - return - + if sys.platform.startswith('win'): + return _fetch_image_no_curl(url, path, headers) + else: + cmd = ['/usr/bin/curl', '--fail', '--silent', url] + for (k,v) in headers.iteritems(): + cmd += ['-H', '%s: %s' % (k,v)] + cmd += ['-o', path] + return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) def _fetch_local_image(image, path, user, project): source = _image_path(os.path.join(image,'image')) logging.debug("About to copy %s to %s", source, path) - return shutil.copy(source, path) + if sys.platform.startswith('win'): + return shutil.copy(source, path) + else: + return process.simple_execute('cp %s %s' % (source, path)) def _image_path(path): From 01ad0a05c4f93bb5e95a1c781d492374739dce2c Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Tue, 12 Oct 2010 23:53:31 -0700 Subject: [PATCH 008/147] Remove extraneous newlines --- nova/virt/images.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nova/virt/images.py b/nova/virt/images.py index dad285fe0be0..75e6f783ec76 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -48,7 +48,6 @@ def fetch(image, path, user, project): f = _fetch_local_image return f(image, path, user, project) - def _fetch_image_no_curl(url, path, headers): request = urllib2.Request(url) for (k, v) in headers.iteritems(): @@ -101,11 +100,9 @@ def _fetch_local_image(image, path, user, project): else: return process.simple_execute('cp %s %s' % (source, path)) - def _image_path(path): return os.path.join(FLAGS.images_path, path) - def image_url(image): return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port, image) From b28c43c1f66cc111e34e9bbc45a78ff7aa60fd29 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Wed, 13 Oct 2010 00:06:29 -0700 Subject: [PATCH 009/147] Newlines again, reorder imports --- nova/virt/images.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/nova/virt/images.py b/nova/virt/images.py index 75e6f783ec76..a68d856a1936 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -21,20 +21,20 @@ Handling of VM disk images. 
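Images are fetched from S3 or copied from a local store; on Windows,
pure-Python fallbacks stand in for the external curl and cp commands.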
""" +import logging +import os import os.path -import time -import urlparse import shutil +import sys +import time +import urllib2 +import urlparse from nova import flags from nova import process from nova.auth import manager from nova.auth import signer -import logging -import urllib2 -import os -import sys FLAGS = flags.FLAGS flags.DEFINE_bool('use_s3', True, @@ -48,6 +48,7 @@ def fetch(image, path, user, project): f = _fetch_local_image return f(image, path, user, project) + def _fetch_image_no_curl(url, path, headers): request = urllib2.Request(url) for (k, v) in headers.iteritems(): @@ -103,6 +104,7 @@ def _fetch_local_image(image, path, user, project): def _image_path(path): return os.path.join(FLAGS.images_path, path) + def image_url(image): return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port, image) From 273f5c1c5a3f2ae1f540ba2432cc8a2d0a9c1826 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Wed, 13 Oct 2010 23:19:25 -0700 Subject: [PATCH 010/147] Added a unit test but not integrated it --- nova/tests/hyperv_unittest.py | 67 +++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 nova/tests/hyperv_unittest.py diff --git a/nova/tests/hyperv_unittest.py b/nova/tests/hyperv_unittest.py new file mode 100644 index 000000000000..e5c6d719ed86 --- /dev/null +++ b/nova/tests/hyperv_unittest.py @@ -0,0 +1,67 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 Cloud.com, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Hyper-V driver +""" + +import random + +from nova import db +from nova import flags +from nova import test + +from nova.virt import hyperv + +FLAGS = flags.FLAGS +FLAGS.connection_type = 'hyperv' +# Redis is probably not running on Hyper-V host. 
+# Change this to the actual Redis host +FLAGS.redis_host = '127.0.0.1' + + +class HyperVTestCase(test.TrialTestCase): + """Test cases for the Hyper-V driver""" + def setUp(self): # pylint: disable-msg=C0103 + pass + + def test_create_destroy(self): + """Create a VM and destroy it""" + instance = {'internal_id' : random.randint(1, 1000000), + 'memory_mb' : '1024', + 'mac_address' : '02:12:34:46:56:67', + 'vcpus' : 2, + 'project_id' : 'fake', + 'instance_type' : 'm1.small'} + + instance_ref = db.instance_create(None, instance) + + conn = hyperv.get_connection(False) + conn._create_vm(instance_ref) # pylint: disable-msg=W0212 + found = [n for n in conn.list_instances() + if n == instance_ref['name']] + self.assertTrue(len(found) == 1) + info = conn.get_info(instance_ref['name']) + #Unfortunately since the vm is not running at this point, + #we cannot obtain memory information from get_info + self.assertEquals(info['num_cpu'], instance_ref['vcpus']) + + conn.destroy(instance_ref) + found = [n for n in conn.list_instances() + if n == instance_ref['name']] + self.assertTrue(len(found) == 0) + + def tearDown(self): # pylint: disable-msg=C0103 + pass From 9caed7b34d9b953bb8ecd306509443d076d1e4fe Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Wed, 13 Oct 2010 23:21:22 -0700 Subject: [PATCH 011/147] review comments --- nova/compute/manager.py | 2 - nova/virt/hyperv.py | 250 ++++++++++++++++++++++------------------ nova/virt/images.py | 11 +- 3 files changed, 147 insertions(+), 116 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 8d2705da2628..131fac406abc 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -39,8 +39,6 @@ flags.DEFINE_string('instances_path', utils.abspath('../instances'), 'where instances are stored on disk') flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for volume creation') -flags.DEFINE_string('images_path', utils.abspath('../images'), - 'path to decrypted local images if not using s3') class ComputeManager(manager.Manager): diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 388e833b24f8..7451cac97224 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -20,21 +20,21 @@ Uses Windows Management Instrumentation (WMI) calls to interact with Hyper-V Hyper-V WMI usage: http://msdn.microsoft.com/en-us/library/cc723875%28v=VS.85%29.aspx The Hyper-V object model briefly: - The physical computer and its hosted virtual machines are each represented - by the Msvm_ComputerSystem class. + The physical computer and its hosted virtual machines are each represented + by the Msvm_ComputerSystem class. - Each virtual machine is associated with a - Msvm_VirtualSystemGlobalSettingData (vs_gs_data) instance and one or more - Msvm_VirtualSystemSettingData (vmsetting) instances. For each vmsetting - there is a series of Msvm_ResourceAllocationSettingData (rasd) objects. - The rasd objects describe the settings for each device in a virtual machine. - Together, the vs_gs_data, vmsettings and rasds describe the configuration + Each virtual machine is associated with a + Msvm_VirtualSystemGlobalSettingData (vs_gs_data) instance and one or more + Msvm_VirtualSystemSettingData (vmsetting) instances. For each vmsetting + there is a series of Msvm_ResourceAllocationSettingData (rasd) objects. + The rasd objects describe the settings for each device in a VM. + Together, the vs_gs_data, vmsettings and rasds describe the configuration of the virtual machine. 
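+    An illustrative traversal (assuming conn = wmi.WMI(moniker=
+    '//./root/virtualization') and an existing VM named 'myvm'):
+    >>> vm = conn.Msvm_ComputerSystem(ElementName='myvm')[0]
+    >>> vmsettings = vm.associators(
+    ...     wmi_result_class='Msvm_VirtualSystemSettingData')
+    >>> rasds = vmsettings[0].associators(
+    ...     wmi_result_class='Msvm_ResourceAllocationSettingData')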
- Creating new resources such as disks and nics involves cloning a default - rasd object and appropriately modifying the clone and calling the + Creating new resources such as disks and nics involves cloning a default + rasd object and appropriately modifying the clone and calling the AddVirtualSystemResources WMI method - Changing resources such as memory uses the ModifyVirtualSystemResources + Changing resources such as memory uses the ModifyVirtualSystemResources WMI method Using the Python WMI library: @@ -47,10 +47,10 @@ Using the Python WMI library: The parameters and return tuples of WMI method calls can gleaned by examining the doc string. For example: >>> vs_man_svc.ModifyVirtualSystemResources.__doc__ - ModifyVirtualSystemResources (ComputerSystem, ResourceSettingData[]) + ModifyVirtualSystemResources (ComputerSystem, ResourceSettingData[]) => (Job, ReturnValue)' - When passing setting data (ResourceSettingData) to the WMI method, - an XML representation of the data is passed in using the GetText_(1) method. + When passing setting data (ResourceSettingData) to the WMI method, + an XML representation of the data is passed in using GetText_(1). Available methods on a service can be determined using method.keys(): >>> vs_man_svc.methods.keys() vmsettings and rasds for a vm can be retrieved using the 'associators' @@ -65,22 +65,23 @@ import logging import time from twisted.internet import defer -import wmi from nova import exception from nova import flags -from nova.auth.manager import AuthManager +from nova.auth import manager from nova.compute import power_state from nova.virt import images +wmi = None + FLAGS = flags.FLAGS HYPERV_POWER_STATE = { - 3 : power_state.SHUTDOWN, - 2 : power_state.RUNNING, - 32768 : power_state.PAUSED, + 3 : power_state.SHUTDOWN, + 2 : power_state.RUNNING, + 32768 : power_state.PAUSED, } @@ -98,15 +99,42 @@ WMI_JOB_STATUS_STARTED = 4096 WMI_JOB_STATE_RUNNING = 4 WMI_JOB_STATE_COMPLETED = 7 +##### Exceptions + + +class HyperVError(Exception): + """Base Exception class for all hyper-v errors.""" + def __init__(self, *args): + Exception.__init__(self, *args) + + +class VmResourceAllocationError(HyperVError): + """Raised when Hyper-V is unable to create or add a resource to + a VM + """ + def __init__(self, *args): + HyperVError.__init__(self, *args) + + +class VmOperationError(HyperVError): + """Raised when Hyper-V is unable to change the state of + a VM (start/stop/reboot/destroy) + """ + def __init__(self, *args): + HyperVError.__init__(self, *args) + def get_connection(_): + global wmi + if wmi is None: + wmi = __import__('wmi') return HyperVConnection() class HyperVConnection(object): def __init__(self): - self._conn = wmi.WMI(moniker = '//./root/virtualization') - self._cim_conn = wmi.WMI(moniker = '//./root/cimv2') + self._conn = wmi.WMI(moniker='//./root/virtualization') + self._cim_conn = wmi.WMI(moniker='//./root/cimv2') def list_instances(self): """ Return the names of all the instances known to Hyper-V. """ @@ -121,20 +149,22 @@ class HyperVConnection(object): if vm is not None: raise exception.Duplicate('Attempted to create duplicate name %s' % instance.name) - - user = AuthManager().get_user(instance['user_id']) - project = AuthManager().get_project(instance['project_id']) + + user = manager.AuthManager().get_user(instance['user_id']) + project = manager.AuthManager().get_project(instance['project_id']) #Fetch the file, assume it is a VHD file. 
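        #(images.fetch dispatches on FLAGS.use_s3: an S3 download when
        # true, otherwise a local copy out of FLAGS.images_path.)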
-        vhdfile = os.path.join(FLAGS.instances_path, instance['str_id'])+".vhd"
+        base_vhd_filename = os.path.join(FLAGS.instances_path,
+                                         instance['str_id'])
+        vhdfile = "%s.vhd" % (base_vhd_filename)
         yield images.fetch(instance['image_id'], vhdfile, user, project)
-
+
         try:
             yield self._create_vm(instance)

             yield self._create_disk(instance['name'], vhdfile)
             yield self._create_nic(instance['name'], instance['mac_address'])
-
-            logging.debug ('Starting VM %s ', instance.name)
+
+            logging.debug('Starting VM %s ', instance.name)
             yield self._set_vm_state(instance['name'], 'Enabled')
             logging.info('Started VM %s ', instance.name)
         except Exception as exn:
@@ -147,23 +177,24 @@ class HyperVConnection(object):
         vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
         vs_gs_data.ElementName = instance['name']
-        (job, ret_val) = vs_man_svc.DefineVirtualSystem(
+        (job, ret_val) = vs_man_svc.DefineVirtualSystem(
                                         [], None, vs_gs_data.GetText_(1))[1:]
-        if (ret_val == 4096 ):      #WMI job started
+        if ret_val == WMI_JOB_STATUS_STARTED:
             success = self._check_job_status(job)
         else:
             success = (ret_val == 0)
-
-        if not success:
-            raise Exception('Failed to create VM %s', instance.name)
-
-        logging.debug('Created VM %s...', instance.name)
-        vm = self._conn.Msvm_ComputerSystem (ElementName=instance.name)[0]
-        vmsettings = vm.associators(wmi_result_class=
-                'Msvm_VirtualSystemSettingData')
+        if not success:
+            raise VmResourceAllocationError('Failed to create VM %s',
+                                            instance.name)
+
+        logging.debug('Created VM %s...', instance.name)
+        vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0]
+
+        vmsettings = vm.associators(
+            wmi_result_class='Msvm_VirtualSystemSettingData')
         vmsetting = [s for s in vmsettings
-                        if s.SettingType == 3][0] #avoid snapshots
+                        if s.SettingType == 3][0]  # avoid snapshots
         memsetting = vmsetting.associators(wmi_result_class=
                 'Msvm_MemorySettingData')[0]
         #No Dynamic Memory, so reservation, limit and quantity are identical.
         mem = long(str(instance['memory_mb']))
         memsetting.VirtualQuantity = mem
         memsetting.Reservation = mem
@@ -174,7 +205,6 @@ class HyperVConnection(object):
         (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
                 vm.path_(), [memsetting.GetText_(1)])
-
         logging.debug('Set memory for vm %s...', instance.name)
         procsetting = vmsetting.associators(wmi_result_class=
                 'Msvm_ProcessorSettingData')[0]
-        vcpus = long(str(instance['vcpus']))
-        #vcpus = 1
+        vcpus = long(instance['vcpus'])
         procsetting.VirtualQuantity = vcpus
         procsetting.Reservation = vcpus
         procsetting.Limit = vcpus

         (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
                 vm.path_(), [procsetting.GetText_(1)])
-        logging.debug('Set vcpus for vm %s...', instance.name)
-
+        logging.debug('Set vcpus for vm %s...', instance.name)
+
     def _create_disk(self, vm_name, vhdfile):
         """Create a disk and attach it to the vm"""
-        logging.debug("Creating disk for %s by attaching disk file %s", \
+        logging.debug("Creating disk for %s by attaching disk file %s",
                         vm_name, vhdfile)
         #Find the IDE controller for the vm.
-        vms = self._conn.MSVM_ComputerSystem (ElementName=vm_name)
+        vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
         vm = vms[0]
         vmsettings = vm.associators(
                 wmi_result_class='Msvm_VirtualSystemSettingData')
         rasds = vmsettings[0].associators(
                 wmi_result_class='MSVM_ResourceAllocationSettingData')
         ctrller = [r for r in rasds
-                    if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\
-                    and r.Address == "0" ]
+                if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\
+                and r.Address == "0"]
         #Find the default disk drive object for the vm and clone it.
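        #(Attachment is a two-step parenting chain: the cloned synthetic
        # disk drive is parented to IDE controller 0, and the VHD is then
        # parented to that drive.)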
diskdflt = self._conn.query( - "SELECT * FROM Msvm_ResourceAllocationSettingData \ - WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\ - AND InstanceID LIKE '%Default%'")[0] + "SELECT * FROM Msvm_ResourceAllocationSettingData \ + WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\ + AND InstanceID LIKE '%Default%'")[0] diskdrive = self._clone_wmi_obj( 'Msvm_ResourceAllocationSettingData', diskdflt) #Set the IDE ctrller as parent. - diskdrive.Parent = ctrller[0].path_() + diskdrive.Parent = ctrller[0].path_() diskdrive.Address = 0 #Add the cloned disk drive object to the vm. new_resources = self._add_virt_resource(diskdrive, vm) - if new_resources is None: - raise Exception('Failed to add diskdrive to VM %s', vm_name) - + raise VmResourceAllocationError('Failed to add diskdrive to VM %s', + vm_name) diskdrive_path = new_resources[0] - logging.debug("New disk drive path is " + diskdrive_path) + logging.debug("New disk drive path is %s", diskdrive_path) #Find the default VHD disk object. vhddefault = self._conn.query( "SELECT * FROM Msvm_ResourceAllocationSettingData \ @@ -230,64 +258,66 @@ class HyperVConnection(object): vhddisk = self._clone_wmi_obj( 'Msvm_ResourceAllocationSettingData', vhddefault) #Set the new drive as the parent. - vhddisk.Parent = diskdrive_path + vhddisk.Parent = diskdrive_path vhddisk.Connection = [vhdfile] #Add the new vhd object as a virtual hard disk to the vm. new_resources = self._add_virt_resource(vhddisk, vm) if new_resources is None: - raise Exception('Failed to add vhd file to VM %s', vm_name) + raise VmResourceAllocationError('Failed to add vhd file to VM %s', + vm_name) logging.info("Created disk for %s ", vm_name) - + def _create_nic(self, vm_name, mac): """Create a (emulated) nic and attach it to the vm""" logging.debug("Creating nic for %s ", vm_name) #Find the vswitch that is connected to the physical nic. - vms = self._conn.Msvm_ComputerSystem (ElementName=vm_name) + vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) extswitch = self._find_external_network() vm = vms[0] - switch_svc = self._conn.Msvm_VirtualSwitchManagementService ()[0] + switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] #Find the default nic and clone it to create a new nic for the vm. - #Use Msvm_SyntheticEthernetPortSettingData for Windows VMs or Linux with + #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with #Linux Integration Components installed. emulatednics_data = self._conn.Msvm_EmulatedEthernetPortSettingData() default_nic_data = [n for n in emulatednics_data - if n.InstanceID.rfind('Default') >0 ] + if n.InstanceID.rfind('Default') > 0] new_nic_data = self._clone_wmi_obj( 'Msvm_EmulatedEthernetPortSettingData', default_nic_data[0]) - #Create a port on the vswitch. - (created_sw, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name, + (new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name, "", extswitch.path_()) if ret_val != 0: - logging.debug("Failed to create a new port on the external network") - return + logging.error("Failed creating a new port on the external vswitch") + raise VmResourceAllocationError('Failed creating port for %s', + vm_name) logging.debug("Created switch port %s on switch %s", vm_name, extswitch.path_()) #Connect the new nic to the new port. - new_nic_data.Connection = [created_sw] + new_nic_data.Connection = [new_port] new_nic_data.ElementName = vm_name + ' nic' new_nic_data.Address = ''.join(mac.split(':')) new_nic_data.StaticMacAddress = 'TRUE' #Add the new nic to the vm. 
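        #(The MAC set above is the colon-stripped form, e.g.
        # '02:12:34:46:56:67' becomes '021234465667'; StaticMacAddress
        # 'TRUE' is assumed to keep Hyper-V from assigning a dynamic one.)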
new_resources = self._add_virt_resource(new_nic_data, vm) if new_resources is None: - raise Exception('Failed to add nic to VM %s', vm_name) + raise VmResourceAllocationError('Failed to add nic to VM %s', + vm_name) logging.info("Created nic for %s ", vm_name) def _add_virt_resource(self, res_setting_data, target_vm): """Add a new resource (disk/nic) to the VM""" vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] - (job, new_resources, return_val) = vs_man_svc.\ + (job, new_resources, ret_val) = vs_man_svc.\ AddVirtualSystemResources([res_setting_data.GetText_(1)], target_vm.path_()) success = True - if return_val == WMI_JOB_STATUS_STARTED: + if ret_val == WMI_JOB_STATUS_STARTED: success = self._check_job_status(job) else: - success = (return_val == 0) - if success: + success = (ret_val == 0) + if success: return new_resources else: return None @@ -295,24 +325,23 @@ class HyperVConnection(object): #TODO: use the reactor to poll instead of sleep def _check_job_status(self, jobpath): """Poll WMI job state for completion""" + #Jobs have a path of the form: + #\\WIN-P5IG7367DAG\root\virtualization:Msvm_ConcreteJob.InstanceID="8A496B9C-AF4D-4E98-BD3C-1128CD85320D" inst_id = jobpath.split(':')[1].split('=')[1].strip('\"') jobs = self._conn.Msvm_ConcreteJob(InstanceID=inst_id) if len(jobs) == 0: return False job = jobs[0] - while job.JobState == WMI_JOB_STATE_RUNNING: + while job.JobState == WMI_JOB_STATE_RUNNING: time.sleep(0.1) job = self._conn.Msvm_ConcreteJob(InstanceID=inst_id)[0] - - if job.JobState != WMI_JOB_STATE_COMPLETED: - logging.debug("WMI job failed: " + job.ErrorSummaryDescription) + if job.JobState != WMI_JOB_STATE_COMPLETED: + logging.debug("WMI job failed: %s", job.ErrorSummaryDescription) return False - - logging.debug("WMI job succeeded: " + job.Description + ",Elapsed = " \ - + job.ElapsedTime) - + logging.debug("WMI job succeeded: %s, Elapsed=%s ", job.Description, + job.ElapsedTime) return True - + def _find_external_network(self): """Find the vswitch that is connected to the physical nic. Assumes only one physical nic on the host @@ -321,16 +350,15 @@ class HyperVConnection(object): bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE') if len(bound) == 0: return None - return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]\ .associators(wmi_result_class='Msvm_SwitchLANEndpoint')[0]\ .associators(wmi_result_class='Msvm_SwitchPort')[0]\ - .associators(wmi_result_class='Msvm_VirtualSwitch')[0] + .associators(wmi_result_class='Msvm_VirtualSwitch')[0] def _clone_wmi_obj(self, wmi_class, wmi_obj): """Clone a WMI object""" - cl = self._conn.__getattr__(wmi_class) #get the class - newinst = cl.new() + cl = self._conn.__getattr__(wmi_class) # get the class + newinst = cl.new() #Copy the properties from the original. for prop in wmi_obj._properties: newinst.Properties_.Item(prop).Value =\ @@ -343,8 +371,8 @@ class HyperVConnection(object): vm = yield self._lookup(instance.name) if vm is None: raise exception.NotFound('instance not present %s' % instance.name) - self._set_vm_state(instance.name, 'Reboot') - + self._set_vm_state(instance.name, 'Reboot') + @defer.inlineCallbacks def destroy(self, instance): """Destroy the VM. 
Also destroy the associated VHD disk files""" @@ -352,7 +380,7 @@ class HyperVConnection(object): vm = yield self._lookup(instance.name) if vm is None: defer.returnValue(None) - vm = self._conn.Msvm_ComputerSystem (ElementName=instance.name)[0] + vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0] vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] #Stop the VM first. self._set_vm_state(instance.name, 'Disabled') @@ -361,26 +389,26 @@ class HyperVConnection(object): rasds = vmsettings[0].associators(wmi_result_class= 'MSVM_ResourceAllocationSettingData') disks = [r for r in rasds \ - if r.ResourceSubType == 'Microsoft Virtual Hard Disk' ] + if r.ResourceSubType == 'Microsoft Virtual Hard Disk'] diskfiles = [] #Collect disk file information before destroying the VM. for disk in disks: diskfiles.extend([c for c in disk.Connection]) #Nuke the VM. Does not destroy disks. (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_()) - if ret_val == WMI_JOB_STATUS_STARTED: + if ret_val == WMI_JOB_STATUS_STARTED: success = self._check_job_status(job) elif ret_val == 0: success = True if not success: - raise Exception('Failed to destroy vm %s' % instance.name) + raise VmOperationError('Failed to destroy vm %s' % instance.name) #Delete associated vhd disk files. for disk in diskfiles: vhdfile = self._cim_conn.CIM_DataFile(Name=disk) for vf in vhdfile: vf.Delete() logging.debug("Deleted disk %s vm %s", vhdfile, instance.name) - + def get_info(self, instance_id): """Get information about the VM""" vm = self._lookup(instance_id) @@ -390,10 +418,10 @@ class HyperVConnection(object): vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] vmsettings = vm.associators(wmi_result_class= 'Msvm_VirtualSystemSettingData') - settings_paths = [ v.path_() for v in vmsettings] + settings_paths = [v.path_() for v in vmsettings] #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx summary_info = vs_man_svc.GetSummaryInformation( - [4,100,103,105], settings_paths)[1] + [4, 100, 103, 105], settings_paths)[1] info = summary_info[0] logging.debug("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \ cpu_time=%s", instance_id, @@ -401,16 +429,16 @@ class HyperVConnection(object): str(info.MemoryUsage), str(info.NumberOfProcessors), str(info.UpTime)) - + return {'state': HYPERV_POWER_STATE[info.EnabledState], 'max_mem': info.MemoryUsage, 'mem': info.MemoryUsage, 'num_cpu': info.NumberOfProcessors, 'cpu_time': info.UpTime} - + def _lookup(self, i): - vms = self._conn.Msvm_ComputerSystem (ElementName=i) - n = len(vms) + vms = self._conn.Msvm_ComputerSystem(ElementName=i) + n = len(vms) if n == 0: return None elif n > 1: @@ -420,34 +448,36 @@ class HyperVConnection(object): def _set_vm_state(self, vm_name, req_state): """Set the desired state of the VM""" - vms = self._conn.Msvm_ComputerSystem (ElementName=vm_name) + vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) if len(vms) == 0: return False - status = vms[0].RequestStateChange(REQ_POWER_STATE[req_state]) - job = status[0] - return_val = status[1] - if return_val == WMI_JOB_STATUS_STARTED: + (job, ret_val) = vms[0].RequestStateChange(REQ_POWER_STATE[req_state]) + success = False + if ret_val == WMI_JOB_STATUS_STARTED: success = self._check_job_status(job) - elif return_val == 0: + elif ret_val == 0: + success = True + elif ret_val == 32775: + #Invalid state for current operation. 
Typically means it is + #already in the state requested success = True if success: logging.info("Successfully changed vm state of %s to %s", vm_name, req_state) - return True else: - logging.debug("Failed to change vm state of %s to %s", + logging.error("Failed to change vm state of %s to %s", vm_name, req_state) - return False - + raise VmOperationError("Failed to change vm state of %s to %s", + vm_name, req_state) + def attach_volume(self, instance_name, device_path, mountpoint): - vm = self._lookup(instance_name) + vm = self._lookup(instance_name) if vm is None: raise exception.NotFound('Cannot attach volume to missing %s vm' % instance_name) def detach_volume(self, instance_name, mountpoint): - vm = self._lookup(instance_name) + vm = self._lookup(instance_name) if vm is None: raise exception.NotFound('Cannot detach volume from missing %s ' % instance_name) - diff --git a/nova/virt/images.py b/nova/virt/images.py index a68d856a1936..6ef652e4a55f 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -22,7 +22,6 @@ Handling of VM disk images. """ import logging -import os import os.path import shutil import sys @@ -34,6 +33,7 @@ from nova import flags from nova import process from nova.auth import manager from nova.auth import signer +from nova.objectstore import image FLAGS = flags.FLAGS @@ -55,7 +55,7 @@ def _fetch_image_no_curl(url, path, headers): request.add_header(k, v) def urlretrieve(urlfile, fpath): - chunk = 1*1024*1024 + chunk = 1 * 1024 * 1024 f = open(fpath, "wb") while 1: data = urlfile.read(chunk) @@ -66,7 +66,8 @@ def _fetch_image_no_curl(url, path, headers): urlopened = urllib2.urlopen(request) urlretrieve(urlopened, path) logging.debug("Finished retreving %s -- placed in %s", url, path) - + + def _fetch_s3_image(image, path, user, project): url = image_url(image) logging.debug("About to retrieve %s and place it in %s", url, path) @@ -93,14 +94,16 @@ def _fetch_s3_image(image, path, user, project): cmd += ['-o', path] return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) + def _fetch_local_image(image, path, user, project): - source = _image_path(os.path.join(image,'image')) + source = _image_path(os.path.join(image, 'image')) logging.debug("About to copy %s to %s", source, path) if sys.platform.startswith('win'): return shutil.copy(source, path) else: return process.simple_execute('cp %s %s' % (source, path)) + def _image_path(path): return os.path.join(FLAGS.images_path, path) From 5a34f93790cf6fb98e9474797f5be3f231a4a6a4 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Thu, 14 Oct 2010 11:27:42 -0700 Subject: [PATCH 012/147] fix indent --- nova/virt/images.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/virt/images.py b/nova/virt/images.py index 6ef652e4a55f..b7daeb06483a 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -91,8 +91,9 @@ def _fetch_s3_image(image, path, user, project): cmd = ['/usr/bin/curl', '--fail', '--silent', url] for (k,v) in headers.iteritems(): cmd += ['-H', '%s: %s' % (k,v)] - cmd += ['-o', path] - return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) + + cmd += ['-o', path] + return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) def _fetch_local_image(image, path, user, project): From b328bac09fee6ff2de6e8326e655ee648bda5e2d Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Thu, 14 Oct 2010 11:56:25 -0700 Subject: [PATCH 013/147] revert to generic exceptions --- nova/virt/hyperv.py | 38 +++++++------------------------------- 1 file changed, 
7 insertions(+), 31 deletions(-) diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 7451cac97224..9688891168c8 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -99,30 +99,6 @@ WMI_JOB_STATUS_STARTED = 4096 WMI_JOB_STATE_RUNNING = 4 WMI_JOB_STATE_COMPLETED = 7 -##### Exceptions - - -class HyperVError(Exception): - """Base Exception class for all hyper-v errors.""" - def __init__(self, *args): - Exception.__init__(self, *args) - - -class VmResourceAllocationError(HyperVError): - """Raised when Hyper-V is unable to create or add a resource to - a VM - """ - def __init__(self, *args): - HyperVError.__init__(self, *args) - - -class VmOperationError(HyperVError): - """Raised when Hyper-V is unable to change the state of - a VM (start/stop/reboot/destroy) - """ - def __init__(self, *args): - HyperVError.__init__(self, *args) - def get_connection(_): global wmi @@ -244,7 +220,7 @@ class HyperVConnection(object): #Add the cloned disk drive object to the vm. new_resources = self._add_virt_resource(diskdrive, vm) if new_resources is None: - raise VmResourceAllocationError('Failed to add diskdrive to VM %s', + raise Exception('Failed to add diskdrive to VM %s', vm_name) diskdrive_path = new_resources[0] logging.debug("New disk drive path is %s", diskdrive_path) @@ -264,7 +240,7 @@ class HyperVConnection(object): #Add the new vhd object as a virtual hard disk to the vm. new_resources = self._add_virt_resource(vhddisk, vm) if new_resources is None: - raise VmResourceAllocationError('Failed to add vhd file to VM %s', + raise Exception('Failed to add vhd file to VM %s', vm_name) logging.info("Created disk for %s ", vm_name) @@ -290,7 +266,7 @@ class HyperVConnection(object): "", extswitch.path_()) if ret_val != 0: logging.error("Failed creating a new port on the external vswitch") - raise VmResourceAllocationError('Failed creating port for %s', + raise Exception('Failed creating port for %s', vm_name) logging.debug("Created switch port %s on switch %s", vm_name, extswitch.path_()) @@ -302,7 +278,7 @@ class HyperVConnection(object): #Add the new nic to the vm. new_resources = self._add_virt_resource(new_nic_data, vm) if new_resources is None: - raise VmResourceAllocationError('Failed to add nic to VM %s', + raise Exception('Failed to add nic to VM %s', vm_name) logging.info("Created nic for %s ", vm_name) @@ -327,7 +303,7 @@ class HyperVConnection(object): """Poll WMI job state for completion""" #Jobs have a path of the form: #\\WIN-P5IG7367DAG\root\virtualization:Msvm_ConcreteJob.InstanceID="8A496B9C-AF4D-4E98-BD3C-1128CD85320D" - inst_id = jobpath.split(':')[1].split('=')[1].strip('\"') + inst_id = jobpath.split('=')[1].strip('"') jobs = self._conn.Msvm_ConcreteJob(InstanceID=inst_id) if len(jobs) == 0: return False @@ -401,7 +377,7 @@ class HyperVConnection(object): elif ret_val == 0: success = True if not success: - raise VmOperationError('Failed to destroy vm %s' % instance.name) + raise Exception('Failed to destroy vm %s' % instance.name) #Delete associated vhd disk files. 
for disk in diskfiles: vhdfile = self._cim_conn.CIM_DataFile(Name=disk) @@ -467,7 +443,7 @@ class HyperVConnection(object): else: logging.error("Failed to change vm state of %s to %s", vm_name, req_state) - raise VmOperationError("Failed to change vm state of %s to %s", + raise Exception("Failed to change vm state of %s to %s", vm_name, req_state) def attach_volume(self, instance_name, device_path, mountpoint): From a58648f0ce5472e0b671d1b043fc4e0afd01658c Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Thu, 14 Oct 2010 13:37:49 -0700 Subject: [PATCH 014/147] remove nonexistent exception --- nova/virt/hyperv.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 9688891168c8..9adb2f00a590 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -161,8 +161,7 @@ class HyperVConnection(object): success = (ret_val == 0) if not success: - raise VmResourceAllocationException('Failed to create VM %s', - instance.name) + raise Exception('Failed to create VM %s', instance.name) logging.debug('Created VM %s...', instance.name) vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0] From e75b8f9bb05bc539500b88ebba7a98903bec0ba9 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 2 Dec 2010 11:40:44 +0100 Subject: [PATCH 015/147] Add a simple abstraction for firewalls. Some might say I should have done this from the start. They'd be absolutely correct. --- nova/virt/libvirt_conn.py | 47 +++++++++++++++++++++++++++++++++------ 1 file changed, 40 insertions(+), 7 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 18085089f3b5..0870a00fba8e 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -104,6 +104,8 @@ flags.DEFINE_string('libvirt_uri', flags.DEFINE_bool('allow_project_net_traffic', True, 'Whether to allow in project network traffic') +flags.DEFINE_bool('firewall_driver', None, + 'Firewall driver (defaults to nwfilter)') def get_connection(read_only): @@ -128,6 +130,12 @@ class LibvirtConnection(object): self.rescue_xml = open(rescue_file).read() self._wrapped_conn = None self.read_only = read_only + if not FLAGS.firewall_driver: + # This is weird looking, but NWFilter is libvirt specific + # and requires more cooperation between the two. + self.firewall_driver = NWFilterFirewall(self._conn) + else: + self.firewall_driver = utils.import_object(FLAGS.firewall_driver) @property def _conn(self): @@ -344,11 +352,12 @@ class LibvirtConnection(object): instance['id'], power_state.NOSTATE, 'launching') - yield NWFilterFirewall(self._conn).\ - setup_nwfilters_for_instance(instance) + + yield self.firewall_driver.prepare_instance_filter(instance) yield self._create_image(instance, xml) yield self._conn.createXML(xml, 0) logging.debug("instance %s: is running", instance['name']) + yield self.firewall_driver.apply_instance_filter(instance) local_d = defer.Deferred() timer = task.LoopingCall(f=None) @@ -645,11 +654,35 @@ class LibvirtConnection(object): return domain.interfaceStats(interface) def refresh_security_group(self, security_group_id): - fw = NWFilterFirewall(self._conn) - fw.ensure_security_group_filter(security_group_id) + self.firewall_driver.refresh_security_group(security_group_id) -class NWFilterFirewall(object): +class FirewallDriver(object): + def prepare_instance_filter(self, instance): + """Prepare filters for the instance. 
+ + At this point, the instance isn't running yet.""" + raise NotImplementedError() + + def apply_instance_filter(self, instance): + """Apply instance filter. + + Once this method returns, the instance should be firewalled + appropriately. This method should as far as possible be a + no-op. It's vastly preferred to get everything set up in + prepare_instance_filter. + """ + raise NotImplementedError() + + def refresh_security_group(security_group_id): + """Refresh security group from data store + + Gets called when changes have been made to the security + group.""" + raise NotImplementedError() + + +class NWFilterFirewall(FirewallDriver): """ This class implements a network filtering mechanism versatile enough for EC2 style Security Group filtering by leveraging @@ -767,7 +800,7 @@ class NWFilterFirewall(object): return str(net.net()), str(net.netmask()) @defer.inlineCallbacks - def setup_nwfilters_for_instance(self, instance): + def prepare_instance_filter(self, instance): """ Creates an NWFilter for the given instance. In the process, it makes sure the filters for the security groups as well as @@ -795,7 +828,7 @@ class NWFilterFirewall(object): instance['project_id'] for security_group in instance.security_groups: - yield self.ensure_security_group_filter(security_group['id']) + yield self.refresh_security_group(security_group['id']) nwfilter_xml += " \n" % \ security_group['id'] From 16c440c5b598dab51ce4bd37c48f02f3da87c092 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 2 Dec 2010 16:21:31 +0100 Subject: [PATCH 016/147] Refactor nwfilter code somewhat. For iptables based firewalls, I still want to leave it to nwfilter to protect against arp, mac, and ip spoofing, so it needed a bit of a split. --- nova/tests/virt_unittest.py | 8 +- nova/virt/libvirt_conn.py | 153 +++++++++++++++++++++++++----------- 2 files changed, 110 insertions(+), 51 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index d49383fb7ba2..4bbf2b50baf1 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -89,7 +89,7 @@ class LibvirtConnTestCase(test.TrialTestCase): for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.LibvirtConnection(True) + conn = libvirt_conn.get_connection(True) uri, _template, _rescue = conn.get_uri_and_templates() self.assertEquals(uri, expected_uri) @@ -130,6 +130,8 @@ class NWFilterTestCase(test.TrialTestCase): class Mock(object): pass + #def __call__(self, *args, **kwargs): + # return self.manager = manager.AuthManager() self.user = self.manager.create_user('fake', 'fake', 'fake', @@ -139,7 +141,7 @@ class NWFilterTestCase(test.TrialTestCase): self.fake_libvirt_connection = Mock() - self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection) + self.fw = libvirt_conn.NWFilterFirewall(lambda:self.fake_libvirt_connection) def tearDown(self): self.manager.delete_project(self.project) @@ -252,7 +254,7 @@ class NWFilterTestCase(test.TrialTestCase): self.security_group.id) instance = db.instance_get(self.context, inst_id) - d = self.fw.setup_nwfilters_for_instance(instance) + d = self.fw.prepare_instance_filter(instance) d.addCallback(_ensure_all_called) d.addCallback(lambda _: self.teardown_security_group()) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 0870a00fba8e..a0149c5cad58 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -130,20 +130,22 @@ class LibvirtConnection(object): self.rescue_xml = 
open(rescue_file).read() self._wrapped_conn = None self.read_only = read_only + + self.nwfilter = NWFilterFirewall(self._get_connection) + if not FLAGS.firewall_driver: - # This is weird looking, but NWFilter is libvirt specific - # and requires more cooperation between the two. - self.firewall_driver = NWFilterFirewall(self._conn) + self.firewall_driver = self.nwfilter + self.nwfilter.handle_security_groups = True else: self.firewall_driver = utils.import_object(FLAGS.firewall_driver) - @property - def _conn(self): + def _get_connection(self): if not self._wrapped_conn or not self._test_connection(): logging.debug('Connecting to libvirt: %s' % self.libvirt_uri) self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) return self._wrapped_conn + _conn = property(_get_connection) def _test_connection(self): try: @@ -353,6 +355,7 @@ class LibvirtConnection(object): power_state.NOSTATE, 'launching') + yield self.nwfilter.setup_basic_filtering(instance) yield self.firewall_driver.prepare_instance_filter(instance) yield self._create_image(instance, xml) yield self._conn.createXML(xml, 0) @@ -689,6 +692,9 @@ class NWFilterFirewall(FirewallDriver): libvirt's nwfilter. First, all instances get a filter ("nova-base-filter") applied. + This filter provides some basic security such as protection against + MAC spoofing, IP spoofing, and ARP spoofing. + This filter drops all incoming ipv4 and ipv6 connections. Outgoing connections are never blocked. @@ -722,44 +728,80 @@ class NWFilterFirewall(FirewallDriver): (*) This sentence brought to you by the redundancy department of redundancy. + """ def __init__(self, get_connection): - self._conn = get_connection + self._libvirt_get_connection = get_connection + self.static_filters_configured = False - nova_base_filter = ''' - 26717364-50cf-42d1-8185-29bf893ab110 - - - - - - - - ''' + def _get_connection(self): + return self._libvirt_get_connection() + _conn = property(_get_connection) - nova_dhcp_filter = ''' - 891e4787-e5c0-d59b-cbd6-41bc3c6b36fc - - - - - - - ''' + def nova_dhcp_filter(self): + """The standard allow-dhcp-server filter is an one, so it uses + ebtables to allow traffic through. Without a corresponding rule in + iptables, it'll get blocked anyway.""" + + return ''' + 891e4787-e5c0-d59b-cbd6-41bc3c6b36fc + + + + + + + ''' + + def setup_basic_filtering(self, instance): + """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" + + if self.handle_security_groups: + # No point in setting up a filter set that we'll be overriding + # anyway. 
+ return + + self._ensure_static_filters() + + instance_filter_name = self._instance_filter_name(instance) + self._define_filter(self._filter_container(instance_filter_name, + ['nova-base'])) + + @defer.inlineCallbacks + def _ensure_static_filters(self): + if self.static_filters_configured: + return + + yield self._define_filter(self._filter_container('nova-base', + ['no-mac-spoofing', + 'no-ip-spoofing', + 'no-arp-spoofing', + 'allow-dhcp-server'])) + yield self._define_filter(self.nova_base_ipv4_filter) + yield self._define_filter(self.nova_base_ipv6_filter) + yield self._define_filter(self.nova_dhcp_filter) + + self.static_filters_configured = True + + def _filter_container(self, name, filters): + xml = '''%s''' % ( + name, + ''.join(["" % (f,) for f in filters])) + return xml def nova_base_ipv4_filter(self): retval = "" for protocol in ['tcp', 'udp', 'icmp']: for direction, action, priority in [('out', 'accept', 399), - ('inout', 'drop', 400)]: + ('in', 'drop', 400)]: retval += """ <%s /> """ % (action, direction, @@ -771,7 +813,7 @@ class NWFilterFirewall(FirewallDriver): retval = "" for protocol in ['tcp', 'udp', 'icmp']: for direction, action, priority in [('out', 'accept', 399), - ('inout', 'drop', 400)]: + ('in', 'drop', 400)]: retval += """ <%s-ipv6 /> """ % (action, direction, @@ -799,6 +841,11 @@ class NWFilterFirewall(FirewallDriver): net = IPy.IP(cidr) return str(net.net()), str(net.netmask()) + def setup_security_groups_filtering(self, instance): + """Set up basic filtering (MAC, IP, and ARP spoofing protection) + as well as security groups filtering.""" + + @defer.inlineCallbacks def prepare_instance_filter(self, instance): """ @@ -807,37 +854,43 @@ class NWFilterFirewall(FirewallDriver): the base filter are all in place. """ - yield self._define_filter(self.nova_base_ipv4_filter) - yield self._define_filter(self.nova_base_ipv6_filter) - yield self._define_filter(self.nova_dhcp_filter) - yield self._define_filter(self.nova_base_filter) + yield self._ensure_static_filters() - nwfilter_xml = "\n" \ - " \n" % \ - instance['name'] + instance_filter_name = self._instance_filter_name(instance) + instance_secgroup_filter_name = '%s-secgroup' % (instance_filter_name,) + instance_filter_children = ['nova-base', instance_secgroup_filter_name] + instance_secgroup_filter_children = ['nova-base-ipv4', 'nova-base-ipv6', + 'nova-allow-dhcp-server'] if FLAGS.allow_project_net_traffic: network_ref = db.project_get_network(context.get_admin_context(), instance['project_id']) net, mask = self._get_net_and_mask(network_ref['cidr']) + project_filter = self.nova_project_filter(instance['project_id'], net, mask) yield self._define_filter(project_filter) - nwfilter_xml += " \n" % \ - instance['project_id'] + instance_secgroup_filter_children += [('nova-project-%s' % + instance['project_id'])] for security_group in instance.security_groups: yield self.refresh_security_group(security_group['id']) - nwfilter_xml += " \n" % \ - security_group['id'] - nwfilter_xml += "" + instance_secgroup_filter_children += [('nova-secgroup-%s' % + security_group['id'])] + + yield self._define_filter( + self._filter_container(instance_secgroup_filter_name, + instance_secgroup_filter_children)) + + yield self._define_filter( + self._filter_container(instance_filter_name, + instance_filter_children)) - yield self._define_filter(nwfilter_xml) return - def ensure_security_group_filter(self, security_group_id): + def refresh_security_group(self, security_group_id): return self._define_filter( 
self.security_group_to_nwfilter_xml(security_group_id)) @@ -868,3 +921,7 @@ class NWFilterFirewall(FirewallDriver): xml = "%s" % \ (security_group_id, rule_xml,) return xml + + def _instance_filter_name(self, instance): + return 'nova-instance-%s' % instance['name'] + From cf21683d741165d2cf0798b7dc9968daa311fafc Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 6 Dec 2010 22:19:29 +0100 Subject: [PATCH 017/147] Add iptables based security groups implementation. --- nova/db/sqlalchemy/api.py | 20 +++++ nova/network/linux_net.py | 2 + nova/tests/virt_unittest.py | 121 +++++++++++++++++++++++--- nova/virt/libvirt_conn.py | 165 ++++++++++++++++++++++++++++++++++-- 4 files changed, 290 insertions(+), 18 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index afa55fc03a11..21b991548233 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -574,12 +574,14 @@ def instance_get(context, instance_id, session=None): if is_admin_context(context): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ + options(joinedload_all('security_groups.rules')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ + options(joinedload_all('security_groups.rules')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ filter_by(deleted=False).\ @@ -1505,6 +1507,24 @@ def security_group_rule_get(context, security_group_rule_id, session=None): return result +@require_context +def security_group_rule_get_by_security_group(context, security_group_id, session=None): + if not session: + session = get_session() + if is_admin_context(context): + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(parent_group_id=security_group_id).\ + all() + else: + # TODO(vish): Join to group and check for project_id + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=False).\ + filter_by(parent_group_id=security_group_id).\ + all() + return result + + @require_context def security_group_rule_create(context, values): security_group_rule_ref = models.SecurityGroupIngressRule() diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 7b00e65d4290..3803f886e0c9 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -160,6 +160,8 @@ def ensure_bridge(bridge, interface, net_attrs=None): _execute("sudo ifconfig %s up" % bridge) _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge) _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge) + _execute("sudo iptables -N nova-local", check_exit_code=False) + _confirm_rule("FORWARD", "-j nova-local") def get_dhcp_hosts(context, network_id): diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 4bbf2b50baf1..6c0f379dad5c 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -43,15 +43,14 @@ class LibvirtConnTestCase(test.TrialTestCase): def test_get_uri_and_template(self): ip = '10.11.12.13' - instance = {'internal_id': 1, - 'memory_kb': '1024000', - 'basepath': '/some/path', - 'bridge_name': 'br100', - 'mac_address': '02:12:34:46:56:67', - 'vcpus': 2, - 'project_id': 'fake', - 'bridge': 'br101', - 'instance_type': 'm1.small'} + instance = { 'memory_kb': '1024000', + 'basepath': '/some/path', + 'bridge_name': 'br100', + 'mac_address': '02:12:34:46:56:67', + 
'vcpus': 2, + 'project_id': 'fake', + 'bridge': 'br101', + 'instance_type': 'm1.small'} user_context = context.RequestContext(project=self.project, user=self.user) @@ -123,6 +122,108 @@ class LibvirtConnTestCase(test.TrialTestCase): self.manager.delete_user(self.user) +class IptablesFirewallTestCase(test.TrialTestCase): + def setUp(self): + super(IptablesFirewallTestCase, self).setUp() + + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.RequestContext('fake', 'fake') + self.network = utils.import_object(FLAGS.network_manager) + self.fw = libvirt_conn.IptablesFirewallDriver() + + def tearDown(self): + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + super(IptablesFirewallTestCase, self).tearDown() + + def _p(self, *args, **kwargs): + if 'iptables-restore' in args: + print ' '.join(args), kwargs['stdin'] + if 'iptables-save' in args: + return + in_rules = ['# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', + '*filter', + ':INPUT ACCEPT [969615:281627771]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [915599:63811649]', + ':nova-block-ipv4 - [0:0]', + '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', + '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED,ESTABLISHED -j ACCEPT ', + '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', + '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', + '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ', + '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ', + 'COMMIT', + '# Completed on Mon Dec 6 11:54:13 2010'] + + def test_static_filters(self): + self.fw.execute = self._p + instance_ref = db.instance_create(self.context, + {'user_id': 'fake', + 'project_id': 'fake'}) + ip = '10.11.12.13' + + network_ref = self.network.get_network(self.context) + + fixed_ip = {'address': ip, + 'network_id': network_ref['id']} + + admin_ctxt = context.get_admin_context() + db.fixed_ip_create(admin_ctxt, fixed_ip) + db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + 'instance_id': instance_ref['id']}) + + + secgroup = db.security_group_create(admin_ctxt, + {'user_id': 'fake', + 'project_id': 'fake', + 'name': 'testgroup', + 'description': 'test group'}) + + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'tcp', + 'from_port': 80, + 'to_port': 81, + 'cidr': '192.168.10.0/24'}) + + db.instance_add_security_group(admin_ctxt, instance_ref['id'], + secgroup['id']) + instance_ref = db.instance_get(admin_ctxt, instance_ref['id']) + + self.fw.add_instance(instance_ref) + + out_rules = self.fw.modify_rules(self.in_rules) + + in_rules = filter(lambda l: not l.startswith('#'), self.in_rules) + for rule in in_rules: + if not 'nova' in rule: + self.assertTrue(rule in out_rules, 'Rule went missing: %s' % rule) + + print '\n'.join(out_rules) + + def est_stuff(self): + self.fw.execute = self._p + cloud_controller = cloud.CloudController() + cloud_controller.create_security_group(self.context, + 'testgroup', + 'test group description') + cloud_controller.authorize_security_group_ingress(self.context, + 'testgroup', + from_port='80', + to_port='81', + ip_protocol='tcp', + cidr_ip='0.0.0.0/0') + + 
self.fw._apply_ruleset() + + class NWFilterTestCase(test.TrialTestCase): def setUp(self): @@ -130,8 +231,6 @@ class NWFilterTestCase(test.TrialTestCase): class Mock(object): pass - #def __call__(self, *args, **kwargs): - # return self.manager = manager.AuthManager() self.user = self.manager.create_user('fake', 'fake', 'fake', diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index a0149c5cad58..495ee020d0f7 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -104,7 +104,7 @@ flags.DEFINE_string('libvirt_uri', flags.DEFINE_bool('allow_project_net_traffic', True, 'Whether to allow in project network traffic') -flags.DEFINE_bool('firewall_driver', None, +flags.DEFINE_string('firewall_driver', 'nova.virt.libvirt_conn.IptablesFirewallDriver', 'Firewall driver (defaults to nwfilter)') @@ -677,7 +677,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def refresh_security_group(security_group_id): + def refresh_security_group(self, security_group_id): """Refresh security group from data store Gets called when changes have been made to the security @@ -734,6 +734,7 @@ class NWFilterFirewall(FirewallDriver): def __init__(self, get_connection): self._libvirt_get_connection = get_connection self.static_filters_configured = False + self.handle_security_groups = False def _get_connection(self): return self._libvirt_get_connection() @@ -763,12 +764,14 @@ class NWFilterFirewall(FirewallDriver): def setup_basic_filtering(self, instance): """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" + logging.info('called setup_basic_filtering in nwfilter') if self.handle_security_groups: # No point in setting up a filter set that we'll be overriding # anyway. return + logging.info('ensuring static filters') self._ensure_static_filters() instance_filter_name = self._instance_filter_name(instance) @@ -841,10 +844,6 @@ class NWFilterFirewall(FirewallDriver): net = IPy.IP(cidr) return str(net.net()), str(net.netmask()) - def setup_security_groups_filtering(self, instance): - """Set up basic filtering (MAC, IP, and ARP spoofing protection) - as well as security groups filtering.""" - @defer.inlineCallbacks def prepare_instance_filter(self, instance): @@ -874,7 +873,7 @@ class NWFilterFirewall(FirewallDriver): instance_secgroup_filter_children += [('nova-project-%s' % instance['project_id'])] - for security_group in instance.security_groups: + for security_group in db.security_group_get_by_instance(instance['id']): yield self.refresh_security_group(security_group['id']) instance_secgroup_filter_children += [('nova-secgroup-%s' % @@ -925,3 +924,155 @@ class NWFilterFirewall(FirewallDriver): def _instance_filter_name(self, instance): return 'nova-instance-%s' % instance['name'] + +class IptablesFirewallDriver(FirewallDriver): + def __init__(self, execute=None): + self.execute = execute or utils.execute + self.instances = set() + + def apply_instance_filter(self, instance): + """No-op. 
Everything is done in prepare_instance_filter""" + pass + + def remove_instance(self, instance): + self.instances.remove(instance) + + def add_instance(self, instance): + self.instances.add(instance) + + def prepare_instance_filter(self, instance): + self.add_instance(instance) + self.apply_ruleset() + + def apply_ruleset(self): + current_filter, _ = self.execute('sudo iptables-save -t filter') + current_lines = current_filter.split('\n') + new_filter = self.modify_rules(current_lines) + self.execute('sudo iptables-restore', + process_input='\n'.join(new_filter)) + + def modify_rules(self, current_lines): + ctxt = context.get_admin_context() + # Remove any trace of nova rules. + new_filter = filter(lambda l: 'nova-' not in l, current_lines) + + seen_chains = False + for rules_index in range(len(new_filter)): + if not seen_chains: + if new_filter[rules_index].startswith(':'): + seen_chains = True + elif seen_chains == 1: + if not new_filter[rules_index].startswith(':'): + break + + + our_chains = [':nova-ipv4-fallback - [0:0]'] + our_rules = ['-A nova-ipv4-fallback -j DROP'] + + our_chains += [':nova-local - [0:0]'] + our_rules += ['-A FORWARD -j nova-local'] + + security_groups = set() + # Add our chains + # First, we add instance chains and rules + for instance in self.instances: + chain_name = self._instance_chain_name(instance) + ip_address = self._ip_for_instance(instance) + + our_chains += [':%s - [0:0]' % chain_name] + + # Jump to the per-instance chain + our_rules += ['-A nova-local -d %s -j %s' % (ip_address, + chain_name)] + + # Always drop invalid packets + our_rules += ['-A %s -m state --state ' + 'INVALID -j DROP' % (chain_name,)] + + # Allow established connections + our_rules += ['-A %s -m state --state ' + 'ESTABLISHED,RELATED -j ACCEPT' % (chain_name,)] + + # Jump to each security group chain in turn + for security_group in \ + db.security_group_get_by_instance(ctxt, + instance['id']): + security_groups.add(security_group) + + sg_chain_name = self._security_group_chain_name(security_group) + + our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)] + + # Allow DHCP responses + dhcp_server = self._dhcp_server_for_instance(instance) + our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68' % (chain_name, dhcp_server)] + + # If nothing matches, jump to the fallback chain + our_rules += ['-A %s -j nova-ipv4-fallback' % (chain_name,)] + + + # then, security group chains and rules + for security_group in security_groups: + chain_name = self._security_group_chain_name(security_group) + our_chains += [':%s - [0:0]' % chain_name] + + rules = \ + db.security_group_rule_get_by_security_group(ctxt, + security_group['id']) + + for rule in rules: + logging.info('%r', rule) + args = ['-A', chain_name, '-p', rule.protocol] + + if rule.cidr: + args += ['-s', rule.cidr] + else: + # Something about ipsets + pass + + if rule.protocol in ['udp', 'tcp']: + if rule.from_port == rule.to_port: + args += ['--dport', '%s' % (rule.from_port,)] + else: + args += ['-m', 'multiport', + '--dports', '%s:%s' % (rule.from_port, + rule.to_port)] + elif rule.protocol == 'icmp': + icmp_type = rule.from_port + icmp_code = rule.to_port + + if icmp_type == '-1': + icmp_type_arg = None + else: + icmp_type_arg = '%s' % icmp_type + if not icmp_code == '-1': + icmp_type_arg += '/%s' % icmp_code + + if icmp_type_arg: + args += ['-m', 'icmp', '--icmp_type', icmp_type_arg] + + args += ['-j ACCEPT'] + our_rules += [' '.join(args)] + + new_filter[rules_index:rules_index] = our_rules + 
new_filter[rules_index:rules_index] = our_chains + logging.info('new_filter: %s', '\n'.join(new_filter)) + return new_filter + + def refresh_security_group(self, security_group): + self.apply_ruleset() + + def _security_group_chain_name(self, security_group): + return 'nova-sg-%s' % (security_group['id'],) + + def _instance_chain_name(self, instance): + return 'nova-inst-%s' % (instance['id'],) + + def _ip_for_instance(self, instance): + return db.instance_get_fixed_address(context.get_admin_context(), + instance['id']) + + def _dhcp_server_for_instance(self, instance): + network = db.project_get_network(context.get_admin_context(), + instance['project_id']) + return network['gateway'] From e1e4e639bf24dab49676f619fbb358c91cca3023 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 6 Dec 2010 22:20:05 +0100 Subject: [PATCH 018/147] Remove dead test code. --- nova/tests/virt_unittest.py | 17 +----------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 6c0f379dad5c..d725c2ce2aef 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -200,7 +200,7 @@ class IptablesFirewallTestCase(test.TrialTestCase): self.fw.add_instance(instance_ref) out_rules = self.fw.modify_rules(self.in_rules) - + in_rules = filter(lambda l: not l.startswith('#'), self.in_rules) for rule in in_rules: if not 'nova' in rule: @@ -208,21 +208,6 @@ class IptablesFirewallTestCase(test.TrialTestCase): print '\n'.join(out_rules) - def est_stuff(self): - self.fw.execute = self._p - cloud_controller = cloud.CloudController() - cloud_controller.create_security_group(self.context, - 'testgroup', - 'test group description') - cloud_controller.authorize_security_group_ingress(self.context, - 'testgroup', - from_port='80', - to_port='81', - ip_protocol='tcp', - cidr_ip='0.0.0.0/0') - - self.fw._apply_ruleset() - class NWFilterTestCase(test.TrialTestCase): From 916f23e63add6167aef40931d6f564c685c6aefd Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 9 Dec 2010 14:15:38 +0100 Subject: [PATCH 019/147] Ignore security group rules that reference foreign security groups. --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 495ee020d0f7..2b5969ce10e3 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1028,7 +1028,7 @@ class IptablesFirewallDriver(FirewallDriver): args += ['-s', rule.cidr] else: # Something about ipsets - pass + continue From 77d7e022fd5f2c8709a6784cc83429494d126a3b Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 9 Dec 2010 13:59:50 -0800 Subject: [PATCH 020/147] Converted the instance table to use a uuid instead of an auto_increment ID and a random internal_id. I had to use a String(32) column with hex and not a String(16) with bytes because SQLAlchemy doesn't like non-unicode strings going in for String types. We could try another type, but I didn't want a primary_key on blob types.
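For illustration, a minimal sketch of the new id scheme and the ec2 id round-trip, using the helpers this patch introduces (make_uuid, id_to_ec2_id, ec2_id_to_id); the example value in the comment is hypothetical, and the snippet is not part of the change itself:

    import uuid

    def make_uuid():
        # Instances are now keyed on a 32-character hex uuid rather than
        # an auto_increment integer.
        return uuid.uuid1().hex

    def id_to_ec2_id(instance_id):
        # 'i-' plus the hex uuid, e.g. 'i-8a496b9caf4d4e98bd3c1128cd85320d'
        return "i-%s" % instance_id

    def ec2_id_to_id(ec2_id):
        # Strip the 'i-' prefix off again.
        return ec2_id[2:]

    instance_id = make_uuid()
    assert len(instance_id) == 32
    assert ec2_id_to_id(id_to_ec2_id(instance_id)) == instance_id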
--- nova/api/ec2/cloud.py | 70 +++++++++++------------- nova/api/openstack/servers.py | 4 +- nova/compute/api.py | 26 ++++----- nova/db/api.py | 8 +-- nova/db/sqlalchemy/api.py | 32 +++-------- nova/db/sqlalchemy/models.py | 23 ++++---- nova/tests/api/openstack/test_servers.py | 9 ++- nova/tests/cloud_unittest.py | 4 +- 8 files changed, 78 insertions(+), 98 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 05f8c3d0b04c..a831b2a6220f 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -72,18 +72,14 @@ def _gen_key(context, user_id, key_name): return {'private_key': private_key, 'fingerprint': fingerprint} -def ec2_id_to_internal_id(ec2_id): - """Convert an ec2 ID (i-[base 36 number]) to an internal id (int)""" - return int(ec2_id[2:], 36) +def ec2_id_to_id(ec2_id): + """Convert an ec2 ID and an instance ID.""" + return ec2_id[2:] -def internal_id_to_ec2_id(internal_id): - """Convert an internal ID (int) to an ec2 ID (i-[base 36 number])""" - digits = [] - while internal_id != 0: - internal_id, remainder = divmod(internal_id, 36) - digits.append('0123456789abcdefghijklmnopqrstuvwxyz'[remainder]) - return "i-%s" % ''.join(reversed(digits)) +def id_to_ec2_id(instance_id): + """Convert an instance ID to an ec2 ID.""" + return "i-%s" % instance_id class CloudController(object): @@ -153,7 +149,7 @@ class CloudController(object): hostname = instance_ref['hostname'] floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) - ec2_id = internal_id_to_ec2_id(instance_ref['internal_id']) + ec2_id = id_to_ec2_id(instance_ref['id']) data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { @@ -437,8 +433,8 @@ class CloudController(object): def get_console_output(self, context, instance_id, **kwargs): # instance_id is passed in as a list of instances ec2_id = instance_id[0] - internal_id = ec2_id_to_internal_id(ec2_id) - instance_ref = self.compute_api.get_instance(context, internal_id) + instance_id = ec2_id_to_id(ec2_id) + instance_ref = self.compute_api.get_instance(context, instance_id) output = rpc.call(context, '%s.%s' % (FLAGS.compute_topic, instance_ref['host']), @@ -464,8 +460,8 @@ class CloudController(object): instance_ec2_id = None instance_data = None if volume.get('instance', None): - internal_id = volume['instance']['internal_id'] - instance_ec2_id = internal_id_to_ec2_id(internal_id) + instance_id = volume['instance']['id'] + instance_ec2_id = id_to_ec2_id(instance_id) instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} @@ -534,8 +530,8 @@ class CloudController(object): raise exception.ApiError("Volume status must be available") if volume_ref['attach_status'] == "attached": raise exception.ApiError("Volume is already attached") - internal_id = ec2_id_to_internal_id(instance_id) - instance_ref = self.compute_api.get_instance(context, internal_id) + instance_id = ec2_id_to_id(instance_id) + instance_ref = self.compute_api.get_instance(context, instance_id) host = instance_ref['host'] rpc.cast(context, db.queue_get_for(context, FLAGS.compute_topic, host), @@ -570,11 +566,11 @@ class CloudController(object): # If the instance doesn't exist anymore, # then we need to call detach blind db.volume_detached(context) - internal_id = instance_ref['internal_id'] - ec2_id = internal_id_to_ec2_id(internal_id) + instance_id = instance_ref['id'] + ec2_id = id_to_ec2_id(instance_id) return {'attachTime': volume_ref['attach_time'], 'device': volume_ref['mountpoint'], - 'instanceId': internal_id, + 
'instanceId': instance_id, 'requestId': context.request_id, 'status': volume_ref['attach_status'], 'volumeId': volume_ref['id']} @@ -619,8 +615,8 @@ class CloudController(object): if instance['image_id'] == FLAGS.vpn_image_id: continue i = {} - internal_id = instance['internal_id'] - ec2_id = internal_id_to_ec2_id(internal_id) + instance_id = instance['id'] + ec2_id = id_to_ec2_id(instance_id) i['instanceId'] = ec2_id i['imageId'] = instance['image_id'] i['instanceState'] = { @@ -673,8 +669,8 @@ class CloudController(object): ec2_id = None if (floating_ip_ref['fixed_ip'] and floating_ip_ref['fixed_ip']['instance']): - internal_id = floating_ip_ref['fixed_ip']['instance']['ec2_id'] - ec2_id = internal_id_to_ec2_id(internal_id) + instance_id = floating_ip_ref['fixed_ip']['instance']['ec2_id'] + ec2_id = id_to_ec2_id(instance_id) address_rv = {'public_ip': address, 'instance_id': ec2_id} if context.user.is_admin(): @@ -709,8 +705,8 @@ class CloudController(object): return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): - internal_id = ec2_id_to_internal_id(instance_id) - instance_ref = self.compute_api.get_instance(context, internal_id) + instance_id = ec2_id_to_id(instance_id) + instance_ref = self.compute_api.get_instance(context, instance_id) fixed_address = db.instance_get_fixed_address(context, instance_ref['id']) floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) @@ -755,7 +751,7 @@ class CloudController(object): description=kwargs.get('display_description'), key_name=kwargs.get('key_name'), security_group=kwargs.get('security_group'), - generate_hostname=internal_id_to_ec2_id) + generate_hostname=id_to_ec2_id) return self._format_run_instances(context, instances[0]['reservation_id']) @@ -764,27 +760,27 @@ class CloudController(object): instance_id is a kwarg so its name cannot be modified.""" logging.debug("Going to start terminating instances") for ec2_id in instance_id: - internal_id = ec2_id_to_internal_id(ec2_id) - self.compute_api.delete_instance(context, internal_id) + instance_id = ec2_id_to_id(ec2_id) + self.compute_api.delete_instance(context, instance_id) return True def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" for ec2_id in instance_id: - internal_id = ec2_id_to_internal_id(ec2_id) - self.compute_api.reboot(context, internal_id) + instance_id = ec2_id_to_id(ec2_id) + self.compute_api.reboot(context, instance_id) return True def rescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" - internal_id = ec2_id_to_internal_id(instance_id) - self.compute_api.rescue(context, internal_id) + instance_id = ec2_id_to_id(instance_id) + self.compute_api.rescue(context, instance_id) return True def unrescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" - internal_id = ec2_id_to_internal_id(instance_id) - self.compute_api.unrescue(context, internal_id) + instance_id = ec2_id_to_id(instance_id) + self.compute_api.unrescue(context, instance_id) return True def update_instance(self, context, ec2_id, **kwargs): @@ -794,8 +790,8 @@ class CloudController(object): if field in kwargs: changes[field] = kwargs[field] if changes: - internal_id = ec2_id_to_internal_id(ec2_id) - inst = self.compute_api.get_instance(context, internal_id) + instance_id = ec2_id_to_id(ec2_id) + inst = self.compute_api.get_instance(context, instance_id) db.instance_update(context, 
inst['id'], kwargs) return True diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 6f2f6fed992e..f8f40b764e17 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -46,7 +46,7 @@ def _entity_detail(inst): inst_dict = {} mapped_keys = dict(status='state', imageId='image_id', - flavorId='instance_type', name='display_name', id='internal_id') + flavorId='instance_type', name='display_name', id='id') for k, v in mapped_keys.iteritems(): inst_dict[k] = inst[v] @@ -61,7 +61,7 @@ def _entity_detail(inst): def _entity_inst(inst): """ Filters all model attributes save for id and name """ - return dict(server=dict(id=inst['internal_id'], name=inst['display_name'])) + return dict(server=dict(id=inst['id'], name=inst['display_name'])) class Controller(wsgi.Controller): diff --git a/nova/compute/api.py b/nova/compute/api.py index 8e0efa4cc756..f310c575f215 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -36,9 +36,9 @@ from nova.db import base FLAGS = flags.FLAGS -def generate_default_hostname(internal_id): +def generate_default_hostname(instance_id): """Default function to generate a hostname given an instance reference.""" - return str(internal_id) + return str(instance_id) class ComputeAPI(base.Base): @@ -127,7 +127,6 @@ class ComputeAPI(base.Base): **base_options) instance = self.db.instance_create(context, instance) instance_id = instance['id'] - internal_id = instance['internal_id'] elevated = context.elevated() if not security_groups: @@ -138,9 +137,9 @@ class ComputeAPI(base.Base): security_group_id) # Set sane defaults if not specified - updates = dict(hostname=generate_hostname(internal_id)) + updates = dict(hostname=generate_hostname(instance_id)) if 'display_name' not in instance: - updates['display_name'] = "Server %s" % internal_id + updates['display_name'] = "Server %s" % instance_id instance = self.update_instance(context, instance_id, **updates) instances.append(instance) @@ -199,17 +198,16 @@ class ComputeAPI(base.Base): return self.db.instance_update(context, instance_id, kwargs) def delete_instance(self, context, instance_id): - logging.debug("Going to try and terminate %d" % instance_id) + logging.debug("Going to try and terminate %s" % instance_id) try: - instance = self.db.instance_get_by_internal_id(context, - instance_id) + instance = self.db.instance_get_by_id(context, instance_id) except exception.NotFound as e: - logging.warning("Instance %d was not found during terminate", + logging.warning("Instance %s was not found during terminate", instance_id) raise e if (instance['state_description'] == 'terminating'): - logging.warning("Instance %d is already being terminated", + logging.warning("Instance %s is already being terminated", instance_id) return @@ -264,11 +262,11 @@ class ComputeAPI(base.Base): return self.db.instance_get_all(context) def get_instance(self, context, instance_id): - return self.db.instance_get_by_internal_id(context, instance_id) + return self.db.instance_get_by_id(context, instance_id) def reboot(self, context, instance_id): """Reboot the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.db.instance_get_by_id(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), @@ -277,7 +275,7 @@ class ComputeAPI(base.Base): def rescue(self, context, instance_id): """Rescue the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + 
instance = self.db.instance_get_by_id(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), @@ -286,7 +284,7 @@ class ComputeAPI(base.Base): def unrescue(self, context, instance_id): """Unrescue the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.db.instance_get_by_id(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), diff --git a/nova/db/api.py b/nova/db/api.py index 8f9dc244301c..e607ac938eea 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -304,9 +304,9 @@ def instance_get_floating_address(context, instance_id): return IMPL.instance_get_floating_address(context, instance_id) -def instance_get_by_internal_id(context, internal_id): - """Get an instance by internal id.""" - return IMPL.instance_get_by_internal_id(context, internal_id) +def instance_get_by_id(context, instance_id): + """Get an instance by id.""" + return IMPL.instance_get_by_id(context, instance_id) def instance_is_vpn(context, instance_id): @@ -658,7 +658,7 @@ def security_group_get_all(context): def security_group_get(context, security_group_id): - """Get security group by its internal id.""" + """Get security group by its id.""" return IMPL.security_group_get(context, security_group_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 55036d1d1065..0505b77a634f 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -19,7 +19,7 @@ Implementation of SQLAlchemy backend. """ -import random +import uuid import warnings from nova import db @@ -525,28 +525,18 @@ def fixed_ip_update(context, address, values): ################### -#TODO(gundlach): instance_create and volume_create are nearly identical -#and should be refactored. I expect there are other copy-and-paste -#functions between the two of them as well. @require_context def instance_create(context, values): """Create a new Instance record in the database. context - request context object values - dict containing column values. - 'internal_id' is auto-generated and should not be specified. """ instance_ref = models.Instance() instance_ref.update(values) session = get_session() with session.begin(): - while instance_ref.internal_id == None: - # Instances have integer internal ids. 
- internal_id = random.randint(0, 2 ** 31 - 1) - if not instance_internal_id_exists(context, internal_id, - session=session): - instance_ref.internal_id = internal_id instance_ref.save(session=session) return instance_ref @@ -652,37 +642,31 @@ def instance_get_all_by_reservation(context, reservation_id): @require_context -def instance_get_by_internal_id(context, internal_id): +def instance_get_by_id(context, instance_id): session = get_session() + if type(instance_id) is int: + instance_id = uuid.UUID(int=instance_id).hex + if is_admin_context(context): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ - filter_by(internal_id=internal_id).\ + filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ filter_by(project_id=context.project_id).\ - filter_by(internal_id=internal_id).\ + filter_by(id=instance_id).\ filter_by(deleted=False).\ first() if not result: - raise exception.NotFound('Instance %s not found' % (internal_id)) + raise exception.NotFound('Instance %s not found' % (instance_id)) return result -@require_context -def instance_internal_id_exists(context, internal_id, session=None): - if not session: - session = get_session() - return session.query(exists().\ - where(models.Instance.internal_id == internal_id)).\ - one()[0] - - @require_context def instance_get_fixed_address(context, instance_id): session = get_session() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index fe0a9a92162d..a0b6fff890b1 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -20,6 +20,7 @@ SQLAlchemy models for nova data. """ import datetime +import uuid from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy import Column, Integer, String, schema @@ -39,6 +40,10 @@ FLAGS = flags.FLAGS BASE = declarative_base() +def make_uuid(): + return uuid.uuid1().hex + + class NovaBase(object): """Base class for Nova Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} @@ -154,11 +159,13 @@ class Service(BASE, NovaBase): class Instance(BASE, NovaBase): """Represents a guest vm.""" __tablename__ = 'instances' - id = Column(Integer, primary_key=True) - internal_id = Column(Integer, unique=True) + id = Column(String(32), primary_key=True, default=make_uuid) + + @property + def name(self): + return "instance-%s" % self.id admin_pass = Column(String(255)) - user_id = Column(String(255)) project_id = Column(String(255)) @@ -170,10 +177,6 @@ class Instance(BASE, NovaBase): def project(self): return auth.manager.AuthManager().get_project(self.project_id) - @property - def name(self): - return "instance-%d" % self.internal_id - image_id = Column(String(255)) kernel_id = Column(String(255)) ramdisk_id = Column(String(255)) @@ -238,7 +241,7 @@ class Volume(BASE, NovaBase): host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? 
- instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + instance_id = Column(String(32), ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('volumes'), foreign_keys=instance_id, @@ -311,7 +314,7 @@ class SecurityGroupInstanceAssociation(BASE, NovaBase): __tablename__ = 'security_group_instance_association' id = Column(Integer, primary_key=True) security_group_id = Column(Integer, ForeignKey('security_groups.id')) - instance_id = Column(Integer, ForeignKey('instances.id')) + instance_id = Column(String(32), ForeignKey('instances.id')) class SecurityGroup(BASE, NovaBase): @@ -431,7 +434,7 @@ class FixedIp(BASE, NovaBase): address = Column(String(255)) network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) network = relationship(Network, backref=backref('fixed_ips')) - instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + instance_id = Column(String(32), ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('fixed_ip', uselist=False), foreign_keys=instance_id, diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 8444b6fce628..d2b0e0fc873c 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -56,8 +56,8 @@ def instance_address(context, instance_id): def stub_instance(id, user_id=1): - return Instance(id=id + 123456, state=0, image_id=10, user_id=user_id, - display_name='server%s' % id, internal_id=id) + return Instance(id=id, state=0, image_id=10, user_id=user_id, + display_name='server%s' % id) class ServersTest(unittest.TestCase): @@ -71,8 +71,7 @@ class ServersTest(unittest.TestCase): fakes.stub_out_key_pair_funcs(self.stubs) fakes.stub_out_image_service(self.stubs) self.stubs.Set(nova.db.api, 'instance_get_all', return_servers) - self.stubs.Set(nova.db.api, 'instance_get_by_internal_id', - return_server) + self.stubs.Set(nova.db.api, 'instance_get_by_id', return_server) self.stubs.Set(nova.db.api, 'instance_get_all_by_user', return_servers) self.stubs.Set(nova.db.api, 'instance_add_security_group', @@ -107,7 +106,7 @@ class ServersTest(unittest.TestCase): def test_create_instance(self): def instance_create(context, inst): - return {'id': 1, 'internal_id': 1, 'display_name': ''} + return {'id': '1', 'display_name': ''} def server_update(context, id, params): return instance_create(context, id) diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 9886a2449b65..f63eed65ae11 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -113,7 +113,7 @@ class CloudTestCase(test.TrialTestCase): self.cloud.allocate_address(self.context) inst = db.instance_create(self.context, {}) fixed = self.network.allocate_fixed_ip(self.context, inst['id']) - ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) + ec2_id = cloud.id_to_ec2_id(inst['id']) self.cloud.associate_address(self.context, instance_id=ec2_id, public_ip=address) @@ -289,7 +289,7 @@ class CloudTestCase(test.TrialTestCase): def test_update_of_instance_display_fields(self): inst = db.instance_create(self.context, {}) - ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) + ec2_id = cloud.id_to_ec2_id(inst['id']) self.cloud.update_instance(self.context, ec2_id, display_name='c00l 1m4g3') inst = db.instance_get(self.context, inst['id']) From 8db57c605d59f492eaba68d134275a348c525640 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 13 Dec 2010 09:49:13 
+0100 Subject: [PATCH 021/147] Elaborate a bit on ipsets comment. --- nova/virt/libvirt_conn.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 2b5969ce10e3..a123f76715ce 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -861,9 +861,10 @@ class NWFilterFirewall(FirewallDriver): instance_secgroup_filter_children = ['nova-base-ipv4', 'nova-base-ipv6', 'nova-allow-dhcp-server'] + ctxt = context.get_admin_context() + if FLAGS.allow_project_net_traffic: - network_ref = db.project_get_network(context.get_admin_context(), - instance['project_id']) + network_ref = db.project_get_network(ctxt, instance['project_id']) net, mask = self._get_net_and_mask(network_ref['cidr']) project_filter = self.nova_project_filter(instance['project_id'], @@ -873,7 +874,8 @@ class NWFilterFirewall(FirewallDriver): instance_secgroup_filter_children += [('nova-project-%s' % instance['project_id'])] - for security_group in db.security_group_get_by_instance(instance['id']): + for security_group in db.security_group_get_by_instance(ctxt, + instance['id']): yield self.refresh_security_group(security_group['id']) instance_secgroup_filter_children += [('nova-secgroup-%s' % @@ -1027,7 +1029,8 @@ class IptablesFirewallDriver(FirewallDriver): if rule.cidr: args += ['-s', rule.cidr] else: - # Something about ipsets + # Eventually, a mechanism to grant access for security + # groups will turn up here. It'll use ipsets. continue if rule.protocol in ['udp', 'tcp']: From be9a3cd7e17edac4032c8ae554f75d725b0ad54a Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 13 Dec 2010 16:42:35 +0100 Subject: [PATCH 022/147] Move security group refresh logic into ComputeAPI. Add a trigger_security_group_members_refresh to ComputeAPI which finds the hosts that have instances that have security groups that reference a security group in which a new instance has just been placed, and sends a refresh_security_group_members to each of them. 
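In outline, the new ComputeAPI method fans out as: rules that name the group as grantee -> the parent groups those rules belong to -> the member instances of those groups -> the hosts those instances run on. A minimal sketch of that chain (illustrative only, not part of the diff below; it reuses the db accessors this patch introduces and resolves each parent group to its record before reading its instances):

    # ctxt is the request context; group_id is the group that gained a member.
    rules = db.security_group_rule_get_by_security_group_grantee(ctxt, group_id)
    parent_group_ids = set(rule['parent_group_id'] for rule in rules)
    hosts = set()
    for parent_id in parent_group_ids:
        group = db.security_group_get(ctxt, parent_id)
        for instance in group['instances']:
            if instance['host']:
                hosts.add(instance['host'])
    # Tell each affected compute node to refresh its view of the group.
    for host in hosts:
        rpc.cast(ctxt, db.queue_get_for(ctxt, FLAGS.compute_topic, host),
                 {"method": "refresh_security_group_members",
                  "args": {"security_group_id": group_id}})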
--- nova/api/ec2/cloud.py | 15 +++------- nova/compute/api.py | 61 +++++++++++++++++++++++++++++++++++++++ nova/compute/manager.py | 16 ++++++++-- nova/db/api.py | 7 +++++ nova/db/sqlalchemy/api.py | 19 ++++++++++++ nova/virt/libvirt_conn.py | 32 ++++++++++++++------ 6 files changed, 127 insertions(+), 23 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 05f8c3d0b04c..2694b8b001a3 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -130,15 +130,6 @@ class CloudController(object): result[key] = [line] return result - def _trigger_refresh_security_group(self, context, security_group): - nodes = set([instance['host'] for instance in security_group.instances - if instance['host'] is not None]) - for node in nodes: - rpc.cast(context, - '%s.%s' % (FLAGS.compute_topic, node), - {"method": "refresh_security_group", - "args": {"security_group_id": security_group.id}}) - def get_metadata(self, address): ctxt = context.get_admin_context() instance_ref = db.fixed_ip_get_instance(ctxt, address) @@ -369,7 +360,8 @@ class CloudController(object): match = False if match: db.security_group_rule_destroy(context, rule['id']) - self._trigger_refresh_security_group(context, security_group) + self.compute_api.trigger_security_group_rules_refresh(context, + security_group['id']) return True raise exception.ApiError("No rule for the specified parameters.") @@ -392,7 +384,8 @@ class CloudController(object): security_group_rule = db.security_group_rule_create(context, values) - self._trigger_refresh_security_group(context, security_group) + self.compute_api.trigger_security_group_rules_refresh(context, + security_group['id']) return True diff --git a/nova/compute/api.py b/nova/compute/api.py index 8e0efa4cc756..27010d513823 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -24,6 +24,7 @@ import datetime import logging import time +from nova import context from nova import db from nova import exception from nova import flags @@ -165,6 +166,10 @@ class ComputeAPI(base.Base): "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id}}) + + for group_id in security_groups: + self.trigger_security_group_members_refresh(elevated, group_id) + return instances def ensure_default_security_group(self, context): @@ -184,6 +189,62 @@ class ComputeAPI(base.Base): 'project_id': context.project_id} db.security_group_create(context, values) + + def trigger_security_group_rules_refresh(self, context, security_group_id): + """Called when a rule is added to or removed from a security_group""" + + security_group = db.security_group_get(context, security_group_id) + + hosts = set() + for instance in security_group['instances']: + if instance['host'] is not None: + hosts.add(instance['host']) + + for host in hosts: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "refresh_security_group", + "args": {"security_group_id": security_group.id}}) + + + def trigger_security_group_members_refresh(self, context, group_id): + """Called when a security group gains a new or loses a member + + Sends an update request to each compute node for whom this is + relevant.""" + + # First, we get the security group rules that reference this group as + # the grantee.. + security_group_rules = \ + db.security_group_rule_get_by_security_group_grantee(context, + group_id) + + # ..then we distill the security groups to which they belong.. 
+ security_groups = set() + for rule in security_group_rules: + security_groups.add(rule['parent_group_id']) + + # ..then we find the instances that are members of these groups.. + instances = set() + for security_group in security_groups: + for instance in security_group['instances']: + instances.add(instance['id']) + + # ...then we find the hosts where they live... + hosts = set() + for instance in instances: + if instance['host']: + hosts.add(instance['host']) + + # ...and finally we tell these nodes to refresh their view of this + # particular security group. + for host in hosts: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "refresh_security_group_members", + "args": {"security_group_id": group_id}}) + + def update_instance(self, context, instance_id, **kwargs): """Updates the instance in the datastore. diff --git a/nova/compute/manager.py b/nova/compute/manager.py index dd8d41129cc5..ee449c81944a 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -80,9 +80,19 @@ class ComputeManager(manager.Manager): @defer.inlineCallbacks @exception.wrap_exception - def refresh_security_group(self, context, security_group_id, **_kwargs): - """This call passes stright through to the virtualization driver.""" - yield self.driver.refresh_security_group(security_group_id) + def refresh_security_group_rules(self, context, + security_group_id, **_kwargs): + """This call passes straight through to the virtualization driver.""" + yield self.driver.refresh_security_group_rules(security_group_id) + + + @defer.inlineCallbacks + @exception.wrap_exception + def refresh_security_group_members(self, context, + security_group_id, **_kwargs): + """This call passes straight through to the virtualization driver.""" + yield self.driver.refresh_security_group_members(security_group_id) + @defer.inlineCallbacks @exception.wrap_exception diff --git a/nova/db/api.py b/nova/db/api.py index 8f9dc244301c..6fa80c247373 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -711,6 +711,13 @@ def security_group_rule_get_by_security_group(context, security_group_id): security_group_id) +def security_group_rule_get_by_security_group_grantee(context, + security_group_id): + """Get all rules that grant access to the given security group.""" + return IMPL.security_group_rule_get_by_security_group_grantee(context, + security_group_id) + + def security_group_rule_destroy(context, security_group_rule_id): """Deletes a security group rule.""" return IMPL.security_group_rule_destroy(context, security_group_rule_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 5214dd62b334..deb248f82799 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1532,6 +1532,25 @@ def security_group_rule_get_by_security_group(context, security_group_id, sessio return result +@require_context +def security_group_rule_get_by_security_group_grantee(context, + security_group_id, + session=None): + if not session: + session = get_session() + if is_admin_context(context): + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(group_id=security_group_id).\ + all() + else: + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=False).\ + filter_by(group_id=security_group_id).\ + all() + return result + + @require_context def security_group_rule_create(context, values): security_group_rule_ref = models.SecurityGroupIngressRule() diff --git a/nova/virt/libvirt_conn.py 
b/nova/virt/libvirt_conn.py index a123f76715ce..da566c33b909 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -656,8 +656,11 @@ class LibvirtConnection(object): domain = self._conn.lookupByName(instance_name) return domain.interfaceStats(interface) - def refresh_security_group(self, security_group_id): - self.firewall_driver.refresh_security_group(security_group_id) + def refresh_security_group_rules(self, security_group_id): + self.firewall_driver.refresh_security_group_rules(security_group_id) + + def refresh_security_group_members(self, security_group_id): + self.firewall_driver.refresh_security_group_members(security_group_id) class FirewallDriver(object): @@ -677,11 +680,19 @@ class FirewallDriver(object): """ raise NotImplementedError() - def refresh_security_group(self, security_group_id): - """Refresh security group from data store + def refresh_security_group_rules(self, security_group_id): + """Refresh security group rules from data store - Gets called when changes have been made to the security - group.""" + Gets called when a rule has been added to or removed from + the security group.""" + raise NotImplementedError() + + + def refresh_security_group_members(self, security_group_id): + """Refresh security group members from data store + + Gets called when an instance gets added to or removed from + the security group.""" raise NotImplementedError() @@ -876,7 +887,7 @@ class NWFilterFirewall(FirewallDriver): for security_group in db.security_group_get_by_instance(ctxt, instance['id']): - yield self.refresh_security_group(security_group['id']) + yield self.refresh_security_group_rules(security_group['id']) instance_secgroup_filter_children += [('nova-secgroup-%s' % security_group['id'])] @@ -891,7 +902,7 @@ class NWFilterFirewall(FirewallDriver): return - def refresh_security_group(self, security_group_id): + def refresh_security_group_rules(self, security_group_id): return self._define_filter( self.security_group_to_nwfilter_xml(security_group_id)) @@ -1062,7 +1073,10 @@ class IptablesFirewallDriver(FirewallDriver): logging.info('new_filter: %s', '\n'.join(new_filter)) return new_filter - def refresh_security_group(self, security_group): + def refresh_security_group_members(self, security_group): + pass + + def refresh_security_group_rules(self, security_group): self.apply_ruleset() def _security_group_chain_name(self, security_group): From 82ccd2b656a364251aeecbf4c31cd062af6513f0 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Tue, 14 Dec 2010 16:48:44 -0800 Subject: [PATCH 023/147] remove some logging --- nova/virt/images.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/virt/images.py b/nova/virt/images.py index b322f3eb2601..69838ac5b42b 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -70,7 +70,6 @@ def _fetch_image_no_curl(url, path, headers): def _fetch_s3_image(image, path, user, project): url = image_url(image) - logging.debug("About to retrieve %s and place it in %s", url, path) # This should probably move somewhere else, like e.g. 
a download_as # method on User objects and at the same time get rewritten to use @@ -98,7 +97,6 @@ def _fetch_s3_image(image, path, user, project): def _fetch_local_image(image, path, user, project): source = _image_path(os.path.join(image, 'image')) - logging.debug("About to copy %s to %s", source, path) if sys.platform.startswith('win'): return shutil.copy(source, path) else: From 1539df7429a235ba2fefe3f65422fe94b248ac08 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 15 Dec 2010 14:03:19 +0100 Subject: [PATCH 024/147] refresh_security_group renamed to refresh_security_group_rules --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 27010d513823..686c1eb0a8ef 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -203,7 +203,7 @@ class ComputeAPI(base.Base): for host in hosts: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "refresh_security_group", + {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group.id}}) From b420a3daa5f1b827f49e5d6557aaa0f8d396b81b Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 15 Dec 2010 14:04:06 +0100 Subject: [PATCH 025/147] Lots of PEP-8 work. --- nova/api/ec2/cloud.py | 4 +-- nova/compute/api.py | 8 ++--- nova/compute/manager.py | 2 -- nova/db/api.py | 2 +- nova/db/sqlalchemy/api.py | 3 +- nova/tests/virt_unittest.py | 67 ++++++++++++++++++++----------------- nova/virt/libvirt_conn.py | 29 ++++++++-------- 7 files changed, 57 insertions(+), 58 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 74be6d05bbf0..018139634918 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -361,7 +361,7 @@ class CloudController(object): if match: db.security_group_rule_destroy(context, rule['id']) self.compute_api.trigger_security_group_rules_refresh(context, - security_group['id']) + security_group['id']) return True raise exception.ApiError("No rule for the specified parameters.") @@ -385,7 +385,7 @@ class CloudController(object): security_group_rule = db.security_group_rule_create(context, values) self.compute_api.trigger_security_group_rules_refresh(context, - security_group['id']) + security_group['id']) return True diff --git a/nova/compute/api.py b/nova/compute/api.py index 686c1eb0a8ef..7c91792e33b6 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -166,7 +166,6 @@ class ComputeAPI(base.Base): "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id}}) - for group_id in security_groups: self.trigger_security_group_members_refresh(elevated, group_id) @@ -189,7 +188,6 @@ class ComputeAPI(base.Base): 'project_id': context.project_id} db.security_group_create(context, values) - def trigger_security_group_rules_refresh(self, context, security_group_id): """Called when a rule is added to or removed from a security_group""" @@ -206,10 +204,9 @@ class ComputeAPI(base.Base): {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group.id}}) - def trigger_security_group_members_refresh(self, context, group_id): """Called when a security group gains a new or loses a member - + Sends an update request to each compute node for whom this is relevant.""" @@ -223,7 +220,7 @@ class ComputeAPI(base.Base): security_groups = set() for rule in security_group_rules: security_groups.add(rule['parent_group_id']) - + # ..then we find the instances that are members of these groups.. 
instances = set() for security_group in security_groups: @@ -244,7 +241,6 @@ class ComputeAPI(base.Base): {"method": "refresh_security_group_members", "args": {"security_group_id": group_id}}) - def update_instance(self, context, instance_id, **kwargs): """Updates the instance in the datastore. diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ee449c81944a..f039bca2ed0c 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -85,7 +85,6 @@ class ComputeManager(manager.Manager): """This call passes straight through to the virtualization driver.""" yield self.driver.refresh_security_group_rules(security_group_id) - @defer.inlineCallbacks @exception.wrap_exception def refresh_security_group_members(self, context, @@ -93,7 +92,6 @@ class ComputeManager(manager.Manager): """This call passes straight through to the virtualization driver.""" yield self.driver.refresh_security_group_members(security_group_id) - @defer.inlineCallbacks @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): diff --git a/nova/db/api.py b/nova/db/api.py index 6fa80c247373..67796c24681b 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -715,7 +715,7 @@ def security_group_rule_get_by_security_group_grantee(context, security_group_id): """Get all rules that grant access to the given security group.""" return IMPL.security_group_rule_get_by_security_group_grantee(context, - security_group_id) + security_group_id) def security_group_rule_destroy(context, security_group_rule_id): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index deb248f82799..4e3ef577157a 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1515,7 +1515,8 @@ def security_group_rule_get(context, security_group_rule_id, session=None): @require_context -def security_group_rule_get_by_security_group(context, security_group_id, session=None): +def security_group_rule_get_by_security_group(context, security_group_id, + session=None): if not session: session = get_session() if is_admin_context(context): diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index d725c2ce2aef..1d6241fba803 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -43,14 +43,14 @@ class LibvirtConnTestCase(test.TrialTestCase): def test_get_uri_and_template(self): ip = '10.11.12.13' - instance = { 'memory_kb': '1024000', - 'basepath': '/some/path', - 'bridge_name': 'br100', - 'mac_address': '02:12:34:46:56:67', - 'vcpus': 2, - 'project_id': 'fake', - 'bridge': 'br101', - 'instance_type': 'm1.small'} + instance = {'memory_kb': '1024000', + 'basepath': '/some/path', + 'bridge_name': 'br100', + 'mac_address': '02:12:34:46:56:67', + 'vcpus': 2, + 'project_id': 'fake', + 'bridge': 'br101', + 'instance_type': 'm1.small'} user_context = context.RequestContext(project=self.project, user=self.user) @@ -125,7 +125,7 @@ class LibvirtConnTestCase(test.TrialTestCase): class IptablesFirewallTestCase(test.TrialTestCase): def setUp(self): super(IptablesFirewallTestCase, self).setUp() - + self.manager = manager.AuthManager() self.user = self.manager.create_user('fake', 'fake', 'fake', admin=True) @@ -141,26 +141,30 @@ class IptablesFirewallTestCase(test.TrialTestCase): def _p(self, *args, **kwargs): if 'iptables-restore' in args: - print ' '.join(args), kwargs['stdin'] + print ' '.join(args), kwargs['stdin'] if 'iptables-save' in args: - return - in_rules = ['# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', - '*filter', - ':INPUT ACCEPT 
[969615:281627771]', - ':FORWARD ACCEPT [0:0]', - ':OUTPUT ACCEPT [915599:63811649]', - ':nova-block-ipv4 - [0:0]', - '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ', - '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ', - '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ', - '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', - '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED,ESTABLISHED -j ACCEPT ', - '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', - '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', - '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ', - '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ', - 'COMMIT', - '# Completed on Mon Dec 6 11:54:13 2010'] + return + + in_rules = [ + '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', + '*filter', + ':INPUT ACCEPT [969615:281627771]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [915599:63811649]', + ':nova-block-ipv4 - [0:0]', + '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', + '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' + ',ESTABLISHED -j ACCEPT ', + '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', + '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', + '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ', + '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ', + 'COMMIT', + '# Completed on Mon Dec 6 11:54:13 2010' + ] def test_static_filters(self): self.fw.execute = self._p @@ -179,7 +183,6 @@ class IptablesFirewallTestCase(test.TrialTestCase): db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, 'instance_id': instance_ref['id']}) - secgroup = db.security_group_create(admin_ctxt, {'user_id': 'fake', 'project_id': 'fake', @@ -204,7 +207,8 @@ class IptablesFirewallTestCase(test.TrialTestCase): in_rules = filter(lambda l: not l.startswith('#'), self.in_rules) for rule in in_rules: if not 'nova' in rule: - self.assertTrue(rule in out_rules, 'Rule went missing: %s' % rule) + self.assertTrue(rule in out_rules, + 'Rule went missing: %s' % rule) print '\n'.join(out_rules) @@ -225,7 +229,8 @@ class NWFilterTestCase(test.TrialTestCase): self.fake_libvirt_connection = Mock() - self.fw = libvirt_conn.NWFilterFirewall(lambda:self.fake_libvirt_connection) + self.fw = libvirt_conn.NWFilterFirewall( + lambda: self.fake_libvirt_connection) def tearDown(self): self.manager.delete_project(self.project) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index da566c33b909..e55638224cf4 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -104,8 +104,9 @@ flags.DEFINE_string('libvirt_uri', flags.DEFINE_bool('allow_project_net_traffic', True, 'Whether to allow in project network traffic') -flags.DEFINE_string('firewall_driver', 'nova.virt.libvirt_conn.IptablesFirewallDriver', - 'Firewall driver (defaults to nwfilter)') +flags.DEFINE_string('firewall_driver', + 'nova.virt.libvirt_conn.IptablesFirewallDriver', + 'Firewall driver (defaults to nwfilter)') def get_connection(read_only): @@ -687,7 +688,6 @@ class FirewallDriver(object): the security group.""" raise NotImplementedError() - def refresh_security_group_members(self, security_group_id): """Refresh security group members from data store @@ -855,7 +855,6 @@ class NWFilterFirewall(FirewallDriver): net = IPy.IP(cidr) 
return str(net.net()), str(net.netmask()) - @defer.inlineCallbacks def prepare_instance_filter(self, instance): """ @@ -869,8 +868,9 @@ class NWFilterFirewall(FirewallDriver): instance_filter_name = self._instance_filter_name(instance) instance_secgroup_filter_name = '%s-secgroup' % (instance_filter_name,) instance_filter_children = ['nova-base', instance_secgroup_filter_name] - instance_secgroup_filter_children = ['nova-base-ipv4', 'nova-base-ipv6', - 'nova-allow-dhcp-server'] + instance_secgroup_filter_children = ['nova-base-ipv4', + 'nova-base-ipv6', + 'nova-allow-dhcp-server'] ctxt = context.get_admin_context() @@ -883,14 +883,14 @@ class NWFilterFirewall(FirewallDriver): yield self._define_filter(project_filter) instance_secgroup_filter_children += [('nova-project-%s' % - instance['project_id'])] + instance['project_id'])] for security_group in db.security_group_get_by_instance(ctxt, - instance['id']): + instance['id']): yield self.refresh_security_group_rules(security_group['id']) instance_secgroup_filter_children += [('nova-secgroup-%s' % - security_group['id'])] + security_group['id'])] yield self._define_filter( self._filter_container(instance_secgroup_filter_name, @@ -978,12 +978,11 @@ class IptablesFirewallDriver(FirewallDriver): if not new_filter[rules_index].startswith(':'): break - our_chains = [':nova-ipv4-fallback - [0:0]'] - our_rules = ['-A nova-ipv4-fallback -j DROP'] + our_rules = ['-A nova-ipv4-fallback -j DROP'] our_chains += [':nova-local - [0:0]'] - our_rules += ['-A FORWARD -j nova-local'] + our_rules += ['-A FORWARD -j nova-local'] security_groups = set() # Add our chains @@ -1018,12 +1017,12 @@ class IptablesFirewallDriver(FirewallDriver): # Allow DHCP responses dhcp_server = self._dhcp_server_for_instance(instance) - our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68' % (chain_name, dhcp_server)] + our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68' % + (chain_name, dhcp_server)] # If nothing matches, jump to the fallback chain our_rules += ['-A %s -j nova-ipv4-fallback' % (chain_name,)] - # then, security group chains and rules for security_group in security_groups: chain_name = self._security_group_chain_name(security_group) @@ -1031,7 +1030,7 @@ class IptablesFirewallDriver(FirewallDriver): rules = \ db.security_group_rule_get_by_security_group(ctxt, - security_group['id']) + security_group['id']) for rule in rules: logging.info('%r', rule) From c364724a0dc7a658058fcb167af66ee7eb5bcd2a Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 21 Dec 2010 01:41:28 -0500 Subject: [PATCH 026/147] Use paste.deploy for running the api server. --- bin/nova-api-paste | 103 +++++++++++++++++++++++++ etc/nova-api.conf | 62 +++++++++++++++ nova/api/__init__.py | 1 - nova/api/cloudpipe/__init__.py | 3 + nova/api/ec2/__init__.py | 50 +++++++++++- nova/api/ec2/metadatarequesthandler.py | 3 + nova/api/openstack/__init__.py | 13 ++++ tools/pip-requires | 4 +- 8 files changed, 235 insertions(+), 4 deletions(-) create mode 100755 bin/nova-api-paste create mode 100644 etc/nova-api.conf diff --git a/bin/nova-api-paste b/bin/nova-api-paste new file mode 100755 index 000000000000..dcb76522fc7f --- /dev/null +++ b/bin/nova-api-paste @@ -0,0 +1,103 @@ +#!/usr/bin/env python +# pylint: disable-msg=C0103 +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for Nova API.""" + +import gettext +import logging +import os +import sys + +from paste import deploy + +from nova import flags +from nova import wsgi + +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): + sys.path.insert(0, possible_topdir) + +gettext.install('nova', unicode=1) + +LOG = logging.getLogger('nova.api') +LOG.setLevel(logging.DEBUG) +LOG.addHandler(logging.StreamHandler()) + +FLAGS = flags.FLAGS + +API_ENDPOINTS = ['ec2', 'openstack'] + +def load_configuration(paste_config): + """Load the paste configuration from the config file and return it.""" + config = None + # Try each known name to get the global DEFAULTS, which will give ports + for name in API_ENDPOINTS: + try: + config = deploy.appconfig("config:%s" % paste_config, name=name) + except LookupError: + pass + if config: + verbose = config.get('verbose', None) + if verbose: + FLAGS.verbose = int(verbose) == 1 + if FLAGS.verbose: + logging.getLogger().setLevel(logging.DEBUG) + return config + LOG.debug("Paste config at %s has no section for known apis", paste_config) + print "Paste config at %s has no section for any known apis" % paste_config + sys.exit(1) + +def launch_api(paste_config_file, section, server, port, host): + """Launch an api server from the specified port and IP.""" + LOG.debug("Launching api %s on %s:%s", section, host, port) + app = deploy.loadapp('config:%s' % paste_config_file, name=section) + server.start(app, int(port), host) + +def run_app(paste_config_file): + LOG.debug("Using paste.deploy config at: %s", configfile) + config = load_configuration(paste_config_file) + LOG.debug("Configuration: %r", config) + server = wsgi.Server() + ip = config.get('host', None) + for api in API_ENDPOINTS: + port = config.get("%s_port" % api, None) + if not port: + continue + host = config.get("%s_host" % api, None) or ip or '0.0.0.0' + launch_api(configfile, api, server, port, host) + LOG.debug("All api servers launched, now waiting") + server.wait() + +if __name__ == '__main__': + FLAGS(sys.argv) + configfiles = ['/etc/nova/nova-api.conf'] + if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): + configfiles.insert(0, + os.path.join(possible_topdir, 'etc', 'nova-api.conf')) + for configfile in configfiles: + if os.path.exists(configfile): + run_app(configfile) + break + else: + LOG.debug("Skipping missing configuration: %s", configfile) diff --git a/etc/nova-api.conf b/etc/nova-api.conf new file mode 100644 index 000000000000..eaa74639e7ca --- /dev/null +++ b/etc/nova-api.conf @@ -0,0 +1,62 @@ +[DEFAULT] +verbose = 1 +ec2_port = 8773 +ec2_address = 0.0.0.0 +openstack_port = 8774 +openstack_address = 0.0.0.0 + +####### +# EC2 # +####### + +[app:ec2] +use =
ec2composite + +[composite:ec2composite] +use = egg:Paste#urlmap +/: ec2versions +/services: ec2api +/cloudpipe: cloudpipe +/latest: ec2metadata +/200: ec2metadata +/1.0: ec2metadata + +[pipeline:ec2api] +pipeline = authenticate router authorizer ec2executor + +[filter:authenticate] +paste.filter_factory = nova.api.ec2:authenticate_factory + +[filter:router] +paste.filter_factory = nova.api.ec2:router_factory + +[filter:authorizer] +paste.filter_factory = nova.api.ec2:authorizer_factory + +[app:ec2executor] +paste.app_factory = nova.api.ec2:executor_factory + +[app:ec2versions] +paste.app_factory = nova.api.ec2:versions_factory + +[app:ec2metadata] +paste.app_factory = nova.api.ec2.metadatarequesthandler:metadata_factory + +[app:cloudpipe] +paste.app_factory = nova.api.cloudpipe:cloudpipe_factory + +############# +# Openstack # +############ + +[pipeline:openstack] +pipeline = auth ratelimit osapi + +[filter:auth] +paste.filter_factory = nova.api.openstack:auth_factory + +[filter:ratelimit] +paste.filter_factory = nova.api.openstack:ratelimit_factory + +[app:osapi] +paste.app_factory = nova.api.openstack:router_factory diff --git a/nova/api/__init__.py b/nova/api/__init__.py index 80f9f210927d..92b495e8cc18 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -29,7 +29,6 @@ import routes import webob.dec from nova import flags -from nova import utils from nova import wsgi from nova.api import cloudpipe from nova.api import ec2 diff --git a/nova/api/cloudpipe/__init__.py b/nova/api/cloudpipe/__init__.py index 6d40990a8859..47349d9f9a7b 100644 --- a/nova/api/cloudpipe/__init__.py +++ b/nova/api/cloudpipe/__init__.py @@ -67,3 +67,6 @@ class API(wsgi.Application): project_id = self.get_project_id_from_ip(req.remote_addr) cert = self.str_params['cert'] return crypto.sign_csr(urllib.unquote(cert), project_id) + +def cloudpipe_factory(global_opts, **local_opts): + return API() diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index a6ee16c332a2..50cb18078e11 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -225,10 +225,9 @@ class Executor(wsgi.Application): args = req.environ['ec2.action_args'] api_request = apirequest.APIRequest(controller, action) + result = None try: result = api_request.send(context, **args) - req.headers['Content-Type'] = 'text/xml' - return result except exception.ApiError as ex: if ex.code: @@ -238,6 +237,13 @@ class Executor(wsgi.Application): # TODO(vish): do something more useful with unknown exceptions except Exception as ex: return self._error(req, type(ex).__name__, str(ex)) + else: + resp = webob.Response() + resp.status = 200 + resp.headers['Content-Type'] = 'text/xml' + resp.body = str(result) + return resp + def _error(self, req, code, message): logging.error("%s: %s", code, message) @@ -249,3 +255,43 @@ class Executor(wsgi.Application): '%s' '?' 
% (code, message)) return resp + +class Versions(wsgi.Application): + + @webob.dec.wsgify + def __call__(self, req): + """Respond to a request for all EC2 versions.""" + # available api versions + versions = [ + '1.0', + '2007-01-19', + '2007-03-01', + '2007-08-29', + '2007-10-10', + '2007-12-15', + '2008-02-01', + '2008-09-01', + '2009-04-04', + ] + return ''.join('%s\n' % v for v in versions) + +def authenticate_factory(global_args, **local_args): + def authenticator(app): + return Authenticate(app) + return authenticator + +def router_factory(global_args, **local_args): + def router(app): + return Router(app) + return router + +def authorizer_factory(global_args, **local_args): + def authorizer(app): + return Authorizer(app) + return authorizer + +def executor_factory(global_args, **local_args): + return Executor() + +def versions_factory(global_args, **local_args): + return Versions() diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 2f4f414ccb5c..fffefb97b74d 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -72,3 +72,6 @@ class MetadataRequestHandler(object): if data is None: raise webob.exc.HTTPNotFound() return self.print_data(data) + +def metadata_factory(global_args, **local_args): + return MetadataRequestHandler() diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index b9ecbd9b8d6b..cb825cf41f72 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -210,3 +210,16 @@ def limited(items, req): limit = min(1000, limit) range_end = offset + limit return items[offset:range_end] + +def auth_factory(global_conf, **local_conf): + def auth(app): + return AuthMiddleware(app) + return auth + +def ratelimit_factory(global_conf, **local_conf): + def rl(app): + return RateLimitingMiddleware(app) + return rl + +def router_factory(global_cof, **local_conf): + return APIRouter() diff --git a/tools/pip-requires b/tools/pip-requires index 52451b8cbd88..4fd147670ad9 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -21,4 +21,6 @@ mox==0.5.0 greenlet==0.3.1 nose bzr -Twisted>=10.1.0 \ No newline at end of file +Twisted>=10.1.0 +PasteDeploy +paste From 8c8b289f2626b1d9bad76bc5d4819904ace5800d Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 21 Dec 2010 02:21:01 -0500 Subject: [PATCH 027/147] Remove ec2 config chain and move openstack versions to top-level application. 
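With the extra config chain removed, each top-level paste section is a composite app that can be loaded directly by name. A short sketch of doing so programmatically, following the same deploy.loadapp pattern bin/nova-api-paste uses (the path shown is one of the defaults that script searches):

    from paste import deploy

    # 'openstack' is the composite section: urlmap sends '/' to the
    # versions app and '/v1.0' into the auth/ratelimit/osapi pipeline.
    app = deploy.loadapp('config:/etc/nova/nova-api.conf', name='openstack')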
--- etc/nova-api.conf | 15 ++++++++++----- nova/api/openstack/__init__.py | 18 ++++++++++++++++-- 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/etc/nova-api.conf b/etc/nova-api.conf index eaa74639e7ca..152913ecf122 100644 --- a/etc/nova-api.conf +++ b/etc/nova-api.conf @@ -9,10 +9,7 @@ openstack_address = 0.0.0.0 # EC2 # ####### -[app:ec2] -use = ec2composite - -[composite:ec2composite] +[composite:ec2] use = egg:Paste#urlmap /: ec2versions /services: ec2api /cloudpipe: cloudpipe /latest: ec2metadata /200: ec2metadata /1.0: ec2metadata @@ -49,7 +46,12 @@ paste.app_factory = nova.api.cloudpipe:cloudpipe_factory # Openstack # ############ -[pipeline:openstack] +[composite:openstack] +use = egg:Paste#urlmap +/: osversions +/v1.0: openstackapi + +[pipeline:openstackapi] pipeline = auth ratelimit osapi [filter:auth] paste.filter_factory = nova.api.openstack:auth_factory @@ -60,3 +62,6 @@ paste.filter_factory = nova.api.openstack:ratelimit_factory [app:osapi] paste.app_factory = nova.api.openstack:router_factory + +[app:osversions] +paste.app_factory = nova.api.openstack:versions_factory diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index cb825cf41f72..f3686997fbc5 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -20,7 +20,6 @@ WSGI middleware for OpenStack API controllers. """ -import json import time import logging @@ -41,7 +40,6 @@ from nova.api.openstack import images from nova.api.openstack import ratelimiting from nova.api.openstack import servers from nova.api.openstack import sharedipgroups -from nova.auth import manager FLAGS = flags.FLAGS @@ -193,6 +191,19 @@ class APIRouter(wsgi.Router): super(APIRouter, self).__init__(mapper) +class Versions(wsgi.Application): + @webob.dec.wsgify + def __call__(self, req): + """Respond to a request for all OpenStack API versions.""" + response = { + "versions": [ + dict(status="CURRENT", id="v1.0")]} + metadata = { + "application/xml": { + "attributes": dict(version=["status", "id"])}} + return wsgi.Serializer(req.environ, metadata).to_content_type(response) + + def limited(items, req): """Return a slice of items according to requested offset and limit. @@ -223,3 +234,6 @@ def ratelimit_factory(global_conf, **local_conf): def rl(app): return RateLimitingMiddleware(app) return rl def router_factory(global_cof, **local_conf): return APIRouter() + +def versions_factory(global_conf, **local_conf): + return Versions() From 729468d0be1bf97c869b1169414154a76d9b96b2 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 21 Dec 2010 19:20:28 -0500 Subject: [PATCH 028/147] Burn-in support: allow specifying a specific host via availability_zone when running instances and volumes. --- bin/nova-manage | 50 +++++++++++++- nova/api/ec2/cloud.py | 38 +++++++++++ nova/compute/api.py | 4 +- nova/db/sqlalchemy/models.py | 4 +- nova/scheduler/driver.py | 5 ++ nova/scheduler/simple.py | 26 +++++++ nova/tests/scheduler_unittest.py | 112 +++++++++++++++++++++++++++++-- 7 files changed, 232 insertions(+), 7 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 0c1b621ed72f..34bdd3df9316 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -53,6 +53,7 @@ CLI interface for nova management. """ +import datetime import gettext import logging import os @@ -432,6 +433,52 @@ class NetworkCommands(object): int(network_size), int(vlan_start), int(vpn_start)) + +class ServiceCommands(object): + """Enable and disable running services""" + + def list(self, host=None, service=None): + """Show a list of all running services. Filter by host & service name.
+ args: [host] [service]""" + ctxt = context.get_admin_context() + now = datetime.datetime.utcnow() + services = db.service_get_all(ctxt) + if host: + services = [s for s in services if s['host'] == host] + if service: + services = [s for s in services if s['binary'] == service] + for svc in services: + delta = now - (svc['updated_at'] or svc['created_at']) + alive = (delta.seconds <= 15) + art = (alive and ":-)") or "XXX" + active = 'enabled' + if svc['disabled']: + active = 'disabled' + print "%-10s %-10s %-8s %s %s" % (svc['host'], svc['binary'], + active, art, + svc['updated_at']) + + def enable(self, host, service): + """Enable scheduling for a service + args: host service""" + ctxt = context.get_admin_context() + svc = db.service_get_by_args(ctxt, host, service) + if not svc: + print "Unable to find service" + return + db.service_update(ctxt, svc['id'], {'disabled': False}) + + def disable(self, host, service): + """Disable scheduling for a service + args: host service""" + ctxt = context.get_admin_context() + svc = db.service_get_by_args(ctxt, host, service) + if not svc: + print "Unable to find service" + return + db.service_update(ctxt, svc['id'], {'disabled': True}) + + CATEGORIES = [ ('user', UserCommands), ('project', ProjectCommands), @@ -439,7 +486,8 @@ CATEGORIES = [ ('shell', ShellCommands), ('vpn', VpnCommands), ('floating', FloatingIpCommands), - ('network', NetworkCommands)] + ('network', NetworkCommands), + ('service', ServiceCommands)] def lazy_match(name, key_value_tuples): diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 8375c4399bdd..b181a947fc5d 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -189,9 +189,45 @@ class CloudController(object): return data def describe_availability_zones(self, context, **kwargs): + if ('zone_name' in kwargs and + 'verbose' in kwargs['zone_name'] and + context.is_admin): + return self._describe_availability_zones_verbose(context, + **kwargs) + else: + return self._describe_availability_zones(context, **kwargs) + + def _describe_availability_zones(self, context, **kwargs): return {'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} + def _describe_availability_zones_verbose(self, context, **kwargs): + rv = {'availabilityZoneInfo': [{'zoneName': 'nova', + 'zoneState': 'available'}]} + + services = db.service_get_all(context) + now = db.get_time() + hosts = [] + for host in [service['host'] for service in services]: + if not host in hosts: + hosts.append(host) + for host in hosts: + rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host, + 'zoneState': ''}) + hsvcs = [service for service in services if service['host'] == host] + for svc in hsvcs: + delta = now - (svc['updated_at'] or svc['created_at']) + alive = (delta.seconds <= FLAGS.service_down_time) + art = (alive and ":-)") or "XXX" + active = 'enabled' + if svc['disabled']: + active = 'disabled' + rv['availabilityZoneInfo'].append({ + 'zoneName': '| |- %s' % svc['binary'], + 'zoneState': '%s %s %s' % (active, art, + svc['updated_at'])}) + return rv + def describe_regions(self, context, region_name=None, **kwargs): if FLAGS.region_list: regions = [] @@ -757,6 +793,8 @@ class CloudController(object): description=kwargs.get('display_description'), key_name=kwargs.get('key_name'), security_group=kwargs.get('security_group'), + availability_zone=kwargs.get('placement', {}).get( + 'AvailabilityZone'), generate_hostname=internal_id_to_ec2_id) return self._format_run_instances(context, instances[0]['reservation_id']) diff --git 
a/nova/compute/api.py b/nova/compute/api.py index c740814da5af..299083bf4a06 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -57,6 +57,7 @@ class ComputeAPI(base.Base): max_count=1, kernel_id=None, ramdisk_id=None, display_name='', description='', key_name=None, key_data=None, security_group='default', + availability_zone=None, generate_hostname=generate_default_hostname): """Create the number of instances requested if quote and other arguments check out ok.""" @@ -121,7 +122,8 @@ class ComputeAPI(base.Base): 'display_name': display_name, 'display_description': description, 'key_name': key_name, - 'key_data': key_data} + 'key_data': key_data, + 'availability_zone': availability_zone} elevated = context.elevated() instances = [] diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 96d981571705..41e8cfefaa56 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -148,7 +148,7 @@ class Service(BASE, NovaBase): binary = Column(String(255)) topic = Column(String(255)) report_count = Column(Integer, nullable=False, default=0) - disabled = Column(Boolean, default=False) + disabled = Column(Boolean, default=True) class Instance(BASE, NovaBase): @@ -210,6 +210,8 @@ class Instance(BASE, NovaBase): launched_at = Column(DateTime) terminated_at = Column(DateTime) + availability_zone = Column(String(255)) + # User editable field for display in user-facing UIs display_name = Column(String(255)) display_description = Column(String(255)) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index f271d573fd37..340f40310d80 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -37,6 +37,11 @@ class NoValidHost(exception.Error): pass +class WillNotSchedule(exception.Error): + """The specified host is not up or doesn't exist.""" + pass + + class Scheduler(object): """The base class that all Scheduler clases should inherit from.""" diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index 7f50936564d9..9e85ba952a42 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -43,6 +43,19 @@ class SimpleScheduler(chance.ChanceScheduler): def schedule_run_instance(self, context, instance_id, *_args, **_kwargs): """Picks a host that is up and has the fewest running instances.""" instance_ref = db.instance_get(context, instance_id) + if instance_ref['availability_zone'] and context.is_admin: + zone, _x, host = instance_ref['availability_zone'].partition(':') + service = db.service_get_by_args(context.elevated(), host, + 'nova-compute') + if not self.service_is_up(service): + raise driver.WillNotSchedule("Host %s is not alive" % host) + + # TODO(vish): this probably belongs in the manager, if we + # can generalize this somehow + now = datetime.datetime.utcnow() + db.instance_update(context, instance_id, {'host': host, + 'scheduled_at': now}) + return host results = db.service_get_all_compute_sorted(context) for result in results: (service, instance_cores) = result @@ -62,6 +75,19 @@ class SimpleScheduler(chance.ChanceScheduler): def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): """Picks a host that is up and has the fewest volumes.""" volume_ref = db.volume_get(context, volume_id) + if (':' in volume_ref['availability_zone']) and context.is_admin: + zone, _x, host = volume_ref['availability_zone'].partition(':') + service = db.service_get_by_args(context.elevated(), host, + 'nova-volume') + if not self.service_is_up(service): + raise driver.WillNotSchedule("Host %s not 
available" % host) + + # TODO(vish): this probably belongs in the manager, if we + # can generalize this somehow + now = datetime.datetime.utcnow() + db.volume_update(context, volume_id, {'host': host, + 'scheduled_at': now}) + return host results = db.service_get_all_volume_sorted(context) for result in results: (service, volume_gigabytes) = result diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py index d1756b8fb5a4..92262cc7d51f 100644 --- a/nova/tests/scheduler_unittest.py +++ b/nova/tests/scheduler_unittest.py @@ -19,6 +19,8 @@ Tests For Scheduler """ +import datetime + from nova import context from nova import db from nova import flags @@ -93,7 +95,7 @@ class SimpleDriverTestCase(test.TestCase): self.manager.delete_user(self.user) self.manager.delete_project(self.project) - def _create_instance(self): + def _create_instance(self, **kwargs): """Create a test instance""" inst = {} inst['image_id'] = 'ami-test' @@ -104,6 +106,7 @@ class SimpleDriverTestCase(test.TestCase): inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 inst['vcpus'] = 1 + inst['availability_zone'] = kwargs.get('availability_zone', None) return db.instance_create(self.context, inst)['id'] def _create_volume(self): @@ -112,10 +115,11 @@ class SimpleDriverTestCase(test.TestCase): vol['image_id'] = 'ami-test' vol['reservation_id'] = 'r-fakeres' vol['size'] = 1 + vol['availability_zone'] = 'test' return db.volume_create(self.context, vol)['id'] - def test_hosts_are_up(self): - """Ensures driver can find the hosts that are up""" + def test_doesnt_report_disabled_hosts_as_up(self): + """Ensures driver doesn't find hosts before they are enabled""" # NOTE(vish): constructing service without create method # because we are going to use it without queue compute1 = service.Service('host1', @@ -129,7 +133,30 @@ class SimpleDriverTestCase(test.TestCase): FLAGS.compute_manager) compute2.start() hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(len(hosts), 2) + self.assertEqual(0, len(hosts)) + compute1.kill() + compute2.kill() + + def test_reports_enabled_hosts_as_up(self): + """Ensures driver can find the hosts that are up""" + # NOTE(vish): constructing service without create method + # because we are going to use it without queue + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': False}) + db.service_update(self.context, s2['id'], {'disabled': False}) + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(2, len(hosts)) compute1.kill() compute2.kill() @@ -145,6 +172,10 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': False}) + db.service_update(self.context, s2['id'], {'disabled': False}) instance_id1 = self._create_instance() compute1.run_instance(self.context, instance_id1) instance_id2 = self._create_instance() @@ -156,6 +187,67 @@ class SimpleDriverTestCase(test.TestCase): compute1.kill() compute2.kill() + def 
test_specific_host_gets_instance(self): + """Ensures if you set availability_zone it launches on that zone""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': False}) + db.service_update(self.context, s2['id'], {'disabled': False}) + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_wont_schedule_if_specified_host_is_down(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) + past = now - delta + db.service_update(self.context, s1['id'], {'disabled': False, + 'updated_at': past}) + instance_id2 = self._create_instance(availability_zone='nova:host1') + self.assertRaises(driver.WillNotSchedule, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id2) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + + def test_will_schedule_on_disabled_host_if_specified(self): + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + db.service_get_by_args(self.context, 'host1', 'nova-compute') + instance_id2 = self._create_instance(availability_zone='nova:host1') + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual('host1', host) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + def test_too_many_cores(self): """Ensures we don't go over max cores""" compute1 = service.Service('host1', @@ -168,6 +260,10 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': False}) + db.service_update(self.context, s2['id'], {'disabled': False}) instance_ids1 = [] instance_ids2 = [] for index in xrange(FLAGS.max_cores): @@ -201,6 +297,10 @@ class SimpleDriverTestCase(test.TestCase): 'volume', FLAGS.volume_manager) volume2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-volume') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-volume') + db.service_update(self.context, s1['id'], {'disabled': False}) + db.service_update(self.context, s2['id'], {'disabled': False}) volume_id1 = self._create_volume() volume1.create_volume(self.context, volume_id1) volume_id2 = self._create_volume() @@ -224,6 +324,10 @@ class SimpleDriverTestCase(test.TestCase): 'volume', FLAGS.volume_manager) volume2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-volume') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-volume') +
db.service_update(self.context, s1['id'], {'disabled': False}) + db.service_update(self.context, s2['id'], {'disabled': False}) volume_ids1 = [] volume_ids2 = [] for index in xrange(FLAGS.max_gigabytes): From 438197264ea5ddc8bf076100586af6c71b0bf58d Mon Sep 17 00:00:00 2001 From: Eric Day Date: Mon, 27 Dec 2010 11:22:15 -0800 Subject: [PATCH 029/147] Added custom guid type so we can choose the most efficient backend DB type easily. --- nova/db/sqlalchemy/models.py | 40 ++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index a7725ac3c43c..7ec96bc6d3cf 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -22,12 +22,14 @@ SQLAlchemy models for nova data. import datetime import uuid -from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy import Column, Integer, Float, String, schema from sqlalchemy import ForeignKey, DateTime, Boolean, Text +from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy.schema import ForeignKeyConstraint +from sqlalchemy.types import TypeDecorator, CHAR from nova.db.sqlalchemy.session import get_session @@ -44,6 +46,40 @@ def make_uuid(): return uuid.uuid1().hex +class GUID(TypeDecorator): + """Platform-independent GUID type. + + Uses Postgresql's UUID type, otherwise uses + CHAR(32), storing as stringified hex values. + + """ + impl = CHAR + + def load_dialect_impl(self, dialect): + if dialect.name == 'postgresql': + return dialect.type_descriptor(UUID()) + else: + return dialect.type_descriptor(CHAR(32)) + + def process_bind_param(self, value, dialect): + if value is None: + return value + elif dialect.name == 'postgresql': + return str(value) + else: + if not isinstance(value, uuid.UUID): + return "%.32x" % uuid.UUID(value).int + else: + # hexstring + return "%.32x" % value.int + + def process_result_value(self, value, dialect): + if value is None: + return value + else: + return uuid.UUID(value).hex + + class NovaBase(object): """Base class for Nova Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} @@ -169,7 +205,7 @@ class Certificate(BASE, NovaBase): class Instance(BASE, NovaBase): """Represents a guest vm.""" __tablename__ = 'instances' - id = Column(String(32), primary_key=True, default=make_uuid) + id = Column(GUID, primary_key=True, default=make_uuid) @property def name(self): From 89000675dfe321240b3dae53604ba87115a3ca3e Mon Sep 17 00:00:00 2001 From: Eric Day Date: Mon, 27 Dec 2010 11:43:17 -0800 Subject: [PATCH 030/147] Converted instance references to GUID type.
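The conversion itself is mechanical: every column that used to be an Integer foreign key to instances.id becomes a GUID column, and the decorator from the previous patch picks the backend representation per dialect. A hypothetical model showing the pattern (illustrative only; Example is not a real Nova table):

    class Example(BASE, NovaBase):
        __tablename__ = 'examples'
        id = Column(Integer, primary_key=True)
        # Native UUID on Postgres, CHAR(32) hex everywhere else.
        instance_id = Column(GUID, ForeignKey('instances.id'), nullable=True)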
--- nova/db/sqlalchemy/models.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 7ec96bc6d3cf..5634dda5e3da 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -279,7 +279,7 @@ class InstanceDiagnostics(BASE, NovaBase): """Represents a guest VM's diagnostics""" __tablename__ = "instance_diagnostics" id = Column(Integer, primary_key=True) - instance_id = Column(Integer, ForeignKey('instances.id')) + instance_id = Column(GUID, ForeignKey('instances.id')) memory_available = Column(Float) memory_free = Column(Float) @@ -294,7 +294,7 @@ class InstanceActions(BASE, NovaBase): """Represents a guest VM's actions and results""" __tablename__ = "instance_actions" id = Column(Integer, primary_key=True) - instance_id = Column(Integer, ForeignKey('instances.id')) + instance_id = Column(GUID, ForeignKey('instances.id')) action = Column(String(255)) error = Column(Text) @@ -312,7 +312,7 @@ class Volume(BASE, NovaBase): host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? - instance_id = Column(String(32), ForeignKey('instances.id'), nullable=True) + instance_id = Column(GUID, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('volumes'), foreign_keys=instance_id, @@ -385,7 +385,7 @@ class SecurityGroupInstanceAssociation(BASE, NovaBase): __tablename__ = 'security_group_instance_association' id = Column(Integer, primary_key=True) security_group_id = Column(Integer, ForeignKey('security_groups.id')) - instance_id = Column(String(32), ForeignKey('instances.id')) + instance_id = Column(GUID, ForeignKey('instances.id')) class SecurityGroup(BASE, NovaBase): @@ -505,7 +505,7 @@ class FixedIp(BASE, NovaBase): address = Column(String(255)) network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) network = relationship(Network, backref=backref('fixed_ips')) - instance_id = Column(String(32), ForeignKey('instances.id'), nullable=True) + instance_id = Column(GUID, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('fixed_ip', uselist=False), foreign_keys=instance_id, From 6debe20395d6ab476bfd2a237df8c2b08050e0e6 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Mon, 27 Dec 2010 12:19:36 -0800 Subject: [PATCH 031/147] Converted Volume model and operation to use UUIDs. --- nova/api/ec2/cloud.py | 10 ++++----- nova/db/sqlalchemy/api.py | 39 ------------------------------------ nova/db/sqlalchemy/models.py | 15 +++++++------- nova/tests/test_cloud.py | 4 ++-- nova/tests/test_xenapi.py | 11 +++++----- 5 files changed, 19 insertions(+), 60 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index d50a950955f8..8c687f173d54 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -458,7 +458,7 @@ class CloudController(object): # NOTE(vish): volume_id is an optional list of volume ids to filter by. 
volumes = [self._format_volume(context, v) for v in volumes - if volume_id is None or v['ec2_id'] in volume_id] + if volume_id is None or v['id'] in volume_id] return {'volumeSet': volumes} @@ -471,7 +471,7 @@ class CloudController(object): instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} - v['volumeId'] = volume['ec2_id'] + v['volumeId'] = volume['id'] v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] @@ -527,7 +527,7 @@ class CloudController(object): return {'volumeSet': [self._format_volume(context, dict(volume_ref))]} def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - volume_ref = db.volume_get_by_ec2_id(context, volume_id) + volume_ref = db.volume_get(context, volume_id) if not re.match("^/dev/[a-z]d[a-z]+$", device): raise exception.ApiError(_("Invalid device specified: %s. " "Example device: /dev/vdb") % device) @@ -553,7 +553,7 @@ class CloudController(object): 'volumeId': volume_ref['id']} def detach_volume(self, context, volume_id, **kwargs): - volume_ref = db.volume_get_by_ec2_id(context, volume_id) + volume_ref = db.volume_get(context, volume_id) instance_ref = db.volume_get_instance(context.elevated(), volume_ref['id']) if not instance_ref: @@ -807,7 +807,7 @@ class CloudController(object): def delete_volume(self, context, volume_id, **kwargs): # TODO: return error if not authorized - volume_ref = db.volume_get_by_ec2_id(context, volume_id) + volume_ref = db.volume_get(context, volume_id) if volume_ref['status'] != "available": raise exception.ApiError(_("Volume status must be available")) now = datetime.datetime.utcnow() diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 57b70288c95d..50f833a5ff70 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1285,10 +1285,6 @@ def volume_create(context, values): session = get_session() with session.begin(): - while volume_ref.ec2_id == None: - ec2_id = utils.generate_uid('vol') - if not volume_ec2_id_exists(context, ec2_id, session=session): - volume_ref.ec2_id = ec2_id volume_ref.save(session=session) return volume_ref @@ -1386,41 +1382,6 @@ def volume_get_all_by_project(context, project_id): all() -@require_context -def volume_get_by_ec2_id(context, ec2_id): - session = get_session() - result = None - - if is_admin_context(context): - result = session.query(models.Volume).\ - filter_by(ec2_id=ec2_id).\ - filter_by(deleted=can_read_deleted(context)).\ - first() - elif is_user_context(context): - result = session.query(models.Volume).\ - filter_by(project_id=context.project_id).\ - filter_by(ec2_id=ec2_id).\ - filter_by(deleted=False).\ - first() - else: - raise exception.NotAuthorized() - - if not result: - raise exception.NotFound(_('Volume %s not found') % ec2_id) - - return result - - -@require_context -def volume_ec2_id_exists(context, ec2_id, session=None): - if not session: - session = get_session() - - return session.query(exists().\ - where(models.Volume.id == ec2_id)).\ - one()[0] - - @require_admin_context def volume_get_instance(context, volume_id): session = get_session() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 5634dda5e3da..418c8914e1c4 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -303,8 +303,11 @@ class InstanceActions(BASE, NovaBase): class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' - id = Column(Integer, 
primary_key=True) - ec2_id = Column(String(12), unique=True) + id = Column(GUID, primary_key=True, default=make_uuid) + + @property + def name(self): + return "volume-%s" % self.id user_id = Column(String(255)) project_id = Column(String(255)) @@ -330,10 +333,6 @@ class Volume(BASE, NovaBase): display_name = Column(String(255)) display_description = Column(String(255)) - @property - def name(self): - return self.ec2_id - class Quota(BASE, NovaBase): """Represents quota overrides for a project.""" @@ -357,7 +356,7 @@ class ExportDevice(BASE, NovaBase): id = Column(Integer, primary_key=True) shelf_id = Column(Integer) blade_id = Column(Integer) - volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) + volume_id = Column(GUID, ForeignKey('volumes.id'), nullable=True) volume = relationship(Volume, backref=backref('export_device', uselist=False), foreign_keys=volume_id, @@ -373,7 +372,7 @@ class IscsiTarget(BASE, NovaBase): id = Column(Integer, primary_key=True) target_num = Column(Integer) host = Column(String(255)) - volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) + volume_id = Column(GUID, ForeignKey('volumes.id'), nullable=True) volume = relationship(Volume, backref=backref('iscsi_target', uselist=False), foreign_keys=volume_id, diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index ca400077a813..42344af1cb06 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -127,9 +127,9 @@ class CloudTestCase(test.TestCase): result = self.cloud.describe_volumes(self.context) self.assertEqual(len(result['volumeSet']), 2) result = self.cloud.describe_volumes(self.context, - volume_id=[vol2['ec2_id']]) + volume_id=[vol2['id']]) self.assertEqual(len(result['volumeSet']), 1) - self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['ec2_id']) + self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['id']) db.volume_destroy(self.context, vol1['id']) db.volume_destroy(self.context, vol2['id']) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index ed2e4ffde717..900b9af2bcac 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -79,8 +79,8 @@ class XenAPIVolumeTestCase(test.TestCase): helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() - info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') - label = 'SR-%s' % vol['ec2_id'] + info = helper.parse_volume_info(vol['id'], '/dev/sdc') + label = 'SR-%s' % vol['id'] description = 'Test-SR' sr_ref = helper.create_iscsi_storage(session, info, label, description) srs = fake.get_all('SR') @@ -97,7 +97,7 @@ class XenAPIVolumeTestCase(test.TestCase): # oops, wrong mount point!
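            # (an illustrative aside: parse_volume_info expects a full device
            #  path like the /dev/sdc used above; '/dev/sd' below has no drive
            #  letter, so the helper should raise StorageError)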
self.assertRaises(volume_utils.StorageError, helper.parse_volume_info, - vol['ec2_id'], + vol['id'], '/dev/sd') db.volume_destroy(context.get_admin_context(), vol['id']) @@ -108,8 +108,7 @@ class XenAPIVolumeTestCase(test.TestCase): volume = self._create_volume() instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') - result = conn.attach_volume(instance.name, volume['ec2_id'], - '/dev/sdc') + result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc') def check(): # check that the VM has a VBD attached to it @@ -134,7 +133,7 @@ class XenAPIVolumeTestCase(test.TestCase): self.assertRaises(Exception, conn.attach_volume, instance.name, - volume['ec2_id'], + volume['id'], '/dev/sdc') def tearDown(self): From 5a25de893f34cb9b05996406488188b6ed47fca1 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 28 Dec 2010 17:14:01 -0500 Subject: [PATCH 032/147] Add flag --enable_new_services to toggle default state of service when created. --- nova/db/api.py | 5 +++++ nova/db/sqlalchemy/api.py | 2 ++ nova/tests/test_service.py | 25 +++++++++++++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/nova/db/api.py b/nova/db/api.py index fde3f0852a87..fcb1cc3f929f 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -27,6 +27,9 @@ The underlying driver is loaded as a :class:`LazyPluggable`. :sql_connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/nova/nova.sqlite`. + +:enable_new_services: when adding a new service to the database, is it in the + pool of available hardware (Default: False) """ from nova import exception @@ -37,6 +40,8 @@ from nova import utils FLAGS = flags.FLAGS flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db') +flags.DEFINE_boolean('enable_new_services', False, + 'Services to be added to the available pool on create') IMPL = utils.LazyPluggable(FLAGS['db_backend'], diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 7e945e4cb3d8..bcc076c5a58d 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -236,6 +236,8 @@ def service_get_by_args(context, host, binary): def service_create(context, values): service_ref = models.Service() service_ref.update(values) + if FLAGS.enable_new_services: + service_ref.disabled = False service_ref.save() return service_ref diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index b30838ad738c..01f27e5b4126 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -22,6 +22,8 @@ Unit Tests for remote procedure calls using queue import mox +from nova import context +from nova import db from nova import exception from nova import flags from nova import rpc @@ -72,6 +74,29 @@ class ServiceManagerTestCase(test.TestCase): self.assertEqual(serv.test_method(), 'service') +class ServiceFlagsTestCase(test.TestCase): + def test_service_enabled_on_create_based_on_flag(self): + self.flags(enable_new_services=True) + host = 'foo' + binary = 'nova-fake' + app = service.Service.create(host=host, binary=binary) + app.start() + app.stop() + ref = db.service_get(context.get_admin_context(), app.service_id) + db.service_destroy(context.get_admin_context(), app.service_id) + self.assert_(not ref['disabled']) + + def test_service_disabled_on_create_based_on_flag(self): + self.flags(enable_new_services=False) + host = 'foo' + binary = 'nova-fake' + app = service.Service.create(host=host, binary=binary) + app.start() + app.stop() + ref = db.service_get(context.get_admin_context(), app.service_id) + 
db.service_destroy(context.get_admin_context(), app.service_id) + self.assert_(ref['disabled']) + class ServiceTestCase(test.TestCase): """Test cases for Services""" From 9da1fcd6eca6f2f88e95242b8d046f4ee11f3761 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 28 Dec 2010 17:54:31 -0500 Subject: [PATCH 033/147] Default services to enabled. --- nova/db/api.py | 4 ++-- nova/db/sqlalchemy/api.py | 4 ++-- nova/db/sqlalchemy/models.py | 2 +- nova/tests/test_scheduler.py | 34 +++++++--------------------------- 4 files changed, 12 insertions(+), 32 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index fcb1cc3f929f..f32e1e3f5750 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -29,7 +29,7 @@ The underlying driver is loaded as a :class:`LazyPluggable`. `sqlite:///var/lib/nova/nova.sqlite`. :enable_new_services: when adding a new service to the database, is it in the - pool of available hardware (Default: False) + pool of available hardware (Default: True) """ from nova import exception @@ -40,7 +40,7 @@ from nova import utils FLAGS = flags.FLAGS flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db') -flags.DEFINE_boolean('enable_new_services', False, +flags.DEFINE_boolean('enable_new_services', True, 'Services to be added to the available pool on create') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index bcc076c5a58d..9f0597b548e4 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -236,8 +236,8 @@ def service_get_by_args(context, host, binary): def service_create(context, values): service_ref = models.Service() service_ref.update(values) - if FLAGS.enable_new_services: - service_ref.disabled = False + if not FLAGS.enable_new_services: + service_ref.disabled = True service_ref.save() return service_ref diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 86c24cb90dcc..ca54d446683b 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -148,7 +148,7 @@ class Service(BASE, NovaBase): binary = Column(String(255)) topic = Column(String(255)) report_count = Column(Integer, nullable=False, default=0) - disabled = Column(Boolean, default=True) + disabled = Column(Boolean, default=False) class Certificate(BASE, NovaBase): diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 65430fb362db..e8021ed5a5c7 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -133,6 +133,10 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute2.start() + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) + db.service_update(self.context, s2['id'], {'disabled': True}) hosts = self.scheduler.driver.hosts_up(self.context, 'compute') self.assertEqual(0, len(hosts)) compute1.kill() @@ -152,10 +156,6 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': False}) - db.service_update(self.context, s2['id'], {'disabled': False}) hosts = self.scheduler.driver.hosts_up(self.context, 'compute') self.assertEqual(2, len(hosts)) compute1.kill() @@ -173,10 +173,6 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager)
compute2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': False}) - db.service_update(self.context, s2['id'], {'disabled': False}) instance_id1 = self._create_instance() compute1.run_instance(self.context, instance_id1) instance_id2 = self._create_instance() @@ -200,10 +196,6 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': False}) - db.service_update(self.context, s2['id'], {'disabled': False}) instance_id1 = self._create_instance() compute1.run_instance(self.context, instance_id1) instance_id2 = self._create_instance(availability_zone='nova:host1') @@ -225,8 +217,7 @@ class SimpleDriverTestCase(test.TestCase): now = datetime.datetime.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta - db.service_update(self.context, s1['id'], {'disabled': False, - 'updated_at': past}) + db.service_update(self.context, s1['id'], {'updated_at': past}) instance_id2 = self._create_instance(availability_zone='nova:host1') self.assertRaises(driver.WillNotSchedule, self.scheduler.driver.schedule_run_instance, @@ -241,7 +232,8 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute1.start() - db.service_get_by_args(self.context, 'host1', 'nova-compute') + s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') + db.service_update(self.context, s1['id'], {'disabled': True}) instance_id2 = self._create_instance(availability_zone='nova:host1') host = self.scheduler.driver.schedule_run_instance(self.context, instance_id2) @@ -261,10 +253,6 @@ class SimpleDriverTestCase(test.TestCase): 'compute', FLAGS.compute_manager) compute2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': False}) - db.service_update(self.context, s2['id'], {'disabled': False}) instance_ids1 = [] instance_ids2 = [] for index in xrange(FLAGS.max_cores): @@ -298,10 +286,6 @@ class SimpleDriverTestCase(test.TestCase): 'volume', FLAGS.volume_manager) volume2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-volume') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-volume') - db.service_update(self.context, s1['id'], {'disabled': False}) - db.service_update(self.context, s2['id'], {'disabled': False}) volume_id1 = self._create_volume() volume1.create_volume(self.context, volume_id1) volume_id2 = self._create_volume() @@ -325,10 +309,6 @@ class SimpleDriverTestCase(test.TestCase): 'volume', FLAGS.volume_manager) volume2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-volume') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-volume') - db.service_update(self.context, s1['id'], {'disabled': False}) - db.service_update(self.context, s2['id'], {'disabled': False}) volume_ids1 = [] volume_ids2 = [] for index in xrange(FLAGS.max_gigabytes): From 8aea573bd2e44e152fb4ef1627640bab1818dede Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Tue, 28 Dec 2010 23:55:58 -0600 Subject: [PATCH 034/147] initial lock functionality commit --- nova/api/openstack/__init__.py | 73 
+++++++++++++++++++++++++++++ nova/api/openstack/servers.py | 86 ++++++++++++++++++++++++++++++++++ nova/compute/api.py | 35 +++++++++++++- nova/compute/manager.py | 24 ++++++++++ nova/db/sqlalchemy/api.py | 22 +++++++++ nova/db/sqlalchemy/models.py | 2 + 6 files changed, 241 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index bebcdc18cda4..b3bb65550539 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -22,6 +22,7 @@ WSGI middleware for OpenStack API controllers. import json import time +import functools import logging import routes @@ -113,3 +114,75 @@ class APIRouter(wsgi.Router): controller=sharedipgroups.Controller()) super(APIRouter, self).__init__(mapper) + + +#class CheckLock(object): +# """ +# decorator used for preventing action against locked instances +# unless, of course, you happen to be admin +# +# """ +# def __init__(self, function): +# self.function = function +# +# def __getattribute__(self, attr): +# if attr == "function": +# return super(CheckLock, self).__getattribute__(attr) +# return self.function.__getattribute__(attr) +# +# def __call__(self, *args, **kwargs): +# logging.info(_("Calling %s. Checking locks and privileges"), +# self.function.__name__) +# +# # get req +# if 'req' is in kwargs: +# req = kwargs['req'] +# else: +# req = args[1] +# +# # check table for lock +# locked = True +# if(locked): +# # check context for admin +# if(req.environ['nova.context'].is_admin): +# self.function(*args, **kwargs) +# else: +# pass +# # return 404 +# +# def __get__(self, obj, objtype): +# f = functools.partial(self.__call__, obj) +# f.__doc__ = self.function.__doc__ +# return f + + + + +#def checks_lock(function): +# """ +# decorator used for preventing action against locked instances +# unless, of course, you happen to be admin +# +# """ +# +# @functools.wraps(function) +# def decorated_function(*args, **kwargs): +# +# # check table for lock +# locked = True +# if(locked): +# try: +# # get context from req and check for admin +# if 'req' is in kwargs: +# req = kwargs['req'] +# else: +# req = args[1] +# if(req.environ['nova.context'].is_admin): +# function(*args, **kwargs) +# else: +# pass +# # return 404 +# except: +# logging.error(_("CheckLock: error getting context")) +# +# return decorated_function diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 10c3973843f0..46e65ca83b71 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -35,6 +35,40 @@ LOG = logging.getLogger('server') LOG.setLevel(logging.DEBUG) +def checks_lock(function): + """ + decorator used for preventing action against locked instances + unless, of course, you happen to be admin + + """ + + @functools.wraps(function) + def decorated_function(*args, **kwargs): + + # grab args to function + try: + if 'req' is in kwargs: + req = kwargs['req'] + else: + req = args[1] + if 'id' is in kwargs: + _id = kwargs['id'] + else: + req = args[2] + context = req.environ['nova.context'] + except: + logging.error(_("CheckLock: argument error")) + + # if locked and admin call function, otherwise 404 + if(compute_api.ComputeAPI().get_lock(context, _id)): + if(req.environ['nova.context'].is_admin): + function(*args, **kwargs) + # return 404 + return faults.Fault(exc.HTTPUnprocessableEntity()) + + return decorated_function + + def _entity_list(entities): """ Coerces a list of servers into proper dictionary format """ return dict(servers=entities) @@ -104,6 +138,7 @@ class 
Controller(wsgi.Controller): res = [entity_maker(inst)['server'] for inst in limited_list] return _entity_list(res) + @checks_lock def show(self, req, id): """ Returns server details by server id """ try: @@ -113,6 +148,7 @@ class Controller(wsgi.Controller): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) + @checks_lock def delete(self, req, id): """ Destroys a server """ try: @@ -140,6 +176,7 @@ class Controller(wsgi.Controller): key_data=key_pair['public_key']) return _entity_inst(instances[0]) + @checks_lock def update(self, req, id): """ Updates the server name or password """ inst_dict = self._deserialize(req.body, req) @@ -160,6 +197,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPNoContent() + @checks_lock def action(self, req, id): """ Multi-purpose method used to reboot, rebuild, and resize a server """ @@ -176,6 +214,51 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + def lock(self, req, id): + """ + lock the instance with id + admin only operation + + """ + context = req.environ['nova.context'] + try: + self.compute_api.lock(context, id) + except: + readable = traceback.format_exc() + logging.error(_("Compute.api::lock %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + def unlock(self, req, id): + """ + unlock the instance with id + admin only operation + + """ + context = req.environ['nova.context'] + try: + self.compute_api.unlock(context, id) + except: + readable = traceback.format_exc() + logging.error(_("Compute.api::unlock %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + def get_lock(self, req, id): + """ + return the boolean state of (instance with id)'s lock + + """ + context = req.environ['nova.context'] + try: + self.compute_api.get_lock(context, id) + except: + readable = traceback.format_exc() + logging.error(_("Compute.api::get_lock %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + @checks_lock def pause(self, req, id): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] @@ -187,6 +270,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @checks_lock def unpause(self, req, id): """ Permit Admins to Unpause the server. 
""" ctxt = req.environ['nova.context'] @@ -198,6 +282,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @checks_lock def suspend(self, req, id): """permit admins to suspend the server""" context = req.environ['nova.context'] @@ -209,6 +294,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @checks_lock def resume(self, req, id): """permit admins to resume the server from suspend""" context = req.environ['nova.context'] diff --git a/nova/compute/api.py b/nova/compute/api.py index a47703461a76..361ab991418f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -141,7 +141,8 @@ class ComputeAPI(base.Base): 'display_description': description, 'user_data': user_data or '', 'key_name': key_name, - 'key_data': key_data} + 'key_data': key_data, + 'locked': False} elevated = context.elevated() instances = [] @@ -319,3 +320,35 @@ class ComputeAPI(base.Base): self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "unrescue_instance", "args": {"instance_id": instance['id']}}) + + def lock(self, context, instance_id): + """ + lock the instance with instance_id + + """ + instance = self.get_instance(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "lock_instance", + "args": {"instance_id": instance['id']}}) + + def unlock(self, context, instance_id): + """ + unlock the instance with instance_id + + """ + instance = self.get_instance(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unlock_instance", + "args": {"instance_id": instance['id']}}) + + def get_lock(self, context, instance_id): + """ + return the boolean state of (instance with instance_id)'s lock + + """ + instance = self.get_instance(context, instance_id) + return instance['locked'] diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 70b175e7c682..05f1e44a2265 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -329,6 +329,30 @@ class ComputeManager(manager.Manager): instance_id, result)) + @exception.wrap_exception + def lock_instance(self, context, instance_id): + """ + lock the instance with instance_id + + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + logging.debug(_('instance %s: locking'), instance_ref['internal_id']) + self.db.instance_set_lock(context, instance_id, True) + + @exception.wrap_exception + def unlock_instance(self, context, instance_id): + """ + unlock the instance with instance_id + + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + logging.debug(_('instance %s: unlocking'), instance_ref['internal_id']) + self.db.instance_set_lock(context, instance_id, False) + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 7e945e4cb3d8..6d774b39ce18 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -856,6 +856,28 @@ def instance_action_create(context, values): return action_ref +@require_admin_context +def instance_set_lock(context, instance_id, lock): + """ + twiddle the locked bit in the db + lock is a boolean + + """ + db.instance_update(context, + instance_id, + {'locked': lock}) + + 
+#@require_admin_context +#def instance_is_locked(context, instance_id): +# """ +# return the boolean state of (instance with instance_id)'s lock +# +# """ +# instance_ref = instance_get(context, instance_id) +# return instance_ref['locked'] + + ################### diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 693db8d23d97..830ef30a16f5 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -224,6 +224,8 @@ class Instance(BASE, NovaBase): display_name = Column(String(255)) display_description = Column(String(255)) + locked = Column(Boolean) + # TODO(vish): see Ewan's email about state improvements, probably # should be in a driver base class or some such # vmstate_state = running, halted, suspended, paused From 3a85ba4fa4215737731b2e755abfa350c509e46f Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 13:04:41 -0600 Subject: [PATCH 035/147] syntax error --- nova/api/openstack/servers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 46e65ca83b71..7744815fc3ad 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -47,11 +47,11 @@ def checks_lock(function): # grab args to function try: - if 'req' is in kwargs: + if 'req' in kwargs: req = kwargs['req'] else: req = args[1] - if 'id' is in kwargs: + if 'id' in kwargs: _id = kwargs['id'] else: req = args[2] From b6e5c68d65701b840006cea49367879ee88c9b80 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 13:09:49 -0600 Subject: [PATCH 036/147] forgot import --- nova/api/openstack/__init__.py | 1 - nova/api/openstack/servers.py | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index b3bb65550539..c0bd37fef5b1 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -22,7 +22,6 @@ WSGI middleware for OpenStack API controllers. 
import json import time -import functools import logging import routes diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 7744815fc3ad..8b837e6fc392 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -17,6 +17,7 @@ import logging import traceback +import functools from webob import exc From 0afb4a06dcb94ae41d04b3d78304746b0cc5b26f Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 13:33:51 -0600 Subject: [PATCH 037/147] refactor --- nova/api/openstack/servers.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 8b837e6fc392..292a664b7ff1 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -58,14 +58,15 @@ def checks_lock(function): req = args[2] context = req.environ['nova.context'] except: - logging.error(_("CheckLock: argument error")) + logging.error(_("CheckLock: argument error: |%s|, |%s|"), args, + kwargs) + # if admin or unlocked call function, otherwise 404 + locked = compute_api.ComputeAPI().get_lock(context, _id) + admin = req.environ['nova.context'].is_admin + if(admin or not locked): + return function(*args, **kwargs) - # if locked and admin call function, otherwise 404 - if(compute_api.ComputeAPI().get_lock(context, _id)): - if(req.environ['nova.context'].is_admin): - function(*args, **kwargs) - # return 404 - return faults.Fault(exc.HTTPUnprocessableEntity()) + return faults.Fault(exc.HTTPNotFound()) return decorated_function From 2eaf3bb2a9d54bb7dd2c518cecca0caf7c80571f Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 13:50:25 -0600 Subject: [PATCH 038/147] added test for lock to os api --- nova/tests/api/openstack/test_servers.py | 44 ++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 5d23db588863..464bae231b54 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -321,5 +321,49 @@ class ServersTest(unittest.TestCase): self.assertEqual(self.server_delete_called, True) + def test_lock(self): + # part one: stubs it to be locked and test pause + def get_locked(context, id): + return True + + # set get to return locked + self.stubs.Set(nova.compute, 'get_lock', get_locked) + + # attempt to pause + FLAGS.allow_admin_api = True + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/pause') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(nova.api.API('os')) + + # expect a 404 since it was locked + self.assertEqual(res.status_int, 404) + + # Part two: stubs it to be unlocked and test pause + def get_unlocked(context, id): + return False + + # set get to return locked + self.stubs.Set(nova.compute, 'get_lock', get_unlocked) + + # attempt to pause + FLAGS.allow_admin_api = True + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/pause') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(nova.api.API('os')) + + # expect a 202 since it was unlocked + self.assertEqual(res.status_int, 202) + + if __name__ == "__main__": unittest.main() From 48f0aa891c9c82c1c9e7a2e4bc1bef4da3c4d90b Mon Sep 17 
00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 14:30:29 -0600 Subject: [PATCH 039/147] fixed up test for lock --- nova/tests/api/openstack/test_servers.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 464bae231b54..b2a8e5ac0795 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -323,11 +323,11 @@ class ServersTest(unittest.TestCase): def test_lock(self): # part one: stubs it to be locked and test pause - def get_locked(context, id): + def get_locked(self, context, id): return True # set get to return locked - self.stubs.Set(nova.compute, 'get_lock', get_locked) + self.stubs.Set(nova.compute.api.ComputeAPI, 'get_lock', get_locked) # attempt to pause FLAGS.allow_admin_api = True @@ -344,11 +344,11 @@ class ServersTest(unittest.TestCase): self.assertEqual(res.status_int, 404) # Part two: stubs it to be unlocked and test pause - def get_unlocked(context, id): + def get_unlocked(self, context, id): return False # set get to return locked - self.stubs.Set(nova.compute, 'get_lock', get_unlocked) + self.stubs.Set(nova.compute.api.ComputeAPI, 'get_lock', get_unlocked) # attempt to pause FLAGS.allow_admin_api = True From 9b9b5fed18231a800018bc60fa653ec521b34a5c Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 14:32:03 -0600 Subject: [PATCH 040/147] pep8 --- nova/tests/api/openstack/test_servers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index b2a8e5ac0795..a122f394644b 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -320,7 +320,6 @@ class ServersTest(unittest.TestCase): self.assertEqual(res.status, '202 Accepted') self.assertEqual(self.server_delete_called, True) - def test_lock(self): # part one: stubs it to be locked and test pause def get_locked(self, context, id): From 6202b21b42615cf15b0dd60089026472e6836c69 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 15:31:53 -0600 Subject: [PATCH 041/147] removed some code i didn't end up using --- nova/api/openstack/__init__.py | 72 ---------------------------------- nova/db/sqlalchemy/api.py | 10 ----- 2 files changed, 82 deletions(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 7cceb7733ba7..66aceee2d06c 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -113,75 +113,3 @@ class APIRouter(wsgi.Router): controller=sharedipgroups.Controller()) super(APIRouter, self).__init__(mapper) - - -#class CheckLock(object): -# """ -# decorator used for preventing action against locked instances -# unless, of course, you happen to be admin -# -# """ -# def __init__(self, function): -# self.function = function -# -# def __getattribute__(self, attr): -# if attr == "function": -# return super(CheckLock, self).__getattribute__(attr) -# return self.function.__getattribute__(attr) -# -# def __call__(self, *args, **kwargs): -# logging.info(_("Calling %s. 
Checking locks and privileges"), -# self.function.__name__) -# -# # get req -# if 'req' is in kwargs: -# req = kwargs['req'] -# else: -# req = args[1] -# -# # check table for lock -# locked = True -# if(locked): -# # check context for admin -# if(req.environ['nova.context'].is_admin): -# self.function(*args, **kwargs) -# else: -# pass -# # return 404 -# -# def __get__(self, obj, objtype): -# f = functools.partial(self.__call__, obj) -# f.__doc__ = self.function.__doc__ -# return f - - - - -#def checks_lock(function): -# """ -# decorator used for preventing action against locked instances -# unless, of course, you happen to be admin -# -# """ -# -# @functools.wraps(function) -# def decorated_function(*args, **kwargs): -# -# # check table for lock -# locked = True -# if(locked): -# try: -# # get context from req and check for admin -# if 'req' is in kwargs: -# req = kwargs['req'] -# else: -# req = args[1] -# if(req.environ['nova.context'].is_admin): -# function(*args, **kwargs) -# else: -# pass -# # return 404 -# except: -# logging.error(_("CheckLock: error getting context")) -# -# return decorated_function diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 6d774b39ce18..ca71df7b3968 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -868,16 +868,6 @@ def instance_set_lock(context, instance_id, lock): {'locked': lock}) -#@require_admin_context -#def instance_is_locked(context, instance_id): -# """ -# return the boolean state of (instance with instance_id)'s lock -# -# """ -# instance_ref = instance_get(context, instance_id) -# return instance_ref['locked'] - - ################### From 66a074cc74a6c3cc09d7b36f3e5dcb5ad5e7b6d8 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Wed, 29 Dec 2010 17:08:42 -0500 Subject: [PATCH 042/147] Pep-8 cleanup. 
--- bin/nova-api-paste | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bin/nova-api-paste b/bin/nova-api-paste index dcb76522fc7f..3d26fdb4fd85 100755 --- a/bin/nova-api-paste +++ b/bin/nova-api-paste @@ -48,6 +48,7 @@ FLAGS = flags.FLAGS API_ENDPOINTS = ['ec2', 'openstack'] + def load_configuration(paste_config): """Load the paste configuration from the config file and return it.""" config = None @@ -68,12 +69,14 @@ def load_configuration(paste_config): print "Paste config at %s has no secion for any known apis" % paste_config os.exit(1) + def launch_api(paste_config_file, section, server, port, host): """Launch an api server from the specified port and IP.""" LOG.debug("Launching api %s on %s:%s", section, host, port) app = deploy.loadapp('config:%s' % paste_config_file, name=section) server.start(app, int(port), host) + def run_app(paste_config_file): LOG.debug("Using paste.deploy config at: %s", configfile) config = load_configuration(paste_config_file) @@ -89,6 +92,7 @@ def run_app(paste_config_file): LOG.debug("All api servers launched, now waiting") server.wait() + if __name__ == '__main__': FLAGS(sys.argv) configfiles = ['/etc/nova/nova-api.conf'] From 903b053f7eb2bcac7ee0809d7a1cd1efe676909e Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Wed, 29 Dec 2010 17:15:50 -0500 Subject: [PATCH 043/147] i18n --- bin/nova-api-paste | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/bin/nova-api-paste b/bin/nova-api-paste index 3d26fdb4fd85..9bcb983727e5 100755 --- a/bin/nova-api-paste +++ b/bin/nova-api-paste @@ -27,9 +27,6 @@ import sys from paste import deploy -from nova import flags -from nova import wsgi - # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), @@ -40,6 +37,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) +from nova import flags +from nova import wsgi + LOG = logging.getLogger('nova.api') LOG.setLevel(logging.DEBUG) LOG.addHandler(logging.StreamHandler()) @@ -65,22 +65,24 @@ def load_configuration(paste_config): if FLAGS.verbose: logging.getLogger().setLevel(logging.DEBUG) return config - LOG.debug("Paste config at %s has no secion for known apis", paste_config) - print "Paste config at %s has no secion for any known apis" % paste_config + LOG.debug(_("Paste config at %s has no secion for known apis"), + paste_config) + print _("Paste config at %s has no secion for any known apis") % \ + paste_config os.exit(1) def launch_api(paste_config_file, section, server, port, host): """Launch an api server from the specified port and IP.""" - LOG.debug("Launching api %s on %s:%s", section, host, port) + LOG.debug(_("Launching %s api on %s:%s"), section, host, port) app = deploy.loadapp('config:%s' % paste_config_file, name=section) server.start(app, int(port), host) def run_app(paste_config_file): - LOG.debug("Using paste.deploy config at: %s", configfile) + LOG.debug(_("Using paste.deploy config at: %s"), configfile) config = load_configuration(paste_config_file) - LOG.debug("Configuration: %r", config) + LOG.debug(_("Configuration: %r"), config) server = wsgi.Server() ip = config.get('host', None) for api in API_ENDPOINTS: @@ -89,7 +91,7 @@ def run_app(paste_config_file): continue host = config.get("%s_host" % api, None) or ip or '0.0.0.0' launch_api(configfile, api, server, port, host) - LOG.debug("All api servers launched, now waiting") + LOG.debug(_("All api servers launched, now waiting")) server.wait() @@ -104,4 +106,4 @@ if __name__ == '__main__': run_app(configfile) break else: - LOG.debug("Skipping missing configuration: %s", configfile) + LOG.debug(_("Skipping missing configuration: %s"), configfile) From c1acb68ef54309584816fbf5c93e38266accb2f0 Mon Sep 17 00:00:00 2001 From: Ryan Lucio Date: Wed, 29 Dec 2010 15:04:21 -0800 Subject: [PATCH 044/147] Add the pool_recycle setting to enable connection pooling features for the sql engine. 
The setting is hard-coded to 3600 seconds (one hour) per the recommendation provided on sqlalchemy's site --- nova/db/sqlalchemy/session.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index e0d84c1075fd..5f31743cd143 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -36,7 +36,7 @@ def get_session(autocommit=True, expire_on_commit=False): global _MAKER if not _MAKER: if not _ENGINE: - _ENGINE = create_engine(FLAGS.sql_connection, echo=False) + _ENGINE = create_engine(FLAGS.sql_connection, pool_recycle=3600, echo=False) _MAKER = (sessionmaker(bind=_ENGINE, autocommit=autocommit, expire_on_commit=expire_on_commit)) From aac25e8cc6e75d5d0abc41a8cf979300e58bcc3b Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 17:04:40 -0600 Subject: [PATCH 045/147] removed () from if (can't believe i did that) and renamed checks_lock decorator --- nova/api/openstack/servers.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index c7263273c7ae..74b4f55b5480 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -36,7 +36,7 @@ LOG = logging.getLogger('server') LOG.setLevel(logging.DEBUG) -def checks_lock(function): +def checks_instance_lock(function): """ decorator used for preventing action against locked instances unless, of course, you happen to be admin @@ -58,12 +58,12 @@ def checks_lock(function): req = args[2] context = req.environ['nova.context'] except: - logging.error(_("CheckLock: argument error: |%s|, |%s|"), args, + logging.error(_("check_lock: argument error: |%s|, |%s|"), args, kwargs) # if admin or unlocked call function, otherwise 404 locked = compute_api.ComputeAPI().get_lock(context, _id) admin = req.environ['nova.context'].is_admin - if(admin or not locked): + if admin or not locked: return function(*args, **kwargs) return faults.Fault(exc.HTTPNotFound()) @@ -138,7 +138,7 @@ class Controller(wsgi.Controller): res = [entity_maker(inst)['server'] for inst in limited_list] return dict(servers=res) - @checks_lock + @checks_instance_lock def show(self, req, id): """ Returns server details by server id """ try: @@ -148,7 +148,7 @@ class Controller(wsgi.Controller): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - @checks_lock + @checks_instance_lock def delete(self, req, id): """ Destroys a server """ try: @@ -176,7 +176,7 @@ class Controller(wsgi.Controller): key_data=key_pair['public_key']) return _translate_keys(instances[0]) - @checks_lock + @checks_instance_lock def update(self, req, id): """ Updates the server name or password """ inst_dict = self._deserialize(req.body, req) @@ -198,7 +198,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPNoContent() - @checks_lock + @checks_instance_lock def action(self, req, id): """ Multi-purpose method used to reboot, rebuild, and resize a server """ @@ -259,7 +259,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - @checks_lock + @checks_instance_lock def pause(self, req, id): """ Permit Admins to Pause the server. 
""" ctxt = req.environ['nova.context'] @@ -271,7 +271,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - @checks_lock + @checks_instance_lock def unpause(self, req, id): """ Permit Admins to Unpause the server. """ ctxt = req.environ['nova.context'] @@ -283,7 +283,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - @checks_lock + @checks_instance_lock def suspend(self, req, id): """permit admins to suspend the server""" context = req.environ['nova.context'] @@ -295,7 +295,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - @checks_lock + @checks_instance_lock def resume(self, req, id): """permit admins to resume the server from suspend""" context = req.environ['nova.context'] From be6750a77e5121fe8f0d95016da4e96c9de3b5aa Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 17:40:18 -0600 Subject: [PATCH 046/147] removed lock check from show and changed returning 404 to 405 --- nova/api/openstack/servers.py | 3 +-- nova/tests/api/openstack/test_servers.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 74b4f55b5480..24fd5000ca89 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -66,7 +66,7 @@ def checks_instance_lock(function): if admin or not locked: return function(*args, **kwargs) - return faults.Fault(exc.HTTPNotFound()) + return faults.Fault(exc.HTTPMethodNotAllowed()) return decorated_function @@ -138,7 +138,6 @@ class Controller(wsgi.Controller): res = [entity_maker(inst)['server'] for inst in limited_list] return dict(servers=res) - @checks_instance_lock def show(self, req, id): """ Returns server details by server id """ try: diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index a122f394644b..56a5a9b275ce 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -340,7 +340,7 @@ class ServersTest(unittest.TestCase): res = req.get_response(nova.api.API('os')) # expect a 404 since it was locked - self.assertEqual(res.status_int, 404) + self.assertEqual(res.status_int, 405) # Part two: stubs it to be unlocked and test pause def get_unlocked(self, context, id): From 64078137ce12ee52fff710f5a262d57b4ace2809 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 29 Dec 2010 16:29:15 -0800 Subject: [PATCH 047/147] Moved ec2 volume operations into a volume API interface for other components to use. Added attach/detach as compute.api methods, since they operate in the context of instances (and to avoid a dependency loop). 
--- nova/api/ec2/cloud.py | 147 +++++++++++----------------------------- nova/compute/api.py | 34 +++++++++- nova/volume/__init__.py | 91 ++++++++++++++++++++++--- 3 files changed, 152 insertions(+), 120 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 8c687f173d54..74c73e0ddb3d 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -38,12 +38,12 @@ from nova import flags from nova import quota from nova import rpc from nova import utils +from nova import volume from nova.compute import api as compute_api from nova.compute import instance_types FLAGS = flags.FLAGS -flags.DECLARE('storage_availability_zone', 'nova.volume.manager') InvalidInputException = exception.InvalidInputException @@ -89,8 +89,10 @@ class CloudController(object): def __init__(self): self.network_manager = utils.import_object(FLAGS.network_manager) self.image_service = utils.import_object(FLAGS.image_service) + self.volume_api = volume.API() self.compute_api = compute_api.ComputeAPI(self.network_manager, - self.image_service) + self.image_service, + self.volume_api) self.setup() def __str__(self): @@ -451,15 +453,10 @@ class CloudController(object): "output": base64.b64encode(output)} def describe_volumes(self, context, volume_id=None, **kwargs): - if context.user.is_admin(): - volumes = db.volume_get_all(context) - else: - volumes = db.volume_get_all_by_project(context, context.project_id) - + volumes = self.volume_api.get(context) # NOTE(vish): volume_id is an optional list of volume ids to filter by. volumes = [self._format_volume(context, v) for v in volumes if volume_id is None or v['id'] in volume_id] - return {'volumeSet': volumes} def _format_volume(self, context, volume): @@ -498,95 +495,17 @@ class CloudController(object): return v def create_volume(self, context, size, **kwargs): - # check quota - if quota.allowed_volumes(context, 1, size) < 1: - logging.warn("Quota exceeeded for %s, tried to create %sG volume", - context.project_id, size) - raise quota.QuotaError("Volume quota exceeded. You cannot " - "create a volume of size %s" % size) - vol = {} - vol['size'] = size - vol['user_id'] = context.user.id - vol['project_id'] = context.project_id - vol['availability_zone'] = FLAGS.storage_availability_zone - vol['status'] = "creating" - vol['attach_status'] = "detached" - vol['display_name'] = kwargs.get('display_name') - vol['display_description'] = kwargs.get('display_description') - volume_ref = db.volume_create(context, vol) - - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "create_volume", - "args": {"topic": FLAGS.volume_topic, - "volume_id": volume_ref['id']}}) - + volume = self.volume_api.create(context, size, + kwargs.get('display_name'), + kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. return {'volumeSet': [self._format_volume(context, dict(volume_ref))]} - def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - volume_ref = db.volume_get(context, volume_id) - if not re.match("^/dev/[a-z]d[a-z]+$", device): - raise exception.ApiError(_("Invalid device specified: %s. " - "Example device: /dev/vdb") % device) - # TODO(vish): abstract status checking? 
-        if volume_ref['status'] != "available":
-            raise exception.ApiError(_("Volume status must be available"))
-        if volume_ref['attach_status'] == "attached":
-            raise exception.ApiError(_("Volume is already attached"))
-        instance_id = ec2_id_to_id(instance_id)
-        instance_ref = self.compute_api.get_instance(context, instance_id)
-        host = instance_ref['host']
-        rpc.cast(context,
-                 db.queue_get_for(context, FLAGS.compute_topic, host),
-                 {"method": "attach_volume",
-                  "args": {"volume_id": volume_ref['id'],
-                           "instance_id": instance_ref['id'],
-                           "mountpoint": device}})
-        return {'attachTime': volume_ref['attach_time'],
-                'device': volume_ref['mountpoint'],
-                'instanceId': instance_ref['id'],
-                'requestId': context.request_id,
-                'status': volume_ref['attach_status'],
-                'volumeId': volume_ref['id']}
-
-    def detach_volume(self, context, volume_id, **kwargs):
-        volume_ref = db.volume_get(context, volume_id)
-        instance_ref = db.volume_get_instance(context.elevated(),
-                                              volume_ref['id'])
-        if not instance_ref:
-            raise exception.ApiError(_("Volume isn't attached to anything!"))
-        # TODO(vish): abstract status checking?
-        if volume_ref['status'] == "available":
-            raise exception.ApiError(_("Volume is already detached"))
-        try:
-            host = instance_ref['host']
-            rpc.cast(context,
-                     db.queue_get_for(context, FLAGS.compute_topic, host),
-                     {"method": "detach_volume",
-                      "args": {"instance_id": instance_ref['id'],
-                               "volume_id": volume_ref['id']}})
-        except exception.NotFound:
-            # If the instance doesn't exist anymore,
-            # then we need to call detach blind
-            db.volume_detached(context)
-        instance_id = instance_ref['id']
-        ec2_id = id_to_ec2_id(instance_id)
-        return {'attachTime': volume_ref['attach_time'],
-                'device': volume_ref['mountpoint'],
-                'instanceId': instance_id,
-                'requestId': context.request_id,
-                'status': volume_ref['attach_status'],
-                'volumeId': volume_ref['id']}
-
-    def _convert_to_set(self, lst, label):
-        if lst == None or lst == []:
-            return None
-        if not isinstance(lst, list):
-            lst = [lst]
-        return [{label: x} for x in lst]
+    def delete_volume(self, context, volume_id, **kwargs):
+        self.volume_api.delete(context, volume_id)
+        return True
 
     def update_volume(self, context, volume_id, **kwargs):
         updatable_fields = ['display_name', 'display_description']
@@ -595,9 +514,36 @@ class CloudController(object):
             if field in kwargs:
                 changes[field] = kwargs[field]
         if changes:
-            db.volume_update(context, volume_id, kwargs)
+            self.volume_api.update(context, volume_id, kwargs)
         return True
 
+    def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
+        self.compute_api.attach_volume(context, instance_id, volume_id, device)
+        volume = self.volume_api.get(context, volume_id)
+        return {'attachTime': volume['attach_time'],
+                'device': volume['mountpoint'],
+                'instanceId': instance_id,
+                'requestId': context.request_id,
+                'status': volume['attach_status'],
+                'volumeId': volume_id}
+
+    def detach_volume(self, context, volume_id, **kwargs):
+        volume = self.volume_api.get(context, volume_id)
+        instance = self.compute_api.detach_volume(context, volume_id)
+        return {'attachTime': volume['attach_time'],
+                'device': volume['mountpoint'],
+                'instanceId': id_to_ec2_id(instance['id']),
+                'requestId': context.request_id,
+                'status': volume['attach_status'],
+                'volumeId': volume_id}
+
+    def _convert_to_set(self, lst, label):
+        if lst == None or lst == []:
+            return None
+        if not isinstance(lst, list):
+            lst = [lst]
+        return [{label: x} for x in lst]
+
     def describe_instances(self, context, **kwargs):
         return self._format_describe_instances(context)
@@ -805,21 +751,6 @@ class CloudController(object):
             db.instance_update(context, inst['id'], kwargs)
         return True
 
-    def delete_volume(self, context, volume_id, **kwargs):
-        # TODO: return error if not authorized
-        volume_ref = db.volume_get(context, volume_id)
-        if volume_ref['status'] != "available":
-            raise exception.ApiError(_("Volume status must be available"))
-        now = datetime.datetime.utcnow()
-        db.volume_update(context, volume_ref['id'], {'status': 'deleting',
-                                                     'terminated_at': now})
-        host = volume_ref['host']
-        rpc.cast(context,
-                 db.queue_get_for(context, FLAGS.volume_topic, host),
-                 {"method": "delete_volume",
-                  "args": {"volume_id": volume_ref['id']}})
-        return True
-
     def describe_images(self, context, image_id=None, **kwargs):
         # Note: image_id is a list!
         images = self.image_service.index(context)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 28af434e3f1f..870fcdbe4da1 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -30,6 +30,7 @@ from nova import flags
 from nova import quota
 from nova import rpc
 from nova import utils
+from nova import volume
 from nova.compute import instance_types
 from nova.db import base
 
@@ -44,13 +45,17 @@ def generate_default_hostname(instance_id):
 class ComputeAPI(base.Base):
     """API for interacting with the compute manager."""
 
-    def __init__(self, network_manager=None, image_service=None, **kwargs):
+    def __init__(self, network_manager=None, image_service=None,
+                 volume_api=None, **kwargs):
         if not network_manager:
             network_manager = utils.import_object(FLAGS.network_manager)
         self.network_manager = network_manager
         if not image_service:
             image_service = utils.import_object(FLAGS.image_service)
         self.image_service = image_service
+        if not volume_api:
+            volume_api = volume.API()
+        self.volume_api = volume_api
         super(ComputeAPI, self).__init__(**kwargs)
 
     def get_network_topic(self, context, instance_id):
@@ -298,3 +303,30 @@ class ComputeAPI(base.Base):
                  self.db.queue_get_for(context, FLAGS.compute_topic, host),
                  {"method": "unrescue_instance",
                   "args": {"instance_id": instance['id']}})
+
+    def attach_volume(self, context, instance_id, volume_id, device):
+        if not re.match("^/dev/[a-z]d[a-z]+$", device):
+            raise exception.ApiError(_("Invalid device specified: %s. "
+                                     "Example device: /dev/vdb") % device)
+        self.volume_api.check_attach(context, volume_id)
+        instance = self.get_instance(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "attach_volume",
+                  "args": {"volume_id": volume_id,
+                           "instance_id": instance_id,
+                           "mountpoint": device}})
+
+    def detach_volume(self, context, volume_id):
+        instance = self.db.volume_get_instance(context.elevated(), volume_id)
+        if not instance:
+            raise exception.ApiError(_("Volume isn't attached to anything!"))
+        self.volume_api.check_detach(context, volume_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "detach_volume",
+                  "args": {"instance_id": instance['id'],
+                           "volume_id": volume_id}})
+        return instance
diff --git a/nova/volume/__init__.py b/nova/volume/__init__.py
index d6e944fc0bdf..48ecdbe68b17 100644
--- a/nova/volume/__init__.py
+++ b/nova/volume/__init__.py
@@ -17,15 +17,84 @@
 # under the License.
 
 """
-:mod:`nova.volume` -- Nova Block Storage
-=====================================================
-
-.. automodule:: nova.volume
-   :platform: Unix
-.. moduleauthor:: Jesse Andrews
-.. moduleauthor:: Devin Carlen
-.. moduleauthor:: Vishvananda Ishaya
-.. moduleauthor:: Joshua McKenty
-.. moduleauthor:: Manish Singh
-.. moduleauthor:: Andy Smith
+Handles all requests relating to volumes.
 """
+
+import datetime
+import logging
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import quota
+from nova import rpc
+from nova.db import base
+
+FLAGS = flags.FLAGS
+flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
+
+
+class API(base.Base):
+    """API for interacting with the volume manager."""
+
+    def create(self, context, size, name, description):
+        if quota.allowed_volumes(context, 1, size) < 1:
+            logging.warn("Quota exceeeded for %s, tried to create %sG volume",
+                         context.project_id, size)
+            raise quota.QuotaError("Volume quota exceeded. You cannot "
+                                   "create a volume of size %s" % size)
+
+        options = {
+            'size': size,
+            'user_id': context.user.id,
+            'project_id': context.project_id,
+            'availability_zone': FLAGS.storage_availability_zone,
+            'status': "creating",
+            'attach_status': "detached",
+            'display_name': name,
+            'display_description': description}
+
+        volume = self.db.volume_create(context, options)
+        rpc.cast(context,
+                 FLAGS.scheduler_topic,
+                 {"method": "create_volume",
+                  "args": {"topic": FLAGS.volume_topic,
+                           "volume_id": volume['id']}})
+        return volume
+
+    def delete(self, context, volume_id):
+        volume = self.db.volume_get(context, volume_id)
+        if volume['status'] != "available":
+            raise exception.ApiError(_("Volume status must be available"))
+        now = datetime.datetime.utcnow()
+        self.db.volume_update(context, volume_id, {'status': 'deleting',
+                                                   'terminated_at': now})
+        host = volume['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.volume_topic, host),
+                 {"method": "delete_volume",
+                  "args": {"volume_id": volume_id}})
+
+    def update(self, context, volume_id, fields):
+        self.db.volume_update(context, volume_id, fields)
+
+    def get(self, context, volume_id=None):
+        if volume_id is not None:
+            return self.db.volume_get(context, volume_id)
+        if context.user.is_admin():
+            return self.db.volume_get_all(context)
+        return self.db.volume_get_all_by_project(context, context.project_id)
+
+    def check_attach(self, context, volume_id):
+        volume = self.db.volume_get(context, volume_id)
+        # TODO(vish): abstract status checking?
+        if volume['status'] != "available":
+            raise exception.ApiError(_("Volume status must be available"))
+        if volume['attach_status'] == "attached":
+            raise exception.ApiError(_("Volume is already attached"))
+
+    def check_detach(self, context, volume_id):
+        volume = self.db.volume_get(context, volume_id)
+        # TODO(vish): abstract status checking?
+        if volume['status'] == "available":
+            raise exception.ApiError(_("Volume is already detached"))

From 24e253a1feaa0a39e4095f447f62f7ea9b43c8bb Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 29 Dec 2010 18:30:01 -0600
Subject: [PATCH 048/147] moved check lock decorator to compute api level.
 altered openstack.test_servers accordingly and wrote test for lock in
 tests.test_compute

---
 nova/api/openstack/servers.py            | 43 ----------------------
 nova/compute/api.py                      | 44 +++++++++++++++++++++++
 nova/tests/api/openstack/test_servers.py | 45 ++++++++----------------
 nova/tests/test_compute.py               | 17 +++++++++
 4 files changed, 76 insertions(+), 73 deletions(-)

diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 24fd5000ca89..497a04ae37b3 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -17,7 +17,6 @@
 
 import logging
 import traceback
-import functools
 
 from webob import exc
 
@@ -36,41 +35,6 @@ LOG = logging.getLogger('server')
 LOG.setLevel(logging.DEBUG)
 
 
-def checks_instance_lock(function):
-    """
-    decorator used for preventing action against locked instances
-    unless, of course, you happen to be admin
-
-    """
-
-    @functools.wraps(function)
-    def decorated_function(*args, **kwargs):
-
-        # grab args to function
-        try:
-            if 'req' in kwargs:
-                req = kwargs['req']
-            else:
-                req = args[1]
-            if 'id' in kwargs:
-                _id = kwargs['id']
-            else:
-                req = args[2]
-            context = req.environ['nova.context']
-        except:
-            logging.error(_("check_lock: argument error: |%s|, |%s|"), args,
-                          kwargs)
-        # if admin or unlocked call function, otherwise 404
-        locked = compute_api.ComputeAPI().get_lock(context, _id)
-        admin = req.environ['nova.context'].is_admin
-        if admin or not locked:
-            return function(*args, **kwargs)
-
-        return faults.Fault(exc.HTTPMethodNotAllowed())
-
-    return decorated_function
-
-
 def _translate_detail_keys(inst):
     """ Coerces into dictionary format, mapping everything to
     Rackspace-like attributes for return"""
@@ -147,7 +111,6 @@ class Controller(wsgi.Controller):
         except exception.NotFound:
             return faults.Fault(exc.HTTPNotFound())
 
-    @checks_instance_lock
     def delete(self, req, id):
        """ Destroys a server """
         try:
@@ -175,7 +138,6 @@ class Controller(wsgi.Controller):
             key_data=key_pair['public_key'])
         return _translate_keys(instances[0])
 
-    @checks_instance_lock
     def update(self, req, id):
         """ Updates the server name or password """
         inst_dict = self._deserialize(req.body, req)
@@ -197,7 +159,6 @@ class Controller(wsgi.Controller):
             return faults.Fault(exc.HTTPNotFound())
         return exc.HTTPNoContent()
 
-    @checks_instance_lock
     def action(self, req, id):
         """ Multi-purpose method used to reboot, rebuild, and
         resize a server """
@@ -258,7 +219,6 @@ class Controller(wsgi.Controller):
             return faults.Fault(exc.HTTPUnprocessableEntity())
         return exc.HTTPAccepted()
 
-    @checks_instance_lock
     def pause(self, req, id):
         """ Permit Admins to Pause the server. """
         ctxt = req.environ['nova.context']
@@ -270,7 +230,6 @@ class Controller(wsgi.Controller):
             return faults.Fault(exc.HTTPUnprocessableEntity())
         return exc.HTTPAccepted()
 
-    @checks_instance_lock
     def unpause(self, req, id):
         """ Permit Admins to Unpause the server. """
         ctxt = req.environ['nova.context']
@@ -282,7 +241,6 @@ class Controller(wsgi.Controller):
             return faults.Fault(exc.HTTPUnprocessableEntity())
         return exc.HTTPAccepted()
 
-    @checks_instance_lock
     def suspend(self, req, id):
         """permit admins to suspend the server"""
         context = req.environ['nova.context']
@@ -294,7 +252,6 @@ class Controller(wsgi.Controller):
             return faults.Fault(exc.HTTPUnprocessableEntity())
         return exc.HTTPAccepted()
 
-    @checks_instance_lock
     def resume(self, req, id):
         """permit admins to resume the server from suspend"""
         context = req.environ['nova.context']
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 361ab991418f..d720a8f2c740 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -23,6 +23,7 @@ Handles all API requests relating to instances (guest vms).
 import datetime
 import logging
 import time
+import functools
 
 from nova import db
 from nova import exception
@@ -36,6 +37,40 @@ from nova.db import base
 FLAGS = flags.FLAGS
 
 
+def checks_instance_lock(function):
+    """
+    decorator used for preventing action against locked instances
+    unless, of course, you happen to be admin
+
+    """
+
+    @functools.wraps(function)
+    def decorated_function(*args, **kwargs):
+
+        # grab args to function
+        try:
+            if 'context' in kwargs:
+                context = kwargs['context']
+            else:
+                context = args[1]
+            if 'instance_id' in kwargs:
+                instance_id = kwargs['instance_id']
+            else:
+                instance_id = args[2]
+            locked = ComputeAPI().get_lock(context, instance_id)
+            admin = context.is_admin
+        except:
+            logging.error(_("check_instance_lock: argument error: |%s|, |%s|"),
+                          args,
+                          kwargs)
+        # if admin or unlocked call function, otherwise 405
+        if admin or not locked:
+            return function(*args, **kwargs)
+        raise Exception(_("Instance is locked, cannot execute |%s|"), function)
+
+    return decorated_function
+
+
 def generate_default_hostname(internal_id):
     """Default function to generate a hostname given an instance reference."""
     return str(internal_id)
@@ -198,6 +233,7 @@ class ComputeAPI(base.Base):
                       'project_id': context.project_id}
             db.security_group_create(context, values)
 
+    @checks_instance_lock
     def update_instance(self, context, instance_id, **kwargs):
         """Updates the instance in the datastore.
 
@@ -212,6 +248,7 @@ class ComputeAPI(base.Base):
 
         """
         return self.db.instance_update(context, instance_id, kwargs)
 
+    @checks_instance_lock
     def delete_instance(self, context, instance_id):
         logging.debug("Going to try and terminate %d" % instance_id)
         try:
@@ -258,6 +295,7 @@ class ComputeAPI(base.Base):
     def get_instance(self, context, instance_id):
         return self.db.instance_get_by_internal_id(context, instance_id)
 
+    @checks_instance_lock
     def reboot(self, context, instance_id):
         """Reboot the given instance."""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -267,6 +305,7 @@ class ComputeAPI(base.Base):
                  {"method": "reboot_instance",
                   "args": {"instance_id": instance['id']}})
 
+    @checks_instance_lock
     def pause(self, context, instance_id):
         """Pause the given instance."""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -276,6 +315,7 @@ class ComputeAPI(base.Base):
                  {"method": "pause_instance",
                   "args": {"instance_id": instance['id']}})
 
+    @checks_instance_lock
     def unpause(self, context, instance_id):
         """Unpause the given instance."""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -285,6 +325,7 @@ class ComputeAPI(base.Base):
                  {"method": "unpause_instance",
                   "args": {"instance_id": instance['id']}})
 
+    @checks_instance_lock
     def suspend(self, context, instance_id):
         """suspend the instance with instance_id"""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -294,6 +335,7 @@ class ComputeAPI(base.Base):
                  {"method": "suspend_instance",
                   "args": {"instance_id": instance['id']}})
 
+    @checks_instance_lock
     def resume(self, context, instance_id):
         """resume the instance with instance_id"""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -303,6 +345,7 @@ class ComputeAPI(base.Base):
                  {"method": "resume_instance",
                   "args": {"instance_id": instance['id']}})
 
+    @checks_instance_lock
     def rescue(self, context, instance_id):
         """Rescue the given instance."""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -312,6 +355,7 @@ class ComputeAPI(base.Base):
                  {"method": "rescue_instance",
                   "args": {"instance_id": instance['id']}})
 
+    @checks_instance_lock
     def unrescue(self, context, instance_id):
         """Unrescue the given instance."""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 56a5a9b275ce..05419fb70f51 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -321,47 +321,32 @@ class ServersTest(unittest.TestCase):
         self.assertEqual(self.server_delete_called, True)
 
     def test_lock(self):
-        # part one: stubs it to be locked and test pause
+        FLAGS.allow_admin_api = True
+        body = dict(server=dict(
+            name='server_test', imageId=2, flavorId=2, metadata={},
+            personality={}))
+        req = webob.Request.blank('/v1.0/servers/1/pause')
+        req.method = 'POST'
+        req.content_type = 'application/json'
+        req.body = json.dumps(body)
+
+        # part one: stubs it to be locked and attempt pause expecting exception
         def get_locked(self, context, id):
             return True
-
-        # set get to return locked
         self.stubs.Set(nova.compute.api.ComputeAPI, 'get_lock', get_locked)
 
-        # attempt to pause
-        FLAGS.allow_admin_api = True
-        body = dict(server=dict(
-            name='server_test', imageId=2, flavorId=2, metadata={},
-            personality={}))
-        req = webob.Request.blank('/v1.0/servers/1/pause')
-        req.method = 'POST'
-        req.content_type = 'application/json'
-        req.body = json.dumps(body)
-        res = req.get_response(nova.api.API('os'))
+        # pause should raise exception on locked instance
+        self.assertRaises(Exception, req.get_response, nova.api.API('os'))
 
-        # expect a 404 since it was locked
-        self.assertEqual(res.status_int, 405)
-
-        # Part two: stubs it to be unlocked and test pause
+        # Part two: stubs it to be unlocked and attempt pause expecting success
         def get_unlocked(self, context, id):
             return False
-
-        # set get to return locked
         self.stubs.Set(nova.compute.api.ComputeAPI, 'get_lock', get_unlocked)
 
-        # attempt to pause
-        FLAGS.allow_admin_api = True
-        body = dict(server=dict(
-            name='server_test', imageId=2, flavorId=2, metadata={},
-            personality={}))
-        req = webob.Request.blank('/v1.0/servers/1/pause')
-        req.method = 'POST'
-        req.content_type = 'application/json'
-        req.body = json.dumps(body)
         res = req.get_response(nova.api.API('os'))
 
-        # expect a 202 since it was unlocked
-        self.assertEqual(res.status_int, 202)
+        # expecting no exception, test will fail if exception is raised
+        res = req.get_response(nova.api.API('os'))
 
 
 if __name__ == "__main__":
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index bcb8a1526a13..422d59da0ed0 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -170,3 +170,20 @@ class ComputeTestCase(test.TestCase):
                           self.context,
                           instance_id)
         self.compute.terminate_instance(self.context, instance_id)
+
+    def test_lock(self):
+        """ensure locked instance cannot be changed"""
+        instance_id = self._create_instance()
+        self.compute.run_instance(self.context, instance_id)
+        self.compute.pause_instance(self.context, instance_id)
+        self.compute.lock_instance(self.context, instance_id)
+
+        # pause should raise exception on locked instance
+        self.assertRaises(Exception, self.compute.unpause_instance,
+                          self.context, instance_id)
+
+        # test will fail if exception is raised
+        self.compute.unlock_instance(self.context, instance_id)
+        self.compute.unpause_instance(self.context, instance_id)
+
+        self.compute.terminate_instance(self.context, instance_id)

From 74785bf8c070bf0760724b3412f4ee1bb05cf72b Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 29 Dec 2010 18:34:49 -0600
Subject: [PATCH 049/147] fixed variables being out of scope in lock decorator

---
 nova/compute/api.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/nova/compute/api.py b/nova/compute/api.py
index d720a8f2c740..073129c13e71 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -47,6 +47,10 @@ def checks_instance_lock(function):
     @functools.wraps(function)
     def decorated_function(*args, **kwargs):
 
+        # assume worst case (have to declare so they are in scope)
+        admin = False
+        locked = True
+
         # grab args to function
         try:
             if 'context' in kwargs:
@@ -60,7 +64,7 @@ def checks_instance_lock(function):
             locked = ComputeAPI().get_lock(context, instance_id)
             admin = context.is_admin
         except:
-            logging.error(_("check_instance_lock: argument error: |%s|, |%s|"),
+            raise Exception(_("check_instance_lock argument error |%s|, |%s|"),
                           args,
                           kwargs)
         # if admin or unlocked call function, otherwise 405

From d06f85c611adf244f2c757f023c92c2b6cad2e7c Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 29 Dec 2010 18:40:03 -0600
Subject: [PATCH 050/147] altered error exception/logging

---
 nova/compute/api.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/nova/compute/api.py b/nova/compute/api.py
index 073129c13e71..19459c6d9154 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -64,8 +64,7 @@ def checks_instance_lock(function):
             locked = ComputeAPI().get_lock(context, instance_id)
             admin = context.is_admin
         except:
-            raise Exception(_("check_instance_lock argument error |%s|, |%s|"),
-                          args,
+            logging.error(_("check_instance_lock: arguments: |%s| |%s|"), args,
                           kwargs)
         # if admin or unlocked call function, otherwise 405

From 837724193ece16310ff588a84d23891a75ced2f2 Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 29 Dec 2010 18:46:36 -0600
Subject: [PATCH 051/147] altered error exception/logging

---
 nova/compute/api.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/nova/compute/api.py b/nova/compute/api.py
index 19459c6d9154..6602f2534b66 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -63,9 +63,11 @@ def checks_instance_lock(function):
                 instance_id = args[2]
             locked = ComputeAPI().get_lock(context, instance_id)
             admin = context.is_admin
-        except:
+        except Excetion as e:
             logging.error(_("check_instance_lock: arguments: |%s| |%s|"), args,
                           kwargs)
+            raise e
+
         # if admin or unlocked call function, otherwise 405
         if admin or not locked:
             return function(*args, **kwargs)

From 6f76367d2fefcec9b957352dd60e76c2cc3ba233 Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 29 Dec 2010 18:49:45 -0600
Subject: [PATCH 052/147] typo, trying to hurry.. look where that got me

---
 nova/compute/api.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/compute/api.py b/nova/compute/api.py
index 6602f2534b66..232d1f26b879 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -63,7 +63,7 @@ def checks_instance_lock(function):
                 instance_id = args[2]
             locked = ComputeAPI().get_lock(context, instance_id)
             admin = context.is_admin
-        except Excetion as e:
+        except Exception as e:
             logging.error(_("check_instance_lock: arguments: |%s| |%s|"), args,
                           kwargs)
             raise e

From b848f7459eb65ad365177d831783b3d63818f977 Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 29 Dec 2010 18:57:33 -0600
Subject: [PATCH 053/147] added some logging

---
 nova/compute/api.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/nova/compute/api.py b/nova/compute/api.py
index 232d1f26b879..0513ce95d38b 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -47,6 +47,9 @@ def checks_instance_lock(function):
     @functools.wraps(function)
     def decorated_function(*args, **kwargs):
 
+        logging.info(_("check_instance_locks decorating |%s|"), function)
+        logging.info(_("check_instance_locks: arguments: |%s| |%s|"), args,
+                     kwargs)
         # assume worst case (have to declare so they are in scope)
         admin = False
         locked = True

From 32b310f430c5db05c99de65a5bd400675770ef1d Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 29 Dec 2010 19:27:43 -0600
Subject: [PATCH 054/147] removed db.set_lock, using update_instance instead

---
 nova/compute/manager.py   |  4 ++--
 nova/db/sqlalchemy/api.py | 12 ------------
 2 files changed, 2 insertions(+), 14 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 05f1e44a2265..9a33c7cac899 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -339,7 +339,7 @@ class ComputeManager(manager.Manager):
         instance_ref = self.db.instance_get(context, instance_id)
 
         logging.debug(_('instance %s: locking'), instance_ref['internal_id'])
-        self.db.instance_set_lock(context, instance_id, True)
+        self.db.instance_update(context, instance_id, {'locked': True})
 
     @exception.wrap_exception
     def unlock_instance(self, context, instance_id):
@@ -351,7 +351,7 @@ class ComputeManager(manager.Manager):
         instance_ref = self.db.instance_get(context, instance_id)
 
         logging.debug(_('instance %s: unlocking'), instance_ref['internal_id'])
-        self.db.instance_set_lock(context, instance_id, False)
+        self.db.instance_update(context, instance_id, {'locked': False})
 
     @exception.wrap_exception
     def get_console_output(self, context, instance_id):
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index ca71df7b3968..7e945e4cb3d8 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -856,18 +856,6 @@ def instance_action_create(context, values):
     return action_ref
 
 
-@require_admin_context
-def instance_set_lock(context, instance_id, lock):
-    """
-    twiddle the locked bit in the db
-    lock is a boolean
-
-    """
-    db.instance_update(context,
-                       instance_id,
-                       {'locked': lock})
-
-
 ###################

From f1523f2fd19cde4ddbb046dc0362a0ac7d6b79e8 Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 29 Dec 2010 20:48:33 -0600
Subject: [PATCH 055/147] moved check lock decorator from the compute api to
 the compute manager... when it rains it pours

---
 nova/compute/api.py     | 52 ---------------------------
 nova/compute/manager.py | 65 +++++++++++++++++++++++++++++++++++
 2 files changed, 65 insertions(+), 52 deletions(-)

diff --git a/nova/compute/api.py b/nova/compute/api.py
index 0513ce95d38b..361ab991418f 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -23,7 +23,6 @@ Handles all API requests relating to instances (guest vms).
 import datetime
 import logging
 import time
-import functools
 
 from nova import db
 from nova import exception
@@ -37,48 +36,6 @@ from nova.db import base
 FLAGS = flags.FLAGS
 
 
-def checks_instance_lock(function):
-    """
-    decorator used for preventing action against locked instances
-    unless, of course, you happen to be admin
-
-    """
-
-    @functools.wraps(function)
-    def decorated_function(*args, **kwargs):
-
-        logging.info(_("check_instance_locks decorating |%s|"), function)
-        logging.info(_("check_instance_locks: arguments: |%s| |%s|"), args,
-                     kwargs)
-        # assume worst case (have to declare so they are in scope)
-        admin = False
-        locked = True
-
-        # grab args to function
-        try:
-            if 'context' in kwargs:
-                context = kwargs['context']
-            else:
-                context = args[1]
-            if 'instance_id' in kwargs:
-                instance_id = kwargs['instance_id']
-            else:
-                instance_id = args[2]
-            locked = ComputeAPI().get_lock(context, instance_id)
-            admin = context.is_admin
-        except Exception as e:
-            logging.error(_("check_instance_lock: arguments: |%s| |%s|"), args,
-                          kwargs)
-            raise e
-
-        # if admin or unlocked call function, otherwise 405
-        if admin or not locked:
-            return function(*args, **kwargs)
-        raise Exception(_("Instance is locked, cannot execute |%s|"), function)
-
-    return decorated_function
-
-
 def generate_default_hostname(internal_id):
     """Default function to generate a hostname given an instance reference."""
     return str(internal_id)
@@ -241,7 +198,6 @@ class ComputeAPI(base.Base):
                       'project_id': context.project_id}
             db.security_group_create(context, values)
 
-    @checks_instance_lock
     def update_instance(self, context, instance_id, **kwargs):
         """Updates the instance in the datastore.
 
@@ -256,7 +212,6 @@ class ComputeAPI(base.Base):
 
         """
         return self.db.instance_update(context, instance_id, kwargs)
 
-    @checks_instance_lock
     def delete_instance(self, context, instance_id):
         logging.debug("Going to try and terminate %d" % instance_id)
         try:
@@ -303,7 +258,6 @@ class ComputeAPI(base.Base):
     def get_instance(self, context, instance_id):
         return self.db.instance_get_by_internal_id(context, instance_id)
 
-    @checks_instance_lock
     def reboot(self, context, instance_id):
         """Reboot the given instance."""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -313,7 +267,6 @@ class ComputeAPI(base.Base):
                  {"method": "reboot_instance",
                   "args": {"instance_id": instance['id']}})
 
-    @checks_instance_lock
     def pause(self, context, instance_id):
         """Pause the given instance."""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -323,7 +276,6 @@ class ComputeAPI(base.Base):
                  {"method": "pause_instance",
                   "args": {"instance_id": instance['id']}})
 
-    @checks_instance_lock
     def unpause(self, context, instance_id):
         """Unpause the given instance."""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -333,7 +285,6 @@ class ComputeAPI(base.Base):
                  {"method": "unpause_instance",
                   "args": {"instance_id": instance['id']}})
 
-    @checks_instance_lock
     def suspend(self, context, instance_id):
         """suspend the instance with instance_id"""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -343,7 +294,6 @@ class ComputeAPI(base.Base):
                  {"method": "suspend_instance",
                   "args": {"instance_id": instance['id']}})
 
-    @checks_instance_lock
     def resume(self, context, instance_id):
         """resume the instance with instance_id"""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -353,7 +303,6 @@ class ComputeAPI(base.Base):
                  {"method": "resume_instance",
                   "args": {"instance_id": instance['id']}})
 
-    @checks_instance_lock
     def rescue(self, context, instance_id):
         """Rescue the given instance."""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -363,7 +312,6 @@ class ComputeAPI(base.Base):
                  {"method": "rescue_instance",
                   "args": {"instance_id": instance['id']}})
 
-    @checks_instance_lock
     def unrescue(self, context, instance_id):
         """Unrescue the given instance."""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 9a33c7cac899..22415959626c 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -36,6 +36,7 @@ terminating it.
 import datetime
 import logging
+import functools
 
 from nova import exception
 from nova import flags
@@ -53,6 +54,48 @@
 flags.DEFINE_string('stub_network', False,
                     'Stub network related code')
 
 
+def checks_instance_lock(function):
+    """
+    decorator used for preventing action against locked instances
+    unless, of course, you happen to be admin
+
+    """
+
+    @functools.wraps(function)
+    def decorated_function(*args, **kwargs):
+
+        logging.info(_("check_instance_locks decorating |%s|"), function)
+        logging.info(_("check_instance_locks: arguments: |%s| |%s|"), args,
+                     kwargs)
+        # assume worst case (have to declare so they are in scope)
+        admin = False
+        locked = True
+
+        # grab args to function
+        try:
+            if 'context' in kwargs:
+                context = kwargs['context']
+            else:
+                context = args[1]
+            if 'instance_id' in kwargs:
+                instance_id = kwargs['instance_id']
+            else:
+                instance_id = args[2]
+            locked = args[0].get_locked(context, instance_id)
+            admin = context.is_admin
+        except Exception as e:
+            logging.error(_("check_instance_lock: arguments: |%s| |%s|"), args,
+                          kwargs)
+            raise e
+
+        # if admin or unlocked call function, otherwise 405
+        if admin or not locked:
+            return function(*args, **kwargs)
+        raise Exception(_("Instance is locked, cannot execute |%s|"), function)
+
+    return decorated_function
+
+
 class ComputeManager(manager.Manager):
 
     """Manages the running instances from creation to destruction."""
@@ -158,6 +201,7 @@ class ComputeManager(manager.Manager):
         self._update_state(context, instance_id)
 
     @exception.wrap_exception
+    @checks_instance_lock
     def terminate_instance(self, context, instance_id):
         """Terminate an instance on this machine."""
         context = context.elevated()
@@ -202,6 +246,7 @@ class ComputeManager(manager.Manager):
         self.db.instance_destroy(context, instance_id)
 
     @exception.wrap_exception
+    @checks_instance_lock
     def reboot_instance(self, context, instance_id):
         """Reboot an instance on this server."""
         context = context.elevated()
@@ -225,6 +270,7 @@ class ComputeManager(manager.Manager):
         self._update_state(context, instance_id)
 
     @exception.wrap_exception
+    @checks_instance_lock
     def rescue_instance(self, context, instance_id):
         """Rescue an instance on this server."""
         context = context.elevated()
@@ -241,6 +287,7 @@ class ComputeManager(manager.Manager):
         self._update_state(context, instance_id)
 
     @exception.wrap_exception
+    @checks_instance_lock
     def unrescue_instance(self, context, instance_id):
         """Rescue an instance on this server."""
         context = context.elevated()
@@ -261,6 +308,7 @@ class ComputeManager(manager.Manager):
         self._update_state(context, instance_id)
 
     @exception.wrap_exception
+    @checks_instance_lock
    def pause_instance(self, context, instance_id):
         """Pause an instance on this server."""
         context = context.elevated()
@@ -279,6 +327,7 @@ class ComputeManager(manager.Manager):
                                                   result))
 
     @exception.wrap_exception
+    @checks_instance_lock
     def unpause_instance(self, context, instance_id):
         """Unpause a paused instance on this server."""
         context = context.elevated()
@@ -297,6 +346,7 @@ class ComputeManager(manager.Manager):
                                                   result))
 
     @exception.wrap_exception
+    @checks_instance_lock
     def suspend_instance(self, context, instance_id):
         """suspend the instance with instance_id"""
         context = context.elevated()
@@ -314,6 +364,7 @@ class ComputeManager(manager.Manager):
                                                   result))
 
     @exception.wrap_exception
+    @checks_instance_lock
     def resume_instance(self, context, instance_id):
         """resume the suspended instance with instance_id"""
         context = context.elevated()
@@ -353,6 +404,18 @@ class ComputeManager(manager.Manager):
         logging.debug(_('instance %s: unlocking'), instance_ref['internal_id'])
         self.db.instance_update(context, instance_id, {'locked': False})
 
+    @exception.wrap_exception
+    def get_locked(self, context, instance_id):
+        """
+        return the boolean state of (instance with instance_id)'s lock
+
+        """
+        context = context.elevated()
+        logging.debug(_('instance %s: getting locked'),
+                      instance_ref['internal_id'])
+        instance_ref = self.db.instance_get(context, instance_id)
+        return instance_ref['locked']
+
     @exception.wrap_exception
     def get_console_output(self, context, instance_id):
         """Send the console output for an instance."""
@@ -363,6 +426,7 @@ class ComputeManager(manager.Manager):
         return self.driver.get_console_output(instance_ref)
 
     @exception.wrap_exception
+    @checks_instance_lock
     def attach_volume(self, context, instance_id, volume_id, mountpoint):
         """Attach a volume to an instance."""
         context = context.elevated()
@@ -392,6 +456,7 @@ class ComputeManager(manager.Manager):
         return True
 
     @exception.wrap_exception
+    @checks_instance_lock
     def detach_volume(self, context, instance_id, volume_id):
         """Detach a volume from an instance."""
         context = context.elevated()

From 656233762a61929d43f671e4765d52f25299562f Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 29 Dec 2010 20:52:48 -0600
Subject: [PATCH 056/147] syntax error

---
 nova/compute/manager.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 22415959626c..cd2c95d8dbc4 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -411,8 +411,7 @@ class ComputeManager(manager.Manager):
 
         """
         context = context.elevated()
-        logging.debug(_('instance %s: getting locked'),
-                      instance_ref['internal_id'])
+        logging.debug(_('instance %s: getting locked state'), instance_id)
         instance_ref = self.db.instance_get(context, instance_id)
         return instance_ref['locked']

From 2515d8ee9e32e0658b6179e900cf2e0e87a032dc Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 29 Dec 2010 21:16:53 -0600
Subject: [PATCH 057/147] fixed up the compute lock test, was failing because
 the context was always admin

---
 nova/tests/test_compute.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 422d59da0ed0..f914294f0fda 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -175,15 +175,15 @@ class ComputeTestCase(test.TestCase):
         """ensure locked instance cannot be changed"""
         instance_id = self._create_instance()
         self.compute.run_instance(self.context, instance_id)
-        self.compute.pause_instance(self.context, instance_id)
         self.compute.lock_instance(self.context, instance_id)
+        non_admin_context = context.RequestContext(None, None, False, False)
 
         # pause should raise exception on locked instance
-        self.assertRaises(Exception, self.compute.unpause_instance,
-                          self.context, instance_id)
+        self.assertRaises(Exception, self.compute.reboot_instance,
+                          non_admin_context, instance_id)
 
         # test will fail if exception is raised
         self.compute.unlock_instance(self.context, instance_id)
-        self.compute.unpause_instance(self.context, instance_id)
+        self.compute.reboot_instance(non_admin_context, instance_id)
 
         self.compute.terminate_instance(self.context, instance_id)

From da7d31d5a4fa712ae24f6ec56d7469a3ee453c87 Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 29 Dec 2010 21:26:45 -0600
Subject: [PATCH 058/147] removed tests.api.openstack.test_servers test_lock,
 to hell with it.
 i'm not even sure if testing lock needs to be at this level

---
 nova/tests/api/openstack/test_servers.py | 28 ------------------------
 1 file changed, 28 deletions(-)

diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 05419fb70f51..5d23db588863 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -320,34 +320,6 @@ class ServersTest(unittest.TestCase):
         self.assertEqual(res.status, '202 Accepted')
         self.assertEqual(self.server_delete_called, True)
 
-    def test_lock(self):
-        FLAGS.allow_admin_api = True
-        body = dict(server=dict(
-            name='server_test', imageId=2, flavorId=2, metadata={},
-            personality={}))
-        req = webob.Request.blank('/v1.0/servers/1/pause')
-        req.method = 'POST'
-        req.content_type = 'application/json'
-        req.body = json.dumps(body)
-
-        # part one: stubs it to be locked and attempt pause expecting exception
-        def get_locked(self, context, id):
-            return True
-        self.stubs.Set(nova.compute.api.ComputeAPI, 'get_lock', get_locked)
-
-        # pause should raise exception on locked instance
-        self.assertRaises(Exception, req.get_response, nova.api.API('os'))
-
-        # Part two: stubs it to be unlocked and attempt pause expecting success
-        def get_unlocked(self, context, id):
-            return False
-        self.stubs.Set(nova.compute.api.ComputeAPI, 'get_lock', get_unlocked)
-
-        res = req.get_response(nova.api.API('os'))
-
-        # expecting no exception, test will fail if exception is raised
-        res = req.get_response(nova.api.API('os'))
-
 
 if __name__ == "__main__":
     unittest.main()

From 4531600425d71659581aa549bdc5e719e41efc9e Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 29 Dec 2010 22:08:38 -0600
Subject: [PATCH 059/147] altered the compute lock test

---
 nova/compute/manager.py    | 16 ++++++++++------
 nova/tests/test_compute.py | 12 +++++++-----
 2 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index cd2c95d8dbc4..2671d401c367 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -70,6 +70,7 @@ def checks_instance_lock(function):
         # assume worst case (have to declare so they are in scope)
         admin = False
         locked = True
+        instance_id = False
 
         # grab args to function
         try:
@@ -81,17 +82,20 @@ def checks_instance_lock(function):
                 instance_id = kwargs['instance_id']
             else:
                 instance_id = args[2]
-            locked = args[0].get_locked(context, instance_id)
+            locked = args[0].get_lock(context, instance_id)
             admin = context.is_admin
         except Exception as e:
-            logging.error(_("check_instance_lock: arguments: |%s| |%s|"), args,
+            logging.error(_("check_instance_lock fail! args: |%s| |%s|"), args,
                           kwargs)
             raise e
 
-        # if admin or unlocked call function, otherwise 405
+        # if admin or unlocked call function otherwise log error
         if admin or not locked:
-            return function(*args, **kwargs)
-        raise Exception(_("Instance is locked, cannot execute |%s|"), function)
+            function(*args, **kwargs)
+        else:
+            logging.error(_("Instance |%s| is locked, cannot execute |%s|"),
+                          instance_id, function)
+            return False
 
     return decorated_function
 
@@ -405,7 +409,7 @@ class ComputeManager(manager.Manager):
         self.db.instance_update(context, instance_id, {'locked': False})
 
     @exception.wrap_exception
-    def get_locked(self, context, instance_id):
+    def get_lock(self, context, instance_id):
         """
         return the boolean state of (instance with instance_id)'s lock
 
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index f914294f0fda..78582b75a3ec 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -178,12 +178,14 @@ class ComputeTestCase(test.TestCase):
         self.compute.lock_instance(self.context, instance_id)
         non_admin_context = context.RequestContext(None, None, False, False)
 
-        # pause should raise exception on locked instance
-        self.assertRaises(Exception, self.compute.reboot_instance,
-                          non_admin_context, instance_id)
-        # test will fail if exception is raised
+        # decorator for reboot should return False
+        ret_val = self.compute.reboot_instance(non_admin_context,instance_id)
+        self.assertEqual(ret_val, False)
+
+        # decorator for pause should return the result of the function reboot
         self.compute.unlock_instance(self.context, instance_id)
-        self.compute.reboot_instance(non_admin_context, instance_id)
+        ret_val = self.compute.reboot_instance(non_admin_context,instance_id)
+        self.assertNotEqual(ret_val, None)
 
         self.compute.terminate_instance(self.context, instance_id)

From a4088ce75347acb2ee2f2550c185afb4ce3231de Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Wed, 29 Dec 2010 22:16:34 -0600
Subject: [PATCH 060/147] fixed the compute lock test

---
 nova/tests/test_compute.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 78582b75a3ec..993c4fd3cb42 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -175,17 +175,17 @@ class ComputeTestCase(test.TestCase):
         """ensure locked instance cannot be changed"""
         instance_id = self._create_instance()
         self.compute.run_instance(self.context, instance_id)
-        self.compute.lock_instance(self.context, instance_id)
         non_admin_context = context.RequestContext(None, None, False, False)
 
-        # decorator for reboot should return False
+        # decorator should return False (fail) with locked nonadmin context
+        self.compute.lock_instance(self.context, instance_id)
         ret_val = self.compute.reboot_instance(non_admin_context,instance_id)
         self.assertEqual(ret_val, False)
 
-        # decorator for pause should return the result of the function reboot
+        # decorator should return None (success) with unlocked nonadmin context
         self.compute.unlock_instance(self.context, instance_id)
         ret_val = self.compute.reboot_instance(non_admin_context,instance_id)
-        self.assertNotEqual(ret_val, None)
+        self.assertEqual(ret_val, None)
 
         self.compute.terminate_instance(self.context, instance_id)

From b1a08af498ed6b52e3373a23196ded0396e6d34b Mon Sep 17 00:00:00 2001
From: Eric Day
Date: Wed, 29 Dec 2010 20:30:36 -0800
Subject: [PATCH 061/147] Cleaned up the compute API, mostly consistency with
 other parts of the system and renaming redundant module names.
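
In call-site terms, the rename is nova.compute.api.ComputeAPI -> nova.compute.API,
with the redundant *_instance(s) method names collapsed into plain verbs. A
minimal before/after sketch (illustrative only, not part of the diff below;
`ctxt` is assumed to be a nova request context and `instance_id` an existing
instance):

    # before this change
    from nova.compute import api as compute_api
    api = compute_api.ComputeAPI()
    servers = api.get_instances(ctxt)        # list instances
    api.delete_instance(ctxt, instance_id)   # terminate one

    # after this change
    from nova import compute
    api = compute.API()
    servers = api.get(ctxt)                  # list instances
    server = api.get(ctxt, instance_id)      # fetch a single instance
    api.delete(ctxt, instance_id)            # terminate one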
---
 nova/api/ec2/cloud.py                    |  24 +-
 nova/api/openstack/servers.py            |  22 +-
 nova/compute/__init__.py                 | 324 +++++++++++++++++++++-
 nova/compute/api.py                      | 332 -----------------------
 nova/tests/api/openstack/test_servers.py |   8 +-
 nova/tests/test_compute.py               |  10 +-
 6 files changed, 341 insertions(+), 379 deletions(-)
 delete mode 100644 nova/compute/api.py

diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 74c73e0ddb3d..cc58f3cfe718 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -31,6 +31,7 @@ import os
 
 from nova import context
 import IPy
+from nova import compute
 from nova import crypto
 from nova import db
 from nova import exception
@@ -39,7 +40,6 @@ from nova import quota
 from nova import rpc
 from nova import utils
 from nova import volume
-from nova.compute import api as compute_api
 from nova.compute import instance_types
 
 
@@ -90,9 +90,9 @@ class CloudController(object):
         self.network_manager = utils.import_object(FLAGS.network_manager)
         self.image_service = utils.import_object(FLAGS.image_service)
         self.volume_api = volume.API()
-        self.compute_api = compute_api.ComputeAPI(self.network_manager,
-                                                  self.image_service,
-                                                  self.volume_api)
+        self.compute_api = compute.API(self.network_manager,
+                                       self.image_service,
+                                       self.volume_api)
         self.setup()
 
     def __str__(self):
@@ -116,7 +116,7 @@ class CloudController(object):
     def _get_mpi_data(self, context, project_id):
         result = {}
-        for instance in self.compute_api.get_instances(context, project_id):
+        for instance in self.compute_api.get(context, project_id=project_id):
             if instance['fixed_ip']:
                 line = '%s slots=%d' % (instance['fixed_ip']['address'],
                                         instance['vcpus'])
@@ -440,7 +440,7 @@ class CloudController(object):
         # instance_id is passed in as a list of instances
         ec2_id = instance_id[0]
         instance_id = ec2_id_to_id(ec2_id)
-        instance_ref = self.compute_api.get_instance(context, instance_id)
+        instance_ref = self.compute_api.get(context, instance_id)
         output = rpc.call(context,
                           '%s.%s' % (FLAGS.compute_topic,
                                      instance_ref['host']),
@@ -561,7 +561,7 @@ class CloudController(object):
             instances = db.instance_get_all_by_reservation(context,
                                                            reservation_id)
         else:
-            instances = self.compute_api.get_instances(context)
+            instances = self.compute_api.get(context)
         for instance in instances:
             if not context.user.is_admin():
                 if instance['image_id'] == FLAGS.vpn_image_id:
@@ -664,7 +664,7 @@ class CloudController(object):
 
     def associate_address(self, context, instance_id, public_ip, **kwargs):
         instance_id = ec2_id_to_id(instance_id)
-        instance_ref = self.compute_api.get_instance(context, instance_id)
+        instance_ref = self.compute_api.get(context, instance_id)
         fixed_address = db.instance_get_fixed_address(context,
                                                       instance_ref['id'])
         floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
@@ -695,7 +695,7 @@ class CloudController(object):
 
     def run_instances(self, context, **kwargs):
         max_count = int(kwargs.get('max_count', 1))
-        instances = self.compute_api.create_instances(context,
+        instances = self.compute_api.create(context,
             instance_types.get_by_type(kwargs.get('instance_type', None)),
            kwargs['image_id'],
             min_count=int(kwargs.get('min_count', max_count)),
@@ -703,7 +703,7 @@ class CloudController(object):
             kernel_id=kwargs.get('kernel_id', None),
             ramdisk_id=kwargs.get('ramdisk_id'),
             display_name=kwargs.get('display_name'),
-            description=kwargs.get('display_description'),
+            display_description=kwargs.get('display_description'),
             key_name=kwargs.get('key_name'),
             user_data=kwargs.get('user_data'),
             security_group=kwargs.get('security_group'),
@@ -717,7 +717,7 @@ class CloudController(object):
         logging.debug("Going to start terminating instances")
         for ec2_id in instance_id:
             instance_id = ec2_id_to_id(ec2_id)
-            self.compute_api.delete_instance(context, instance_id)
+            self.compute_api.delete(context, instance_id)
         return True
 
     def reboot_instances(self, context, instance_id, **kwargs):
@@ -747,7 +747,7 @@ class CloudController(object):
                 changes[field] = kwargs[field]
         if changes:
             instance_id = ec2_id_to_id(ec2_id)
-            inst = self.compute_api.get_instance(context, instance_id)
+            inst = self.compute_api.get(context, instance_id)
             db.instance_update(context, inst['id'], kwargs)
         return True
 
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index cac5428b9365..30b2160bd21a 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -20,12 +20,12 @@
 import traceback
 
 from webob import exc
 
+from nova import compute
 from nova import exception
 from nova import wsgi
 from nova.api.openstack import common
 from nova.api.openstack import faults
 from nova.auth import manager as auth_manager
-from nova.compute import api as compute_api
 from nova.compute import instance_types
 from nova.compute import power_state
 import nova.api.openstack
@@ -81,7 +81,7 @@ class Controller(wsgi.Controller):
                             "status", "progress"]}}}
 
     def __init__(self):
-        self.compute_api = compute_api.ComputeAPI()
+        self.compute_api = compute.API()
         super(Controller, self).__init__()
 
     def index(self, req):
@@ -97,8 +97,7 @@ class Controller(wsgi.Controller):
         entity_maker - either _entity_detail or _entity_inst
         """
-        instance_list = self.compute_api.get_instances(
-            req.environ['nova.context'])
+        instance_list = self.compute_api.get(req.environ['nova.context'])
         limited_list = common.limited(instance_list, req)
         res = [entity_maker(inst)['server'] for inst in limited_list]
         return _entity_list(res)
@@ -106,8 +105,7 @@ class Controller(wsgi.Controller):
     def show(self, req, id):
         """ Returns server details by server id """
         try:
-            instance = self.compute_api.get_instance(
-                req.environ['nova.context'], int(id))
+            instance = self.compute_api.get(req.environ['nova.context'], id)
             return _entity_detail(instance)
         except exception.NotFound:
             return faults.Fault(exc.HTTPNotFound())
@@ -115,8 +113,7 @@ class Controller(wsgi.Controller):
     def delete(self, req, id):
         """ Destroys a server """
         try:
-            self.compute_api.delete_instance(req.environ['nova.context'],
-                                             int(id))
+            self.compute_api.delete(req.environ['nova.context'], id)
         except exception.NotFound:
             return faults.Fault(exc.HTTPNotFound())
         return exc.HTTPAccepted()
@@ -129,12 +126,12 @@ class Controller(wsgi.Controller):
         key_pair = auth_manager.AuthManager.get_key_pairs(
             req.environ['nova.context'])[0]
-        instances = self.compute_api.create_instances(
+        instances = self.compute_api.create(
             req.environ['nova.context'],
             instance_types.get_by_flavor_id(env['server']['flavorId']),
             env['server']['imageId'],
             display_name=env['server']['name'],
-            description=env['server']['name'],
+            display_description=env['server']['name'],
             key_name=key_pair['name'],
             key_data=key_pair['public_key'])
         return _entity_inst(instances[0])
@@ -152,9 +149,8 @@ class Controller(wsgi.Controller):
             update_dict['display_name'] = inst_dict['server']['name']
 
         try:
-            self.compute_api.update_instance(req.environ['nova.context'],
-                                             instance['id'],
-                                             **update_dict)
+            self.compute_api.update(req.environ['nova.context'], id,
+                                    **update_dict)
         except exception.NotFound:
             return faults.Fault(exc.HTTPNotFound())
         return exc.HTTPNoContent()
diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py
index a5df2ec1acc2..648c0ff6a9c6 100644
--- a/nova/compute/__init__.py
+++ b/nova/compute/__init__.py
@@ -17,16 +17,316 @@
 # under the License.
 
 """
-:mod:`nova.compute` -- Compute Nodes using LibVirt
-=====================================================
-
-.. automodule:: nova.compute
-   :platform: Unix
-   :synopsis: Thin wrapper around libvirt for VM mgmt.
-.. moduleauthor:: Jesse Andrews
-.. moduleauthor:: Devin Carlen
-.. moduleauthor:: Vishvananda Ishaya
-.. moduleauthor:: Joshua McKenty
-.. moduleauthor:: Manish Singh
-.. moduleauthor:: Andy Smith
+Handles all requests relating to instances (guest vms).
 """
+
+import datetime
+import logging
+import time
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import quota
+from nova import rpc
+from nova import utils
+from nova import volume
+from nova.compute import instance_types
+from nova.db import base
+
+FLAGS = flags.FLAGS
+
+
+def generate_default_hostname(instance_id):
+    """Default function to generate a hostname given an instance reference."""
+    return str(instance_id)
+
+
+class API(base.Base):
+    """API for interacting with the compute manager."""
+
+    def __init__(self, network_manager=None, image_service=None,
+                 volume_api=None, **kwargs):
+        if not network_manager:
+            network_manager = utils.import_object(FLAGS.network_manager)
+        self.network_manager = network_manager
+        if not image_service:
+            image_service = utils.import_object(FLAGS.image_service)
+        self.image_service = image_service
+        if not volume_api:
+            volume_api = volume.API()
+        self.volume_api = volume_api
+        super(API, self).__init__(**kwargs)
+
+    def get_network_topic(self, context, instance_id):
+        try:
+            instance = self.get(context, instance_id)
+        except exception.NotFound as e:
+            logging.warning("Instance %d was not found in get_network_topic",
+                            instance_id)
+            raise e
+
+        host = instance['host']
+        if not host:
+            raise exception.Error("Instance %d has no host" % instance_id)
+        topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
+        return rpc.call(context,
+                        topic,
+                        {"method": "get_network_topic", "args": {'fake': 1}})
+
+    def create(self, context, instance_type,
+               image_id, kernel_id=None, ramdisk_id=None,
+               min_count=1, max_count=1,
+               display_name='', display_description='',
+               key_name=None, key_data=None, security_group='default',
+               user_data=None, generate_hostname=generate_default_hostname):
+        """Create the number of instances requested if quota and
+        other arguments check out ok."""
+
+        num_instances = quota.allowed_instances(context, max_count,
+                                                instance_type)
+        if num_instances < min_count:
+            logging.warn("Quota exceeeded for %s, tried to run %s instances",
+                         context.project_id, min_count)
+            raise quota.QuotaError("Instance quota exceeded. You can only "
+                                   "run %s more instances of this type." %
+                                   num_instances, "InstanceLimitExceeded")
+
+        is_vpn = image_id == FLAGS.vpn_image_id
+        if not is_vpn:
+            image = self.image_service.show(context, image_id)
+            if kernel_id is None:
+                kernel_id = image.get('kernelId', None)
+            if ramdisk_id is None:
+                ramdisk_id = image.get('ramdiskId', None)
+            # No kernel and ramdisk for raw images
+            if kernel_id == str(FLAGS.null_kernel):
+                kernel_id = None
+                ramdisk_id = None
+                logging.debug("Creating a raw instance")
+            # Make sure we have access to kernel and ramdisk (if not raw)
+            if kernel_id:
+                self.image_service.show(context, kernel_id)
+            if ramdisk_id:
+                self.image_service.show(context, ramdisk_id)
+
+        if security_group is None:
+            security_group = ['default']
+        if not type(security_group) is list:
+            security_group = [security_group]
+
+        security_groups = []
+        self.ensure_default_security_group(context)
+        for security_group_name in security_group:
+            group = db.security_group_get_by_name(context,
+                                                  context.project_id,
+                                                  security_group_name)
+            security_groups.append(group['id'])
+
+        if key_data is None and key_name:
+            key_pair = db.key_pair_get(context, context.user_id, key_name)
+            key_data = key_pair['public_key']
+
+        type_data = instance_types.INSTANCE_TYPES[instance_type]
+        base_options = {
+            'reservation_id': utils.generate_uid('r'),
+            'image_id': image_id,
+            'kernel_id': kernel_id or '',
+            'ramdisk_id': ramdisk_id or '',
+            'state_description': 'scheduling',
+            'user_id': context.user_id,
+            'project_id': context.project_id,
+            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
+            'instance_type': instance_type,
+            'memory_mb': type_data['memory_mb'],
+            'vcpus': type_data['vcpus'],
+            'local_gb': type_data['local_gb'],
+            'display_name': display_name,
+            'display_description': display_description,
+            'user_data': user_data or '',
+            'key_name': key_name,
+            'key_data': key_data}
+
+        elevated = context.elevated()
+        instances = []
+        logging.debug(_("Going to run %s instances..."), num_instances)
+        for num in range(num_instances):
+            instance = dict(mac_address=utils.generate_mac(),
+                            launch_index=num,
+                            **base_options)
+            instance = self.db.instance_create(context, instance)
+            instance_id = instance['id']
+
+            elevated = context.elevated()
+            if not security_groups:
+                security_groups = []
+            for security_group_id in security_groups:
+                self.db.instance_add_security_group(elevated,
+                                                    instance_id,
+                                                    security_group_id)
+
+            # Set sane defaults if not specified
+            updates = dict(hostname=generate_hostname(instance_id))
+            if 'display_name' not in instance:
+                updates['display_name'] = "Server %s" % instance_id
+
+            instance = self.update(context, instance_id, **updates)
+            instances.append(instance)
+
+            logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
+                          context.project_id, context.user_id, instance_id)
+            rpc.cast(context,
+                     FLAGS.scheduler_topic,
+                     {"method": "run_instance",
+                      "args": {"topic": FLAGS.compute_topic,
+                               "instance_id": instance_id}})
+
+        return instances
+
+    def ensure_default_security_group(self, context):
+        """ Create security group for the security context if it
+        does not already exist
+
+        :param context: the security context
+
+        """
+        try:
+            db.security_group_get_by_name(context, context.project_id,
+                                          'default')
+        except exception.NotFound:
+            values = {'name': 'default',
+                      'description': 'default',
+                      'user_id': context.user_id,
+                      'project_id': context.project_id}
+            db.security_group_create(context, values)
+
+    def update(self, context, instance_id, **kwargs):
+        """Updates the instance in the datastore.
+
+        :param context: The security context
+        :param instance_id: ID of the instance to update
+        :param kwargs: All additional keyword args are treated
+                       as data fields of the instance to be
+                       updated
+
+        :retval None
+
+        """
+        return self.db.instance_update(context, instance_id, kwargs)
+
+    def delete(self, context, instance_id):
+        logging.debug("Going to try and terminate %s" % instance_id)
+        try:
+            instance = self.get(context, instance_id)
+        except exception.NotFound as e:
+            logging.warning(_("Instance %s was not found during terminate"),
+                            instance_id)
+            raise e
+
+        if (instance['state_description'] == 'terminating'):
+            logging.warning(_("Instance %s is already being terminated"),
+                            instance_id)
+            return
+
+        self.update(context,
+                    instance['id'],
+                    state_description='terminating',
+                    state=0,
+                    terminated_at=datetime.datetime.utcnow())
+
+        host = instance['host']
+        if host:
+            rpc.cast(context,
+                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                     {"method": "terminate_instance",
+                      "args": {"instance_id": instance['id']}})
+        else:
+            self.db.instance_destroy(context, instance['id'])
+
+    def get(self, context, instance_id=None, project_id=None):
+        """Get one or more instances, possibly filtered by project
+        ID or user ID. If there is no filter and the context is
+        an admin, it will retreive all instances in the system."""
+        if instance_id is not None:
+            return self.db.instance_get_by_id(context, instance_id)
+        if project_id or not context.is_admin:
+            if not context.project:
+                return self.db.instance_get_all_by_user(context,
+                                                        context.user_id)
+            if project_id is None:
+                project_id = context.project_id
+            return self.db.instance_get_all_by_project(context,
+                                                       project_id)
+        return self.db.instance_get_all(context)
+
+    def reboot(self, context, instance_id):
+        """Reboot the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "reboot_instance",
+                  "args": {"instance_id": instance['id']}})
+
+    def pause(self, context, instance_id):
+        """Pause the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "pause_instance",
+                  "args": {"instance_id": instance['id']}})
+
+    def unpause(self, context, instance_id):
+        """Unpause the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "unpause_instance",
+                  "args": {"instance_id": instance['id']}})
+
+    def rescue(self, context, instance_id):
+        """Rescue the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "rescue_instance",
+                  "args": {"instance_id": instance['id']}})
+
+    def unrescue(self, context, instance_id):
+        """Unrescue the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "unrescue_instance",
+                  "args": {"instance_id": instance['id']}})
+
+    def attach_volume(self, context, instance_id, volume_id, device):
+        if not re.match("^/dev/[a-z]d[a-z]+$", device):
+            raise exception.ApiError(_("Invalid device specified: %s. "
+                                     "Example device: /dev/vdb") % device)
+        self.volume_api.check_attach(context, volume_id)
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "attach_volume",
+                  "args": {"volume_id": volume_id,
+                           "instance_id": instance_id,
+                           "mountpoint": device}})
+
+    def detach_volume(self, context, volume_id):
+        instance = self.db.volume_get_instance(context.elevated(), volume_id)
+        if not instance:
+            raise exception.ApiError(_("Volume isn't attached to anything!"))
+        self.volume_api.check_detach(context, volume_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "detach_volume",
+                  "args": {"instance_id": instance['id'],
+                           "volume_id": volume_id}})
+        return instance
diff --git a/nova/compute/api.py b/nova/compute/api.py
deleted file mode 100644
index 870fcdbe4da1..000000000000
--- a/nova/compute/api.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Handles all API requests relating to instances (guest vms).
-""" - -import datetime -import logging -import time - -from nova import db -from nova import exception -from nova import flags -from nova import quota -from nova import rpc -from nova import utils -from nova import volume -from nova.compute import instance_types -from nova.db import base - -FLAGS = flags.FLAGS - - -def generate_default_hostname(instance_id): - """Default function to generate a hostname given an instance reference.""" - return str(instance_id) - - -class ComputeAPI(base.Base): - """API for interacting with the compute manager.""" - - def __init__(self, network_manager=None, image_service=None, - volume_api=None, **kwargs): - if not network_manager: - network_manager = utils.import_object(FLAGS.network_manager) - self.network_manager = network_manager - if not image_service: - image_service = utils.import_object(FLAGS.image_service) - self.image_service = image_service - if not volume_api: - volume_api = volume.API() - self.volume_api = volume_api - super(ComputeAPI, self).__init__(**kwargs) - - def get_network_topic(self, context, instance_id): - try: - instance = self.db.instance_get_by_id(context, instance_id) - except exception.NotFound as e: - logging.warning("Instance %d was not found in get_network_topic", - instance_id) - raise e - - host = instance['host'] - if not host: - raise exception.Error("Instance %d has no host" % instance_id) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) - return rpc.call(context, - topic, - {"method": "get_network_topic", "args": {'fake': 1}}) - - def create_instances(self, context, instance_type, image_id, min_count=1, - max_count=1, kernel_id=None, ramdisk_id=None, - display_name='', description='', key_name=None, - key_data=None, security_group='default', - user_data=None, - generate_hostname=generate_default_hostname): - """Create the number of instances requested if quote and - other arguments check out ok.""" - - num_instances = quota.allowed_instances(context, max_count, - instance_type) - if num_instances < min_count: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_count) - raise quota.QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." 
% - num_instances, "InstanceLimitExceeded") - - is_vpn = image_id == FLAGS.vpn_image_id - if not is_vpn: - image = self.image_service.show(context, image_id) - if kernel_id is None: - kernel_id = image.get('kernelId', None) - if ramdisk_id is None: - ramdisk_id = image.get('ramdiskId', None) - #No kernel and ramdisk for raw images - if kernel_id == str(FLAGS.null_kernel): - kernel_id = None - ramdisk_id = None - logging.debug("Creating a raw instance") - # Make sure we have access to kernel and ramdisk (if not raw) - if kernel_id: - self.image_service.show(context, kernel_id) - if ramdisk_id: - self.image_service.show(context, ramdisk_id) - - if security_group is None: - security_group = ['default'] - if not type(security_group) is list: - security_group = [security_group] - - security_groups = [] - self.ensure_default_security_group(context) - for security_group_name in security_group: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) - security_groups.append(group['id']) - - if key_data is None and key_name: - key_pair = db.key_pair_get(context, context.user_id, key_name) - key_data = key_pair['public_key'] - - type_data = instance_types.INSTANCE_TYPES[instance_type] - base_options = { - 'reservation_id': utils.generate_uid('r'), - 'image_id': image_id, - 'kernel_id': kernel_id or '', - 'ramdisk_id': ramdisk_id or '', - 'state_description': 'scheduling', - 'user_id': context.user_id, - 'project_id': context.project_id, - 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), - 'instance_type': instance_type, - 'memory_mb': type_data['memory_mb'], - 'vcpus': type_data['vcpus'], - 'local_gb': type_data['local_gb'], - 'display_name': display_name, - 'display_description': description, - 'user_data': user_data or '', - 'key_name': key_name, - 'key_data': key_data} - - elevated = context.elevated() - instances = [] - logging.debug(_("Going to run %s instances..."), num_instances) - for num in range(num_instances): - instance = dict(mac_address=utils.generate_mac(), - launch_index=num, - **base_options) - instance = self.db.instance_create(context, instance) - instance_id = instance['id'] - - elevated = context.elevated() - if not security_groups: - security_groups = [] - for security_group_id in security_groups: - self.db.instance_add_security_group(elevated, - instance_id, - security_group_id) - - # Set sane defaults if not specified - updates = dict(hostname=generate_hostname(instance_id)) - if 'display_name' not in instance: - updates['display_name'] = "Server %s" % instance_id - - instance = self.update_instance(context, instance_id, **updates) - instances.append(instance) - - logging.debug(_("Casting to scheduler for %s/%s's instance %s"), - context.project_id, context.user_id, instance_id) - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "run_instance", - "args": {"topic": FLAGS.compute_topic, - "instance_id": instance_id}}) - - return instances - - def ensure_default_security_group(self, context): - """ Create security group for the security context if it - does not already exist - - :param context: the security context - - """ - try: - db.security_group_get_by_name(context, context.project_id, - 'default') - except exception.NotFound: - values = {'name': 'default', - 'description': 'default', - 'user_id': context.user_id, - 'project_id': context.project_id} - db.security_group_create(context, values) - - def update_instance(self, context, instance_id, **kwargs): - """Updates the instance in the datastore. 
- - :param context: The security context - :param instance_id: ID of the instance to update - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - updated - - :retval None - - """ - return self.db.instance_update(context, instance_id, kwargs) - - def delete_instance(self, context, instance_id): - logging.debug("Going to try and terminate %s" % instance_id) - try: - instance = self.db.instance_get_by_id(context, instance_id) - except exception.NotFound as e: - logging.warning(_("Instance %s was not found during terminate"), - instance_id) - raise e - - if (instance['state_description'] == 'terminating'): - logging.warning(_("Instance %s is already being terminated"), - instance_id) - return - - self.update_instance(context, - instance['id'], - state_description='terminating', - state=0, - terminated_at=datetime.datetime.utcnow()) - - host = instance['host'] - if host: - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "terminate_instance", - "args": {"instance_id": instance['id']}}) - else: - self.db.instance_destroy(context, instance['id']) - - def get_instances(self, context, project_id=None): - """Get all instances, possibly filtered by project ID or - user ID. If there is no filter and the context is an admin, - it will retreive all instances in the system.""" - if project_id or not context.is_admin: - if not context.project: - return self.db.instance_get_all_by_user(context, - context.user_id) - if project_id is None: - project_id = context.project_id - return self.db.instance_get_all_by_project(context, project_id) - return self.db.instance_get_all(context) - - def get_instance(self, context, instance_id): - return self.db.instance_get_by_id(context, instance_id) - - def reboot(self, context, instance_id): - """Reboot the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "reboot_instance", - "args": {"instance_id": instance['id']}}) - - def pause(self, context, instance_id): - """Pause the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "pause_instance", - "args": {"instance_id": instance['id']}}) - - def unpause(self, context, instance_id): - """Unpause the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unpause_instance", - "args": {"instance_id": instance['id']}}) - - def rescue(self, context, instance_id): - """Rescue the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "rescue_instance", - "args": {"instance_id": instance['id']}}) - - def unrescue(self, context, instance_id): - """Unrescue the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unrescue_instance", - "args": {"instance_id": instance['id']}}) - - def attach_volume(self, context, instance_id, volume_id, device): - if not re.match("^/dev/[a-z]d[a-z]+$", device): - raise 
exception.ApiError(_("Invalid device specified: %s. " - "Example device: /dev/vdb") % device) - self.volume_api.check_attach(context, volume_id) - instance = self.get_instance(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "attach_volume", - "args": {"volume_id": volume_id, - "instance_id": instance_id, - "mountpoint": device}}) - - def detach_volume(self, context, volume_id): - instance = self.db.volume_get_instance(context.elevated(), volume_id) - if not instance: - raise exception.ApiError(_("Volume isn't attached to anything!")) - self.volume_api.check_detach(context, volume_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "detach_volume", - "args": {"instance_id": instance['id'], - "volume_id": volume_id}}) - return instance diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 173772364ee0..1cdfb86f19e2 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -86,10 +86,8 @@ class ServersTest(unittest.TestCase): instance_address) self.stubs.Set(nova.db.api, 'instance_get_floating_address', instance_address) - self.stubs.Set(nova.compute.api.ComputeAPI, 'pause', - fake_compute_api) - self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause', - fake_compute_api) + self.stubs.Set(nova.compute.API, 'pause', fake_compute_api) + self.stubs.Set(nova.compute.API, 'unpause', fake_compute_api) self.allow_admin = FLAGS.allow_admin_api def tearDown(self): @@ -100,7 +98,7 @@ class ServersTest(unittest.TestCase): req = webob.Request.blank('/v1.0/servers/1') res = req.get_response(nova.api.API('os')) res_dict = json.loads(res.body) - self.assertEqual(res_dict['server']['id'], 1) + self.assertEqual(res_dict['server']['id'], '1') self.assertEqual(res_dict['server']['name'], 'server1') def test_get_server_list(self): diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 348bb3351d86..f7067b98a859 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -22,6 +22,7 @@ Tests For Compute import datetime import logging +from nova import compute from nova import context from nova import db from nova import exception @@ -29,7 +30,6 @@ from nova import flags from nova import test from nova import utils from nova.auth import manager -from nova.compute import api as compute_api FLAGS = flags.FLAGS @@ -44,7 +44,7 @@ class ComputeTestCase(test.TestCase): stub_network=True, network_manager='nova.network.manager.FlatManager') self.compute = utils.import_object(FLAGS.compute_manager) - self.compute_api = compute_api.ComputeAPI() + self.compute_api = compute.API() self.manager = manager.AuthManager() self.user = self.manager.create_user('fake', 'fake', 'fake') self.project = self.manager.create_project('fake', 'fake', 'fake') @@ -72,7 +72,7 @@ class ComputeTestCase(test.TestCase): """Verify that an instance cannot be created without a display_name.""" cases = [dict(), dict(display_name=None)] for instance in cases: - ref = self.compute_api.create_instances(self.context, + ref = self.compute_api.create(self.context, FLAGS.default_instance_type, None, **instance) try: self.assertNotEqual(ref[0].display_name, None) @@ -80,13 +80,13 @@ class ComputeTestCase(test.TestCase): db.instance_destroy(self.context, ref[0]['id']) def test_create_instance_associates_security_groups(self): - """Make sure create_instances associates 
security groups""" + """Make sure create associates security groups""" values = {'name': 'default', 'description': 'default', 'user_id': self.user.id, 'project_id': self.project.id} group = db.security_group_create(self.context, values) - ref = self.compute_api.create_instances(self.context, + ref = self.compute_api.create(self.context, FLAGS.default_instance_type, None, security_group=['default']) try: self.assertEqual(len(ref[0]['security_groups']), 1) From 750a0c9b413ad3912d522355332cffadd9667d0c Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 29 Dec 2010 21:41:42 -0800 Subject: [PATCH 062/147] Converted a few more ec2 calls to use compute api. --- nova/api/ec2/cloud.py | 13 ++++--------- nova/compute/__init__.py | 12 +++++++++--- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index cc58f3cfe718..5ffc301be3f8 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -138,7 +138,7 @@ class CloudController(object): def get_metadata(self, address): ctxt = context.get_admin_context() - instance_ref = db.fixed_ip_get_instance(ctxt, address) + instance_ref = self.compute_api.get(ctxt, fixed_ip=address) if instance_ref is None: return None mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) @@ -555,13 +555,9 @@ class CloudController(object): assert len(i) == 1 return i[0] - def _format_instances(self, context, reservation_id=None): + def _format_instances(self, context, **kwargs): reservations = {} - if reservation_id: - instances = db.instance_get_all_by_reservation(context, - reservation_id) - else: - instances = self.compute_api.get(context) + instances = self.compute_api.get(context, **kwargs) for instance in instances: if not context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: @@ -747,8 +743,7 @@ class CloudController(object): changes[field] = kwargs[field] if changes: instance_id = ec2_id_to_id(ec2_id) - inst = self.compute_api.get(context, instance_id) - db.instance_update(context, inst['id'], kwargs) + self.compute_api.update(context, instance_id, **kwargs) return True def describe_images(self, context, image_id=None, **kwargs): diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py index 648c0ff6a9c6..fd1cdcd1beb0 100644 --- a/nova/compute/__init__.py +++ b/nova/compute/__init__.py @@ -243,12 +243,18 @@ class API(base.Base): else: self.db.instance_destroy(context, instance['id']) - def get(self, context, instance_id=None, project_id=None): - """Get one or more instances, possibly filtered by project - ID or user ID. If there is no filter and the context is + def get(self, context, instance_id=None, project_id=None, + reservation_id=None, fixed_ip=None): + """Get one or more instances, possibly filtered by one of the + given parameters. If there is no filter and the context is an admin, it will retreive all instances in the system.""" if instance_id is not None: return self.db.instance_get_by_id(context, instance_id) + if reservation_id is not None: + return self.db.instance_get_all_by_reservation(context, + reservation_id) + if fixed_ip is not None: + return self.db.fixed_ip_get_instance(context, fixed_ip) if project_id or not context.is_admin: if not context.project: return self.db.instance_get_all_by_user(context, From 5b0450d5a145814baee9d5e05eab6fcc872dab9a Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Thu, 30 Dec 2010 01:19:38 -0500 Subject: [PATCH 063/147] Clean up how we determine IP to bind to. 
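The cleanup in the diff below boils down to a two-level fallback, sketched here assuming a plain dict-like config mapping: a per-API "<api>_host" key wins, then the global "host" key, then the wildcard address.

    # Illustrative only; mirrors the logic in the diff that follows.
    ip = config.get('host', '0.0.0.0')        # global default bind address
    host = config.get('%s_host' % api, ip)    # per-API override, if present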
--- bin/nova-api-paste | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/nova-api-paste b/bin/nova-api-paste index 9bcb983727e5..6ee833a18b96 100755 --- a/bin/nova-api-paste +++ b/bin/nova-api-paste @@ -84,12 +84,12 @@ def run_app(paste_config_file): config = load_configuration(paste_config_file) LOG.debug(_("Configuration: %r"), config) server = wsgi.Server() - ip = config.get('host', None) + ip = config.get('host', '0.0.0.0') for api in API_ENDPOINTS: port = config.get("%s_port" % api, None) if not port: continue - host = config.get("%s_host" % api, None) or ip or '0.0.0.0' + host = config.get("%s_host" % api, ip) launch_api(configfile, api, server, port, host) LOG.debug(_("All api servers launched, now waiting")) server.wait() From aa73649911132598a96bbd908670dc3dadf50e91 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Thu, 30 Dec 2010 01:25:29 -0500 Subject: [PATCH 064/147] remove cloudpipe from paste config --- etc/nova-api.conf | 4 ---- 1 file changed, 4 deletions(-) diff --git a/etc/nova-api.conf b/etc/nova-api.conf index 84f2aee0cd30..c5dd0aaec450 100644 --- a/etc/nova-api.conf +++ b/etc/nova-api.conf @@ -13,7 +13,6 @@ openstack_address = 0.0.0.0 use = egg:Paste#urlmap /: ec2versions /services: ec2api -/cloudpipe: cloudpipe /latest: ec2metadata /200: ec2metadata /1.0: ec2metadata @@ -39,9 +38,6 @@ paste.app_factory = nova.api.ec2:versions_factory [app:ec2metadata] paste.app_factory = nova.api.ec2.metadatarequesthandler:metadata_factory -[app:cloudpipe] -paste.app_factory = nova.api.cloudpipe:cloudpipe_factory - ############# # Openstack # ############# From ba31a61ae6348bffbd70d5875f12a540d49e8885 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Thu, 30 Dec 2010 02:20:31 -0500 Subject: [PATCH 065/147] pep 8 --- nova/api/ec2/__init__.py | 7 ++++++- nova/api/ec2/metadatarequesthandler.py | 1 + nova/api/openstack/__init__.py | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 7bec7f81ee91..aa3bfaeb4520 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -313,7 +313,6 @@ class Executor(wsgi.Application): resp.body = str(result) return resp - def _error(self, req, code, message): logging.error("%s: %s", code, message) resp = webob.Response() @@ -325,6 +324,7 @@ class Executor(wsgi.Application): '?' 
% (code, message)) return resp + class Versions(wsgi.Application): @webob.dec.wsgify @@ -344,23 +344,28 @@ class Versions(wsgi.Application): ] return ''.join('%s\n' % v for v in versions) + def authenticate_factory(global_args, **local_args): def authenticator(app): return Authenticate(app) return authenticator + def router_factory(global_args, **local_args): def router(app): return Router(app) return router + def authorizer_factory(global_args, **local_args): def authorizer(app): return Authorizer(app) return authorizer + def executor_factory(global_args, **local_args): return Executor() + def versions_factory(global_args, **local_args): return Versions() diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 9d1594ae7857..a57a6698a5d3 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -80,5 +80,6 @@ class MetadataRequestHandler(object): raise webob.exc.HTTPNotFound() return self.print_data(data) + def metadata_factory(global_args, **local_args): return MetadataRequestHandler() diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 5dd092a1fc15..dc77948768b0 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -129,5 +129,6 @@ class Versions(wsgi.Application): def router_factory(global_cof, **local_conf): return APIRouter() + def versions_factory(global_conf, **local_conf): return Versions() From 26cdebcf742c5fea533c9947ef7278948a772e29 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Thu, 30 Dec 2010 12:10:31 -0500 Subject: [PATCH 066/147] Mention Authors and .mailmap files in Developer Guide --- doc/source/community.rst | 3 ++- doc/source/devref/development.environment.rst | 7 ++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/doc/source/community.rst b/doc/source/community.rst index bfb93414ce1e..01ff5f055aa2 100644 --- a/doc/source/community.rst +++ b/doc/source/community.rst @@ -35,7 +35,8 @@ Contributing Code To contribute code, sign up for a Launchpad account and sign a contributor license agreement, available on the `OpenStack Wiki `_. Once the CLA is signed you -can contribute code through the Bazaar version control system which is related to your Launchpad account. +can contribute code through the Bazaar version control system which is related to your Launchpad +account. See the :doc:`devref/development.environment` page to get started. #openstack on Freenode IRC Network ---------------------------------- diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst index 6344c5382ced..9e189f04ef94 100644 --- a/doc/source/devref/development.environment.rst +++ b/doc/source/devref/development.environment.rst @@ -88,7 +88,12 @@ Here's how to get the latest code:: source .nova_venv/bin/activate ./run_tests.sh -And then you can do cleaning work or hack hack hack with a branched named cleaning:: +Then you can do cleaning work or hack hack hack with a branched named cleaning. + +Contributing Your Work +---------------------- + +Once your work is complete you may wish to contribute it to the project. 
Add your name and email address to the `Authors` file (as well as the `.mailmap` file if you use multiple email addresses), and push the branch to Launchpad:: bzr push lp:~launchpaduserid/nova/cleaning From 384f39966b6387fcc9466f0b42bcc0ffaf49ba4c Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Thu, 30 Dec 2010 12:25:14 -0500 Subject: [PATCH 067/147] Note that contributors are required to be listed in Authors file before work can be merged into trunk --- doc/source/devref/development.environment.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst index 9e189f04ef94..3de2e228748b 100644 --- a/doc/source/devref/development.environment.rst +++ b/doc/source/devref/development.environment.rst @@ -93,7 +93,7 @@ Then you can do cleaning work or hack hack hack with a branched named cleaning. Contributing Your Work ---------------------- -Once your work is complete you may wish to contribute it to the project. Add your name and email address to the `Authors` file (as well as the `.mailmap` file if you use multiple email addresses), and push the branch to Launchpad:: +Once your work is complete you may wish to contribute it to the project. Add your name and email address to the `Authors` file, and also to the `.mailmap` file if you use multiple email addresses. Your contributions can not be merged into trunk unless you are listed in the Authors file. Now, push the branch to Launchpad:: bzr push lp:~launchpaduserid/nova/cleaning From f5611d9fdeaed5e2c16cf4b31a85db2ba4f5b30d Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Thu, 30 Dec 2010 14:23:52 -0500 Subject: [PATCH 068/147] Several documentation corrections --- doc/source/adminguide/multi.node.install.rst | 23 +++++----- doc/source/devref/addmethod.openstackapi.rst | 8 ++-- doc/source/devref/rabbit.rst | 4 +- doc/source/nova.concepts.rst | 14 +++--- doc/source/quickstart.rst | 47 +++++++------------- 5 files changed, 39 insertions(+), 57 deletions(-) diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst index fcb76c5e5383..40490e71e08d 100644 --- a/doc/source/adminguide/multi.node.install.rst +++ b/doc/source/adminguide/multi.node.install.rst @@ -77,21 +77,20 @@ Nova development has consolidated all .conf files to nova.conf as of November 20 #. 
These need to be defined in the nova.conf configuration file:: - --sql_connection=mysql://root:nova@$CC_ADDR/nova # location of nova sql db - --s3_host=$CC_ADDR # This is where nova is hosting the objectstore service, which - # will contain the VM images and buckets - --rabbit_host=$CC_ADDR # This is where the rabbit AMQP messaging service is hosted - --cc_host=$CC_ADDR # This is where the the nova-api service lives - --verbose # Optional but very helpful during initial setup - --ec2_url=http://$CC_ADDR:8773/services/Cloud - --network_manager=nova.network.manager.FlatManager # simple, no-vlan networking type - - --fixed_range= # ip network to use for VM guests, ex 192.168.2.64/26 - --network_size=<# of addrs> # number of ip addrs to use for VM guests, ex 64 + --sql_connection=mysql://root:nova@$CC_ADDR/nova # location of nova sql db + --s3_host=$CC_ADDR # This is where Nova is hosting the objectstore service, which + # will contain the VM images and buckets + --rabbit_host=$CC_ADDR # This is where the rabbit AMQP messaging service is hosted + --cc_host=$CC_ADDR # This is where the the nova-api service lives + --verbose # Optional but very helpful during initial setup + --ec2_url=http://$CC_ADDR:8773/services/Cloud + --network_manager=nova.network.manager.FlatManager # simple, no-vlan networking type + --fixed_range= # ip network to use for VM guests, ex 192.168.2.64/26 + --network_size=<# of addrs> # number of ip addrs to use for VM guests, ex 64 #. Create a nova group:: - sudo addgroup nova + sudo addgroup nova The Nova config file should have its owner set to root:nova, and mode set to 0640, since they contain your MySQL server's root password. diff --git a/doc/source/devref/addmethod.openstackapi.rst b/doc/source/devref/addmethod.openstackapi.rst index 6484613df2b0..4baa46e20568 100644 --- a/doc/source/devref/addmethod.openstackapi.rst +++ b/doc/source/devref/addmethod.openstackapi.rst @@ -24,7 +24,7 @@ Routing To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ fore more information. -URLs are mapped to "action" methods on "controller" classes in nova/api/openstack/__init__/ApiRouter.__init__ . +URLs are mapped to "action" methods on "controller" classes in `nova/api/openstack/__init__/ApiRouter.__init__` . See http://routes.groovie.org/manual.html for all syntax, but you'll probably just need these two: - mapper.connect() lets you map a single URL to a single action on a controller. @@ -33,9 +33,9 @@ See http://routes.groovie.org/manual.html for all syntax, but you'll probably ju Controllers and actions ----------------------- -Controllers live in nova/api/openstack, and inherit from nova.wsgi.Controller. +Controllers live in `nova/api/openstack`, and inherit from nova.wsgi.Controller. -See nova/api/openstack/servers.py for an example. +See `nova/api/openstack/servers.py` for an example. Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc. @@ -46,7 +46,7 @@ Actions return a dictionary, and wsgi.Controller serializes that to JSON or XML If you define a new controller, you'll need to define a _serialization_metadata attribute on the class, to tell wsgi.Controller how to convert your dictionary to XML. It needs to know the singular form of any list tag (e.g. 
list contains tags) and which dictionary keys are to be XML attributes as opposed to subtags (e.g. instead of 4). -See nova/api/openstack/servers.py for an example. +See `nova/api/openstack/servers.py` for an example. Faults ------ diff --git a/doc/source/devref/rabbit.rst b/doc/source/devref/rabbit.rst index 423284a55c47..ae0bac49dfad 100644 --- a/doc/source/devref/rabbit.rst +++ b/doc/source/devref/rabbit.rst @@ -71,8 +71,8 @@ RPC Casts The diagram below the message flow during an rp.cast operation: - 1. a Topic Publisher is instantiated to send the message request to the queuing system. - 2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task. + 1. A Topic Publisher is instantiated to send the message request to the queuing system. + 2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task. .. image:: /images/rabbit/flow2.png :width: 60% diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst index 18368546bdc2..fb3969a433e6 100644 --- a/doc/source/nova.concepts.rst +++ b/doc/source/nova.concepts.rst @@ -75,7 +75,7 @@ Nova is built on a shared-nothing, messaging-based architecture. All of the majo To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.) - .. note:: The database schema is available on the `OpenStack Wiki _`. + .. note:: The database schema is available on the `OpenStack Wiki `_. Concept: Storage ---------------- @@ -129,12 +129,12 @@ The simplest networking mode. Each instance receives a fixed ip from the pool. Flat DHCP Mode ~~~~~~~~~~~~~~ -This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover. +This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode Nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover. VLAN DHCP Mode ~~~~~~~~~~~~~~ -This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe `) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here `. +This is the default networking mode and supports the most features. 
For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, Nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe `) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here `. The following diagram illustrates how the communication that occurs between the vlan (the dashed box) and the public internet (represented by the two clouds) @@ -154,16 +154,16 @@ Concept: nova-manage -------------------- The nova-manage command is used to perform many essential functions for -administration and ongoing maintenance of nova, such as user creation, +administration and ongoing maintenance of Nova, such as user creation, vpn management, and much more. -See doc:`nova.manage` in the Administration Guide for more details. +See :doc:`nova.manage` in the Administration Guide for more details. Concept: Flags -------------- -Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth. +Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each Nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth. Concept: Plugins @@ -181,7 +181,7 @@ Concept: Plugins Concept: IPC/RPC ---------------- -Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/. +Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various Nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/. 
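As an illustrative sketch (not normative API documentation, and assuming that context, FLAGS, topic, and instance_id are in scope), the two messaging primitives look like this in code::

    from nova import rpc

    # rpc.call blocks until the remote worker returns a value.
    public_ip = rpc.call(context, FLAGS.network_topic,
                         {"method": "allocate_floating_ip",
                          "args": {"project_id": context.project_id}})

    # rpc.cast is fire-and-forget and returns immediately.
    rpc.cast(context, topic,
             {"method": "reboot_instance",
              "args": {"instance_id": instance_id}})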
Concept: Fakes -------------- diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst index ae2b64d8a51d..fa5d96738ca2 100644 --- a/doc/source/quickstart.rst +++ b/doc/source/quickstart.rst @@ -59,38 +59,21 @@ different configurations (though for more complex setups you should see * HOST_IP * Default: address of first interface from the ifconfig command * Values: 127.0.0.1, or any other valid address - -TEST -~~~~ - -**Default**: 0 -**Values**: 1, run tests after checkout and initial setup - -USE_MYSQL -~~~~~~~~~ - -**Default**: 0, use sqlite3 -**Values**: 1, use mysql instead of sqlite3 - -MYSQL_PASS -~~~~~~~~~~ - -Only useful if $USE_MYSQL=1. - -**Default**: nova -**Values**: value of root password for mysql - -USE_LDAP -~~~~~~~~ - -**Default**: 0, use :mod:`nova.auth.dbdriver` -**Values**: 1, use :mod:`nova.auth.ldapdriver` - -LIBVIRT_TYPE -~~~~~~~~~~~~ - -**Default**: qemu -**Values**: uml, kvm +* TEST + * Default: 0 + * Values: 1, run tests after checkout and initial setup +* USE_MYSQL + * Default: 0, use sqlite3 + * Values: 1, use mysql instead of sqlite3 +* MYSQL_PASS (Only useful if $USE_MYSQL=1) + * Default: nova + * Values: value of root password for mysql +* USE_LDAP + * Default: 0, use :mod:`nova.auth.dbdriver` + * Values: 1, use :mod:`nova.auth.ldapdriver` +* LIBVIRT_TYPE + * Default: qemu + * Values: uml, kvm Usage ----- From 71d715d422a746f4951877d8ff76e0ace355281e Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 30 Dec 2010 15:43:41 -0800 Subject: [PATCH 069/147] Moved network operation code in ec2 api into a generic network API class. Removed a circular dependency with compute/quota. --- nova/api/ec2/cloud.py | 59 ++++-------------------------- nova/compute/__init__.py | 20 ++++++---- nova/network/__init__.py | 79 ++++++++++++++++++++++++++++++++++------ nova/quota.py | 8 ++-- nova/tests/test_quota.py | 6 +-- 5 files changed, 93 insertions(+), 79 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 5ffc301be3f8..d68c598e9090 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -36,7 +36,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags -from nova import quota +from nova import network from nova import rpc from nova import utils from nova import volume @@ -87,11 +87,10 @@ class CloudController(object): sent to the other nodes. """ def __init__(self): - self.network_manager = utils.import_object(FLAGS.network_manager) self.image_service = utils.import_object(FLAGS.image_service) + self.network_api = network.API() self.volume_api = volume.API() - self.compute_api = compute.API(self.network_manager, - self.image_service, + self.compute_api = compute.API(self.image_service, self.network_api, self.volume_api) self.setup() @@ -629,64 +628,20 @@ class CloudController(object): return {'addressesSet': addresses} def allocate_address(self, context, **kwargs): - # check quota - if quota.allowed_floating_ips(context, 1) < 1: - logging.warn(_("Quota exceeeded for %s, tried to allocate " - "address"), - context.project_id) - raise quota.QuotaError(_("Address quota exceeded. You cannot " - "allocate any more addresses")) - # NOTE(vish): We don't know which network host should get the ip - # when we allocate, so just send it to any one. This - # will probably need to move into a network supervisor - # at some point. 
- public_ip = rpc.call(context, - FLAGS.network_topic, - {"method": "allocate_floating_ip", - "args": {"project_id": context.project_id}}) + public_ip = self.network_api.allocate_floating_ip(context) return {'addressSet': [{'publicIp': public_ip}]} def release_address(self, context, public_ip, **kwargs): - floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) - # NOTE(vish): We don't know which network host should get the ip - # when we deallocate, so just send it to any one. This - # will probably need to move into a network supervisor - # at some point. - rpc.cast(context, - FLAGS.network_topic, - {"method": "deallocate_floating_ip", - "args": {"floating_address": floating_ip_ref['address']}}) + self.network_api.release_floating_ip(context, public_ip) return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): instance_id = ec2_id_to_id(instance_id) - instance_ref = self.compute_api.get(context, instance_id) - fixed_address = db.instance_get_fixed_address(context, - instance_ref['id']) - floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) - # NOTE(vish): Perhaps we should just pass this on to compute and - # let compute communicate with network. - network_topic = self.compute_api.get_network_topic(context, - instance_id) - rpc.cast(context, - network_topic, - {"method": "associate_floating_ip", - "args": {"floating_address": floating_ip_ref['address'], - "fixed_address": fixed_address}}) + self.compute_api.associate_floating_ip(context, instance_id, public_ip) return {'associateResponse': ["Address associated."]} def disassociate_address(self, context, public_ip, **kwargs): - floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) - # NOTE(vish): Get the topic from the host name of the network of - # the associated fixed ip. 
- if not floating_ip_ref.get('fixed_ip'): - raise exception.ApiError('Address is not associated.') - host = floating_ip_ref['fixed_ip']['network']['host'] - topic = db.queue_get_for(context, FLAGS.network_topic, host) - rpc.cast(context, - topic, - {"method": "disassociate_floating_ip", - "args": {"floating_address": floating_ip_ref['address']}}) + self.network_api.disassociate_floating_ip(context, public_ip) return {'disassociateResponse': ["Address disassociated."]} def run_instances(self, context, **kwargs): diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py index fd1cdcd1beb0..14c242641ab1 100644 --- a/nova/compute/__init__.py +++ b/nova/compute/__init__.py @@ -27,6 +27,7 @@ import time from nova import db from nova import exception from nova import flags +from nova import network from nova import quota from nova import rpc from nova import utils @@ -45,14 +46,14 @@ def generate_default_hostname(instance_id): class API(base.Base): """API for interacting with the compute manager.""" - def __init__(self, network_manager=None, image_service=None, - volume_api=None, **kwargs): - if not network_manager: - network_manager = utils.import_object(FLAGS.network_manager) - self.network_manager = network_manager + def __init__(self, image_service=None, network_api=None, volume_api=None, + **kwargs): if not image_service: image_service = utils.import_object(FLAGS.image_service) self.image_service = image_service + if not network_api: + network_api = network.API() + self.network_api = network_api if not volume_api: volume_api = volume.API() self.volume_api = volume_api @@ -83,8 +84,9 @@ class API(base.Base): """Create the number of instances requested if quota and other arguments check out ok.""" + type_data = instance_types.INSTANCE_TYPES[instance_type] num_instances = quota.allowed_instances(context, max_count, - instance_type) + type_data['vcpus']) if num_instances < min_count: logging.warn("Quota exceeeded for %s, tried to run %s instances", context.project_id, min_count) @@ -127,7 +129,6 @@ class API(base.Base): key_pair = db.key_pair_get(context, context.user_id, key_name) key_data = key_pair['public_key'] - type_data = instance_types.INSTANCE_TYPES[instance_type] base_options = { 'reservation_id': utils.generate_uid('r'), 'image_id': image_id, @@ -336,3 +337,8 @@ class API(base.Base): "args": {"instance_id": instance['id'], "volume_id": volume_id}}) return instance + + def associate_floating_ip(self, context, instance_id, address): + instance = self.get(context, instance_id) + self.network_api.associate_floating_ip(context, address, + instance['fixed_ip']) diff --git a/nova/network/__init__.py b/nova/network/__init__.py index dcc54db094a8..cbd912047cab 100644 --- a/nova/network/__init__.py +++ b/nova/network/__init__.py @@ -17,16 +17,71 @@ # under the License. """ -:mod:`nova.network` -- Network Nodes -===================================================== - -.. automodule:: nova.network - :platform: Unix - :synopsis: Network is responsible for managing networking -.. moduleauthor:: Jesse Andrews -.. moduleauthor:: Devin Carlen -.. moduleauthor:: Vishvananda Ishaya -.. moduleauthor:: Joshua McKenty -.. moduleauthor:: Manish Singh -.. moduleauthor:: Andy Smith +Handles all requests relating to instances (guest vms). 
""" + +import logging + +from nova import db +from nova import flags +from nova import quota +from nova import rpc +from nova.db import base + +FLAGS = flags.FLAGS + + +class API(base.Base): + """API for interacting with the network manager.""" + + def allocate_floating_ip(self, context): + if quota.allowed_floating_ips(context, 1) < 1: + logging.warn(_("Quota exceeeded for %s, tried to allocate " + "address"), + context.project_id) + raise quota.QuotaError(_("Address quota exceeded. You cannot " + "allocate any more addresses")) + # NOTE(vish): We don't know which network host should get the ip + # when we allocate, so just send it to any one. This + # will probably need to move into a network supervisor + # at some point. + return rpc.call(context, + FLAGS.network_topic, + {"method": "allocate_floating_ip", + "args": {"project_id": context.project_id}}) + + def release_floating_ip(self, context, address): + floating_ip = self.db.floating_ip_get_by_address(context, address) + # NOTE(vish): We don't know which network host should get the ip + # when we deallocate, so just send it to any one. This + # will probably need to move into a network supervisor + # at some point. + rpc.cast(context, + FLAGS.network_topic, + {"method": "deallocate_floating_ip", + "args": {"floating_address": floating_ip['address']}}) + + def associate_floating_ip(self, context, floating_ip, fixed_ip): + if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode): + fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip) + floating_ip = self.db.floating_ip_get_by_address(context, floating_ip) + # NOTE(vish): Perhaps we should just pass this on to compute and + # let compute communicate with network. + host = fixed_ip['network']['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.network_topic, host), + {"method": "associate_floating_ip", + "args": {"floating_address": floating_ip['address'], + "fixed_address": fixed_ip['address']}}) + + def disassociate_floating_ip(self, context, address): + floating_ip = self.db.floating_ip_get_by_address(context, address) + if not floating_ip.get('fixed_ip'): + raise exception.ApiError('Address is not associated.') + # NOTE(vish): Get the topic from the host name of the network of + # the associated fixed ip. 
+ host = floating_ip['fixed_ip']['network']['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.network_topic, host), + {"method": "disassociate_floating_ip", + "args": {"floating_address": floating_ip['address']}}) diff --git a/nova/quota.py b/nova/quota.py index f6ca9f77c733..12dbfcf43862 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -22,7 +22,6 @@ Quotas for instances, volumes, and floating ips from nova import db from nova import exception from nova import flags -from nova.compute import instance_types FLAGS = flags.FLAGS @@ -54,7 +53,7 @@ def get_quota(context, project_id): return rval -def allowed_instances(context, num_instances, instance_type): +def allowed_instances(context, num_instances, cores_per_instance): """Check quota and return min(num_instances, allowed_instances)""" project_id = context.project_id context = context.elevated() @@ -63,10 +62,9 @@ def allowed_instances(context, num_instances, instance_type): quota = get_quota(context, project_id) allowed_instances = quota['instances'] - used_instances allowed_cores = quota['cores'] - used_cores - type_cores = instance_types.INSTANCE_TYPES[instance_type]['vcpus'] - num_cores = num_instances * type_cores + num_cores = num_instances * cores_per_instance allowed_instances = min(allowed_instances, - int(allowed_cores // type_cores)) + int(allowed_cores // cores_per_instance)) return min(num_instances, allowed_instances) diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 8cf2a5e54652..c158187746b3 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -78,14 +78,14 @@ class QuotaTestCase(test.TestCase): def test_quota_overrides(self): """Make sure overriding a projects quotas works""" - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + num_instances = quota.allowed_instances(self.context, 100, 1) self.assertEqual(num_instances, 2) db.quota_create(self.context, {'project_id': self.project.id, 'instances': 10}) - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + num_instances = quota.allowed_instances(self.context, 100, 1) self.assertEqual(num_instances, 4) db.quota_update(self.context, self.project.id, {'cores': 100}) - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + num_instances = quota.allowed_instances(self.context, 100, 1) self.assertEqual(num_instances, 10) db.quota_destroy(self.context, self.project.id) From 66f8e28fb4f4a898803ac6a38974a9fa804612d0 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 30 Dec 2010 18:12:57 -0600 Subject: [PATCH 070/147] completed the basic xenstore read/write/delete functionality --- nova/virt/xenapi/vmops.py | 222 ++++++++++++++++-- nova/virt/xenapi_conn.py | 28 ++- .../xenapi/etc/xapi.d/plugins/xenstore.py | 158 +++++++++++++ 3 files changed, 378 insertions(+), 30 deletions(-) create mode 100755 plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0517209e2086..5c9455db3c28 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -18,6 +19,7 @@ Management class for VM-related functions (spawn, reboot, etc). 
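(A worked example for the quota change in the preceding patch, mirroring test_quota_overrides: with a project quota of 10 instances and 100 cores, none in use, a request for 100 one-core instances is capped as

    # min(requested, instance headroom, core headroom // cores per instance)
    allowed = min(100, min(10 - 0, (100 - 0) // 1))   # == 10

so ten instances are allowed.)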
""" +import json import logging from nova import db @@ -36,7 +38,6 @@ class VMOps(object): """ Management class for VM-related tasks """ - def __init__(self, session): self.XenAPI = session.get_imported_xenapi() self._session = session @@ -120,6 +121,20 @@ class VMOps(object): timer.f = _wait_for_boot return timer.start(interval=0.5, now=True) + def _get_vm_opaque_ref(self, instance_or_vm): + """Refactored out the common code of many methods that receive either + a vm name or a vm instance, and want a vm instance in return. + """ + try: + instance_name = instance_or_vm.name + vm = VMHelper.lookup(self._session, instance_name) + except AttributeError: + # A vm opaque ref was passed + vm = instance_or_vm + if vm is None: + raise Exception(_('Instance not present %s') % instance_name) + return vm + def snapshot(self, instance, name): """ Create snapshot from a running VM instance @@ -168,11 +183,7 @@ class VMOps(object): def reboot(self, instance): """Reboot VM instance""" - instance_name = instance.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is None: - raise exception.NotFound(_('instance not' - ' found %s') % instance_name) + vm = self._get_vm_opaque_ref(instance) task = self._session.call_xenapi('Async.VM.clean_reboot', vm) self._session.wait_for_task(instance.id, task) @@ -215,27 +226,19 @@ class VMOps(object): ret = None try: ret = self._session.wait_for_task(instance_id, task) - except XenAPI.Failure, exc: + except self.XenAPI.Failure, exc: logging.warn(exc) callback(ret) def pause(self, instance, callback): """Pause VM instance""" - instance_name = instance.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is None: - raise exception.NotFound(_('Instance not' - ' found %s') % instance_name) + vm = self._get_vm_opaque_ref(instance) task = self._session.call_xenapi('Async.VM.pause', vm) self._wait_with_callback(instance.id, task, callback) def unpause(self, instance, callback): """Unpause VM instance""" - instance_name = instance.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is None: - raise exception.NotFound(_('Instance not' - ' found %s') % instance_name) + vm = self._get_vm_opaque_ref(instance) task = self._session.call_xenapi('Async.VM.unpause', vm) self._wait_with_callback(instance.id, task, callback) @@ -270,9 +273,7 @@ class VMOps(object): def get_diagnostics(self, instance_id): """Return data about VM diagnostics""" - vm = VMHelper.lookup(self._session, instance_id) - if vm is None: - raise exception.NotFound(_("Instance not found %s") % instance_id) + vm = self._get_vm_opaque_ref(instance) rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_diagnostics(self._session, rec) @@ -280,3 +281,184 @@ class VMOps(object): """Return snapshot of console""" # TODO: implement this to fix pylint! return 'FAKE CONSOLE OUTPUT of instance' + + def list_from_xenstore(self, vm, path): + """Runs the xenstore-ls command to get a listing of all records + from 'path' downward. Returns a dict with the sub-paths as keys, + and the value stored in those paths as values. If nothing is + found at that path, returns None. + """ + ret = self._make_xenstore_call('list_records', vm, path) + try: + return json.loads(ret) + except ValueError: + # Not a valid JSON value + return ret + + def read_from_xenstore(self, vm, path): + """Returns the value stored in the xenstore record for the given VM + at the specified location. A XenAPIPlugin.PluginError will be raised + if any error is encountered in the read process. 
+ """ + try: + ret = self._make_xenstore_call('read_record', vm, path, + {'ignore_missing_path': 'True'}) + except self.XenAPI.Failure, e: + print "XENERR", e + return None + except StandardError, e: + print "ERR", type(e), e, e.msg + return None + try: + return json.loads(ret) + except ValueError: + # Not a JSON object + if ret == "None": + # Can't marshall None over RPC calls. + return None + return ret + + def write_to_xenstore(self, vm, path, value): + """Writes the passed value to the xenstore record for the given VM + at the specified location. A XenAPIPlugin.PluginError will be raised + if any error is encountered in the write process. + """ + return self._make_xenstore_call('write_record', vm, path, {'value': json.dumps(value)}) + + def clear_xenstore(self, vm, path): + """Deletes the VM's xenstore record for the specified path. + If there is no such record, the request is ignored. + """ + self._make_xenstore_call('delete_record', vm, path) + + def _make_xenstore_call(self, method, vm, path, addl_args={}): + """Handles calls to the xenstore xenapi plugin.""" + return self._make_plugin_call('xenstore.py', method=method, vm=vm, path=path, + addl_args=addl_args) + + def _make_plugin_call(self, plugin, method, vm, path, addl_args={}): + """Abstracts out the process of calling a method of a xenapi plugin. + Any errors raised by the plugin will in turn raise a RuntimeError here. + """ + vm = self._get_vm_opaque_ref(vm) + rec = self._session.get_xenapi().VM.get_record(vm) + args = {'dom_id': rec['domid'], 'path': path} + args.update(addl_args) + # If the 'testing_mode' attribute is set, add that to the args. + if getattr(self, 'testing_mode', False): + args['testing_mode'] = 'true' + try: + task = self._session.async_call_plugin(plugin, method, args) + ret = self._session.wait_for_task(0, task) + except self.XenAPI.Failure, e: + raise RuntimeError("%s" % e.details[-1]) + return ret + + def add_to_xenstore(self, vm, path, key, value): + """Adds the passed key/value pair to the xenstore record for + the given VM at the specified location. A XenAPIPlugin.PluginError + will be raised if any error is encountered in the write process. + """ + current = self.read_from_xenstore(vm, path) + if not current: + # Nothing at that location + current = {key: value} + else: + current[key] = value + self.write_to_xenstore(vm, path, current) + + def remove_from_xenstore(self, vm, path, key_or_keys): + """Takes either a single key or a list of keys and removes + them from the xenstoreirecord data for the given VM. + If the key doesn't exist, the request is ignored. + """ + current = self.list_from_xenstore(vm, path) + if not current: + return + if isinstance(key_or_keys, basestring): + keys = [key_or_keys] + else: + keys = key_or_keys + keys.sort(lambda x,y: cmp(y.count('/'), x.count('/'))) + for key in keys: + if path: + keypath = "%s/%s" % (path, key) + else: + keypath = key + self._make_xenstore_call('delete_record', vm, keypath) + + + ######################################################################## + ###### The following methods interact with the xenstore parameter + ###### record, not the live xenstore. They were created before I + ###### knew the difference, and are left in here in case they prove + ###### to be useful. They all have '_param' added to their method + ###### names to distinguish them. 
(dabo) + ######################################################################## + def read_partial_from_param_xenstore(self, instance_or_vm, key_prefix): + """Returns a dict of all the keys in the xenstore parameter record + for the given instance that begin with the key_prefix. + """ + data = self.read_from_param_xenstore(instance_or_vm) + badkeys = [k for k in data.keys() + if not k.startswith(key_prefix)] + for badkey in badkeys: + del data[badkey] + return data + + def read_from_param_xenstore(self, instance_or_vm, keys=None): + """Returns the xenstore parameter record data for the specified VM instance + as a dict. Accepts an optional key or list of keys; if a value for 'keys' + is passed, the returned dict is filtered to only return the values + for those keys. + """ + vm = self._get_vm_opaque_ref(instance_or_vm) + data = self._session.call_xenapi_request('VM.get_xenstore_data', (vm, )) + ret = {} + if keys is None: + keys = data.keys() + elif isinstance(keys, basestring): + keys = [keys] + for key in keys: + raw = data.get(key) + if raw: + ret[key] = json.loads(raw) + else: + ret[key] = raw + return ret + + def add_to_param_xenstore(self, instance_or_vm, key, val): + """Takes a key/value pair and adds it to the xenstore parameter + record for the given vm instance. If the key exists in xenstore, + it is overwritten""" + vm = self._get_vm_opaque_ref(instance_or_vm) + self.remove_from_param_xenstore(instance_or_vm, key) + jsonval = json.dumps(val) + self._session.call_xenapi_request('VM.add_to_xenstore_data', + (vm, key, jsonval)) + + def write_to_param_xenstore(self, instance_or_vm, mapping): + """Takes a dict and writes each key/value pair to the xenstore + parameter record for the given vm instance. Any existing data for + those keys is overwritten. + """ + for k, v in mapping.iteritems(): + self.add_to_param_xenstore(instance_or_vm, k, v) + + def remove_from_param_xenstore(self, instance_or_vm, key_or_keys): + """Takes either a single key or a list of keys and removes + them from the xenstore parameter record data for the given VM. + If the key doesn't exist, the request is ignored. + """ + vm = self._get_vm_opaque_ref(instance_or_vm) + if isinstance(key_or_keys, basestring): + keys = [key_or_keys] + else: + keys = key_or_keys + for key in keys: + self._session.call_xenapi_request('VM.remove_from_xenstore_data', (vm, key)) + + def clear_param_xenstore(self, instance_or_vm): + """Removes all data from the xenstore parameter record for this VM.""" + self.write_to_param_xenstore(instance_or_vm, {}) + ######################################################################## diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index d12e78c67f09..863461ee7820 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -19,15 +20,15 @@ A connection to XenServer or Xen Cloud Platform. The concurrency model for this class is as follows: -All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator -deferredToThread). They are remote calls, and so may hang for the usual -reasons. They should not be allowed to block the reactor thread. +All XenAPI calls are on a green thread (using eventlet's "tpool" +thread pool). 
They are remote calls, and so may hang for the usual +reasons. All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async -(using XenAPI.VM.async_start etc). These return a task, which can then be -polled for completion. Polling is handled using reactor.callLater. +(using XenAPI.VM.async_start etc). These return a task, which can then be +polled for completion. -This combination of techniques means that we don't block the reactor thread at +This combination of techniques means that we don't block the main thread at all, and at the same time we don't hold lots of threads waiting for long-running operations. @@ -81,7 +82,7 @@ flags.DEFINE_string('xenapi_connection_password', flags.DEFINE_float('xenapi_task_poll_interval', 0.5, 'The interval used for polling of remote tasks ' - '(Async.VM.start, etc). Used only if ' + '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval', 5.0, @@ -213,6 +214,14 @@ class XenAPISession(object): f = f.__getattr__(m) return tpool.execute(f, *args) + def call_xenapi_request(self, method, *args): + """Some interactions with dom0, such as interacting with xenstore's + param record, require using the xenapi_request method of the session + object. This wraps that call on a background thread. + """ + f = self._session.xenapi_request + return tpool.execute(f, method, *args) + def async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread.""" return tpool.execute(self._unwrap_plugin_exceptions, @@ -222,7 +231,6 @@ class XenAPISession(object): def wait_for_task(self, id, task): """Return the result of the given task. The task is polled until it completes.""" - done = event.Event() loop = utils.LoopingCall(self._poll_task, id, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) @@ -235,7 +243,7 @@ class XenAPISession(object): return self.XenAPI.Session(url) def _poll_task(self, id, task, done): - """Poll the given XenAPI task, and fire the given Deferred if we + """Poll the given XenAPI task, and fire the given action if we get a result.""" try: name = self._session.xenapi.task.get_name_label(task) @@ -290,7 +298,7 @@ class XenAPISession(object): def _parse_xmlrpc_value(val): - """Parse the given value as if it were an XML-RPC value. This is + """Parse the given value as if it were an XML-RPC value. This is sometimes used as the format for the task.result field.""" if not val: return val diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py new file mode 100755 index 000000000000..3b9d65b856f7 --- /dev/null +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python + +# Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
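One pattern from the xenapi_conn.py changes above is worth spelling out: every blocking XenAPI invocation is pushed onto eventlet's native thread pool so green threads keep running. Reduced to its core, with blocking_fn standing in for any session method:

    from eventlet import tpool

    # Run a blocking function on a real OS thread; the calling green
    # thread is resumed once the result is available.
    result = tpool.execute(blocking_fn, arg1, arg2)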
+
+#
+# XenAPI plugin for reading/writing information to xenstore
+#
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+import subprocess
+
+import XenAPIPlugin
+
+from pluginlib_nova import *
+configure_logging("xenstore")
+
+
+def read_record(self, arg_dict):
+    """Returns the value stored at the given path for the given dom_id.
+    These must be encoded as key/value pairs in arg_dict. You can
+    optionally include a key 'ignore_missing_path'; if this is present
+    and boolean True, attempting to read a non-existent path will return
+    the string 'None' instead of raising an exception.
+    """
+    cmd = "xenstore-read /local/domain/%(dom_id)s/%(path)s" % arg_dict
+    try:
+        return _run_command(cmd).rstrip("\n")
+    except PluginError, e:
+        if arg_dict.get("ignore_missing_path", False):
+            cmd = "xenstore-exists /local/domain/%(dom_id)s/%(path)s; echo $?" % arg_dict
+            ret = _run_command(cmd).strip()
+            # If the path exists, the cmd should return "0"
+            if ret != "0":
+                # No such path, so ignore the error
+                return "None"
+        # Either we shouldn't ignore path errors, or another
+        # error was hit. Re-raise.
+        raise
+
+def write_record(self, arg_dict):
+    """Writes to xenstore at the specified path. If there is information
+    already stored in that location, it is overwritten. As in read_record,
+    the dom_id and path must be specified in the arg_dict; additionally,
+    you must specify a 'value' key, whose value must be a string. Typically,
+    you can json-ify more complex values and store the json output.
+    """
+    cmd = "xenstore-write /local/domain/%(dom_id)s/%(path)s '%(value)s'" % arg_dict
+    _run_command(cmd)
+    return arg_dict["value"]
+
+def list_records(self, arg_dict):
+    """Returns all the stored data at or below the given path for the
+    given dom_id. The data is returned as a json-ified dict, with the
+    path as the key and the stored value as the value. If the path
+    doesn't exist, an empty dict is returned.
+    """
+    cmd = "xenstore-ls /local/domain/%(dom_id)s/%(path)s" % arg_dict
+    cmd = cmd.rstrip("/")
+    try:
+        recs = _run_command(cmd)
+    except PluginError, e:
+        if "No such file or directory" in "%s" % e:
+            # Path doesn't exist.
+            return json.dumps({})
+        raise
+    base_path = arg_dict["path"]
+    paths = _paths_from_ls(recs)
+    ret = {}
+    for path in paths:
+        if base_path:
+            arg_dict["path"] = "%s/%s" % (base_path, path)
+        else:
+            arg_dict["path"] = path
+        rec = read_record(self, arg_dict)
+        try:
+            val = json.loads(rec)
+        except ValueError:
+            val = rec
+        ret[path] = val
+    return json.dumps(ret)
+
+def delete_record(self, arg_dict):
+    """Just like it sounds: it removes the record for the specified
+    VM and the specified path from xenstore.
+    """
+    cmd = "xenstore-rm /local/domain/%(dom_id)s/%(path)s" % arg_dict
+    return _run_command(cmd)
+
+def _paths_from_ls(recs):
+    """The xenstore-ls command returns a listing that isn't terribly
+    useful. This method cleans that up into a list of the full
+    sub-paths found beneath the given path, one entry per record.
+ """ + ret = {} + last_nm = "" + level = 0 + path = [] + ret = [] + for ln in recs.splitlines(): + nm, val = ln.rstrip().split(" = ") + barename = nm.lstrip() + this_level = len(nm) - len(barename) + if this_level == 0: + ret.append(barename) + level = 0 + path = [] + elif this_level == level: + # child of same parent + ret.append("%s/%s" % ("/".join(path), barename)) + elif this_level > level: + path.append(last_nm) + ret.append("%s/%s" % ("/".join(path), barename)) + level = this_level + elif this_level < level: + path = path[:this_level] + ret.append("%s/%s" % ("/".join(path), barename)) + level = this_level + last_nm = barename + return ret + +def _run_command(cmd): + """Abstracts out the basics of issuing system commands. If the command + returns anything in stderr, a PluginError is raised with that information. + Otherwise, the output from stdout is returned. + """ + pipe = subprocess.PIPE + proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe, stderr=pipe, close_fds=True) + proc.wait() + err = proc.stderr.read() + if err: + raise PluginError(err) + return proc.stdout.read() + + +if __name__ == "__main__": + XenAPIPlugin.dispatch( + {"read_record": read_record, + "write_record": write_record, + "list_records": list_records, + "delete_record": delete_record}) From 7f27f62e41fd655049574975bd3bf6c5b00e9ccf Mon Sep 17 00:00:00 2001 From: Ryan Lucio Date: Thu, 30 Dec 2010 16:21:11 -0800 Subject: [PATCH 071/147] Converted the pool_recycle setting to be a flag with a default of 3600 seconds --- nova/db/sqlalchemy/session.py | 4 +++- nova/flags.py | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 5f31743cd143..ca23e93d83b4 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -36,7 +36,9 @@ def get_session(autocommit=True, expire_on_commit=False): global _MAKER if not _MAKER: if not _ENGINE: - _ENGINE = create_engine(FLAGS.sql_connection, pool_recycle=3600, echo=False) + _ENGINE = create_engine(FLAGS.sql_connection, + pool_recycle=FLAGS.sql_idle_timeout, + echo=False) _MAKER = (sessionmaker(bind=_ENGINE, autocommit=autocommit, expire_on_commit=expire_on_commit)) diff --git a/nova/flags.py b/nova/flags.py index 76a98d35a859..338dcbf460ac 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -260,6 +260,9 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), DEFINE_string('sql_connection', 'sqlite:///$state_path/nova.sqlite', 'connection string for sql database') +DEFINE_string('sql_idle_timeout', + '3600', + 'timeout for idle sql database connections') DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager', 'Manager for compute') From b5f5ec40bbc6b898ac73444e9a0f0372c92fc12a Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 30 Dec 2010 18:25:30 -0600 Subject: [PATCH 072/147] removed some debugging code left in previous push. 
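The prints being removed below were paired with an over-broad except StandardError
clause; with both gone, only XenAPI.Failure is handled, and it simply maps to a
None result. A minimal sketch of the read path after this cleanup, assuming the
class and helper names shown in the surrounding vmops.py diffs (the stubbed
_make_xenstore_call stands in for the real plugin dispatch):

    import json


    class VMOpsSketch(object):
        """Illustrative fragment of nova.virt.xenapi.vmops.VMOps."""

        def __init__(self, xenapi_module):
            # Module providing the Failure exception class, as in the
            # real driver's self.XenAPI.
            self.XenAPI = xenapi_module

        def _make_xenstore_call(self, method, vm, path, addl_args={}):
            # Stand-in for the real xenstore plugin dispatch.
            raise NotImplementedError()

        def read_from_xenstore(self, vm, path):
            """Return the JSON-decoded value stored at 'path' for this
            VM, or None when the XenAPI call fails."""
            try:
                ret = self._make_xenstore_call('read_record', vm, path,
                                               {'ignore_missing_path': 'True'})
            except self.XenAPI.Failure:
                # Swallow the failure; callers treat None as "not present".
                return None
            try:
                return json.loads(ret)
            except ValueError:
                # The plugin handed back a plain, non-JSON string.
                return ret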
--- nova/virt/xenapi/vmops.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 5c9455db3c28..c46a881a894a 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -304,10 +304,6 @@ class VMOps(object): ret = self._make_xenstore_call('read_record', vm, path, {'ignore_missing_path': 'True'}) except self.XenAPI.Failure, e: - print "XENERR", e - return None - except StandardError, e: - print "ERR", type(e), e, e.msg return None try: return json.loads(ret) From a89730699db2add131028ccda031ead3526b2c4f Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Thu, 30 Dec 2010 17:50:32 -0800 Subject: [PATCH 073/147] Removed dependencies on flags.py from adminclient --- nova/adminclient.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index 6ae9f0c0f409..b2609c8c40e2 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -23,12 +23,9 @@ import base64 import boto import httplib -from nova import flags from boto.ec2.regioninfo import RegionInfo -FLAGS = flags.FLAGS - DEFAULT_CLC_URL = 'http://127.0.0.1:8773' DEFAULT_REGION = 'nova' @@ -199,8 +196,8 @@ class NovaAdminClient(object): self, clc_url=DEFAULT_CLC_URL, region=DEFAULT_REGION, - access_key=FLAGS.aws_access_key_id, - secret_key=FLAGS.aws_secret_access_key, + access_key=None, + secret_key=None, **kwargs): parts = self.split_clc_url(clc_url) From b097d5a247f95fac180c3270cb1f613edfa46523 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 31 Dec 2010 04:44:45 -0600 Subject: [PATCH 074/147] Corrected the sloppy import in the xenstore plugin that was copied from other plugins. --- .../xenserver/xenapi/etc/xapi.d/plugins/xenstore.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py index 3b9d65b856f7..e0a1251707ed 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py @@ -29,8 +29,8 @@ import subprocess import XenAPIPlugin -from pluginlib_nova import * -configure_logging("xenstore") +import pluginlib_nova as pluginlib +pluginlib.configure_logging("xenstore") def read_record(self, arg_dict): @@ -43,7 +43,7 @@ def read_record(self, arg_dict): cmd = "xenstore-read /local/domain/%(dom_id)s/%(path)s" % arg_dict try: return _run_command(cmd).rstrip("\n") - except PluginError, e: + except pluginlib.PluginError, e: if arg_dict.get("ignore_missing_path", False): cmd = "xenstore-exists /local/domain/%(dom_id)s/%(path)s; echo $?" % arg_dict ret = _run_command(cmd).strip() @@ -76,7 +76,7 @@ def list_records(self, arg_dict): cmd = cmd.rstrip("/") try: recs = _run_command(cmd) - except PluginError, e: + except pluginlib.PluginError, e: if "No such file or directory" in "%s" % e: # Path doesn't exist. 
return json.dumps({}) @@ -146,7 +146,7 @@ def _run_command(cmd): proc.wait() err = proc.stderr.read() if err: - raise PluginError(err) + raise pluginlib.PluginError(err) return proc.stdout.read() From f0e4bed6f4bf4ab3835ecd3e54eb9d7ac21dd5f1 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 31 Dec 2010 07:04:40 -0600 Subject: [PATCH 075/147] fixed pep8 issues --- nova/virt/xenapi/vmops.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index c46a881a894a..20f5e0215c33 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -285,7 +285,7 @@ class VMOps(object): def list_from_xenstore(self, vm, path): """Runs the xenstore-ls command to get a listing of all records from 'path' downward. Returns a dict with the sub-paths as keys, - and the value stored in those paths as values. If nothing is + and the value stored in those paths as values. If nothing is found at that path, returns None. """ ret = self._make_xenstore_call('list_records', vm, path) @@ -319,7 +319,8 @@ class VMOps(object): at the specified location. A XenAPIPlugin.PluginError will be raised if any error is encountered in the write process. """ - return self._make_xenstore_call('write_record', vm, path, {'value': json.dumps(value)}) + return self._make_xenstore_call('write_record', vm, path, + {'value': json.dumps(value)}) def clear_xenstore(self, vm, path): """Deletes the VM's xenstore record for the specified path. @@ -329,8 +330,8 @@ class VMOps(object): def _make_xenstore_call(self, method, vm, path, addl_args={}): """Handles calls to the xenstore xenapi plugin.""" - return self._make_plugin_call('xenstore.py', method=method, vm=vm, path=path, - addl_args=addl_args) + return self._make_plugin_call('xenstore.py', method=method, vm=vm, + path=path, addl_args=addl_args) def _make_plugin_call(self, plugin, method, vm, path, addl_args={}): """Abstracts out the process of calling a method of a xenapi plugin. @@ -375,7 +376,7 @@ class VMOps(object): keys = [key_or_keys] else: keys = key_or_keys - keys.sort(lambda x,y: cmp(y.count('/'), x.count('/'))) + keys.sort(lambda x, y: cmp(y.count('/'), x.count('/'))) for key in keys: if path: keypath = "%s/%s" % (path, key) @@ -383,9 +384,8 @@ class VMOps(object): keypath = key self._make_xenstore_call('delete_record', vm, keypath) - ######################################################################## - ###### The following methods interact with the xenstore parameter + ###### The following methods interact with the xenstore parameter ###### record, not the live xenstore. They were created before I ###### knew the difference, and are left in here in case they prove ###### to be useful. They all have '_param' added to their method @@ -403,13 +403,14 @@ class VMOps(object): return data def read_from_param_xenstore(self, instance_or_vm, keys=None): - """Returns the xenstore parameter record data for the specified VM instance - as a dict. Accepts an optional key or list of keys; if a value for 'keys' - is passed, the returned dict is filtered to only return the values - for those keys. + """Returns the xenstore parameter record data for the specified VM + instance as a dict. Accepts an optional key or list of keys; if a + value for 'keys' is passed, the returned dict is filtered to only + return the values for those keys. 
""" vm = self._get_vm_opaque_ref(instance_or_vm) - data = self._session.call_xenapi_request('VM.get_xenstore_data', (vm, )) + data = self._session.call_xenapi_request('VM.get_xenstore_data', + (vm, )) ret = {} if keys is None: keys = data.keys() @@ -452,7 +453,8 @@ class VMOps(object): else: keys = key_or_keys for key in keys: - self._session.call_xenapi_request('VM.remove_from_xenstore_data', (vm, key)) + self._session.call_xenapi_request('VM.remove_from_xenstore_data', + (vm, key)) def clear_param_xenstore(self, instance_or_vm): """Removes all data from the xenstore parameter record for this VM.""" From 933531440767f0696e14a73069448d0c3f5ae24e Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 31 Dec 2010 08:53:01 -0600 Subject: [PATCH 076/147] Added OpenStack's copyright to the xenstore plugin. --- plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py index e0a1251707ed..71ed82d6270f 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py @@ -1,6 +1,7 @@ #!/usr/bin/env python # Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 OpenStack LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. From 090a2aa8c5b1e833617adfa375605158fa4e191d Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 3 Jan 2011 10:08:32 +0100 Subject: [PATCH 077/147] Remove references to nova-core/ppa and openstack/ppa PPA's. --- doc/source/adminguide/distros/ubuntu.10.04.rst | 13 ++++++------- doc/source/adminguide/getting.started.rst | 6 +++--- doc/source/adminguide/multi.node.install.rst | 4 ++-- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/doc/source/adminguide/distros/ubuntu.10.04.rst b/doc/source/adminguide/distros/ubuntu.10.04.rst index ce368fab8ddc..9d856458a62e 100644 --- a/doc/source/adminguide/distros/ubuntu.10.04.rst +++ b/doc/source/adminguide/distros/ubuntu.10.04.rst @@ -16,13 +16,13 @@ Here's a script you can use to install (and then run) Nova on Ubuntu or Debian ( Step 2: Install dependencies ---------------------------- -Nova requires rabbitmq for messaging and optionally you can use redis for storing state, so install these first. +Nova requires rabbitmq for messaging, so install that first. *Note:* You must have sudo installed to run these commands as shown here. :: - sudo apt-get install rabbitmq-server redis-server + sudo apt-get install rabbitmq-server You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue. 
@@ -31,11 +31,10 @@ If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gfl
 
 ::
 
-  sudo apt-get install python-twisted
-
-  sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 95C71FE2
-  sudo sh -c 'echo "deb http://ppa.launchpad.net/openstack/openstack-ppa/ubuntu lucid main" > /etc/apt/sources.list.d/openstackppa.list'
-  sudo apt-get update && sudo apt-get install python-gflags
+  sudo apt-get install python-software-properties
+  sudo add-apt-repository ppa:nova-core/trunk
+  sudo apt-get update
+  sudo apt-get install python-twisted python-gflags
 
 Once you've done this, continue at Step 3 here: :doc:`../single.node.install`
 
diff --git a/doc/source/adminguide/getting.started.rst b/doc/source/adminguide/getting.started.rst
index 3e8073606920..0cadeb45eb02 100644
--- a/doc/source/adminguide/getting.started.rst
+++ b/doc/source/adminguide/getting.started.rst
@@ -76,11 +76,11 @@ External unix tools that are required:
 * aoetools and vblade-persist (if you use aoe-volumes)
 
 Nova uses cutting-edge versions of many packages. There are ubuntu packages in
-the nova-core ppa. You can use add this ppa to your sources list on an ubuntu
-machine with the following commands::
+the nova-core trunk ppa. You can add this ppa to your sources list on an
+ubuntu machine with the following commands::
 
     sudo apt-get install -y python-software-properties
-    sudo add-apt-repository ppa:nova-core/ppa
+    sudo add-apt-repository ppa:nova-core/trunk
 
 Recommended
 -----------
diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst
index fcb76c5e5383..7c4a69ccda56 100644
--- a/doc/source/adminguide/multi.node.install.rst
+++ b/doc/source/adminguide/multi.node.install.rst
@@ -46,12 +46,12 @@ Assumptions
 Step 1 Use apt-get to get the latest code
 -----------------------------------------
 
-1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/ppa.
+1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk.
 
 ::
 
     sudo apt-get install python-software-properties
-    sudo add-apt-repository ppa:nova-core/ppa
+    sudo add-apt-repository ppa:nova-core/trunk
 
 2. Run update.
 
From 2b26cbfd8dc5f03026dfb03eef9cd3a443edab86 Mon Sep 17 00:00:00 2001
From: Soren Hansen
Date: Mon, 3 Jan 2011 11:39:02 +0100
Subject: [PATCH 078/147] Fix a merge artifact.
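Context for the fix below: the merge had reintroduced inline, per-project
nwfilter construction; the corrected path references a single shared
'nova-project' child filter and re-syncs each security group as it is
encountered. A rough, hypothetical sketch of the resulting filter-name
assembly (names follow the diff, but this is an illustration, not the
exact driver code in nova/virt/libvirt_conn.py):

    def instance_filter_children(security_group_ids,
                                 allow_project_net_traffic):
        """Assemble the child filter names for an instance's nwfilter."""
        children = []
        if allow_project_net_traffic:
            # One shared, static filter; no per-project CIDR lookup here.
            children.append('nova-project')
        for group_id in security_group_ids:
            children.append('nova-secgroup-%s' % group_id)
        return children


    print instance_filter_children([7], True)
    # -> ['nova-project', 'nova-secgroup-7']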
--- nova/virt/libvirt_conn.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 30ac11bdd59e..4923537930ea 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -927,18 +927,11 @@ class NWFilterFirewall(FirewallDriver): ctxt = context.get_admin_context() if FLAGS.allow_project_net_traffic: - network_ref = db.project_get_network(ctxt, instance['project_id']) - net, mask = _get_net_and_mask(network_ref['cidr']) - - project_filter = self.nova_project_filter(instance['project_id'], - net, mask) - self._define_filter(project_filter) - - instance_secgroup_filter_children += [('nova-project-%s' % - instance['project_id'])] + instance_filter_children += ['nova-project'] for security_group in db.security_group_get_by_instance(ctxt, instance['id']): + self.refresh_security_group_rules(security_group['id']) instance_secgroup_filter_children += [('nova-secgroup-%s' % From 4102913e33093e984aa5cbaae6666bb4c6d4312b Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 3 Jan 2011 11:39:31 +0100 Subject: [PATCH 079/147] Adjust test suite to the split between base firewall rules provided by nwfilter and the security group filtering. --- nova/tests/test_virt.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 0e2644eff775..2f418bd5d644 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -259,7 +259,8 @@ class IptablesFirewallTestCase(test.TestCase): 'project_id': 'fake'}) ip = '10.11.12.13' - network_ref = self.network.get_network(self.context) + network_ref = db.project_get_network(self.context, + 'fake') fixed_ip = {'address': ip, 'network_id': network_ref['id']} @@ -428,6 +429,7 @@ class NWFilterTestCase(test.TestCase): self.security_group.id) instance = db.instance_get(self.context, inst_id) + self.fw.setup_basic_filtering(instance) self.fw.prepare_instance_filter(instance) _ensure_all_called() self.teardown_security_group() From 2281c6b6b27777a7c9bfa75acf7679dd76fcfb4d Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 3 Jan 2011 13:06:14 +0100 Subject: [PATCH 080/147] Stub out init_host in libvirt driver. --- nova/virt/libvirt_conn.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 4923537930ea..b65041caaa99 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -135,6 +135,9 @@ class LibvirtConnection(object): else: self.firewall_driver = utils.import_object(FLAGS.firewall_driver) + def init_host(self): + pass + def _get_connection(self): if not self._wrapped_conn or not self._test_connection(): logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri) From bb14565d4a21084b54a4fad3c395b31b88f41680 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 3 Jan 2011 13:16:02 +0100 Subject: [PATCH 081/147] Move a closing bracket. 
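The bracket move below is more than cosmetic: _() as installed by gettext
accepts a single message string, so passing the format argument inside the
call raises TypeError at runtime, besides defeating the translation lookup.
A small runnable before/after illustration, using a stand-in for the
installed translator:

    import logging

    logging.basicConfig(level=logging.DEBUG)


    def _(message):
        # Stand-in for the gettext-installed _(): it accepts exactly one
        # message string and returns the (possibly translated) string.
        return message


    name = 'instance-00000001'

    # Before the fix: the format argument lands inside _() itself,
    # which raises TypeError before any logging happens.
    try:
        logging.debug(_("instance %s: is running", name))
    except TypeError, e:
        logging.debug("broken form raised: %s", e)

    # After the fix: translate the bare format string, then let the
    # logging call interpolate the argument lazily.
    logging.debug(_("instance %s: is running"), name)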
--- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index b65041caaa99..e1ab2aca7eae 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -366,7 +366,7 @@ class LibvirtConnection(object): self.firewall_driver.prepare_instance_filter(instance) self._create_image(instance, xml) self._conn.createXML(xml, 0) - logging.debug(_("instance %s: is running", instance['name'])) + logging.debug(_("instance %s: is running"), instance['name']) self.firewall_driver.apply_instance_filter(instance) timer = utils.LoopingCall(f=None) From 804169715c020e4c2387a1bb8aa565547c4a6a42 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 3 Jan 2011 13:54:54 +0100 Subject: [PATCH 082/147] Don't lie about which is the default firewall implementation. --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index e1ab2aca7eae..8a89b162b1d5 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -87,7 +87,7 @@ flags.DEFINE_bool('allow_project_net_traffic', 'Whether to allow in project network traffic') flags.DEFINE_string('firewall_driver', 'nova.virt.libvirt_conn.IptablesFirewallDriver', - 'Firewall driver (defaults to nwfilter)') + 'Firewall driver (defaults to iptables)') def get_connection(read_only): From d1118830c01267082c1371ef2faad1057e7a811e Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 3 Jan 2011 13:55:44 +0100 Subject: [PATCH 083/147] Stop returning generators in the refresh_security_group_{rules,members} methods. --- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6615ad65b08c..23523709145f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -103,13 +103,13 @@ class ComputeManager(manager.Manager): def refresh_security_group_rules(self, context, security_group_id, **_kwargs): """This call passes straight through to the virtualization driver.""" - yield self.driver.refresh_security_group_rules(security_group_id) + return self.driver.refresh_security_group_rules(security_group_id) @exception.wrap_exception def refresh_security_group_members(self, context, security_group_id, **_kwargs): """This call passes straight through to the virtualization driver.""" - yield self.driver.refresh_security_group_members(security_group_id) + return self.driver.refresh_security_group_members(security_group_id) @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): From 5fd9ff898bf372f26bac3c0530521ba7abb7f26c Mon Sep 17 00:00:00 2001 From: Ryan Lucio Date: Mon, 3 Jan 2011 10:55:52 -0800 Subject: [PATCH 084/147] removed extra whitespace chars at the end of the changed lines --- nova/db/sqlalchemy/session.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index ca23e93d83b4..c3876c02a3cb 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -36,8 +36,8 @@ def get_session(autocommit=True, expire_on_commit=False): global _MAKER if not _MAKER: if not _ENGINE: - _ENGINE = create_engine(FLAGS.sql_connection, - pool_recycle=FLAGS.sql_idle_timeout, + _ENGINE = create_engine(FLAGS.sql_connection, + pool_recycle=FLAGS.sql_idle_timeout, echo=False) _MAKER = (sessionmaker(bind=_ENGINE, autocommit=autocommit, From 
a36b38d78ef887e23451c1dd22fdde8615c40b08 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Mon, 3 Jan 2011 11:19:36 -0800 Subject: [PATCH 085/147] Removed compute dependency in quota.py. --- nova/compute/__init__.py | 4 ++-- nova/quota.py | 8 +++----- nova/tests/test_quota.py | 6 +++--- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py index f096dab63d62..7d797282abbb 100644 --- a/nova/compute/__init__.py +++ b/nova/compute/__init__.py @@ -83,8 +83,9 @@ class API(base.Base): """Create the number of instances requested if quota and other arguments check out ok.""" + type_data = instance_types.INSTANCE_TYPES[instance_type] num_instances = quota.allowed_instances(context, max_count, - instance_type) + type_data['vcpus']) if num_instances < min_count: logging.warn("Quota exceeeded for %s, tried to run %s instances", context.project_id, min_count) @@ -127,7 +128,6 @@ class API(base.Base): key_pair = db.key_pair_get(context, context.user_id, key_name) key_data = key_pair['public_key'] - type_data = instance_types.INSTANCE_TYPES[instance_type] base_options = { 'reservation_id': utils.generate_uid('r'), 'image_id': image_id, diff --git a/nova/quota.py b/nova/quota.py index f6ca9f77c733..12dbfcf43862 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -22,7 +22,6 @@ Quotas for instances, volumes, and floating ips from nova import db from nova import exception from nova import flags -from nova.compute import instance_types FLAGS = flags.FLAGS @@ -54,7 +53,7 @@ def get_quota(context, project_id): return rval -def allowed_instances(context, num_instances, instance_type): +def allowed_instances(context, num_instances, cores_per_instance): """Check quota and return min(num_instances, allowed_instances)""" project_id = context.project_id context = context.elevated() @@ -63,10 +62,9 @@ def allowed_instances(context, num_instances, instance_type): quota = get_quota(context, project_id) allowed_instances = quota['instances'] - used_instances allowed_cores = quota['cores'] - used_cores - type_cores = instance_types.INSTANCE_TYPES[instance_type]['vcpus'] - num_cores = num_instances * type_cores + num_cores = num_instances * cores_per_instance allowed_instances = min(allowed_instances, - int(allowed_cores // type_cores)) + int(allowed_cores // cores_per_instance)) return min(num_instances, allowed_instances) diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 8cf2a5e54652..c158187746b3 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -78,14 +78,14 @@ class QuotaTestCase(test.TestCase): def test_quota_overrides(self): """Make sure overriding a projects quotas works""" - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + num_instances = quota.allowed_instances(self.context, 100, 1) self.assertEqual(num_instances, 2) db.quota_create(self.context, {'project_id': self.project.id, 'instances': 10}) - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + num_instances = quota.allowed_instances(self.context, 100, 1) self.assertEqual(num_instances, 4) db.quota_update(self.context, self.project.id, {'cores': 100}) - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + num_instances = quota.allowed_instances(self.context, 100, 1) self.assertEqual(num_instances, 10) db.quota_destroy(self.context, self.project.id) From 97cfb850033597eebe6be88266cd0e1f457ec9bc Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Mon, 3 Jan 2011 11:37:07 -0800 Subject: [PATCH 
086/147] Merge from trunk: process replaced with util --- nova/virt/images.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/nova/virt/images.py b/nova/virt/images.py index 69838ac5b42b..0ec578b06914 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -30,7 +30,7 @@ import urllib2 import urlparse from nova import flags -from nova import process +from nova import utils from nova.auth import manager from nova.auth import signer from nova.objectstore import image @@ -60,9 +60,7 @@ def _fetch_image_no_curl(url, path, headers): while 1: data = urlfile.read(chunk) if not data: - break - f.write(data) - + break f.write(data) urlopened = urllib2.urlopen(request) urlretrieve(urlopened, path) logging.debug("Finished retreving %s -- placed in %s", url, path) @@ -73,7 +71,7 @@ def _fetch_s3_image(image, path, user, project): # This should probably move somewhere else, like e.g. a download_as # method on User objects and at the same time get rewritten to use - # twisted web client. + # a web client. headers = {} headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) @@ -90,9 +88,9 @@ def _fetch_s3_image(image, path, user, project): cmd = ['/usr/bin/curl', '--fail', '--silent', url] for (k, v) in headers.iteritems(): cmd += ['-H', '%s: %s' % (k, v)] - - cmd += ['-o', path] - return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) + cmd += ['-o', path] + cmd_out = ' '.join(cmd) + return utils.execute(cmd_out) def _fetch_local_image(image, path, user, project): @@ -100,7 +98,7 @@ def _fetch_local_image(image, path, user, project): if sys.platform.startswith('win'): return shutil.copy(source, path) else: - return process.simple_execute('cp %s %s' % (source, path)) + return utils.execute('cp %s %s' % (source, path)) def _image_path(path): From 6a8f011789ddad57726ce55962b51a04a69fe527 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Mon, 3 Jan 2011 16:08:52 -0600 Subject: [PATCH 087/147] Fixes LP688545 --- nova/compute/manager.py | 1 + nova/db/sqlalchemy/__init__.py | 21 ++++++++++++++++++++- nova/flags.py | 2 ++ nova/tests/test_xenapi.py | 19 ++++++++++--------- 4 files changed, 33 insertions(+), 10 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c9aff75acdf9..6e8f34347825 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -327,6 +327,7 @@ class ComputeManager(manager.Manager): instance_ref["internal_id"]) return self.driver.get_diagnostics(instance_ref) + @exception.wrap_exception def suspend_instance(self, context, instance_id): """suspend the instance with instance_id""" context = context.elevated() diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py index 3288ebd20f5e..22aa1cfe6c8b 100644 --- a/nova/db/sqlalchemy/__init__.py +++ b/nova/db/sqlalchemy/__init__.py @@ -19,6 +19,25 @@ """ SQLAlchemy database backend """ +import logging +import time + +from sqlalchemy.exc import OperationalError + +from nova import flags from nova.db.sqlalchemy import models -models.register_models() + +FLAGS = flags.FLAGS + + +for i in xrange(FLAGS.sql_max_retries): + if i > 0: + time.sleep(FLAGS.sql_retry_interval) + + try: + models.register_models() + break + except OperationalError: + logging.exception(_("Data store is unreachable." 
+ " Trying again in %d seconds.") % FLAGS.sql_retry_interval) diff --git a/nova/flags.py b/nova/flags.py index e872ba217403..4b73349270a3 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -266,6 +266,8 @@ DEFINE_string('sql_connection', DEFINE_string('sql_idle_timeout', '3600', 'timeout for idle sql database connections') +DEFINE_integer('sql_max_retries', 12, 'sql connection attempts') +DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval') DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager', 'Manager for compute') diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index c95a53af3c65..33571dad09be 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -250,15 +250,16 @@ class XenAPIVMTestCase(test.TestCase): def _create_instance(self): """Creates and spawns a test instance""" - values = {'name': 1, 'id': 1, - 'project_id': self.project.id, - 'user_id': self.user.id, - 'image_id': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'instance_type': 'm1.large', - 'mac_address': 'aa:bb:cc:dd:ee:ff' - } + values = { + 'name': 1, + 'id': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff'} instance = db.instance_create(values) self.conn.spawn(instance) return instance From c7305af78049f94dedcbb55480b91a3c6d843b9f Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 4 Jan 2011 00:23:35 -0500 Subject: [PATCH 088/147] Apply logging changes as a giant patch to work around the cloudpipe delete + add issue in the original patch. --- bin/nova-dhcpbridge | 20 +- bin/nova-instancemonitor | 7 +- bin/nova-logspool | 156 ++++++++++++++ bin/nova-manage | 19 +- bin/nova-spoolsentry | 97 +++++++++ nova/api/ec2/__init__.py | 92 ++++++--- nova/api/ec2/admin.py | 38 +++- nova/api/ec2/apirequest.py | 12 +- nova/api/ec2/cloud.py | 45 +++- nova/api/ec2/metadatarequesthandler.py | 7 +- nova/api/openstack/__init__.py | 13 +- nova/auth/ldapdriver.py | 7 +- nova/auth/manager.py | 62 +++++- nova/auth/signer.py | 15 +- nova/compute/api.py | 33 +-- nova/compute/disk.py | 11 +- nova/compute/manager.py | 74 ++++--- nova/compute/monitor.py | 39 ++-- nova/crypto.py | 5 +- nova/exception.py | 8 +- nova/fakerabbit.py | 20 +- nova/flags.py | 12 +- nova/image/glance.py | 25 +-- nova/log.py | 261 ++++++++++++++++++++++++ nova/network/linux_net.py | 19 +- nova/network/manager.py | 16 +- nova/objectstore/handler.py | 68 ++++-- nova/rpc.py | 26 ++- nova/scheduler/manager.py | 5 +- nova/service.py | 19 +- nova/tests/api/openstack/test_images.py | 1 - nova/tests/objectstore_unittest.py | 2 - nova/tests/test_access.py | 1 - nova/tests/test_auth.py | 9 +- nova/tests/test_cloud.py | 18 +- nova/tests/test_compute.py | 8 +- nova/tests/test_log.py | 107 ++++++++++ nova/tests/test_network.py | 9 +- nova/tests/test_quota.py | 4 - nova/tests/test_rpc.py | 7 +- nova/tests/test_volume.py | 6 +- nova/twistd.py | 25 +-- nova/utils.py | 17 +- nova/virt/connection.py | 5 +- nova/virt/libvirt_conn.py | 63 +++--- nova/virt/xenapi/fake.py | 24 ++- nova/virt/xenapi/vm_utils.py | 68 +++--- nova/virt/xenapi/vmops.py | 26 +-- nova/virt/xenapi/volume_utils.py | 44 ++-- nova/virt/xenapi/volumeops.py | 31 +-- nova/virt/xenapi_conn.py | 16 +- nova/volume/driver.py | 11 +- nova/volume/manager.py | 21 +- nova/wsgi.py | 8 +- setup.py | 2 + 55 files changed, 1282 insertions(+), 482 deletions(-) create mode 100644 bin/nova-logspool create mode 100644 bin/nova-spoolsentry 
create mode 100644 nova/log.py create mode 100644 nova/tests/test_log.py diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 828aba3d1bac..cecc1c80cdef 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -22,7 +22,6 @@ Handle lease database updates from DHCP servers. """ import gettext -import logging import os import sys @@ -39,6 +38,7 @@ gettext.install('nova', unicode=1) from nova import context from nova import db from nova import flags +from nova import log as logging from nova import rpc from nova import utils from nova.network import linux_net @@ -50,10 +50,15 @@ flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager') +LOG = logging.getLogger('nova-dhcpbridge') +if FLAGS.verbose: + LOG.setLevel(logging.DEBUG) + + def add_lease(mac, ip_address, _hostname, _interface): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: - logging.debug("leasing ip") + LOG.debug("leasing ip") network_manager = utils.import_object(FLAGS.network_manager) network_manager.lease_fixed_ip(context.get_admin_context(), mac, @@ -68,14 +73,14 @@ def add_lease(mac, ip_address, _hostname, _interface): def old_lease(mac, ip_address, hostname, interface): """Update just as add lease.""" - logging.debug("Adopted old lease or got a change of mac/hostname") + LOG.debug("Adopted old lease or got a change of mac/hostname") add_lease(mac, ip_address, hostname, interface) def del_lease(mac, ip_address, _hostname, _interface): """Called when a lease expires.""" if FLAGS.fake_rabbit: - logging.debug("releasing ip") + LOG.debug("releasing ip") network_manager = utils.import_object(FLAGS.network_manager) network_manager.release_fixed_ip(context.get_admin_context(), mac, @@ -100,6 +105,7 @@ def main(): flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile) utils.default_flagfile(flagfile) argv = FLAGS(sys.argv) + logging.basicConfig() interface = os.environ.get('DNSMASQ_INTERFACE', 'br0') if int(os.environ.get('TESTING', '0')): FLAGS.fake_rabbit = True @@ -117,9 +123,9 @@ def main(): mac = argv[2] ip = argv[3] hostname = argv[4] - logging.debug("Called %s for mac %s with ip %s and " - "hostname %s on interface %s", - action, mac, ip, hostname, interface) + LOG.debug("Called %s for mac %s with ip %s and " + "hostname %s on interface %s", + action, mac, ip, hostname, interface) globals()[action + '_lease'](mac, ip, hostname, interface) else: print init_leases(interface) diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor index 5dac3ffe608d..17f573b9de6f 100755 --- a/bin/nova-instancemonitor +++ b/bin/nova-instancemonitor @@ -23,7 +23,6 @@ import gettext import os -import logging import sys from twisted.application import service @@ -37,19 +36,23 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) +from nova import log as logging from nova import utils from nova import twistd from nova.compute import monitor +# TODO(todd): shouldn't this be done with flags? And what about verbose? 
logging.getLogger('boto').setLevel(logging.WARN) +LOG = logging.getLogger('nova-instancemonitor') + if __name__ == '__main__': utils.default_flagfile() twistd.serve(__file__) if __name__ == '__builtin__': - logging.warn('Starting instance monitor') + LOG.warn(_('Starting instance monitor')) # pylint: disable-msg=C0103 monitor = monitor.InstanceMonitor() diff --git a/bin/nova-logspool b/bin/nova-logspool new file mode 100644 index 000000000000..d5aef475660a --- /dev/null +++ b/bin/nova-logspool @@ -0,0 +1,156 @@ +#!/usr/bin/env python + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Tools for working with logs generated by nova components +""" + + +import json +import os +import re +import sys + + +class Request(object): + + def __init__(self): + self.time = "" + self.host = "" + self.logger = "" + self.message = "" + self.trace = "" + self.env = "" + self.request_id = "" + + def add_error_line(self, error_line): + self.time = " ".join(error_line.split(" ")[:3]) + self.host = error_line.split(" ")[3] + self.logger = error_line.split("(")[1].split(" ")[0] + self.request_id = error_line.split("[")[1].split(" ")[0] + error_lines = error_line.split("#012") + self.message = self.clean_log_line(error_lines.pop(0)) + self.trace = "\n".join([self.clean_trace(l) for l in error_lines]) + + def add_environment_line(self, env_line): + self.env = self.clean_env_line(env_line) + + def clean_log_line(self, line): + """Remove log format for time, level, etc: split after context""" + return line.split('] ')[-1] + + def clean_env_line(self, line): + """Also has an 'Environment: ' string in the message""" + return re.sub(r'^Environment: ', '', self.clean_log_line(line)) + + def clean_trace(self, line): + """trace has a different format, so split on TRACE:""" + return line.split('TRACE: ')[-1] + + def to_dict(self): + return {'traceback': self.trace, 'message': self.message, + 'host': self.host, 'env': self.env, 'logger': self.logger, + 'request_id': self.request_id} + +class LogReader(object): + def __init__(self, filename): + self.filename = filename + self._errors = {} + + def process(self, spooldir): + with open(self.filename) as f: + line = f.readline() + while len(line) > 0: + parts = line.split(" ") + level = (len(parts) < 6) or parts[5] + if level == 'ERROR': + self.handle_logged_error(line) + elif level == '[-]' and self.last_error: + # twisted stack trace line + clean_line = " ".join(line.split(" ")[6:]) + self.last_error.trace = self.last_error.trace + clean_line + else: + self.last_error = None + line = f.readline() + self.update_spool(spooldir) + + def handle_logged_error(self, line): + request_id = re.search(r' \[([A-Z0-9\-/]+)', line) + if not request_id: + raise Exception("Unable to parse request id from %s" % line) + request_id = request_id.group(1) + data = self._errors.get(request_id, Request()) + if self.is_env_line(line): + 
data.add_environment_line(line) + elif self.is_error_line(line): + data.add_error_line(line) + else: + # possibly error from twsited + data.add_error_line(line) + self.last_error = data + self._errors[request_id] = data + + def is_env_line(self, line): + return re.search('Environment: ', line) + + def is_error_line(self, line): + return re.search('raised', line) + + def update_spool(self, directory): + processed_dir = "%s/processed" % directory + self._ensure_dir_exists(processed_dir) + for rid, value in self._errors.iteritems(): + if not self.has_been_processed(processed_dir, rid): + with open("%s/%s" % (directory, rid), "w") as spool: + spool.write(json.dumps(value.to_dict())) + self.flush_old_processed_spool(processed_dir) + + def _ensure_dir_exists(self, d): + mkdir = False + try: + os.stat(d) + except: + mkdir = True + if mkdir: + os.mkdir(d) + + def has_been_processed(self, processed_dir, rid): + rv = False + try: + os.stat("%s/%s" % (processed_dir, rid)) + rv = True + except: + pass + return rv + + def flush_old_processed_spool(self, processed_dir): + keys = self._errors.keys() + procs = os.listdir(processed_dir) + for p in procs: + if p not in keys: + # log has rotated and the old error won't be seen again + os.unlink("%s/%s" % (processed_dir, p)) + +if __name__ == '__main__': + filename = '/var/log/nova.log' + spooldir = '/var/spool/nova' + if len(sys.argv) > 1: + filename = sys.argv[1] + if len(sys.argv) > 2: + spooldir = sys.argv[2] + LogReader(filename).process(spooldir) diff --git a/bin/nova-manage b/bin/nova-manage index 3416c1a52ee1..169ab5f628a6 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -55,8 +55,8 @@ import datetime import gettext -import logging import os +import re import sys import time @@ -77,6 +77,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import quota from nova import utils from nova.auth import manager @@ -499,6 +500,15 @@ class ServiceCommands(object): db.service_update(ctxt, svc['id'], {'disabled': True}) +class LogCommands(object): + def request(self, request_id, logfile='/var/log/nova.log'): + """Show all fields in the log for the given request. Assumes you + haven't changed the log format too much. + ARGS: request_id [logfile]""" + lines = utils.execute("cat %s | grep '\[%s '" % (logfile, request_id)) + print re.sub('#012', "\n", "\n".join(lines)) + + CATEGORIES = [ ('user', UserCommands), ('project', ProjectCommands), @@ -507,7 +517,8 @@ CATEGORIES = [ ('vpn', VpnCommands), ('floating', FloatingIpCommands), ('network', NetworkCommands), - ('service', ServiceCommands)] + ('service', ServiceCommands), + ('log', LogCommands)] def lazy_match(name, key_value_tuples): @@ -546,9 +557,7 @@ def main(): utils.default_flagfile() argv = FLAGS(sys.argv) - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - + logging._set_log_levels() script_name = argv.pop(0) if len(argv) < 1: print script_name + " category action []" diff --git a/bin/nova-spoolsentry b/bin/nova-spoolsentry new file mode 100644 index 000000000000..ab20268a9948 --- /dev/null +++ b/bin/nova-spoolsentry @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import base64 +import json +import logging +import os +import shutil +import sys +import urllib +import urllib2 +try: + import cPickle as pickle +except: + import pickle + + +class SpoolSentry(object): + def __init__(self, spool_dir, sentry_url, key=None): + self.spool_dir = spool_dir + self.sentry_url = sentry_url + self.key = key + + def process(self): + for fname in os.listdir(self.spool_dir): + if fname == "processed": + continue + try: + sourcefile = "%s/%s" % (self.spool_dir, fname) + with open(sourcefile) as f: + fdata = f.read() + data_from_json = json.loads(fdata) + data = self.build_data(data_from_json) + self.send_data(data) + destfile = "%s/processed/%s" % (self.spool_dir, fname) + shutil.move(sourcefile, destfile) + except: + logging.exception("Unable to upload record %s", fname) + raise + + def build_data(self, filejson): + env = {'SERVER_NAME': 'unknown', 'SERVER_PORT': '0000', + 'SCRIPT_NAME': '/unknown/', 'PATH_INFO': 'unknown'} + if filejson['env']: + env = json.loads(filejson['env']) + url = "http://%s:%s%s%s" % (env['SERVER_NAME'], env['SERVER_PORT'], + env['SCRIPT_NAME'], env['PATH_INFO']) + rv = {'logger': filejson['logger'], 'level': logging.ERROR, + 'server_name': filejson['host'], 'url': url, + 'message': filejson['message'], + 'traceback': filejson['traceback']} + rv['data'] = {} + if filejson['env']: + rv['data']['META'] = env + if filejson['request_id']: + rv['data']['request_id'] = filejson['request_id'] + return rv + + def send_data(self, data): + data = { + 'data': base64.b64encode(pickle.dumps(data).encode('zlib')), + 'key': self.key + } + req = urllib2.Request(self.sentry_url) + res = urllib2.urlopen(req, urllib.urlencode(data)) + if res.getcode() != 200: + raise Exception("Bad HTTP code: %s" % res.getcode()) + txt = res.read() + +if __name__ == '__main__': + sentryurl = 'http://127.0.0.1/sentry/store/' + key = '' + spooldir = '/var/spool/nova' + if len(sys.argv) > 1: + sentryurl = sys.argv[1] + if len(sys.argv) > 2: + key = sys.argv[2] + if len(sys.argv) > 3: + spooldir = sys.argv[3] + SpoolSentry(spooldir, sentryurl, key).process() diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index aa3bfaeb4520..4dd2e55cd3e4 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -20,7 +20,7 @@ Starting point for routing EC2 requests. """ -import logging +import datetime import routes import webob import webob.dec @@ -29,6 +29,7 @@ import webob.exc from nova import context from nova import exception from nova import flags +from nova import log as logging from nova import wsgi from nova.api.ec2 import apirequest from nova.api.ec2 import admin @@ -37,6 +38,7 @@ from nova.auth import manager FLAGS = flags.FLAGS +LOG = logging.getLogger("nova.api") flags.DEFINE_boolean('use_forwarded_for', False, 'Treat X-Forwarded-For as the canonical remote address. 
' 'Only enable this if you have a sanitizing proxy.') @@ -52,10 +54,6 @@ flags.DEFINE_list('lockout_memcached_servers', None, 'Memcached servers or None for in process cache.') -_log = logging.getLogger("api") -_log.setLevel(logging.DEBUG) - - class API(wsgi.Middleware): """Routing for all EC2 API requests.""" @@ -64,6 +62,40 @@ class API(wsgi.Middleware): if FLAGS.use_lockout: self.application = Lockout(self.application) + @webob.dec.wsgify + def __call__(self, req): + rv = req.get_response(self.application) + self.log_request_completion(rv, req) + return rv + + def log_request_completion(self, response, request): + controller = request.environ.get('ec2.controller', None) + if controller: + controller = controller.__class__.__name__ + action = request.environ.get('ec2.action', None) + ctxt = request.environ.get('ec2.context', None) + seconds = 'X' + microseconds = 'X' + if ctxt: + delta = datetime.datetime.utcnow() - \ + ctxt.timestamp + seconds = delta.seconds + microseconds = delta.microseconds + LOG.info( + "%s.%ss %s %s %s %s:%s %s [%s] %s %s", + seconds, + microseconds, + request.remote_addr, + request.method, + request.path_info, + controller, + action, + response.status_int, + request.user_agent, + request.content_type, + response.content_type, + context=ctxt) + class Lockout(wsgi.Middleware): """Lockout for x minutes on y failed auths in a z minute period. @@ -107,14 +139,13 @@ class Lockout(wsgi.Middleware): # NOTE(vish): To use incr, failures has to be a string. self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60) elif failures >= FLAGS.lockout_attempts: - _log.warn('Access key %s has had %d failed authentications' - ' and will be locked out for %d minutes.' % - (access_key, failures, FLAGS.lockout_minutes)) + LOG.warn('Access key %s has had %d failed authentications' + ' and will be locked out for %d minutes.', + access_key, failures, FLAGS.lockout_minutes) self.mc.set(failures_key, str(failures), time=FLAGS.lockout_minutes * 60) return res - class Authenticate(wsgi.Middleware): """Authenticate an EC2 request and add 'ec2.context' to WSGI environ.""" @@ -142,8 +173,9 @@ class Authenticate(wsgi.Middleware): req.method, req.host, req.path) - except exception.Error, ex: - logging.debug(_("Authentication Failure: %s") % ex) + # Be explicit for what exceptions are 403, the rest bubble as 500 + except (exception.NotFound, exception.NotAuthorized) as ex: + LOG.audit(_("Authentication Failure: %s"), str(ex)) raise webob.exc.HTTPForbidden() # Authenticated! @@ -154,6 +186,8 @@ class Authenticate(wsgi.Middleware): project=project, remote_address=remote_address) req.environ['ec2.context'] = ctxt + LOG.audit(_('Authenticated Request For %s:%s)'), user.name, + project.name, context=req.environ['ec2.context']) return self.application @@ -189,9 +223,9 @@ class Router(wsgi.Middleware): except: raise webob.exc.HTTPBadRequest() - _log.debug(_('action: %s') % action) + LOG.debug(_('action: %s'), action) for key, value in args.items(): - _log.debug(_('arg: %s\t\tval: %s') % (key, value)) + LOG.debug(_('arg: %s\t\tval: %s'), key, value) # Success! 
req.environ['ec2.controller'] = controller @@ -263,6 +297,8 @@ class Authorizer(wsgi.Middleware): if self._matches_any_role(context, allowed_roles): return self.application else: + LOG.audit("Unauthorized request for controller=%s and action=%s", + controller_name, action, context=context) raise webob.exc.HTTPUnauthorized() def _matches_any_role(self, context, roles): @@ -297,15 +333,24 @@ class Executor(wsgi.Application): result = None try: result = api_request.send(context, **args) + except exception.NotFound as ex: + LOG.info(_('NotFound raised: %s'), str(ex), context=context) + return self._error(req, context, type(ex).__name__, str(ex)) except exception.ApiError as ex: - + LOG.exception('ApiError raised', context=context) if ex.code: - return self._error(req, ex.code, ex.message) + return self._error(req, context, ex.code, str(ex)) else: - return self._error(req, type(ex).__name__, ex.message) - # TODO(vish): do something more useful with unknown exceptions + return self._error(req, context, type(ex).__name__, str(ex)) except Exception as ex: - return self._error(req, type(ex).__name__, str(ex)) + extra = {'environment': req.environ} + LOG.exception(_('Unexpected error raised: %s'), str(ex), + extra=extra, context=context) + return self._error(req, + context, + 'UnknownError', + _('An unknown error has occurred. ' + 'Please try your request again.')) else: resp = webob.Response() resp.status = 200 @@ -313,15 +358,16 @@ class Executor(wsgi.Application): resp.body = str(result) return resp - def _error(self, req, code, message): - logging.error("%s: %s", code, message) + def _error(self, req, context, code, message): + LOG.error("%s: %s", code, message, context=context) resp = webob.Response() resp.status = 400 resp.headers['Content-Type'] = 'text/xml' resp.body = str('\n' - '%s' - '%s' - '?' % (code, message)) + '%s' + '%s' + '%s' % + (code, message, context.request_id)) return resp diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index fac01369eca1..758b612e80fb 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -24,9 +24,13 @@ import base64 from nova import db from nova import exception +from nova import log as logging from nova.auth import manager +LOG = logging.getLogger('nova.api.ec2.admin') + + def user_dict(user, base64_file=None): """Convert the user object to a result dict""" if user: @@ -75,17 +79,18 @@ class AdminController(object): return {'userSet': [user_dict(u) for u in manager.AuthManager().get_users()]} - def register_user(self, _context, name, **_kwargs): + def register_user(self, context, name, **_kwargs): """Creates a new user, and returns generated credentials.""" + LOG.audit(_("Creating new user: %s"), name, context=context) return user_dict(manager.AuthManager().create_user(name)) - def deregister_user(self, _context, name, **_kwargs): + def deregister_user(self, context, name, **_kwargs): """Deletes a single user (NOT undoable.) Should throw an exception if the user has instances, volumes, or buckets remaining. 
""" + LOG.audit(_("Deleting user: %s"), name, context=context) manager.AuthManager().delete_user(name) - return True def describe_roles(self, context, project_roles=True, **kwargs): @@ -105,15 +110,27 @@ class AdminController(object): operation='add', **kwargs): """Add or remove a role for a user and project.""" if operation == 'add': + if project: + LOG.audit(_("Adding role %s to user %s for project %s"), role, + user, project, context=context) + else: + LOG.audit(_("Adding sitewide role %s to user %s"), role, user, + context=context) manager.AuthManager().add_role(user, role, project) elif operation == 'remove': + if project: + LOG.audit(_("Removing role %s from user %s for project %s"), + role, user, project, context=context) + else: + LOG.audit(_("Removing sitewide role %s from user %s"), role, + user, context=context) manager.AuthManager().remove_role(user, role, project) else: - raise exception.ApiError('operation must be add or remove') + raise exception.ApiError(_('operation must be add or remove')) return True - def generate_x509_for_user(self, _context, name, project=None, **kwargs): + def generate_x509_for_user(self, context, name, project=None, **kwargs): """Generates and returns an x509 certificate for a single user. Is usually called from a client that will wrap this with access and secret key info, and return a zip file. @@ -122,6 +139,8 @@ class AdminController(object): project = name project = manager.AuthManager().get_project(project) user = manager.AuthManager().get_user(name) + LOG.audit(_("Getting x509 for user: %s on project: %s"), name, + project, context=context) return user_dict(user, base64.b64encode(project.get_credentials(user))) def describe_project(self, context, name, **kwargs): @@ -137,6 +156,8 @@ class AdminController(object): def register_project(self, context, name, manager_user, description=None, member_users=None, **kwargs): """Creates a new project""" + LOG.audit(_("Create project %s managed by %s"), name, manager_user, + context=context) return project_dict( manager.AuthManager().create_project( name, @@ -146,6 +167,7 @@ class AdminController(object): def deregister_project(self, context, name): """Permanently deletes a project.""" + LOG.audit(_("Delete project: %s"), name, context=context) manager.AuthManager().delete_project(name) return True @@ -159,11 +181,15 @@ class AdminController(object): **kwargs): """Add or remove a user from a project.""" if operation == 'add': + LOG.audit(_("Adding user %s to project %s"), user, project, + context=context) manager.AuthManager().add_to_project(user, project) elif operation == 'remove': + LOG.audit(_("Removing user %s from project %s"), user, project, + context=context) manager.AuthManager().remove_from_project(user, project) else: - raise exception.ApiError('operation must be add or remove') + raise exception.ApiError(_('operation must be add or remove')) return True # FIXME(vish): these host commands don't work yet, perhaps some of the diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index a90fbeb0c2b5..d0b417db1134 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -20,13 +20,13 @@ APIRequest class """ -import logging import re # TODO(termie): replace minidom with etree from xml.dom import minidom -_log = logging.getLogger("api") -_log.setLevel(logging.DEBUG) +from nova import log as logging + +LOG = logging.getLogger("nova.api.request") _c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') @@ -94,7 +94,7 @@ class APIRequest(object): except 
AttributeError: _error = _('Unsupported API request: controller = %s,' 'action = %s') % (self.controller, self.action) - _log.warning(_error) + LOG.exception(_error) # TODO: Raise custom exception, trap in apiserver, # and reraise as 400 error. raise Exception(_error) @@ -142,7 +142,7 @@ class APIRequest(object): response = xml.toxml() xml.unlink() - _log.debug(response) + LOG.debug(response) return response def _render_dict(self, xml, el, data): @@ -151,7 +151,7 @@ class APIRequest(object): val = data[key] el.appendChild(self._render_data(xml, key, val)) except: - _log.debug(data) + LOG.debug(data) raise def _render_data(self, xml, el_name, data): diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 9fb6307a830d..d0db08db7f08 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -24,17 +24,16 @@ datastore. import base64 import datetime -import logging +import IPy import re import os from nova import context -import IPy - from nova import crypto from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import quota from nova import rpc from nova import utils @@ -45,6 +44,8 @@ from nova.compute import instance_types FLAGS = flags.FLAGS flags.DECLARE('storage_availability_zone', 'nova.volume.manager') +LOG = logging.getLogger("nova.api.cloud") + InvalidInputException = exception.InvalidInputException @@ -280,6 +281,7 @@ class CloudController(object): return {'keypairsSet': result} def create_key_pair(self, context, key_name, **kwargs): + LOG.audit(_("Create key pair %s"), key_name, context=context) data = _gen_key(context, context.user.id, key_name) return {'keyName': key_name, 'keyFingerprint': data['fingerprint'], @@ -287,6 +289,7 @@ class CloudController(object): # TODO(vish): when context is no longer an object, pass it here def delete_key_pair(self, context, key_name, **kwargs): + LOG.audit(_("Delete key pair %s"), key_name, context=context) try: db.key_pair_destroy(context, context.user.id, key_name) except exception.NotFound: @@ -393,6 +396,8 @@ class CloudController(object): return False def revoke_security_group_ingress(self, context, group_name, **kwargs): + LOG.audit(_("Revoke security group ingress %s"), group_name, + context=context) self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, context.project_id, @@ -419,6 +424,8 @@ class CloudController(object): # for these operations, so support for newer API versions # is sketchy. 
     def authorize_security_group_ingress(self, context, group_name, **kwargs):
+        LOG.audit(_("Authorize security group ingress %s"), group_name,
+                  context=context)
         self.compute_api.ensure_default_security_group(context)
         security_group = db.security_group_get_by_name(context,
                                                        context.project_id,
@@ -455,6 +462,7 @@ class CloudController(object):
         return source_project_id
 
     def create_security_group(self, context, group_name, group_description):
+        LOG.audit(_("Create Security Group %s"), group_name, context=context)
         self.compute_api.ensure_default_security_group(context)
         if db.security_group_exists(context, context.project_id, group_name):
             raise exception.ApiError(_('group %s already exists') % group_name)
@@ -469,6 +477,7 @@ class CloudController(object):
                                                              group_ref)]}
 
     def delete_security_group(self, context, group_name, **kwargs):
+        LOG.audit(_("Delete security group %s"), group_name, context=context)
         security_group = db.security_group_get_by_name(context,
                                                        context.project_id,
                                                        group_name)
@@ -476,6 +485,8 @@ class CloudController(object):
         return True
 
     def get_console_output(self, context, instance_id, **kwargs):
+        LOG.audit(_("Get console output for instance %s"), instance_id,
+                  context=context)
         # instance_id is passed in as a list of instances
         ec2_id = instance_id[0]
         internal_id = ec2_id_to_internal_id(ec2_id)
@@ -539,10 +550,12 @@ class CloudController(object):
         return v
 
     def create_volume(self, context, size, **kwargs):
+        LOG.audit(_("Create volume of %s GB"), size, context=context)
         # check quota
         if quota.allowed_volumes(context, 1, size) < 1:
-            logging.warn("Quota exceeeded for %s, tried to create %sG volume",
-                         context.project_id, size)
+            LOG.warn(_("Quota exceeded for project %s, tried to create "
+                       "%sG volume"), context.project_id, size,
+                     context=context)
             raise quota.QuotaError("Volume quota exceeded. You cannot "
                                    "create a volume of size %s" % size)
         vol = {}
@@ -568,6 +581,8 @@ class CloudController(object):
         return {'volumeSet': [self._format_volume(context, dict(volume_ref))]}
 
     def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
+        LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id,
+                  instance_id, device, context=context)
         volume_ref = db.volume_get_by_ec2_id(context, volume_id)
         if not re.match("^/dev/[a-z]d[a-z]+$", device):
             raise exception.ApiError(_("Invalid device specified: %s. "
@@ -594,6 +609,7 @@ class CloudController(object):
                 'volumeId': volume_ref['id']}
 
     def detach_volume(self, context, volume_id, **kwargs):
+        LOG.audit("Detach volume %s", volume_id, context=context)
         volume_ref = db.volume_get_by_ec2_id(context, volume_id)
         instance_ref = db.volume_get_instance(context.elevated(),
                                               volume_ref['id'])
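The attach_volume check above is strict about device names: the regex only admits whole Linux block devices of the letter-d-letters form. A standalone illustration of how that pattern behaves (not part of the patch; the sample device names are arbitrary):

    import re

    DEVICE_PATTERN = "^/dev/[a-z]d[a-z]+$"  # same pattern as attach_volume

    # One letter, then 'd', then more letters: sd*, vd*, hd* style names pass.
    assert re.match(DEVICE_PATTERN, "/dev/sdb")
    assert re.match(DEVICE_PATTERN, "/dev/vdc")
    # Partition numbers and two-letter prefixes such as 'xvd' are rejected.
    assert not re.match(DEVICE_PATTERN, "/dev/sda1")
    assert not re.match(DEVICE_PATTERN, "/dev/xvda")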
You cannot " "allocate any more addresses")) # NOTE(vish): We don't know which network host should get the ip @@ -746,6 +762,7 @@ class CloudController(object): return {'addressSet': [{'publicIp': public_ip}]} def release_address(self, context, public_ip, **kwargs): + LOG.audit(_("Release address %s"), public_ip, context=context) floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) # NOTE(vish): We don't know which network host should get the ip # when we deallocate, so just send it to any one. This @@ -758,6 +775,8 @@ class CloudController(object): return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): + LOG.audit(_("Associate address %s to instance %s"), public_ip, + instance_id, context=context) internal_id = ec2_id_to_internal_id(instance_id) instance_ref = self.compute_api.get_instance(context, internal_id) fixed_address = db.instance_get_fixed_address(context, @@ -775,6 +794,7 @@ class CloudController(object): return {'associateResponse': ["Address associated."]} def disassociate_address(self, context, public_ip, **kwargs): + LOG.audit(_("Disassociate address %s"), public_ip, context=context) floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) # NOTE(vish): Get the topic from the host name of the network of # the associated fixed ip. @@ -811,7 +831,7 @@ class CloudController(object): def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. instance_id is a kwarg so its name cannot be modified.""" - logging.debug("Going to start terminating instances") + LOG.debug(_("Going to start terminating instances")) for ec2_id in instance_id: internal_id = ec2_id_to_internal_id(ec2_id) self.compute_api.delete_instance(context, internal_id) @@ -819,6 +839,7 @@ class CloudController(object): def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" + LOG.audit(_("Reboot instance %r"), instance_id, context=context) for ec2_id in instance_id: internal_id = ec2_id_to_internal_id(ec2_id) self.compute_api.reboot(context, internal_id) @@ -850,6 +871,7 @@ class CloudController(object): def delete_volume(self, context, volume_id, **kwargs): # TODO: return error if not authorized + LOG.audit(_("Deleting volume %s"), volume_id, context=context) volume_ref = db.volume_get_by_ec2_id(context, volume_id) if volume_ref['status'] != "available": raise exception.ApiError(_("Volume status must be available")) @@ -871,6 +893,7 @@ class CloudController(object): return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): + LOG.audit("De-registering image %s", image_id, context=context) self.image_service.deregister(context, image_id) return {'imageId': image_id} @@ -878,7 +901,8 @@ class CloudController(object): if image_location is None and 'name' in kwargs: image_location = kwargs['name'] image_id = self.image_service.register(context, image_location) - logging.debug("Registered %s as %s" % (image_location, image_id)) + LOG.audit(_("Registered image %s with id %s"), image_location, + image_id, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): @@ -906,6 +930,7 @@ class CloudController(object): raise exception.ApiError(_('only group "all" is supported')) if not operation_type in ['add', 'remove']: raise exception.ApiError(_('operation_type must be add or remove')) + LOG.audit(_("Updating image %s 
publicity"), image_id, context=context) return self.image_service.modify(context, image_id, operation_type) def update_image(self, context, image_id, **kwargs): diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index a57a6698a5d3..848f0b034359 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -18,15 +18,15 @@ """Metadata request handler.""" -import logging - import webob.dec import webob.exc +from nova import log as logging from nova import flags from nova.api.ec2 import cloud +LOG = logging.getLogger('nova.api.ec2.metadata') FLAGS = flags.FLAGS @@ -72,8 +72,7 @@ class MetadataRequestHandler(object): remote_address = req.headers.get('X-Forwarded-For', remote_address) meta_data = cc.get_metadata(remote_address) if meta_data is None: - logging.error(_('Failed to get metadata for ip: %s') % - remote_address) + LOG.error(_('Failed to get metadata for ip: %s'), remote_address) raise webob.exc.HTTPNotFound() data = self.lookup(req.path_info, meta_data) if data is None: diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index a1430caedf1d..7e1c03d9f99f 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -20,28 +20,24 @@ WSGI middleware for OpenStack API controllers. """ -import time - -import logging import routes -import traceback import webob.dec import webob.exc import webob -from nova import context from nova import flags +from nova import log as logging from nova import utils from nova import wsgi from nova.api.openstack import faults from nova.api.openstack import backup_schedules from nova.api.openstack import flavors from nova.api.openstack import images -from nova.api.openstack import ratelimiting from nova.api.openstack import servers from nova.api.openstack import sharedipgroups +LOG = logging.getLogger('nova.api.openstack') FLAGS = flags.FLAGS flags.DEFINE_string('os_api_auth', 'nova.api.openstack.auth.AuthMiddleware', @@ -71,8 +67,7 @@ class API(wsgi.Middleware): try: return req.get_response(self.application) except Exception as ex: - logging.warn(_("Caught error: %s") % str(ex)) - logging.error(traceback.format_exc()) + LOG.exception(_("Caught error: %s"), str(ex)) exc = webob.exc.HTTPInternalServerError(explanation=str(ex)) return faults.Fault(exc) @@ -88,7 +83,7 @@ class APIRouter(wsgi.Router): server_members = {'action': 'POST'} if FLAGS.allow_admin_api: - logging.debug("Including admin operations in API.") + LOG.debug("Including admin operations in API.") server_members['pause'] = 'POST' server_members['unpause'] = 'POST' server_members["diagnostics"] = "GET" diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 7616ff112b26..3e0837ba8ad0 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -24,11 +24,11 @@ other backends by creating another class that exposes the same public methods. """ -import logging import sys from nova import exception from nova import flags +from nova import log as logging FLAGS = flags.FLAGS @@ -66,6 +66,9 @@ flags.DEFINE_string('ldap_developer', 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers') +LOG = logging.getLogger("nova.ldapdriver") + + # TODO(vish): make an abstract base class with the same public methods # to define a set interface for AuthDrivers. 
I'm delaying # creating this now because I'm expecting an auth refactor @@ -502,7 +505,7 @@ class LdapDriver(object): try: self.conn.modify_s(group_dn, attr) except self.ldap.OBJECT_CLASS_VIOLATION: - logging.debug(_("Attempted to remove the last member of a group. " + LOG.debug(_("Attempted to remove the last member of a group. " "Deleting the group at %s instead."), group_dn) self.__delete_group(group_dn) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index d3e266952d6e..bfac7fc2a7c8 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -20,7 +20,6 @@ Nova authentication management """ -import logging import os import shutil import string # pylint: disable-msg=W0402 @@ -33,6 +32,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth import signer @@ -71,6 +71,9 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', 'Driver that auth manager uses') +LOG = logging.getLogger('nova.authmanager') + + class AuthBase(object): """Base class for objects relating to auth @@ -254,43 +257,51 @@ class AuthManager(object): # TODO(vish): check for valid timestamp (access_key, _sep, project_id) = access.partition(':') - logging.info(_('Looking up user: %r'), access_key) + LOG.debug(_('Looking up user: %r'), access_key) user = self.get_user_from_access_key(access_key) - logging.info('user: %r', user) + LOG.debug('user: %r', user) if user == None: + LOG.audit(_("Failed authorization for access key %s"), access_key) raise exception.NotFound(_('No user found for access key %s') % access_key) # NOTE(vish): if we stop using project name as id we need better # logic to find a default project for user if project_id == '': + LOG.debug(_("Using project name = user name (%s)"), user.name) project_id = user.name project = self.get_project(project_id) if project == None: + LOG.audit(_("failed authorization: no project named %s (user=%s)"), + project_id, user.name) raise exception.NotFound(_('No project called %s could be found') % project_id) if not self.is_admin(user) and not self.is_project_member(user, project): + LOG.audit(_("Failed authorization: user %s not admin and not " + "member of project %s"), user.name, project.name) raise exception.NotFound(_('User %s is not a member of project %s') % (user.id, project.id)) if check_type == 's3': sign = signer.Signer(user.secret.encode()) expected_signature = sign.s3_authorization(headers, verb, path) - logging.debug('user.secret: %s', user.secret) - logging.debug('expected_signature: %s', expected_signature) - logging.debug('signature: %s', signature) + LOG.debug('user.secret: %s', user.secret) + LOG.debug('expected_signature: %s', expected_signature) + LOG.debug('signature: %s', signature) if signature != expected_signature: + LOG.audit(_("Invalid signature for user %s"), user.name) raise exception.NotAuthorized(_('Signature does not match')) elif check_type == 'ec2': # NOTE(vish): hmac can't handle unicode, so encode ensures that # secret isn't unicode expected_signature = signer.Signer(user.secret.encode()).generate( params, verb, server_string, path) - logging.debug('user.secret: %s', user.secret) - logging.debug('expected_signature: %s', expected_signature) - logging.debug('signature: %s', signature) + LOG.debug('user.secret: %s', user.secret) + LOG.debug('expected_signature: %s', expected_signature) + LOG.debug('signature: %s', signature) if signature != expected_signature: + LOG.audit(_("Invalid signature for user 
%s"), user.name) raise exception.NotAuthorized(_('Signature does not match')) return (user, project) @@ -398,6 +409,12 @@ class AuthManager(object): raise exception.NotFound(_("The %s role can not be found") % role) if project is not None and role in FLAGS.global_roles: raise exception.NotFound(_("The %s role is global only") % role) + if project: + LOG.audit(_("Adding role %s to user %s in project %s"), role, + User.safe_id(user), Project.safe_id(project)) + else: + LOG.audit(_("Adding sitewide role %s to user %s"), role, + User.safe_id(user)) with self.driver() as drv: drv.add_role(User.safe_id(user), role, Project.safe_id(project)) @@ -418,6 +435,12 @@ class AuthManager(object): @type project: Project or project_id @param project: Project in which to remove local role. """ + if project: + LOG.audit(_("Removing role %s from user %s on project %s"), + role, User.safe_id(user), Project.safe_id(project)) + else: + LOG.audit(_("Removing sitewide role %s from user %s"), role, + User.safe_id(user)) with self.driver() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) @@ -480,6 +503,8 @@ class AuthManager(object): description, member_users) if project_dict: + LOG.audit(_("Created project %s with manager %s"), name, + manager_user) project = Project(**project_dict) return project @@ -496,6 +521,7 @@ class AuthManager(object): @param project: This will be the new description of the project. """ + LOG.audit(_("modifying project %s"), Project.safe_id(project)) if manager_user: manager_user = User.safe_id(manager_user) with self.driver() as drv: @@ -505,6 +531,8 @@ class AuthManager(object): def add_to_project(self, user, project): """Add user to project""" + LOG.audit(_("Adding user %s to project %s"), User.safe_id(user), + Project.safe_id(project)) with self.driver() as drv: return drv.add_to_project(User.safe_id(user), Project.safe_id(project)) @@ -523,6 +551,8 @@ class AuthManager(object): def remove_from_project(self, user, project): """Removes a user from a project""" + LOG.audit(_("Remove user %s from project %s"), User.safe_id(user), + Project.safe_id(project)) with self.driver() as drv: return drv.remove_from_project(User.safe_id(user), Project.safe_id(project)) @@ -549,6 +579,7 @@ class AuthManager(object): def delete_project(self, project): """Deletes a project""" + LOG.audit(_("Deleting project %s"), Project.safe_id(project)) with self.driver() as drv: drv.delete_project(Project.safe_id(project)) @@ -603,13 +634,16 @@ class AuthManager(object): with self.driver() as drv: user_dict = drv.create_user(name, access, secret, admin) if user_dict: - return User(**user_dict) + rv = User(**user_dict) + LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin) + return rv def delete_user(self, user): """Deletes a user Additionally deletes all users key_pairs""" uid = User.safe_id(user) + LOG.audit(_("Deleting user %s"), uid) db.key_pair_destroy_all_by_user(context.get_admin_context(), uid) with self.driver() as drv: @@ -618,6 +652,12 @@ class AuthManager(object): def modify_user(self, user, access_key=None, secret_key=None, admin=None): """Modify credentials for a user""" uid = User.safe_id(user) + if access_key: + LOG.audit(_("Access Key change for user %s"), uid) + if secret_key: + LOG.audit(_("Secret Key change for user %s"), uid) + if admin is not None: + LOG.audit(_("Admin status set to %r for user %s"), admin, uid) with self.driver() as drv: drv.modify_user(uid, access_key, secret_key, admin) @@ -666,7 +706,7 @@ class AuthManager(object): port=vpn_port) 
                 zippy.writestr(FLAGS.credential_vpn_file, config)
             else:
-                logging.warn(_("No vpn data for project %s"), pid)
+                LOG.warn(_("No vpn data for project %s"), pid)
 
         zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid))
         zippy.close()
diff --git a/nova/auth/signer.py b/nova/auth/signer.py
index f7d29f53431e..744e315d4877 100644
--- a/nova/auth/signer.py
+++ b/nova/auth/signer.py
@@ -46,7 +46,6 @@ Utility class for parsing signed AMI manifests.
 import base64
 import hashlib
 import hmac
-import logging
 import urllib
 
 # NOTE(vish): for new boto
@@ -54,9 +53,13 @@ import boto
 # NOTE(vish): for old boto
 import boto.utils
 
+from nova import log as logging
 from nova.exception import Error
 
 
+LOG = logging.getLogger('nova.signer')
+
+
 class Signer(object):
     """Hacked up code from boto/connection.py"""
 
@@ -120,7 +123,7 @@ class Signer(object):
 
     def _calc_signature_2(self, params, verb, server_string, path):
         """Generate AWS signature version 2 string."""
-        logging.debug('using _calc_signature_2')
+        LOG.debug('using _calc_signature_2')
         string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
         if self.hmac_256:
             current_hmac = self.hmac_256
@@ -136,13 +139,13 @@ class Signer(object):
                 val = urllib.quote(val, safe='-_~')
                 pairs.append(urllib.quote(key, safe='') + '=' + val)
         qs = '&'.join(pairs)
-        logging.debug('query string: %s', qs)
+        LOG.debug('query string: %s', qs)
         string_to_sign += qs
-        logging.debug('string_to_sign: %s', string_to_sign)
+        LOG.debug('string_to_sign: %s', string_to_sign)
         current_hmac.update(string_to_sign)
         b64 = base64.b64encode(current_hmac.digest())
-        logging.debug('len(b64)=%d', len(b64))
-        logging.debug('base64 encoded digest: %s', b64)
+        LOG.debug('len(b64)=%d', len(b64))
+        LOG.debug('base64 encoded digest: %s', b64)
         return b64
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 07c69bd31754..c0141e569e8f 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -21,12 +21,12 @@ Handles all API requests relating to instances (guest vms).
 """
 
 import datetime
-import logging
 import time
 
 from nova import db
 from nova import exception
 from nova import flags
+from nova import log as logging
 from nova import quota
 from nova import rpc
 from nova import utils
@@ -34,6 +34,7 @@ from nova.compute import instance_types
 from nova.db import base
 
 FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.compute.api')
 
 
 def generate_default_hostname(internal_id):
@@ -58,13 +59,13 @@ class ComputeAPI(base.Base):
             instance = self.db.instance_get_by_internal_id(context,
                                                            instance_id)
         except exception.NotFound as e:
-            logging.warning("Instance %d was not found in get_network_topic",
-                            instance_id)
+            LOG.warning(_("Instance %d was not found in get_network_topic"),
+                        instance_id)
             raise e
 
         host = instance['host']
         if not host:
-            raise exception.Error("Instance %d has no host" % instance_id)
+            raise exception.Error(_("Instance %d has no host") % instance_id)
         topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
         return rpc.call(context,
                         topic,
@@ -83,10 +84,10 @@ class ComputeAPI(base.Base):
         num_instances = quota.allowed_instances(context, max_count,
                                                 instance_type)
         if num_instances < min_count:
-            logging.warn("Quota exceeeded for %s, tried to run %s instances",
-                         context.project_id, min_count)
-            raise quota.QuotaError("Instance quota exceeded. You can only "
-                                   "run %s more instances of this type." %
+            LOG.warn(_("Quota exceeded for %s, tried to run %s instances"),
+                     context.project_id, min_count)
+            raise quota.QuotaError(_("Instance quota exceeded. 
You can only " + "run %s more instances of this type.") % num_instances, "InstanceLimitExceeded") is_vpn = image_id == FLAGS.vpn_image_id @@ -100,7 +101,7 @@ class ComputeAPI(base.Base): if kernel_id == str(FLAGS.null_kernel): kernel_id = None ramdisk_id = None - logging.debug("Creating a raw instance") + LOG.debug(_("Creating a raw instance")) # Make sure we have access to kernel and ramdisk (if not raw) if kernel_id: self.image_service.show(context, kernel_id) @@ -147,7 +148,7 @@ class ComputeAPI(base.Base): elevated = context.elevated() instances = [] - logging.debug(_("Going to run %s instances..."), num_instances) + LOG.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): instance = dict(mac_address=utils.generate_mac(), launch_index=num, @@ -172,7 +173,7 @@ class ComputeAPI(base.Base): instance = self.update_instance(context, instance_id, **updates) instances.append(instance) - logging.debug(_("Casting to scheduler for %s/%s's instance %s"), + LOG.debug(_("Casting to scheduler for %s/%s's instance %s"), context.project_id, context.user_id, instance_id) rpc.cast(context, FLAGS.scheduler_topic, @@ -214,18 +215,18 @@ class ComputeAPI(base.Base): return self.db.instance_update(context, instance_id, kwargs) def delete_instance(self, context, instance_id): - logging.debug("Going to try and terminate %d" % instance_id) + LOG.debug(_("Going to try and terminate %d"), instance_id) try: instance = self.db.instance_get_by_internal_id(context, instance_id) except exception.NotFound as e: - logging.warning(_("Instance %d was not found during terminate"), - instance_id) + LOG.warning(_("Instance %d was not found during terminate"), + instance_id) raise e if (instance['state_description'] == 'terminating'): - logging.warning(_("Instance %d is already being terminated"), - instance_id) + LOG.warning(_("Instance %d is already being terminated"), + instance_id) return self.update_instance(context, diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 814a258cd61e..741499294082 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -22,14 +22,15 @@ Includes injection of SSH PGP keys into authorized_keys file. """ -import logging import os import tempfile from nova import exception from nova import flags +from nova import log as logging +LOG = logging.getLogger('nova.compute.disk') FLAGS = flags.FLAGS flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10, 'minimum size in bytes of root partition') @@ -67,12 +68,12 @@ def partition(infile, outfile, local_bytes=0, resize=True, execute('resize2fs %s' % infile) file_size = FLAGS.minimum_root_size elif file_size % sector_size != 0: - logging.warn(_("Input partition size not evenly divisible by" - " sector size: %d / %d"), file_size, sector_size) + LOG.warn(_("Input partition size not evenly divisible by" + " sector size: %d / %d"), file_size, sector_size) primary_sectors = file_size / sector_size if local_bytes % sector_size != 0: - logging.warn(_("Bytes for local storage not evenly divisible" - " by sector size: %d / %d"), local_bytes, sector_size) + LOG.warn(_("Bytes for local storage not evenly divisible" + " by sector size: %d / %d"), local_bytes, sector_size) local_sectors = local_bytes / sector_size mbr_last = 62 # a diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6e8f34347825..cc5724346dce 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -35,10 +35,10 @@ terminating it. 
""" import datetime -import logging from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import rpc from nova import utils @@ -53,6 +53,9 @@ flags.DEFINE_string('stub_network', False, 'Stub network related code') +LOG = logging.getLogger('nova.computemanager') + + class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" @@ -111,7 +114,7 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) - logging.debug(_("instance %s: starting..."), instance_id) + LOG.debug(_("instance %s: starting..."), instance_id) self.db.instance_update(context, instance_id, {'host': self.host}) @@ -149,8 +152,8 @@ class ComputeManager(manager.Manager): instance_id, {'launched_at': now}) except Exception: # pylint: disable-msg=W0702 - logging.exception(_("instance %s: Failed to spawn"), - instance_ref['name']) + LOG.exception(_("instance %s: Failed to spawn"), + instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.SHUTDOWN) @@ -161,14 +164,16 @@ class ComputeManager(manager.Manager): def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Terminating instance %s//%s"), + instance_ref['internal_id'], instance_id, context=context) if not FLAGS.stub_network: address = self.db.instance_get_floating_address(context, instance_ref['id']) if address: - logging.debug(_("Disassociating address %s") % address) + LOG.debug(_("Disassociating address %s"), address, + context=context) # NOTE(vish): Right now we don't really care if the ip is # disassociated. We may need to worry about # checking this later. @@ -180,14 +185,15 @@ class ComputeManager(manager.Manager): address = self.db.instance_get_fixed_address(context, instance_ref['id']) if address: - logging.debug(_("Deallocating address %s") % address) + LOG.debug(_("Deallocating address %s"), address, + context=context) # NOTE(vish): Currently, nothing needs to be done on the # network node until release. If this changes, # we will need to cast here. 
@@ -161,14 +164,16 @@
     def terminate_instance(self, context, instance_id):
         """Terminate an instance on this machine."""
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
+        LOG.audit(_("Terminating instance %s"), instance_id, context=context)
 
         if not FLAGS.stub_network:
             address = self.db.instance_get_floating_address(context,
                                                             instance_ref['id'])
             if address:
-                logging.debug(_("Disassociating address %s") % address)
+                LOG.debug(_("Disassociating address %s"), address,
+                          context=context)
                 # NOTE(vish): Right now we don't really care if the ip is
                 #             disassociated.  We may need to worry about
                 #             checking this later.
@@ -180,14 +185,15 @@
             address = self.db.instance_get_fixed_address(context,
                                                          instance_ref['id'])
             if address:
-                logging.debug(_("Deallocating address %s") % address)
+                LOG.debug(_("Deallocating address %s"), address,
+                          context=context)
                 # NOTE(vish): Currently, nothing needs to be done on the
                 #             network node until release. If this changes,
                 #             we will need to cast here.
                 self.network_manager.deallocate_fixed_ip(context.elevated(),
                                                          address)
 
-        logging.debug(_("instance %s: terminating"), instance_id)
+        LOG.debug(_("instance %s: terminating"), instance_id, context=context)
 
         volumes = instance_ref.get('volumes', []) or []
         for volume in volumes:
@@ -207,15 +213,18 @@
         context = context.elevated()
         self._update_state(context, instance_id)
         instance_ref = self.db.instance_get(context, instance_id)
+        LOG.audit(_("Rebooting instance %s//%s"), instance_ref['internal_id'],
+                  instance_id, context=context)
 
         if instance_ref['state'] != power_state.RUNNING:
-            logging.warn(_('trying to reboot a non-running '
-                           'instance: %s (state: %s excepted: %s)'),
-                         instance_ref['internal_id'],
-                         instance_ref['state'],
-                         power_state.RUNNING)
+            LOG.warn(_('trying to reboot a non-running '
+                       'instance: %s (state: %s expected: %s)'),
+                     instance_ref['internal_id'],
+                     instance_ref['state'],
+                     power_state.RUNNING,
+                     context=context)
 
-        logging.debug(_('instance %s: rebooting'), instance_ref['name'])
+        LOG.debug(_('instance %s: rebooting'), instance_ref['name'])
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -251,8 +260,8 @@
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
 
-        logging.debug(_('instance %s: rescuing'),
-                      instance_ref['internal_id'])
+        LOG.audit(_('instance %s: rescuing'), instance_ref['internal_id'],
+                  context=context)
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -267,8 +276,8 @@
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
 
-        logging.debug(_('instance %s: unrescuing'),
-                      instance_ref['internal_id'])
+        LOG.audit(_('instance %s: unrescuing'), instance_ref['internal_id'],
+                  context=context)
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -287,8 +296,8 @@
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
 
-        logging.debug('instance %s: pausing',
-                      instance_ref['internal_id'])
+        LOG.debug(_('instance %s: pausing'), instance_ref['internal_id'],
+                  context=context)
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -305,8 +314,8 @@
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
 
-        logging.debug('instance %s: unpausing',
-                      instance_ref['internal_id'])
+        LOG.debug(_('instance %s: unpausing'), instance_ref['internal_id'],
+                  context=context)
         self.db.instance_set_state(context,
                                    instance_id,
                                    power_state.NOSTATE,
@@ -364,8 +373,9 @@
     def get_console_output(self, context, instance_id):
         """Send the console output for an instance."""
         context = context.elevated()
-        logging.debug(_("instance %s: getting console output"), instance_id)
         instance_ref = self.db.instance_get(context, instance_id)
+        LOG.audit(_("Get console output instance %s//%s"),
+                  instance_ref['internal_id'], instance_id, context=context)
 
         return self.driver.get_console_output(instance_ref)
 
@@ -373,8 +383,8 @@
     def attach_volume(self, context, instance_id, volume_id, mountpoint):
         """Attach a volume to an instance."""
         context = context.elevated()
-        logging.debug(_("instance %s: attaching volume %s to %s"), instance_id,
-                      volume_id, mountpoint)
+        LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id,
+                  volume_id, 
mountpoint, context=context) instance_ref = self.db.instance_get(context, instance_id) dev_path = self.volume_manager.setup_compute_volume(context, volume_id) @@ -390,8 +400,8 @@ class ComputeManager(manager.Manager): # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same # ecxception below. - logging.exception(_("instance %s: attach failed %s, removing"), - instance_id, mountpoint) + LOG.exception(_("instance %s: attach failed %s, removing"), + instance_id, mountpoint, context=context) self.volume_manager.remove_compute_volume(context, volume_id) raise exc @@ -402,14 +412,14 @@ class ComputeManager(manager.Manager): def detach_volume(self, context, instance_id, volume_id): """Detach a volume from an instance.""" context = context.elevated() - logging.debug(_("instance %s: detaching volume %s"), - instance_id, - volume_id) instance_ref = self.db.instance_get(context, instance_id) volume_ref = self.db.volume_get(context, volume_id) + LOG.audit(_("Detach volume %s from mountpoint %s on instance %s//%s"), + volume_id, volume_ref['mountpoint'], + instance_ref['internal_id'], instance_id, context=context) if instance_ref['name'] not in self.driver.list_instances(): - logging.warn(_("Detaching volume from unknown instance %s"), - instance_ref['name']) + LOG.warn(_("Detaching volume from unknown instance %s"), + instance_ref['name'], context=context) else: self.driver.detach_volume(instance_ref['name'], volume_ref['mountpoint']) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 60c347a5ea6e..cc94e44f440e 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -25,19 +25,17 @@ Instance Monitoring: """ import datetime -import logging import os -import sys import time import boto import boto.s3 import rrdtool -from twisted.internet import defer from twisted.internet import task from twisted.application import service from nova import flags +from nova import log as logging from nova.virt import connection as virt_connection @@ -91,6 +89,9 @@ RRD_VALUES = { utcnow = datetime.datetime.utcnow +LOG = logging.getLogger('nova.instancemonitor') + + def update_rrd(instance, name, data): """ Updates the specified RRD file. @@ -255,20 +256,20 @@ class Instance(object): Updates the instances statistics and stores the resulting graphs in the internal object store on the cloud controller. """ - logging.debug(_('updating %s...'), self.instance_id) + LOG.debug(_('updating %s...'), self.instance_id) try: data = self.fetch_cpu_stats() if data != None: - logging.debug('CPU: %s', data) + LOG.debug('CPU: %s', data) update_rrd(self, 'cpu', data) data = self.fetch_net_stats() - logging.debug('NET: %s', data) + LOG.debug('NET: %s', data) update_rrd(self, 'net', data) data = self.fetch_disk_stats() - logging.debug('DISK: %s', data) + LOG.debug('DISK: %s', data) update_rrd(self, 'disk', data) # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls @@ -285,7 +286,7 @@ class Instance(object): graph_disk(self, '1w') graph_disk(self, '1m') except Exception: - logging.exception(_('unexpected error during update')) + LOG.exception(_('unexpected error during update')) self.last_updated = utcnow() @@ -309,7 +310,7 @@ class Instance(object): self.cputime = float(info['cpu_time']) self.cputime_last_updated = utcnow() - logging.debug('CPU: %d', self.cputime) + LOG.debug('CPU: %d', self.cputime) # Skip calculation on first pass. Need delta to get a meaningful value. 
if cputime_last_updated == None: @@ -319,17 +320,17 @@ class Instance(object): d = self.cputime_last_updated - cputime_last_updated t = d.days * 86400 + d.seconds - logging.debug('t = %d', t) + LOG.debug('t = %d', t) # Calculate change over time in number of nanoseconds of CPU time used. cputime_delta = self.cputime - cputime_last - logging.debug('cputime_delta = %s', cputime_delta) + LOG.debug('cputime_delta = %s', cputime_delta) # Get the number of virtual cpus in this domain. vcpus = int(info['num_cpu']) - logging.debug('vcpus = %d', vcpus) + LOG.debug('vcpus = %d', vcpus) # Calculate CPU % used and cap at 100. return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100) @@ -351,8 +352,8 @@ class Instance(object): rd += rd_bytes wr += wr_bytes except TypeError: - logging.error(_('Cannot get blockstats for "%s" on "%s"'), - disk, self.instance_id) + LOG.error(_('Cannot get blockstats for "%s" on "%s"'), + disk, self.instance_id) raise return '%d:%d' % (rd, wr) @@ -373,8 +374,8 @@ class Instance(object): rx += stats[0] tx += stats[4] except TypeError: - logging.error(_('Cannot get ifstats for "%s" on "%s"'), - interface, self.instance_id) + LOG.error(_('Cannot get ifstats for "%s" on "%s"'), + interface, self.instance_id) raise return '%d:%d' % (rx, tx) @@ -408,7 +409,7 @@ class InstanceMonitor(object, service.Service): try: conn = virt_connection.get_connection(read_only=True) except Exception, exn: - logging.exception(_('unexpected exception getting connection')) + LOG.exception(_('unexpected exception getting connection')) time.sleep(FLAGS.monitoring_instances_delay) return @@ -416,14 +417,14 @@ class InstanceMonitor(object, service.Service): try: self.updateInstances_(conn, domain_ids) except Exception, exn: - logging.exception('updateInstances_') + LOG.exception('updateInstances_') def updateInstances_(self, conn, domain_ids): for domain_id in domain_ids: if not domain_id in self._instances: instance = Instance(conn, domain_id) self._instances[domain_id] = instance - logging.debug(_('Found instance: %s'), domain_id) + LOG.debug(_('Found instance: %s'), domain_id) for key in self._instances.keys(): instance = self._instances[key] diff --git a/nova/crypto.py b/nova/crypto.py index b8405552d12b..a34b940f50d5 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -24,7 +24,6 @@ Includes root and intermediate CAs, SSH key_pairs and x509 certificates. import base64 import gettext import hashlib -import logging import os import shutil import struct @@ -39,8 +38,10 @@ gettext.install('nova', unicode=1) from nova import context from nova import db from nova import flags +from nova import log as logging +LOG = logging.getLogger("nova.crypto") FLAGS = flags.FLAGS flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA')) flags.DEFINE_string('key_file', @@ -254,7 +255,7 @@ def _sign_csr(csr_text, ca_folder): csrfile = open(inbound, "w") csrfile.write(csr_text) csrfile.close() - logging.debug(_("Flags path: %s") % ca_folder) + LOG.debug(_("Flags path: %s"), ca_folder) start = os.getcwd() # Change working dir to CA os.chdir(ca_folder) diff --git a/nova/exception.py b/nova/exception.py index 277033e0f324..7c6675e6216a 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -21,10 +21,8 @@ Nova base exception handling, including decorator for re-raising Nova-type exceptions. SHOULD include dedicated exception logging. 
""" -import logging -import sys -import traceback - +from nova import log as logging +LOG = logging.getLogger('nova.exception') class ProcessExecutionError(IOError): @@ -84,7 +82,7 @@ def wrap_exception(f): except Exception, e: if not isinstance(e, Error): #exc_type, exc_value, exc_traceback = sys.exc_info() - logging.exception(_('Uncaught exception')) + LOG.exception(_('Uncaught exception')) #logging.error(traceback.extract_stack(exc_traceback)) raise Error(str(e)) raise diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 79d8b894d778..7c2d7177be17 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -18,12 +18,16 @@ """Based a bit on the carrot.backeds.queue backend... but a lot better.""" -import logging import Queue as queue from carrot.backends import base from eventlet import greenthread +from nova import log as logging + + +LOG = logging.getLogger("nova.fakerabbit") + EXCHANGES = {} QUEUES = {} @@ -41,12 +45,12 @@ class Exchange(object): self._routes = {} def publish(self, message, routing_key=None): - logging.debug(_('(%s) publish (key: %s) %s'), - self.name, routing_key, message) + LOG.debug(_('(%s) publish (key: %s) %s'), + self.name, routing_key, message) routing_key = routing_key.split('.')[0] if routing_key in self._routes: for f in self._routes[routing_key]: - logging.debug(_('Publishing to route %s'), f) + LOG.debug(_('Publishing to route %s'), f) f(message, routing_key=routing_key) def bind(self, callback, routing_key): @@ -76,19 +80,19 @@ class Backend(base.BaseBackend): def queue_declare(self, queue, **kwargs): global QUEUES if queue not in QUEUES: - logging.debug(_('Declaring queue %s'), queue) + LOG.debug(_('Declaring queue %s'), queue) QUEUES[queue] = Queue(queue) def exchange_declare(self, exchange, type, *args, **kwargs): global EXCHANGES if exchange not in EXCHANGES: - logging.debug(_('Declaring exchange %s'), exchange) + LOG.debug(_('Declaring exchange %s'), exchange) EXCHANGES[exchange] = Exchange(exchange, type) def queue_bind(self, queue, exchange, routing_key, **kwargs): global EXCHANGES global QUEUES - logging.debug(_('Binding %s to %s with key %s'), + LOG.debug(_('Binding %s to %s with key %s'), queue, exchange, routing_key) EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) @@ -113,7 +117,7 @@ class Backend(base.BaseBackend): content_type=content_type, content_encoding=content_encoding) message.result = True - logging.debug(_('Getting from %s: %s'), queue, message) + LOG.debug(_('Getting from %s: %s'), queue, message) return message def prepare_message(self, message_data, delivery_mode, diff --git a/nova/flags.py b/nova/flags.py index 4b73349270a3..f5c2d4233e97 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -29,8 +29,6 @@ import sys import gflags -from nova import utils - class FlagValues(gflags.FlagValues): """Extension of gflags.FlagValues that allows undefined and runtime flags. 
@@ -213,10 +211,10 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID') DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key') DEFINE_integer('glance_port', 9292, 'glance port') -DEFINE_string('glance_host', utils.get_my_ip(), 'glance host') +DEFINE_string('glance_host', '127.0.0.1', 'glance host') DEFINE_integer('s3_port', 3333, 's3 port') -DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)') -DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)') +DEFINE_string('s3_host', '127.0.0.1', 's3 host (for infrastructure)') +DEFINE_string('s3_dmz', '127.0.0.1', 's3 dmz ip (for instances)') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on') @@ -236,8 +234,8 @@ DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_string('ec2_prefix', 'http', 'prefix for ec2') -DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server') -DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server') +DEFINE_string('cc_host', '127.0.0.1', 'ip of api server') +DEFINE_string('cc_dmz', '127.0.0.1', 'internal ip of api server') DEFINE_integer('cc_port', 8773, 'cloud controller port') DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2') diff --git a/nova/image/glance.py b/nova/image/glance.py index cc3192e7c85e..a3a2f430815f 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -19,20 +19,17 @@ import httplib import json -import logging import urlparse -import webob.exc - -from nova.compute import api as compute_api -from nova import utils -from nova import flags from nova import exception -import nova.image.service +from nova import flags +from nova import log as logging +from nova.image import service + + +LOG = logging.getLogger('nova.image.glance') FLAGS = flags.FLAGS - - flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1', 'IP address or URL where Glance\'s Teller service resides') flags.DEFINE_string('glance_teller_port', '9191', @@ -78,8 +75,8 @@ class ParallaxClient(object): data = json.loads(res.read())['images'] return data else: - logging.warn(_("Parallax returned HTTP error %d from " - "request for /images"), res.status_int) + LOG.warn(_("Parallax returned HTTP error %d from " + "request for /images"), res.status_int) return [] finally: c.close() @@ -97,8 +94,8 @@ class ParallaxClient(object): data = json.loads(res.read())['images'] return data else: - logging.warn(_("Parallax returned HTTP error %d from " - "request for /images/detail"), res.status_int) + LOG.warn(_("Parallax returned HTTP error %d from " + "request for /images/detail"), res.status_int) return [] finally: c.close() @@ -166,7 +163,7 @@ class ParallaxClient(object): c.close() -class GlanceImageService(nova.image.service.BaseImageService): +class GlanceImageService(service.BaseImageService): """Provides storage and retrieval of disk image objects within Glance.""" def __init__(self): diff --git a/nova/log.py b/nova/log.py new file mode 100644 index 000000000000..88a961e13c78 --- /dev/null +++ b/nova/log.py @@ -0,0 +1,261 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics 
and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Nova logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. + +It also allows setting of formatting information through flags. +""" + + +import cStringIO +import json +import logging +import logging.handlers +import traceback + +from nova import flags +# TODO(todd): fix after version.py merge +# from nova import version + + +FLAGS = flags.FLAGS + +# TODO(todd): fix after version.py merge +# '(%(name)s %(nova_version)s): %(levelname)s ' +flags.DEFINE_string('logging_context_format_string', + '(%(name)s): %(levelname)s ' + '[%(request_id)s %(user)s ' + '%(project)s] %(message)s', + 'format string to use for log messages') + +# TODO(todd): fix after version.py merge +# '(%(name)s %(nova_version)s): %(levelname)s [N/A] ' +flags.DEFINE_string('logging_default_format_string', + '(%(name)s): %(levelname)s [N/A] ' + '%(message)s', + 'format string to use for log messages') + +flags.DEFINE_string('logging_debug_format_suffix', + 'from %(processName)s (pid=%(process)d) %(funcName)s' + ' %(pathname)s:%(lineno)d', + 'data to append to log format when level is DEBUG') + +flags.DEFINE_string('logging_exception_prefix', + '(%(name)s): TRACE: ', + 'prefix each line of exception output with this format') + +flags.DEFINE_list('default_log_levels', + ['amqplib=WARN', + 'sqlalchemy=WARN', + 'audit=INFO'], + 'list of logger=LEVEL pairs') + +flags.DEFINE_bool('use_syslog', False, 'output to syslog') +flags.DEFINE_string('logfile', None, 'output to named file') + + + +# A list of things we want to replicate from logging. +# levels +CRITICAL = logging.CRITICAL +FATAL = logging.FATAL +ERROR = logging.ERROR +WARNING = logging.WARNING +WARN = logging.WARN +INFO = logging.INFO +DEBUG = logging.DEBUG +NOTSET = logging.NOTSET +# methods +getLogger = logging.getLogger +debug = logging.debug +info = logging.info +warning = logging.warning +warn = logging.warn +error = logging.error +exception = logging.exception +critical = logging.critical +log = logging.log +# handlers +StreamHandler = logging.StreamHandler +FileHandler = logging.FileHandler +# logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler. 
+SysLogHandler = logging.handlers.SysLogHandler
+
+
+# our new audit level
+AUDIT = logging.INFO + 1
+logging.addLevelName(AUDIT, 'AUDIT')
+
+
+def _dictify_context(context):
+    if context == None:
+        return None
+    if not isinstance(context, dict) \
+    and getattr(context, 'to_dict', None):
+        context = context.to_dict()
+    return context
+
+
+def basicConfig():
+    logging.basicConfig()
+    for handler in logging.root.handlers:
+        handler.setFormatter(_formatter)
+    if FLAGS.verbose:
+        logging.root.setLevel(logging.DEBUG)
+    if FLAGS.use_syslog:
+        syslog = SysLogHandler(address='/dev/log')
+        syslog.setFormatter(_formatter)
+        logging.root.addHandler(syslog)
+    if FLAGS.logfile:
+        logfile = FileHandler(FLAGS.logfile)
+        logfile.setFormatter(_formatter)
+        logging.root.addHandler(logfile)
+
+
+class NovaLogger(logging.Logger):
+    """
+    NovaLogger manages request context and formatting.
+
+    This becomes the class that is instantiated by logging.getLogger.
+    """
+    def __init__(self, name, level=NOTSET):
+        level_name = self._get_level_from_flags(name, FLAGS)
+        level = globals()[level_name]
+        logging.Logger.__init__(self, name, level)
+
+    def _get_level_from_flags(self, name, FLAGS):
+        # if exactly "nova", or a child logger, honor the verbose flag
+        if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose:
+            return 'DEBUG'
+        for pair in FLAGS.default_log_levels:
+            logger, _sep, level = pair.partition('=')
+            # NOTE(todd): if we set a.b, we want a.b.c to have the same level
+            #             (but not a.bc, so we check the dot)
+            if name == logger:
+                return level
+            if name.startswith(logger) and name[len(logger)] == '.':
+                return level
+        return 'INFO'
+
+    def _log(self, level, msg, args, exc_info=None, extra=None, context=None):
+        """Extract context from any log call"""
+        if not extra:
+            extra = {}
+        if context:
+            extra.update(_dictify_context(context))
+        # TODO(todd): fix after version.py merge
+        #extra.update({"nova_version": version.string_with_vcs()})
+        logging.Logger._log(self, level, msg, args, exc_info, extra)
+
+    def addHandler(self, handler):
+        """Each handler gets our custom formatter"""
+        handler.setFormatter(_formatter)
+        logging.Logger.addHandler(self, handler)
+
+    def audit(self, msg, *args, **kwargs):
+        """Shortcut for our AUDIT level"""
+        if self.isEnabledFor(AUDIT):
+            self._log(AUDIT, msg, args, **kwargs)
+
+    def exception(self, msg, *args, **kwargs):
+        """Logging.exception doesn't handle kwargs, so breaks context"""
+        if not kwargs.get('exc_info'):
+            kwargs['exc_info'] = 1
+        self.error(msg, *args, **kwargs)
+        # NOTE(todd): does this really go here, or in _log ?
+        extra = kwargs.get('extra')
+        if not extra:
+            return
+        env = extra.get('environment')
+        if env:
+            env = env.copy()
+            for k in env.keys():
+                if not isinstance(env[k], str):
+                    env.pop(k)
+            message = "Environment: %s" % json.dumps(env)
+            kwargs.pop('exc_info')
+            self.error(message, **kwargs)
+
+logging.setLoggerClass(NovaLogger)
+
+
+class NovaRootLogger(NovaLogger):
+    pass
+
+if not isinstance(logging.root, NovaRootLogger):
+    logging.root = NovaRootLogger("nova.root", WARNING)
+    NovaLogger.root = logging.root
+    NovaLogger.manager.root = logging.root
+
+
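A note on _get_level_from_flags above: a --default_log_levels pair such as a.b=DEBUG is meant to govern a.b and dotted children like a.b.c, but not lookalike prefixes such as a.bc. A standalone restatement of just that matching rule, runnable outside nova (it assumes the same logger=LEVEL pair syntax as the flag):

    def level_for(name, pairs, verbose=False):
        if (name == 'nova' or name.startswith('nova.')) and verbose:
            return 'DEBUG'
        for pair in pairs:
            logger, _sep, level = pair.partition('=')
            if name == logger:
                return level
            # the character right after the prefix must be a dot
            if name.startswith(logger) and name[len(logger)] == '.':
                return level
        return 'INFO'

    assert level_for('amqplib.client_0_8', ['amqplib=WARN']) == 'WARN'
    assert level_for('sqlalchemy', ['sqlalchemy=WARN']) == 'WARN'
    assert level_for('sqlalchemyx', ['sqlalchemy=WARN']) == 'INFO'
    assert level_for('nova.rpc', [], verbose=True) == 'DEBUG'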
+class NovaFormatter(logging.Formatter):
+    """
+    A nova.context.RequestContext aware formatter configured through flags.
+
+    The flags used to set format strings are: logging_context_format_string
+    and logging_default_format_string.  You can also specify
+    logging_debug_format_suffix to append extra formatting if the log level is
+    debug.
+
+    For information about what variables are available for the formatter see:
+    http://docs.python.org/library/logging.html#formatter
+    """
+
+    def format(self, record):
+        """Uses contextstring if request_id is set, otherwise default"""
+        if record.__dict__.get('request_id', None):
+            self._fmt = FLAGS.logging_context_format_string
+        else:
+            self._fmt = FLAGS.logging_default_format_string
+        if record.levelno == logging.DEBUG \
+        and FLAGS.logging_debug_format_suffix:
+            self._fmt += " " + FLAGS.logging_debug_format_suffix
+        # Cache this on the record, Logger will respect our formatted copy
+        if record.exc_info:
+            record.exc_text = self.formatException(record.exc_info, record)
+        return logging.Formatter.format(self, record)
+
+    def formatException(self, exc_info, record=None):
+        """Format exception output with FLAGS.logging_exception_prefix"""
+        if not record:
+            return logging.Formatter.formatException(self, exc_info)
+        stringbuffer = cStringIO.StringIO()
+        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
+                                  None, stringbuffer)
+        lines = stringbuffer.getvalue().split("\n")
+        stringbuffer.close()
+        formatted_lines = []
+        for line in lines:
+            pl = FLAGS.logging_exception_prefix % record.__dict__
+            fl = "%s%s" % (pl, line)
+            formatted_lines.append(fl)
+        return "\n".join(formatted_lines)
+
+_formatter = NovaFormatter()
+
+
+def audit(msg, *args, **kwargs):
+    """Shortcut for logging to root log with severity 'AUDIT'."""
+    if len(logging.root.handlers) == 0:
+        basicConfig()
+    logging.root.log(AUDIT, msg, *args, **kwargs)
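With log.py in place, callers follow the same three-step pattern seen throughout this series: import nova.log as logging, build a module-level logger, and pass the request context so NovaFormatter can select the context-aware format string. A hedged usage sketch (the module name and messages are invented for illustration, and _() assumes gettext has been installed globally, as nova does elsewhere):

    from nova import log as logging

    LOG = logging.getLogger('nova.example')

    def frob(widget_id, context=None):
        # Without a context the record carries no request_id, so the
        # formatter falls back to logging_default_format_string.
        LOG.debug(_('frobbing widget %s'), widget_id)
        # With a RequestContext, _dictify_context() folds request_id,
        # user and project into the record and the context format string
        # is used, yielding something like:
        #   (nova.example): AUDIT [<request_id> <user> <project>] ...
        LOG.audit(_('frobbed widget %s'), widget_id, context=context)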
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 931a89554683..c525d5dc8831 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -17,16 +17,17 @@
 Implements vlans, bridges, and iptables rules using linux utilities.
 """
 
-import logging
 import os
 
-# TODO(ja): does the definition of network_path belong here?
-
 from nova import db
 from nova import flags
+from nova import log as logging
 from nova import utils
 
 
+LOG = logging.getLogger("nova.linux_net")
+
+
 def _bin_file(script):
     """Return the absolute path to scipt in the bin directory"""
     return os.path.abspath(os.path.join(__file__, "../../../bin", script))
@@ -172,7 +173,7 @@ def ensure_vlan(vlan_num):
     """Create a vlan unless it already exists"""
     interface = "vlan%s" % vlan_num
     if not _device_exists(interface):
-        logging.debug(_("Starting VLAN inteface %s"), interface)
+        LOG.debug(_("Starting VLAN interface %s"), interface)
         _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
         _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
         _execute("sudo ifconfig %s up" % interface)
@@ -182,7 +183,7 @@ def ensure_vlan(vlan_num):
 def ensure_bridge(bridge, interface, net_attrs=None):
     """Create a bridge unless it already exists"""
     if not _device_exists(bridge):
-        logging.debug(_("Starting Bridge interface for %s"), interface)
+        LOG.debug(_("Starting Bridge interface for %s"), interface)
         _execute("sudo brctl addbr %s" % bridge)
         _execute("sudo brctl setfd %s 0" % bridge)
         # _execute("sudo brctl setageing %s 10" % bridge)
@@ -248,9 +249,9 @@ def update_dhcp(context, network_id):
                 _execute('sudo kill -HUP %d' % pid)
                 return
             except Exception as exc:  # pylint: disable-msg=W0703
-                logging.debug(_("Hupping dnsmasq threw %s"), exc)
+                LOG.debug(_("Hupping dnsmasq threw %s"), exc)
         else:
-            logging.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
+            LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
 
     # FLAGFILE and DNSMASQ_INTERFACE in env
     env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
@@ -270,7 +271,7 @@ def _host_dhcp(fixed_ip_ref):
 def _execute(cmd, *args, **kwargs):
     """Wrapper around utils._execute for fake_network"""
     if FLAGS.fake_network:
-        logging.debug("FAKE NET: %s", cmd)
+        LOG.debug("FAKE NET: %s", cmd)
         return "fake", 0
     else:
         return utils.execute(cmd, *args, **kwargs)
@@ -328,7 +329,7 @@ def _stop_dnsmasq(network):
         try:
             _execute('sudo kill -TERM %d' % pid)
         except Exception as exc:  # pylint: disable-msg=W0703
-            logging.debug(_("Killing dnsmasq threw %s"), exc)
+            LOG.debug(_("Killing dnsmasq threw %s"), exc)
 
 
 def _dhcp_file(bridge, kind):
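update_dhcp above prefers to HUP a live dnsmasq, so it rereads its hosts file, and only relaunches when the recorded pid is stale. A standalone sketch of that check-then-signal flow (this version signals directly with os.kill where the patch shells out to sudo kill; is_same_process and relaunch are caller-supplied callables, not nova functions):

    import os
    import signal

    def hup_or_relaunch(pid, is_same_process, relaunch):
        """HUP a live dnsmasq so it rereads its config; if the pid no
        longer belongs to dnsmasq, treat it as stale and relaunch."""
        if pid and is_same_process(pid):
            try:
                os.kill(pid, signal.SIGHUP)  # same effect as 'kill -HUP'
                return
            except OSError as exc:
                print('Hupping dnsmasq threw %s' % exc)
        elif pid:
            print('Pid %d is stale, relaunching dnsmasq' % pid)
        relaunch()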
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 16aa8f895951..2b7325fd0a79 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -45,7 +45,6 @@
 topologies.  All of the network commands are issued to a subclass of
 """
 
 import datetime
-import logging
 import math
 import socket
 
@@ -55,11 +54,13 @@
 from nova import context
 from nova import db
 from nova import exception
 from nova import flags
+from nova import log as logging
 from nova import manager
 from nova import utils
 from nova import rpc
 
 
+LOG = logging.getLogger("nova.networkmanager")
 FLAGS = flags.FLAGS
 flags.DEFINE_string('flat_network_bridge', 'br100',
                     'Bridge for simple network instances')
@@ -131,7 +132,7 @@ class NetworkManager(manager.Manager):
 
     def set_network_host(self, context, network_id):
         """Safely sets the host of the network."""
-        logging.debug(_("setting network host"))
+        LOG.debug(_("setting network host"))
         host = self.db.network_set_host(context,
                                         network_id,
                                         self.host)
@@ -186,7 +187,7 @@ class NetworkManager(manager.Manager):
 
     def lease_fixed_ip(self, context, mac, address):
         """Called by dhcp-bridge when ip is leased."""
-        logging.debug("Leasing IP %s", address)
+        LOG.debug(_("Leasing IP %s"), address)
         fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
         instance_ref = fixed_ip_ref['instance']
         if not instance_ref:
@@ -201,12 +202,11 @@ class NetworkManager(manager.Manager):
                                  {'leased': True,
                                   'updated_at': now})
         if not fixed_ip_ref['allocated']:
-            logging.warn(_("IP %s leased that was already deallocated"),
-                         address)
+            LOG.warn(_("IP %s leased that was already deallocated"), address)
 
     def release_fixed_ip(self, context, mac, address):
         """Called by dhcp-bridge when ip is released."""
-        logging.debug("Releasing IP %s", address)
+        LOG.debug(_("Releasing IP %s"), address)
         fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
         instance_ref = fixed_ip_ref['instance']
         if not instance_ref:
@@ -216,7 +216,7 @@ class NetworkManager(manager.Manager):
             raise exception.Error(_("IP %s released from bad mac %s vs %s") %
                                   (address, instance_ref['mac_address'], mac))
         if not fixed_ip_ref['leased']:
-            logging.warn(_("IP %s released that was not leased"), address)
+            LOG.warn(_("IP %s released that was not leased"), address)
         self.db.fixed_ip_update(context,
                                 fixed_ip_ref['address'],
                                 {'leased': False})
@@ -437,7 +437,7 @@ class VlanManager(NetworkManager):
                                                        self.host,
                                                        time)
         if num:
-            logging.debug(_("Dissassociated %s stale fixed ip(s)"), num)
+            LOG.debug(_("Disassociated %s stale fixed ip(s)"), num)
 
     def init_host(self):
         """Do any initialization that needs to be run if this is a
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index 52257f69fce1..8e9235da4e42 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -39,7 +39,6 @@ S3 client with this module::
 
 import datetime
 import json
-import logging
 import multiprocessing
 import os
 import urllib
@@ -54,12 +53,14 @@ from twisted.web import static
 
 from nova import context
 from nova import exception
 from nova import flags
+from nova import log as logging
 from nova import utils
 from nova.auth import manager
 from nova.objectstore import bucket
 from nova.objectstore import image
 
 
+LOG = logging.getLogger('nova.objectstore.handler')
 FLAGS = flags.FLAGS
 flags.DEFINE_string('s3_listen_host', '', 'Host to listen on.')
 
@@ -132,9 +133,11 @@ def get_context(request):
             request.uri,
             headers=request.getAllHeaders(),
             check_type='s3')
-        return context.RequestContext(user, project)
+        rv = context.RequestContext(user, project)
+        LOG.audit("Authenticated request", context=rv)
+        return rv
     except exception.Error as ex:
-        logging.debug(_("Authentication Failure: %s"), ex)
+        LOG.debug(_("Authentication Failure: %s"), 
ex) raise exception.NotAuthorized() @@ -176,7 +179,7 @@ class S3(ErrorHandlingResource): def render_GET(self, request): # pylint: disable-msg=R0201 """Renders the GET request for a list of buckets as XML""" - logging.debug('List of buckets requested') + LOG.debug(_('List of buckets requested'), context=request.context) buckets = [b for b in bucket.Bucket.all() \ if b.is_authorized(request.context)] @@ -203,7 +206,7 @@ class BucketResource(ErrorHandlingResource): def render_GET(self, request): "Returns the keys for the bucket resource""" - logging.debug("List keys for bucket %s", self.name) + LOG.debug(_("List keys for bucket %s"), self.name) try: bucket_object = bucket.Bucket(self.name) @@ -211,6 +214,8 @@ class BucketResource(ErrorHandlingResource): return error.NoResource(message="No such bucket").render(request) if not bucket_object.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to access bucket %s"), + self.name, context=request.context) raise exception.NotAuthorized() prefix = get_argument(request, "prefix", u"") @@ -227,8 +232,8 @@ class BucketResource(ErrorHandlingResource): def render_PUT(self, request): "Creates the bucket resource""" - logging.debug(_("Creating bucket %s"), self.name) - logging.debug("calling bucket.Bucket.create(%r, %r)", + LOG.debug(_("Creating bucket %s"), self.name) + LOG.debug("calling bucket.Bucket.create(%r, %r)", self.name, request.context) bucket.Bucket.create(self.name, request.context) @@ -237,10 +242,12 @@ class BucketResource(ErrorHandlingResource): def render_DELETE(self, request): """Deletes the bucket resource""" - logging.debug(_("Deleting bucket %s"), self.name) + LOG.debug(_("Deleting bucket %s"), self.name) bucket_object = bucket.Bucket(self.name) if not bucket_object.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to delete bucket %s"), + self.name, context=request.context) raise exception.NotAuthorized() bucket_object.delete() @@ -261,11 +268,12 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - logging.debug(_("Getting object: %s / %s"), - self.bucket.name, - self.name) + LOG.debug(_("Getting object: %s / %s"), self.bucket.name, self.name) if not self.bucket.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to get object %s from bucket " + "%s"), self.name, self.bucket.name, + context=request.context) raise exception.NotAuthorized() obj = self.bucket[urllib.unquote(self.name)] @@ -281,11 +289,12 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - logging.debug(_("Putting object: %s / %s"), - self.bucket.name, - self.name) + LOG.debug(_("Putting object: %s / %s"), self.bucket.name, self.name) if not self.bucket.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to upload object %s to bucket " + "%s"), + self.name, self.bucket.name, context=request.context) raise exception.NotAuthorized() key = urllib.unquote(self.name) @@ -302,11 +311,13 @@ class ObjectResource(ErrorHandlingResource): authorized to delete the object. 
""" - logging.debug(_("Deleting object: %s / %s"), - self.bucket.name, - self.name) + LOG.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name, + context=request.context) if not self.bucket.is_authorized(request.context): + LOG.audit("Unauthorized attempt to delete object %s from " + "bucket %s", self.name, self.bucket.name, + context=request.context) raise exception.NotAuthorized() del self.bucket[urllib.unquote(self.name)] @@ -379,13 +390,21 @@ class ImagesResource(resource.Resource): image_path = os.path.join(FLAGS.images_path, image_id) if not image_path.startswith(FLAGS.images_path) or \ os.path.exists(image_path): + LOG.audit(_("Not authorized to upload image: invalid directory " + "%s"), + image_path, context=request.context) raise exception.NotAuthorized() bucket_object = bucket.Bucket(image_location.split("/")[0]) if not bucket_object.is_authorized(request.context): + LOG.audit(_("Not authorized to upload image: unauthorized " + "bucket %s"), bucket_object.name, + context=request.context) raise exception.NotAuthorized() + LOG.audit(_("Starting image upload: %s"), image_id, + context=request.context) p = multiprocessing.Process(target=image.Image.register_aws_image, args=(image_id, image_location, request.context)) p.start() @@ -398,17 +417,21 @@ class ImagesResource(resource.Resource): image_id = get_argument(request, 'image_id', u'') image_object = image.Image(image_id) if not image_object.is_authorized(request.context): - logging.debug(_("not authorized for render_POST in images")) + LOG.audit(_("Not authorized to update attributes of image %s"), + image_id, context=request.context) raise exception.NotAuthorized() operation = get_argument(request, 'operation', u'') if operation: # operation implies publicity toggle - logging.debug(_("handling publicity toggle")) - image_object.set_public(operation == 'add') + newstatus = (operation == 'add') + LOG.audit(_("Toggling publicity flag of image %s %r"), image_id, + newstatus, context=request.context) + image_object.set_public(newstatus) else: # other attributes imply update - logging.debug(_("update user fields")) + LOG.audit(_("Updating user fields on image %s"), image_id, + context=request.context) clean_args = {} for arg in request.args.keys(): clean_args[arg] = request.args[arg][0] @@ -421,9 +444,12 @@ class ImagesResource(resource.Resource): image_object = image.Image(image_id) if not image_object.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to delete image %s"), + image_id, context=request.context) raise exception.NotAuthorized() image_object.delete() + LOG.audit(_("Deleted image: %s"), image_id, context=request.context) request.setResponseCode(204) return '' diff --git a/nova/rpc.py b/nova/rpc.py index 844088348633..02052485dc85 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -22,7 +22,6 @@ No fan-out support yet. """ import json -import logging import sys import time import traceback @@ -36,13 +35,12 @@ from nova import context from nova import exception from nova import fakerabbit from nova import flags +from nova import log as logging from nova import utils FLAGS = flags.FLAGS - -LOG = logging.getLogger('amqplib') -LOG.setLevel(logging.DEBUG) +LOG = logging.getLogger('nova.rpc') class Connection(carrot_connection.BrokerConnection): @@ -91,14 +89,14 @@ class Consumer(messaging.Consumer): self.failed_connection = False break except: # Catching all because carrot sucks - logging.exception(_("AMQP server on %s:%d is unreachable." 
- " Trying again in %d seconds.") % ( - FLAGS.rabbit_host, - FLAGS.rabbit_port, - FLAGS.rabbit_retry_interval)) + LOG.exception(_("AMQP server on %s:%d is unreachable." + " Trying again in %d seconds.") % ( + FLAGS.rabbit_host, + FLAGS.rabbit_port, + FLAGS.rabbit_retry_interval)) self.failed_connection = True if self.failed_connection: - logging.exception(_("Unable to connect to AMQP server" + LOG.exception(_("Unable to connect to AMQP server" " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries) sys.exit(1) @@ -116,14 +114,14 @@ class Consumer(messaging.Consumer): self.declare() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) if self.failed_connection: - logging.error(_("Reconnected to queue")) + LOG.error(_("Reconnected to queue")) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't # exceptions to be logged 10 times a second if some # persistent failure occurs. except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: - logging.exception(_("Failed to fetch message from queue")) + LOG.exception(_("Failed to fetch message from queue")) self.failed_connection = True def attach_to_eventlet(self): @@ -242,8 +240,8 @@ def msg_reply(msg_id, reply=None, failure=None): if failure: message = str(failure[1]) tb = traceback.format_exception(*failure) - logging.error(_("Returning exception %s to caller"), message) - logging.error(tb) + LOG.error(_("Returning exception %s to caller"), message) + LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) conn = Connection.instance(True) publisher = DirectPublisher(connection=conn, msg_id=msg_id) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 44e21f2fdda7..a4d6dd574e8a 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -21,15 +21,16 @@ Scheduler Service """ -import logging import functools from nova import db from nova import flags +from nova import log as logging from nova import manager from nova import rpc from nova import utils +LOG = logging.getLogger('nova.scheduler.manager') FLAGS = flags.FLAGS flags.DEFINE_string('scheduler_driver', 'nova.scheduler.chance.ChanceScheduler', @@ -65,4 +66,4 @@ class SchedulerManager(manager.Manager): db.queue_get_for(context, topic, host), {"method": method, "args": kwargs}) - logging.debug(_("Casting to %s %s for %s"), topic, host, method) + LOG.debug(_("Casting to %s %s for %s"), topic, host, method) diff --git a/nova/service.py b/nova/service.py index f1f90742f845..fef7f0593f6b 100644 --- a/nova/service.py +++ b/nova/service.py @@ -21,7 +21,6 @@ Generic Node baseclass for all workers that run on hosts """ import inspect -import logging import os import sys @@ -32,6 +31,7 @@ from eventlet import greenpool from nova import context from nova import db from nova import exception +from nova import log as logging from nova import flags from nova import rpc from nova import utils @@ -151,7 +151,7 @@ class Service(object): report_interval = FLAGS.report_interval if not periodic_interval: periodic_interval = FLAGS.periodic_interval - logging.warn(_("Starting %s node"), topic) + logging.audit(_("Starting %s node"), topic) service_obj = cls(host, binary, topic, manager, report_interval, periodic_interval) @@ -206,20 +206,17 @@ class Service(object): def serve(*services): - argv = FLAGS(sys.argv) + FLAGS(sys.argv) + logging.basicConfig() + + # TODO(todd): make this pigggyback the flag-based level override method + logging.getLogger('amqplib').setLevel(logging.WARN) if not 
services: services = [Service.create()] name = '_'.join(x.binary for x in services) - logging.debug("Serving %s" % name) - - logging.getLogger('amqplib').setLevel(logging.WARN) - - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - else: - logging.getLogger().setLevel(logging.WARNING) + logging.debug(_("Serving %s"), name) logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 0f274bd153a5..f5be9c94fb67 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -22,7 +22,6 @@ and as a WSGI layer import json import datetime -import logging import unittest import stubout diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index ceac17adb6c6..da86e6e117bd 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -23,7 +23,6 @@ Unittets for S3 objectstore clone. import boto import glob import hashlib -import logging import os import shutil import tempfile @@ -63,7 +62,6 @@ class ObjectStoreTestCase(test.TestCase): self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'), images_path=os.path.join(OSS_TEMPDIR, 'images'), ca_path=os.path.join(os.path.dirname(__file__), 'CA')) - logging.getLogger().setLevel(logging.DEBUG) self.auth_manager = manager.AuthManager() self.auth_manager.create_user('user1') diff --git a/nova/tests/test_access.py b/nova/tests/test_access.py index 58fdea3b5c3e..0929903cf054 100644 --- a/nova/tests/test_access.py +++ b/nova/tests/test_access.py @@ -17,7 +17,6 @@ # under the License. import unittest -import logging import webob from nova import context diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index 15d40bc5374a..35ffffb67f9e 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -16,17 +16,18 @@ # License for the specific language governing permissions and limitations # under the License. 
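Stepping back from the individual hunks: after this patch a binary's startup, as rewritten in nova/service.py above, boils down to roughly the sketch below. This is illustrative only, not part of the patch; the 'compute' topic is a made-up example, and _ stands in for the gettext function nova installs.

    import sys

    from gettext import gettext as _

    from nova import flags
    from nova import log as logging

    FLAGS = flags.FLAGS

    if __name__ == '__main__':
        FLAGS(sys.argv)        # parse flags first so log-level flags apply
        logging.basicConfig()  # install nova's handlers on the root logger
        # Quiet a chatty third-party logger; per the TODO above this should
        # eventually ride the flag-based level override instead.
        logging.getLogger('amqplib').setLevel(logging.WARN)
        logging.audit(_("Starting %s node"), 'compute')  # module-level audit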
-import logging from M2Crypto import X509 import unittest from nova import crypto from nova import flags +from nova import log as logging from nova import test from nova.auth import manager from nova.api.ec2 import cloud FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.auth_unittest') class user_generator(object): @@ -211,12 +212,12 @@ class AuthManagerTestCase(object): # NOTE(vish): Setup runs genroot.sh if it hasn't been run cloud.CloudController().setup() _key, cert_str = crypto.generate_x509_cert(user.id, project.id) - logging.debug(cert_str) + LOG.debug(cert_str) full_chain = crypto.fetch_ca(project_id=project.id, chain=True) int_cert = crypto.fetch_ca(project_id=project.id, chain=False) cloud_cert = crypto.fetch_ca() - logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) + LOG.debug("CA chain:\n\n =====\n%s\n\n=====", full_chain) signed_cert = X509.load_cert_string(cert_str) chain_cert = X509.load_cert_string(full_chain) int_cert = X509.load_cert_string(int_cert) @@ -331,7 +332,7 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): test.TestCase.__init__(self, *args, **kwargs) import nova.auth.fakeldap as fakeldap if FLAGS.flush_db: - logging.info("Flushing datastore") + LOG.info("Flushing datastore") r = fakeldap.Store.instance() r.flushdb() diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 70d2c44daaec..e6ad2432b3ad 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -18,7 +18,6 @@ from base64 import b64decode import json -import logging from M2Crypto import BIO from M2Crypto import RSA import os @@ -31,6 +30,7 @@ from nova import context from nova import crypto from nova import db from nova import flags +from nova import log as logging from nova import rpc from nova import service from nova import test @@ -41,6 +41,7 @@ from nova.objectstore import image FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.cloud') # Temp dirs for working with image attributes through the cloud controller # (stole this from objectstore_unittest.py) @@ -56,7 +57,6 @@ class CloudTestCase(test.TestCase): images_path=IMAGES_PATH) self.conn = rpc.Connection.instance() - logging.getLogger().setLevel(logging.DEBUG) # set up our cloud self.cloud = cloud.CloudController() @@ -178,7 +178,7 @@ class CloudTestCase(test.TestCase): def test_run_instances(self): if FLAGS.connection_type == 'fake': - logging.debug("Can't test instances without a real virtual env.") + LOG.debug(_("Can't test instances without a real virtual env.")) return image_id = FLAGS.default_image instance_type = FLAGS.default_instance_type @@ -190,25 +190,25 @@ class CloudTestCase(test.TestCase): # TODO: check for proper response instance_id = rv['reservationSet'][0].keys()[0] instance = rv['reservationSet'][0][instance_id][0] - logging.debug("Need to watch instance %s until it's running..." 
% - instance['instance_id']) + LOG.debug(_("Need to watch instance %s until it's running..."), + instance['instance_id']) while True: greenthread.sleep(1) info = self.cloud._get_instance(instance['instance_id']) - logging.debug(info['state']) + LOG.debug(info['state']) if info['state'] == power_state.RUNNING: break self.assert_(rv) - if connection_type != 'fake': + if FLAGS.connection_type != 'fake': time.sleep(45) # Should use boto for polling here for reservations in rv['reservationSet']: # for res_id in reservations.keys(): - # logging.debug(reservations[res_id]) + # LOG.debug(reservations[res_id]) # for instance in reservations[res_id]: for instance in reservations[reservations.keys()[0]]: instance_id = instance['instance_id'] - logging.debug("Terminating instance %s" % instance_id) + LOG.debug(_("Terminating instance %s"), instance_id) rv = yield self.compute.terminate_instance(instance_id) def test_instance_update_state(self): diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 1fb9143f1d77..889ffe99f9f5 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -20,12 +20,12 @@ Tests For Compute """ import datetime -import logging from nova import context from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import test from nova import utils from nova.auth import manager @@ -33,12 +33,12 @@ from nova.compute import api as compute_api FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.compute') class ComputeTestCase(test.TestCase): """Test case for compute""" def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) super(ComputeTestCase, self).setUp() self.flags(connection_type='fake', stub_network=True, @@ -101,13 +101,13 @@ class ComputeTestCase(test.TestCase): self.compute.run_instance(self.context, instance_id) instances = db.instance_get_all(context.get_admin_context()) - logging.info(_("Running instances: %s"), instances) + LOG.info(_("Running instances: %s"), instances) self.assertEqual(len(instances), 1) self.compute.terminate_instance(self.context, instance_id) instances = db.instance_get_all(context.get_admin_context()) - logging.info(_("After terminating instances: %s"), instances) + LOG.info(_("After terminating instances: %s"), instances) self.assertEqual(len(instances), 0) def test_run_terminate_timestamps(self): diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py new file mode 100644 index 000000000000..d8dd387082e8 --- /dev/null +++ b/nova/tests/test_log.py @@ -0,0 +1,107 @@ +import cStringIO + +from nova import context +from nova import log +from nova import test + + +def _fake_context(): + return context.RequestContext(1, 1) + + +class RootLoggerTestCase(test.TrialTestCase): + def setUp(self): + super(RootLoggerTestCase, self).setUp() + self.log = log.logging.root + + def tearDown(self): + super(RootLoggerTestCase, self).tearDown() + log.NovaLogger.manager.loggerDict = {} + + def test_is_nova_instance(self): + self.assert_(isinstance(self.log, log.NovaLogger)) + + def test_name_is_nova_root(self): + self.assertEqual("nova.root", self.log.name) + + def test_handlers_have_nova_formatter(self): + formatters = [] + for h in self.log.handlers: + f = h.formatter + if isinstance(f, log.NovaFormatter): + formatters.append(f) + self.assert_(formatters) + self.assertEqual(len(formatters), len(self.log.handlers)) + + def test_handles_context_kwarg(self): + self.log.info("foo", context=_fake_context()) + self.assert_(True) # didn't raise exception + + def 
test_module_level_methods_handle_context_arg(self): + log.info("foo", context=_fake_context()) + self.assert_(True) # didn't raise exception + + def test_module_level_audit_handles_context_arg(self): + log.audit("foo", context=_fake_context()) + self.assert_(True) # didn't raise exception + +class NovaFormatterTestCase(test.TrialTestCase): + def setUp(self): + super(NovaFormatterTestCase, self).setUp() + self.flags(logging_context_format_string="HAS CONTEXT "\ + "[%(request_id)s]: %(message)s", + logging_default_format_string="NOCTXT: %(message)s", + logging_debug_format_suffix="--DBG") + self.log = log.logging.root + self.stream = cStringIO.StringIO() + handler = log.StreamHandler(self.stream) + self.log.addHandler(handler) + self.log.setLevel(log.DEBUG) + + def tearDown(self): + super(NovaFormatterTestCase, self).tearDown() + log.NovaLogger.manager.loggerDict = {} + + def test_uncontextualized_log(self): + self.log.info("foo") + self.assertEqual("NOCTXT: foo\n", self.stream.getvalue()) + + def test_contextualized_log(self): + ctxt = _fake_context() + self.log.info("bar", context=ctxt) + expected = "HAS CONTEXT [%s]: bar\n" % ctxt.request_id + self.assertEqual(expected, self.stream.getvalue()) + + def test_debugging_log(self): + self.log.debug("baz") + self.assertEqual("NOCTXT: baz --DBG\n", self.stream.getvalue()) + +class NovaLoggerTestCase(test.TrialTestCase): + def setUp(self): + super(NovaLoggerTestCase, self).setUp() + self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False) + self.log = log.getLogger('nova-test') + + def tearDown(self): + super(NovaLoggerTestCase, self).tearDown() + log.NovaLogger.manager.loggerDict = {} + + def test_has_level_from_flags(self): + self.assertEqual(log.AUDIT, self.log.level) + + def test_child_log_has_level_of_parent_flag(self): + l = log.getLogger('nova-test.foo') + self.assertEqual(log.AUDIT, l.level) + +class VerboseLoggerTestCase(test.TrialTestCase): + def setUp(self): + super(VerboseLoggerTestCase, self).setUp() + self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True) + self.log = log.getLogger('nova.test') + + def tearDown(self): + super(VerboseLoggerTestCase, self).tearDown() + log.NovaLogger.manager.loggerDict = {} + + def test_will_be_verbose_if_named_nova_and_verbose_flag_set(self): + self.assertEqual(log.DEBUG, self.log.level) diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 96473ac7c4bc..349e20f84150 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -20,18 +20,18 @@ Unit Tests for network code """ import IPy import os -import logging from nova import context from nova import db from nova import exception from nova import flags -from nova import service +from nova import log as logging from nova import test from nova import utils from nova.auth import manager FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.network') class NetworkTestCase(test.TestCase): @@ -45,7 +45,6 @@ class NetworkTestCase(test.TestCase): fake_network=True, network_size=16, num_networks=5) - logging.getLogger().setLevel(logging.DEBUG) self.manager = manager.AuthManager() self.user = self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] @@ -328,7 +327,7 @@ def lease_ip(private_ip): 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} (out, err) = utils.execute(cmd, addl_env=env) - logging.debug("ISSUE_IP: %s, %s ", out, err) + LOG.debug("ISSUE_IP: %s, %s ", out, err) def release_ip(private_ip): @@ -344,4 +343,4 @@ def release_ip(private_ip): 'TESTING': '1', 
           'FLAGFILE': FLAGS.dhcpbridge_flagfile}
     (out, err) = utils.execute(cmd, addl_env=env)
-    logging.debug("RELEASE_IP: %s, %s ", out, err)
+    LOG.debug("RELEASE_IP: %s, %s ", out, err)
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 8cf2a5e54652..ea7a006e2707 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -16,11 +16,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import logging
-
 from nova import context
 from nova import db
-from nova import exception
 from nova import flags
 from nova import quota
 from nova import test
@@ -34,7 +31,6 @@ FLAGS = flags.FLAGS
 
 class QuotaTestCase(test.TestCase):
     def setUp(self):
-        logging.getLogger().setLevel(logging.DEBUG)
         super(QuotaTestCase, self).setUp()
         self.flags(connection_type='fake',
                    quota_instances=2,
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index 6ea2edcab348..0e72b3ebab2a 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -18,15 +18,16 @@
 """
 Unit Tests for remote procedure calls using queue
 """
-import logging
 
 from nova import context
 from nova import flags
+from nova import log as logging
 from nova import rpc
 from nova import test
 
 
 FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.rpc')
 
 
 class RpcTestCase(test.TestCase):
@@ -115,13 +116,13 @@ class TestReceiver(object):
     @staticmethod
     def echo(context, value):
         """Simply returns whatever value is sent in"""
-        logging.debug("Received %s", value)
+        LOG.debug(_("Received %s"), value)
         return value
 
     @staticmethod
     def context(context, value):
         """Returns dictionary version of context"""
-        logging.debug("Received %s", context)
+        LOG.debug(_("Received %s"), context)
         return context.to_dict()
 
     @staticmethod
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index b13455fb07e6..b40ca004b6c1 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -19,23 +19,23 @@
 Tests for Volume Code.
 """
 
-import logging
 
 from nova import context
 from nova import exception
 from nova import db
 from nova import flags
+from nova import log as logging
 from nova import test
 from nova import utils
 
 FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.volume')
 
 
 class VolumeTestCase(test.TestCase):
     """Test Case for volumes."""
     def setUp(self):
-        logging.getLogger().setLevel(logging.DEBUG)
         super(VolumeTestCase, self).setUp()
         self.compute = utils.import_object(FLAGS.compute_manager)
         self.flags(connection_type='fake')
@@ -159,7 +159,7 @@ class VolumeTestCase(test.TestCase):
                                                           volume_id)
             self.assert_(iscsi_target not in targets)
             targets.append(iscsi_target)
-            logging.debug("Target %s allocated", iscsi_target)
+            LOG.debug(_("Target %s allocated"), iscsi_target)
         total_slots = FLAGS.iscsi_num_targets
         for _index in xrange(total_slots):
             volume_id = self._create_volume()
diff --git a/nova/twistd.py b/nova/twistd.py
index 29be9c4e1457..5562719997fa 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -22,7 +22,6 @@ manage pid files and support syslogging.
""" import gflags -import logging import os import signal import sys @@ -34,6 +33,7 @@ from twisted.python import runtime from twisted.python import usage from nova import flags +from nova import log as logging if runtime.platformType == "win32": @@ -234,22 +234,12 @@ def serve(filename): OptionsClass = WrapTwistedOptions(TwistdServerOptions) options = OptionsClass() argv = options.parseOptions() - logging.getLogger('amqplib').setLevel(logging.WARN) FLAGS.python = filename FLAGS.no_save = True if not FLAGS.pidfile: FLAGS.pidfile = '%s.pid' % name elif FLAGS.pidfile.endswith('twistd.pid'): FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name) - # NOTE(vish): if we're running nodaemon, redirect the log to stdout - if FLAGS.nodaemon and not FLAGS.logfile: - FLAGS.logfile = "-" - if not FLAGS.logfile: - FLAGS.logfile = '%s.log' % name - elif FLAGS.logfile.endswith('twistd.log'): - FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name) - if FLAGS.logdir: - FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile) if not FLAGS.prefix: FLAGS.prefix = name elif FLAGS.prefix.endswith('twisted'): @@ -270,19 +260,10 @@ def serve(filename): print 'usage: %s [options] [start|stop|restart]' % argv[0] sys.exit(1) - formatter = logging.Formatter( - '(%(name)s): %(levelname)s %(message)s') - handler = logging.StreamHandler(log.StdioOnnaStick()) - handler.setFormatter(formatter) - logging.getLogger().addHandler(handler) - - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - else: - logging.getLogger().setLevel(logging.WARNING) - + logging.basicConfig() logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) + logging.audit(_("Starting %s"), name) twistd.runApp(options) diff --git a/nova/utils.py b/nova/utils.py index 15112faa21bd..cc632b835d2f 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -22,7 +22,6 @@ System-level utilities and helper functions. 
import datetime import inspect -import logging import os import random import subprocess @@ -37,8 +36,10 @@ from eventlet import greenthread from nova import exception from nova.exception import ProcessExecutionError +from nova import log as logging +LOG = logging.getLogger("nova.utils") TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" @@ -109,7 +110,7 @@ def vpn_ping(address, port, timeout=0.05, session_id=None): def fetchfile(url, target): - logging.debug(_("Fetching %s") % url) + LOG.debug(_("Fetching %s") % url) # c = pycurl.Curl() # fp = open(target, "wb") # c.setopt(c.URL, url) @@ -121,7 +122,7 @@ def fetchfile(url, target): def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): - logging.debug(_("Running cmd (subprocess): %s"), cmd) + LOG.debug(_("Running cmd (subprocess): %s"), cmd) env = os.environ.copy() if addl_env: env.update(addl_env) @@ -134,7 +135,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): result = obj.communicate() obj.stdin.close() if obj.returncode: - logging.debug(_("Result was %s") % (obj.returncode)) + LOG.debug(_("Result was %s") % (obj.returncode)) if check_exit_code and obj.returncode != 0: (stdout, stderr) = result raise ProcessExecutionError(exit_code=obj.returncode, @@ -167,12 +168,12 @@ def default_flagfile(filename='nova.conf'): def debug(arg): - logging.debug('debug in callback: %s', arg) + LOG.debug(_('debug in callback: %s'), arg) return arg def runthis(prompt, cmd, check_exit_code=True): - logging.debug(_("Running %s") % (cmd)) + LOG.debug(_("Running %s"), (cmd)) rv, err = execute(cmd, check_exit_code=check_exit_code) @@ -203,7 +204,7 @@ def get_my_ip(): csock.close() return addr except socket.gaierror as ex: - logging.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex) + LOG.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex) return "127.0.0.1" @@ -296,7 +297,7 @@ class LazyPluggable(object): fromlist = backend self.__backend = __import__(name, None, None, fromlist) - logging.info('backend %s', self.__backend) + LOG.debug(_('backend %s'), self.__backend) return self.__backend def __getattr__(self, key): diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 61e99944ef38..7602cbe50842 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -19,15 +19,16 @@ """Abstraction of the underlying virtualization API.""" -import logging import sys from nova import flags +from nova import log as logging from nova.virt import fake from nova.virt import libvirt_conn from nova.virt import xenapi_conn +LOG = logging.getLogger("nova.virt.connection") FLAGS = flags.FLAGS @@ -66,6 +67,6 @@ def get_connection(read_only=False): raise Exception('Unknown connection type "%s"' % t) if conn is None: - logging.error(_('Failed to open connection to the hypervisor')) + LOG.error(_('Failed to open connection to the hypervisor')) sys.exit(1) return conn diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 00edfbdc8ddd..ac82fdadbf56 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -36,7 +36,6 @@ Supports KVM, QEMU, UML, and XEN. 
""" -import logging import os import shutil @@ -50,6 +49,7 @@ from nova import context from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import utils #from nova.api import context from nova.auth import manager @@ -63,6 +63,9 @@ libxml2 = None Template = None +LOG = logging.getLogger('nova.virt.libvirt_conn') + + FLAGS = flags.FLAGS # TODO(vish): These flags should probably go into a shared location flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') @@ -130,7 +133,7 @@ class LibvirtConnection(object): @property def _conn(self): if not self._wrapped_conn or not self._test_connection(): - logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri) + LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri) self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) return self._wrapped_conn @@ -142,7 +145,7 @@ class LibvirtConnection(object): except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ e.get_error_domain() == libvirt.VIR_FROM_REMOTE: - logging.debug(_('Connection to libvirt broke')) + LOG.debug(_('Connection to libvirt broke')) return False raise @@ -214,8 +217,8 @@ class LibvirtConnection(object): def _cleanup(self, instance): target = os.path.join(FLAGS.instances_path, instance['name']) - logging.info(_('instance %s: deleting instance files %s'), - instance['name'], target) + LOG.info(_('instance %s: deleting instance files %s'), + instance['name'], target) if os.path.exists(target): shutil.rmtree(target) @@ -279,10 +282,10 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('instance %s: rebooted'), instance['name']) + LOG.debug(_('instance %s: rebooted'), instance['name']) timer.stop() except Exception, exn: - logging.error(_('_wait_for_reboot failed: %s'), exn) + LOG.exception(_('_wait_for_reboot failed: %s'), exn) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -325,10 +328,10 @@ class LibvirtConnection(object): state = self.get_info(instance['name'])['state'] db.instance_set_state(None, instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('instance %s: rescued'), instance['name']) + LOG.debug(_('instance %s: rescued'), instance['name']) timer.stop() except Exception, exn: - logging.error(_('_wait_for_rescue failed: %s'), exn) + LOG.exception(_('_wait_for_rescue failed: %s'), exn) db.instance_set_state(None, instance['id'], power_state.SHUTDOWN) @@ -353,7 +356,7 @@ class LibvirtConnection(object): NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance) self._create_image(instance, xml) self._conn.createXML(xml, 0) - logging.debug(_("instance %s: is running"), instance['name']) + LOG.debug(_("instance %s: is running"), instance['name']) timer = utils.LoopingCall(f=None) @@ -363,11 +366,11 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('instance %s: booted'), instance['name']) + LOG.debug(_('instance %s: booted'), instance['name']) timer.stop() except: - logging.exception(_('instance %s: failed to boot'), - instance['name']) + LOG.exception(_('instance %s: failed to boot'), + instance['name']) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -377,11 +380,11 @@ class LibvirtConnection(object): return 
timer.start(interval=0.5, now=True) def _flush_xen_console(self, virsh_output): - logging.info('virsh said: %r' % (virsh_output,)) + LOG.info('virsh said: %r', virsh_output) virsh_output = virsh_output[0].strip() if virsh_output.startswith('/dev/'): - logging.info(_('cool, it\'s a device')) + LOG.info(_('cool, it\'s a device')) out, err = utils.execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) return out @@ -389,7 +392,7 @@ class LibvirtConnection(object): return '' def _append_to_file(self, data, fpath): - logging.info(_('data: %r, fpath: %r') % (data, fpath)) + LOG.info(_('data: %r, fpath: %r'), data, fpath) fp = open(fpath, 'a+') fp.write(data) return fpath @@ -397,7 +400,7 @@ class LibvirtConnection(object): def _dump_file(self, fpath): fp = open(fpath, 'r+') contents = fp.read() - logging.info('Contents: %r' % (contents,)) + LOG.info('Contents: %r', contents) return contents @exception.wrap_exception @@ -431,7 +434,7 @@ class LibvirtConnection(object): # TODO(termie): these are blocking calls, it would be great # if they weren't. - logging.info(_('instance %s: Creating image'), inst['name']) + LOG.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') f.write(libvirt_xml) f.close() @@ -487,10 +490,10 @@ class LibvirtConnection(object): 'dns': network_ref['dns']} if key or net: if key: - logging.info(_('instance %s: injecting key into image %s'), + LOG.info(_('instance %s: injecting key into image %s'), inst['name'], inst.image_id) if net: - logging.info(_('instance %s: injecting net into image %s'), + LOG.info(_('instance %s: injecting net into image %s'), inst['name'], inst.image_id) try: disk.inject_data(basepath('disk-raw'), key, net, @@ -498,9 +501,9 @@ class LibvirtConnection(object): execute=execute) except Exception as e: # This could be a windows image, or a vmdk format disk - logging.warn(_('instance %s: ignoring error injecting data' - ' into image %s (%s)'), - inst['name'], inst.image_id, e) + LOG.warn(_('instance %s: ignoring error injecting data' + ' into image %s (%s)'), + inst['name'], inst.image_id, e) if inst['kernel_id']: if os.path.exists(basepath('disk')): @@ -526,8 +529,10 @@ class LibvirtConnection(object): def to_xml(self, instance, rescue=False): # TODO(termie): cache? 
-        logging.debug(_('instance %s: starting toXML method'),
-                      instance['name'])
+        LOG.debug(_('instance %s: starting toXML method'), instance['name'])
         network = db.network_get_by_instance(context.get_admin_context(),
                                              instance['id'])
         # FIXME(vish): stick this in db
@@ -569,7 +574,7 @@ class LibvirtConnection(object):
             xml_info['disk'] = xml_info['basepath'] + "/disk"
 
         xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
-        logging.debug(_('instance %s: finished toXML method'),
+        LOG.debug(_('instance %s: finished toXML method'),
                       instance['name'])
         return xml
@@ -870,9 +875,9 @@ class NWFilterFirewall(object):
                 rule_xml += "dstportstart='%s' dstportend='%s' " % \
                             (rule.from_port, rule.to_port)
             elif rule.protocol == 'icmp':
-                logging.info('rule.protocol: %r, rule.from_port: %r, '
-                             'rule.to_port: %r' %
-                             (rule.protocol, rule.from_port, rule.to_port))
+                LOG.info('rule.protocol: %r, rule.from_port: %r, '
+                         'rule.to_port: %r', rule.protocol,
+                         rule.from_port, rule.to_port)
                 if rule.from_port != -1:
                     rule_xml += "type='%s' " % rule.from_port
                 if rule.to_port != -1:
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index aa4026f9730a..f2c3a34f6c74 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -52,12 +52,12 @@ A fake XenAPI SDK.
 """
 
 import datetime
-import logging
 import uuid
 
 from pprint import pformat
 
 from nova import exception
+from nova import log as logging
 
 
 _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
@@ -65,9 +65,11 @@ _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
 
 _db_content = {}
 
+LOG = logging.getLogger("nova.virt.xenapi.fake")
+
 
 def log_db_contents(msg=None):
-    logging.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+    LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
 
 
 def reset():
@@ -242,9 +244,9 @@ class SessionBase(object):
         full_params = (self._session,) + params
         meth = getattr(self, methodname, None)
         if meth is None:
-            logging.warn('Raising NotImplemented')
+            LOG.debug(_('Raising NotImplemented'))
             raise NotImplementedError(
-                'xenapi.fake does not have an implementation for %s' %
+                _('xenapi.fake does not have an implementation for %s') %
                 methodname)
         return meth(*full_params)
 
@@ -278,12 +280,12 @@ class SessionBase(object):
 
         if impl is not None:
 
             def callit(*params):
-                logging.warn('Calling %s %s', name, impl)
+                LOG.debug(_('Calling %s %s'), name, impl)
                 self._check_session(params)
                 return impl(*params)
             return callit
         if self._is_gettersetter(name, True):
-            logging.warn('Calling getter %s', name)
+            LOG.debug(_('Calling getter %s'), name)
             return lambda *params: self._getter(name, params)
         elif self._is_create(name):
             return lambda *params: self._create(name, params)
@@ -333,10 +335,10 @@ class SessionBase(object):
                 field in _db_content[cls][ref]):
                 return _db_content[cls][ref][field]
 
-        logging.error('Raising NotImplemented')
+        LOG.debug(_('Raising NotImplemented'))
         raise NotImplementedError(
-            'xenapi.fake does not have an implementation for %s or it has '
-            'been called with the wrong number of arguments' % name)
+            _('xenapi.fake does not have an implementation for %s or it has '
+              'been called with the wrong number of arguments') % name)
 
     def _setter(self, name, params):
         self._check_session(params)
@@ -351,7 +353,7 @@ class SessionBase(object):
                 field in _db_content[cls][ref]):
                 _db_content[cls][ref][field] = val
 
-        logging.warn('Raising 
NotImplemented') + LOG.debug(_('Raising NotImplemented')) raise NotImplementedError( 'xenapi.fake does not have an implementation for %s or it has ' 'been called with the wrong number of arguments or the database ' @@ -399,7 +401,7 @@ class SessionBase(object): self._session not in _db_content['session']): raise Failure(['HANDLE_INVALID', 'session', self._session]) if len(params) == 0 or params[0] != self._session: - logging.warn('Raising NotImplemented') + LOG.debug(_('Raising NotImplemented')) raise NotImplementedError('Call to XenAPI without using .xenapi') def _check_arg_count(self, params, expected): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 9d1b51848973..1e9448a269ae 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,7 +19,6 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. """ -import logging import pickle import urllib from xml.dom import minidom @@ -27,6 +26,7 @@ from xml.dom import minidom from eventlet import event from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types @@ -37,6 +37,7 @@ from nova.virt.xenapi.volume_utils import StorageError FLAGS = flags.FLAGS +LOG = logging.getLogger("nova.virt.xenapi.vm_utils") XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -121,9 +122,9 @@ class VMHelper(HelperBase): rec['HVM_boot_params'] = {'order': 'dc'} rec['platform'] = {'acpi': 'true', 'apic': 'true', 'pae': 'true', 'viridian': 'true'} - logging.debug('Created VM %s...', instance.name) + LOG.debug(_('Created VM %s...'), instance.name) vm_ref = session.call_xenapi('VM.create', rec) - logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref) + LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref) return vm_ref @classmethod @@ -143,10 +144,9 @@ class VMHelper(HelperBase): vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] - logging.debug(_('Creating VBD for VM %s, VDI %s ... '), - vm_ref, vdi_ref) + LOG.debug(_('Creating VBD for VM %s, VDI %s ... 
'), vm_ref, vdi_ref) vbd_ref = session.call_xenapi('VBD.create', vbd_rec) - logging.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref, + LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref, vdi_ref) return vbd_ref @@ -161,7 +161,7 @@ class VMHelper(HelperBase): if vbd_rec['userdevice'] == str(number): return vbd except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('VBD not found in instance %s') % vm_ref) @classmethod @@ -170,7 +170,7 @@ class VMHelper(HelperBase): try: vbd_ref = session.call_xenapi('VBD.unplug', vbd_ref) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) if exc.details[0] != 'DEVICE_ALREADY_DETACHED': raise StorageError(_('Unable to unplug VBD %s') % vbd_ref) @@ -183,7 +183,7 @@ class VMHelper(HelperBase): #with Josh Kearney session.wait_for_task(0, task) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to destroy VBD %s') % vbd_ref) @classmethod @@ -199,11 +199,11 @@ class VMHelper(HelperBase): vif_rec['other_config'] = {} vif_rec['qos_algorithm_type'] = '' vif_rec['qos_algorithm_params'] = {} - logging.debug(_('Creating VIF for VM %s, network %s.'), vm_ref, - network_ref) + LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref, + network_ref) vif_ref = session.call_xenapi('VIF.create', vif_rec) - logging.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref, - vm_ref, network_ref) + LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref, + vm_ref, network_ref) return vif_ref @classmethod @@ -213,8 +213,7 @@ class VMHelper(HelperBase): """ #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added - logging.debug(_("Snapshotting VM %s with label '%s'..."), - vm_ref, label) + LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label) vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) vm_vdi_uuid = vm_vdi_rec["uuid"] @@ -227,8 +226,8 @@ class VMHelper(HelperBase): template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1] template_vdi_uuid = template_vdi_rec["uuid"] - logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref, - vm_ref) + LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref, + vm_ref) parent_uuid = wait_for_vhd_coalesce( session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid) @@ -241,8 +240,7 @@ class VMHelper(HelperBase): """ Requests that the Glance plugin bundle the specified VDIs and push them into Glance using the specified human-friendly name. 
""" - logging.debug(_("Asking xapi to upload %s as '%s'"), - vdi_uuids, image_name) + LOG.debug(_("Asking xapi to upload %s as '%s'"), vdi_uuids, image_name) params = {'vdi_uuids': vdi_uuids, 'image_name': image_name, @@ -260,7 +258,7 @@ class VMHelper(HelperBase): """ url = images.image_url(image) access = AuthManager().get_access_key(user, project) - logging.debug("Asking xapi to fetch %s as %s", url, access) + LOG.debug(_("Asking xapi to fetch %s as %s"), url, access) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url @@ -278,7 +276,7 @@ class VMHelper(HelperBase): @classmethod def lookup_image(cls, session, vdi_ref): - logging.debug("Looking up vdi %s for PV kernel", vdi_ref) + LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) fn = "is_vdi_pv" args = {} args['vdi-ref'] = vdi_ref @@ -289,7 +287,7 @@ class VMHelper(HelperBase): pv = True elif pv_str.lower() == 'false': pv = False - logging.debug("PV Kernel in VDI:%d", pv) + LOG.debug(_("PV Kernel in VDI:%d"), pv) return pv @classmethod @@ -317,10 +315,9 @@ class VMHelper(HelperBase): vdi = session.get_xenapi().VBD.get_VDI(vbd) # Test valid VDI record = session.get_xenapi().VDI.get_record(vdi) - logging.debug(_('VDI %s is still available'), - record['uuid']) + LOG.debug(_('VDI %s is still available'), record['uuid']) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) else: vdis.append(vdi) if len(vdis) > 0: @@ -331,10 +328,10 @@ class VMHelper(HelperBase): @classmethod def compile_info(cls, record): """Fill record with VM status information""" - logging.info(_("(VM_UTILS) xenserver vm state -> |%s|"), - record['power_state']) - logging.info(_("(VM_UTILS) xenapi power_state -> |%s|"), - XENAPI_POWER_STATE[record['power_state']]) + LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"), + record['power_state']) + LOG.info(_("(VM_UTILS) xenapi power_state -> |%s|"), + XENAPI_POWER_STATE[record['power_state']]) return {'state': XENAPI_POWER_STATE[record['power_state']], 'max_mem': long(record['memory_static_max']) >> 10, 'mem': long(record['memory_dynamic_max']) >> 10, @@ -388,11 +385,9 @@ def get_vhd_parent(session, vdi_rec): """ if 'vhd-parent' in vdi_rec['sm_config']: parent_uuid = vdi_rec['sm_config']['vhd-parent'] - #NOTE(sirp): changed xenapi -> get_xenapi() parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid) parent_rec = session.get_xenapi().VDI.get_record(parent_ref) - #NOTE(sirp): changed log -> logging - logging.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref) + LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref) return parent_ref, parent_rec else: return None @@ -409,7 +404,7 @@ def get_vhd_parent_uuid(session, vdi_ref): def scan_sr(session, instance_id, sr_ref): - logging.debug(_("Re-scanning SR %s"), sr_ref) + LOG.debug(_("Re-scanning SR %s"), sr_ref) task = session.call_xenapi('Async.SR.scan', sr_ref) session.wait_for_task(instance_id, task) @@ -433,10 +428,9 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, scan_sr(session, instance_id, sr_ref) parent_uuid = get_vhd_parent_uuid(session, vdi_ref) if original_parent_uuid and (parent_uuid != original_parent_uuid): - logging.debug( - _("Parent %s doesn't match original parent %s, " - "waiting for coalesce..."), - parent_uuid, original_parent_uuid) + LOG.debug(_("Parent %s doesn't match original parent %s, " + "waiting for coalesce..."), parent_uuid, + original_parent_uuid) else: done.send(parent_uuid) diff --git a/nova/virt/xenapi/vmops.py 
b/nova/virt/xenapi/vmops.py index 76f31635ada4..b35153f903b4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -18,10 +18,9 @@ Management class for VM-related functions (spawn, reboot, etc). """ -import logging - from nova import db from nova import context +from nova import log as logging from nova import exception from nova import utils @@ -31,6 +30,8 @@ from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.vm_utils import ImageType +XenAPI = None +LOG = logging.getLogger("nova.virt.xenapi.vmops") class VMOps(object): """ @@ -92,10 +93,9 @@ class VMOps(object): if network_ref: VMHelper.create_vif(self._session, vm_ref, network_ref, instance.mac_address) - logging.debug(_('Starting VM %s...'), vm_ref) + LOG.debug(_('Starting VM %s...'), vm_ref) self._session.call_xenapi('VM.start', vm_ref, False, False) - logging.info(_('Spawning VM %s created %s.'), instance.name, - vm_ref) + LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref) # NOTE(armando): Do we really need to do this in virt? timer = utils.LoopingCall(f=None) @@ -106,12 +106,12 @@ class VMOps(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('Instance %s: booted'), instance['name']) + LOG.debug(_('Instance %s: booted'), instance['name']) timer.stop() except Exception, exc: - logging.warn(exc) - logging.exception(_('instance %s: failed to boot'), - instance['name']) + LOG.warn(exc) + LOG.exception(_('instance %s: failed to boot'), + instance['name']) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -194,7 +194,7 @@ class VMOps(object): task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) self._session.wait_for_task(instance.id, task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) # Disk clean-up if vdis: @@ -203,20 +203,20 @@ class VMOps(object): task = self._session.call_xenapi('Async.VDI.destroy', vdi) self._session.wait_for_task(instance.id, task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) # VM Destroy try: task = self._session.call_xenapi('Async.VM.destroy', vm) self._session.wait_for_task(instance.id, task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) def _wait_with_callback(self, instance_id, task, callback): ret = None try: ret = self._session.wait_for_task(instance_id, task) except XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) callback(ret) def pause(self, instance, callback): diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 1ca813bcf8f2..d9585922527c 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -21,16 +21,17 @@ and storage repositories import re import string -import logging from nova import db from nova import context from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.virt.xenapi import HelperBase FLAGS = flags.FLAGS +LOG = logging.getLogger("nova.virt.xenapi.volume_utils") class StorageError(Exception): @@ -53,7 +54,7 @@ class VolumeHelper(HelperBase): """ sr_ref = session.get_xenapi().SR.get_by_name_label(label) if len(sr_ref) == 0: - logging.debug('Introducing %s...', label) + LOG.debug(_('Introducing %s...'), label) record = {} if 'chapuser' in info and 'chappassword' in info: record = {'target': info['targetHost'], @@ -70,10 +71,10 
@@ class VolumeHelper(HelperBase):
                 session.get_xenapi_host(),
                 record,
                 '0', label, description, 'iscsi', '', False, {})
-            logging.debug('Introduced %s as %s.', label, sr_ref)
+            LOG.debug(_('Introduced %s as %s.'), label, sr_ref)
             return sr_ref
         except cls.XenAPI.Failure, exc:
-            logging.warn(exc)
+            LOG.exception(exc)
             raise StorageError(_('Unable to create Storage Repository'))
     else:
         return sr_ref[0]
@@ -85,32 +86,32 @@ class VolumeHelper(HelperBase):
             vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref)
             sr_ref = session.get_xenapi().VDI.get_SR(vdi_ref)
         except cls.XenAPI.Failure, exc:
-            logging.warn(exc)
+            LOG.exception(exc)
             raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref)
         return sr_ref
 
     @classmethod
     def destroy_iscsi_storage(cls, session, sr_ref):
         """Forget the SR whilst preserving the state of the disk"""
-        logging.debug("Forgetting SR %s ... ", sr_ref)
+        LOG.debug(_("Forgetting SR %s ... "), sr_ref)
         pbds = []
         try:
             pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
         except cls.XenAPI.Failure, exc:
-            logging.warn('Ignoring exception %s when getting PBDs for %s',
-                         exc, sr_ref)
+            LOG.warn(_('Ignoring exception %s when getting PBDs for %s'),
+                     exc, sr_ref)
         for pbd in pbds:
             try:
                 session.get_xenapi().PBD.unplug(pbd)
             except cls.XenAPI.Failure, exc:
-                logging.warn('Ignoring exception %s when unplugging PBD %s',
-                             exc, pbd)
+                LOG.warn(_('Ignoring exception %s when unplugging PBD %s'),
+                         exc, pbd)
         try:
             session.get_xenapi().SR.forget(sr_ref)
-            logging.debug("Forgetting SR %s done.", sr_ref)
+            LOG.debug(_("Forgetting SR %s done."), sr_ref)
         except cls.XenAPI.Failure, exc:
-            logging.warn('Ignoring exception %s when forgetting SR %s',
-                         exc, sr_ref)
+            LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc,
+                     sr_ref)
 
     @classmethod
     def introduce_vdi(cls, session, sr_ref):
@@ -118,12 +119,12 @@ class VolumeHelper(HelperBase):
         try:
             vdis = session.get_xenapi().SR.get_VDIs(sr_ref)
         except cls.XenAPI.Failure, exc:
-            logging.warn(exc)
+            LOG.exception(exc)
             raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
         try:
             vdi_rec = session.get_xenapi().VDI.get_record(vdis[0])
         except cls.XenAPI.Failure, exc:
-            logging.warn(exc)
+            LOG.exception(exc)
             raise StorageError(_('Unable to get record'
                                  ' of VDI %s on') % vdis[0])
         else:
@@ -141,7 +142,7 @@ class VolumeHelper(HelperBase):
                     vdi_rec['xenstore_data'],
                     vdi_rec['sm_config'])
         except cls.XenAPI.Failure, exc:
-            logging.warn(exc)
+            LOG.exception(exc)
             raise StorageError(_('Unable to introduce VDI for SR %s')
                                % sr_ref)
 
@@ -165,11 +166,8 @@ class VolumeHelper(HelperBase):
         target_host = _get_target_host(iscsi_portal)
         target_port = _get_target_port(iscsi_portal)
         target_iqn = _get_iqn(iscsi_name, volume_id)
-        logging.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)',
-                      volume_id,
-                      target_host,
-                      target_port,
-                      target_iqn)
+        LOG.debug(_('(vol_id,host,port,iqn): (%s,%s,%s,%s)'),
+                  volume_id, target_host, target_port, target_iqn)
         if (device_number < 0) or \
             (volume_id is None) or \
             (target_host is None) or \
@@ -196,7 +194,7 @@ class VolumeHelper(HelperBase):
         elif re.match('^[0-9]+$', mountpoint):
             return string.atoi(mountpoint, 10)
         else:
-            logging.warn('Mountpoint cannot be translated: %s', mountpoint)
+            LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint)
             return -1
 
 
@@ -253,7 +251,7 @@ def _get_target(volume_id):
                             "sendtargets -p %s" %
                             volume_ref['host'])
     except exception.ProcessExecutionError, exc:
-        logging.warn(exc)
+        LOG.exception(exc)
     else:
         targets = r.splitlines()
         if len(_e) == 0 and len(targets) == 1:
diff --git 
a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index fdeb2506ca0a..189f968c62f3 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -17,14 +17,17 @@ """ Management class for Storage-related functions (attach, detach, etc). """ -import logging from nova import exception +from nova import log as logging from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError +LOG = logging.getLogger("nova.virt.xenapi.volumeops") + + class VolumeOps(object): """ Management class for Volume-related tasks @@ -45,8 +48,8 @@ class VolumeOps(object): raise exception.NotFound(_('Instance %s not found') % instance_name) # NOTE: No Resource Pool concept so far - logging.debug(_("Attach_volume: %s, %s, %s"), - instance_name, device_path, mountpoint) + LOG.debug(_("Attach_volume: %s, %s, %s"), + instance_name, device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. # But first, retrieve target info, like Host, IQN, LUN and SCSIID vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint) @@ -61,7 +64,7 @@ class VolumeOps(object): try: vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception(_('Unable to create VDI on SR %s for instance %s') % (sr_ref, @@ -73,7 +76,7 @@ class VolumeOps(object): vol_rec['deviceNumber'], False) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception(_('Unable to use SR %s for instance %s') % (sr_ref, @@ -84,13 +87,13 @@ class VolumeOps(object): vbd_ref) self._session.wait_for_task(vol_rec['deviceNumber'], task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception(_('Unable to attach volume to instance %s') % instance_name) - logging.info(_('Mountpoint %s attached to instance %s'), - mountpoint, instance_name) + LOG.info(_('Mountpoint %s attached to instance %s'), + mountpoint, instance_name) def detach_volume(self, instance_name, mountpoint): """Detach volume storage to VM instance""" @@ -100,13 +103,13 @@ class VolumeOps(object): raise exception.NotFound(_('Instance %s not found') % instance_name) # Detach VBD from VM - logging.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint) + LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint) device_number = VolumeHelper.mountpoint_to_number(mountpoint) try: vbd_ref = VMHelper.find_vbd_by_number(self._session, vm_ref, device_number) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) raise Exception(_('Unable to locate volume %s') % mountpoint) else: try: @@ -114,13 +117,13 @@ class VolumeOps(object): vbd_ref) VMHelper.unplug_vbd(self._session, vbd_ref) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) raise Exception(_('Unable to detach volume %s') % mountpoint) try: VMHelper.destroy_vbd(self._session, vbd_ref) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) # Forget SR VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) - logging.info(_('Mountpoint %s detached from instance %s'), - mountpoint, instance_name) + LOG.info(_('Mountpoint %s detached from instance %s'), + mountpoint, instance_name) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py 
index f17c8f39d129..a798f9e33a72 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -50,7 +50,6 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. :iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack' """ -import logging import sys import xmlrpclib @@ -61,9 +60,14 @@ from nova import context from nova import db from nova import utils from nova import flags +from nova import log as logging from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps + +LOG = logging.getLogger("nova.virt.xenapi") + + FLAGS = flags.FLAGS flags.DEFINE_string('xenapi_connection_url', @@ -248,7 +252,7 @@ class XenAPISession(object): return elif status == "success": result = self._session.xenapi.task.get_result(task) - logging.info(_("Task [%s] %s status: success %s") % ( + LOG.info(_("Task [%s] %s status: success %s") % ( name, task, result)) @@ -256,7 +260,7 @@ class XenAPISession(object): else: error_info = self._session.xenapi.task.get_error_info(task) action["error"] = str(error_info) - logging.warn(_("Task [%s] %s status: %s %s") % ( + LOG.warn(_("Task [%s] %s status: %s %s") % ( name, task, status, @@ -264,7 +268,7 @@ class XenAPISession(object): done.send_exception(self.XenAPI.Failure(error_info)) db.instance_action_create(context.get_admin_context(), action) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.warn(exc) done.send_exception(*sys.exc_info()) def _unwrap_plugin_exceptions(self, func, *args, **kwargs): @@ -272,7 +276,7 @@ class XenAPISession(object): try: return func(*args, **kwargs) except self.XenAPI.Failure, exc: - logging.debug(_("Got exception: %s"), exc) + LOG.debug(_("Got exception: %s"), exc) if (len(exc.details) == 4 and exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and exc.details[2] == 'Failure'): @@ -285,7 +289,7 @@ class XenAPISession(object): else: raise except xmlrpclib.ProtocolError, exc: - logging.debug(_("Got exception: %s"), exc) + LOG.debug(_("Got exception: %s"), exc) raise diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 8353b9712a3a..477e0abf4f57 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -20,15 +20,16 @@ Drivers for volumes. """ -import logging import os import time from nova import exception from nova import flags +from nova import log as logging from nova import utils +LOG = logging.getLogger("nova.volume.driver") FLAGS = flags.FLAGS flags.DEFINE_string('volume_group', 'nova-volumes', 'Name for the VG that will contain exported volumes') @@ -73,8 +74,8 @@ class VolumeDriver(object): tries = tries + 1 if tries >= FLAGS.num_shell_tries: raise - logging.exception(_("Recovering from a failed execute." - "Try number %s"), tries) + LOG.exception(_("Recovering from a failed execute. " + "Try number %s"), tries) time.sleep(tries ** 2) def check_for_setup_error(self): @@ -205,7 +206,7 @@ class FakeAOEDriver(AOEDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug(_("FAKE AOE: %s"), cmd) + LOG.debug(_("FAKE AOE: %s"), cmd) return (None, None) @@ -310,5 +311,5 @@ class FakeISCSIDriver(ISCSIDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug(_("FAKE ISCSI: %s"), cmd) + LOG.debug(_("FAKE ISCSI: %s"), cmd) return (None, None) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 966334c50dda..6348539c50d4 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -42,17 +42,18 @@ intact. 
""" -import logging import datetime from nova import context from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import utils +LOG = logging.getLogger('nova.volume.manager') FLAGS = flags.FLAGS flags.DEFINE_string('storage_availability_zone', 'nova', @@ -81,7 +82,7 @@ class VolumeManager(manager.Manager): self.driver.check_for_setup_error() ctxt = context.get_admin_context() volumes = self.db.volume_get_all_by_host(ctxt, self.host) - logging.debug(_("Re-exporting %s volumes"), len(volumes)) + LOG.debug(_("Re-exporting %s volumes"), len(volumes)) for volume in volumes: self.driver.ensure_export(ctxt, volume) @@ -89,7 +90,7 @@ class VolumeManager(manager.Manager): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) - logging.info(_("volume %s: creating"), volume_ref['name']) + LOG.info(_("volume %s: creating"), volume_ref['name']) self.db.volume_update(context, volume_id, @@ -98,18 +99,18 @@ class VolumeManager(manager.Manager): # before passing it to the driver. volume_ref['host'] = self.host - logging.debug(_("volume %s: creating lv of size %sG"), - volume_ref['name'], volume_ref['size']) + LOG.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'], + volume_ref['size']) self.driver.create_volume(volume_ref) - logging.debug(_("volume %s: creating export"), volume_ref['name']) + LOG.debug(_("volume %s: creating export"), volume_ref['name']) self.driver.create_export(context, volume_ref) now = datetime.datetime.utcnow() self.db.volume_update(context, volume_ref['id'], {'status': 'available', 'launched_at': now}) - logging.debug(_("volume %s: created successfully"), volume_ref['name']) + LOG.debug(_("volume %s: created successfully"), volume_ref['name']) return volume_id def delete_volume(self, context, volume_id): @@ -120,12 +121,12 @@ class VolumeManager(manager.Manager): raise exception.Error(_("Volume is still attached")) if volume_ref['host'] != self.host: raise exception.Error(_("Volume is not local to this node")) - logging.debug(_("volume %s: removing export"), volume_ref['name']) + LOG.debug(_("volume %s: removing export"), volume_ref['name']) self.driver.remove_export(context, volume_ref) - logging.debug(_("volume %s: deleting"), volume_ref['name']) + LOG.debug(_("volume %s: deleting"), volume_ref['name']) self.driver.delete_volume(volume_ref) self.db.volume_destroy(context, volume_id) - logging.debug(_("volume %s: deleted successfully"), volume_ref['name']) + LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) return True def setup_compute_volume(self, context, volume_id): diff --git a/nova/wsgi.py b/nova/wsgi.py index b5d6b96c1592..6336912e4e5d 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -22,7 +22,6 @@ Utility methods for working with WSGI servers """ import json -import logging import sys from xml.dom import minidom @@ -35,18 +34,23 @@ import webob import webob.dec import webob.exc +from nova import log as logging + +# TODO(todd): should this just piggyback the handler for root logger +# since we usually log to syslog, but changes if not daemonzied? 
logging.getLogger("routes.middleware").addHandler(logging.StreamHandler()) - class Server(object): """Server class to manage multiple WSGI sockets and applications.""" def __init__(self, threads=1000): + logging.basicConfig() self.pool = eventlet.GreenPool(threads) def start(self, application, port, host='0.0.0.0', backlog=128): """Run a WSGI server with the given application.""" + logging.audit("Starting %s on %s:%s", sys.argv[0], host, port) socket = eventlet.listen((host, port), backlog=backlog) self.pool.spawn_n(self._run, application, socket) diff --git a/setup.py b/setup.py index 1abf4d9fe964..e00911099866 100644 --- a/setup.py +++ b/setup.py @@ -64,9 +64,11 @@ setup(name='nova', 'bin/nova-dhcpbridge', 'bin/nova-import-canonical-imagestore', 'bin/nova-instancemonitor', + 'bin/nova-logspool', 'bin/nova-manage', 'bin/nova-network', 'bin/nova-objectstore', 'bin/nova-scheduler', + 'bin/nova-spoolsentry', 'bin/nova-volume', 'tools/nova-debug']) From b9576a9f73195656f4a0a1327cd6bee3c4a6b6c9 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 4 Jan 2011 00:26:41 -0500 Subject: [PATCH 089/147] Final few log tweaks, i18n, levels, including contexts, etc. --- bin/nova-dhcpbridge | 15 +++---- bin/nova-instancemonitor | 2 +- bin/nova-manage | 2 - nova/api/ec2/__init__.py | 14 ++++--- nova/api/ec2/cloud.py | 4 +- nova/api/openstack/__init__.py | 2 +- nova/auth/ldapdriver.py | 3 +- nova/auth/manager.py | 3 +- nova/compute/manager.py | 77 +++++++++++++++++----------------- nova/compute/monitor.py | 2 +- nova/exception.py | 1 + nova/network/manager.py | 14 ++++--- nova/objectstore/handler.py | 2 +- nova/rpc.py | 5 ++- nova/service.py | 3 -- nova/virt/libvirt_conn.py | 10 ++--- nova/virt/xenapi/fake.py | 2 +- nova/virt/xenapi/vmops.py | 1 + nova/wsgi.py | 2 +- 19 files changed, 79 insertions(+), 85 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index cecc1c80cdef..1a994d956311 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -49,16 +49,13 @@ flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager') - -LOG = logging.getLogger('nova-dhcpbridge') -if FLAGS.verbose: - LOG.setLevel(logging.DEBUG) +LOG = logging.getLogger('nova.dhcpbridge') def add_lease(mac, ip_address, _hostname, _interface): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: - LOG.debug("leasing ip") + LOG.debug(_("leasing ip")) network_manager = utils.import_object(FLAGS.network_manager) network_manager.lease_fixed_ip(context.get_admin_context(), mac, @@ -73,14 +70,14 @@ def add_lease(mac, ip_address, _hostname, _interface): def old_lease(mac, ip_address, hostname, interface): """Update just as add lease.""" - LOG.debug("Adopted old lease or got a change of mac/hostname") + LOG.debug(_("Adopted old lease or got a change of mac/hostname")) add_lease(mac, ip_address, hostname, interface) def del_lease(mac, ip_address, _hostname, _interface): """Called when a lease expires.""" if FLAGS.fake_rabbit: - LOG.debug("releasing ip") + LOG.debug(_("releasing ip")) network_manager = utils.import_object(FLAGS.network_manager) network_manager.release_fixed_ip(context.get_admin_context(), mac, @@ -123,8 +120,8 @@ def main(): mac = argv[2] ip = argv[3] hostname = argv[4] - LOG.debug("Called %s for mac %s with ip %s and " - "hostname %s on interface %s", + LOG.debug(_("Called %s for mac %s with ip %s and " + "hostname %s on interface %s"), action, mac, ip, hostname, 
interface) globals()[action + '_lease'](mac, ip, hostname, interface) else: diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor index 17f573b9de6f..7dca0201451c 100755 --- a/bin/nova-instancemonitor +++ b/bin/nova-instancemonitor @@ -44,7 +44,7 @@ from nova.compute import monitor # TODO(todd): shouldn't this be done with flags? And what about verbose? logging.getLogger('boto').setLevel(logging.WARN) -LOG = logging.getLogger('nova-instancemonitor') +LOG = logging.getLogger('nova.instancemonitor') if __name__ == '__main__': diff --git a/bin/nova-manage b/bin/nova-manage index 169ab5f628a6..40f540e5b047 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -77,7 +77,6 @@ from nova import crypto from nova import db from nova import exception from nova import flags -from nova import log as logging from nova import quota from nova import utils from nova.auth import manager @@ -557,7 +556,6 @@ def main(): utils.default_flagfile() argv = FLAGS(sys.argv) - logging._set_log_levels() script_name = argv.pop(0) if len(argv) < 1: print script_name + " category action []" diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 4dd2e55cd3e4..2fa1f636cec8 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -130,7 +130,7 @@ class Lockout(wsgi.Middleware): failures_key = "authfailures-%s" % access_key failures = int(self.mc.get(failures_key) or 0) if failures >= FLAGS.lockout_attempts: - detail = "Too many failed authentications." + detail = _("Too many failed authentications.") raise webob.exc.HTTPForbidden(detail=detail) res = req.get_response(self.application) if res.status_int == 403: @@ -139,13 +139,14 @@ class Lockout(wsgi.Middleware): # NOTE(vish): To use incr, failures has to be a string. self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60) elif failures >= FLAGS.lockout_attempts: - LOG.warn('Access key %s has had %d failed authentications' - ' and will be locked out for %d minutes.', + LOG.warn(_('Access key %s has had %d failed authentications' + ' and will be locked out for %d minutes.'), access_key, failures, FLAGS.lockout_minutes) self.mc.set(failures_key, str(failures), time=FLAGS.lockout_minutes * 60) return res + class Authenticate(wsgi.Middleware): """Authenticate an EC2 request and add 'ec2.context' to WSGI environ.""" @@ -297,8 +298,9 @@ class Authorizer(wsgi.Middleware): if self._matches_any_role(context, allowed_roles): return self.application else: - LOG.audit("Unauthorized request for controller=%s and action=%s", - controller_name, action, context=context) + LOG.audit(_("Unauthorized request for controller=%s " + "and action=%s"), controller_name, action, + context=context) raise webob.exc.HTTPUnauthorized() def _matches_any_role(self, context, roles): @@ -337,7 +339,7 @@ class Executor(wsgi.Application): LOG.info(_('NotFound raised: %s'), str(ex), context=context) return self._error(req, context, type(ex).__name__, str(ex)) except exception.ApiError as ex: - LOG.exception('ApiError raised', context=context) + LOG.exception(_('ApiError raised: %s'), str(ex), context=context) if ex.code: return self._error(req, context, ex.code, str(ex)) else: diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index d0db08db7f08..2fb4455f207d 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -609,7 +609,7 @@ class CloudController(object): 'volumeId': volume_ref['id']} def detach_volume(self, context, volume_id, **kwargs): - LOG.audit("Detach volume %s", volume_id, context=context) + LOG.audit(_("Detach volume %s"), 
volume_id, context=context) volume_ref = db.volume_get_by_ec2_id(context, volume_id) instance_ref = db.volume_get_instance(context.elevated(), volume_ref['id']) @@ -893,7 +893,7 @@ class CloudController(object): return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): - LOG.audit("De-registering image %s", image_id, context=context) + LOG.audit(_("De-registering image %s"), image_id, context=context) self.image_service.deregister(context, image_id) return {'imageId': image_id} diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 7e1c03d9f99f..ad203c51f1a1 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -83,7 +83,7 @@ class APIRouter(wsgi.Router): server_members = {'action': 'POST'} if FLAGS.allow_admin_api: - LOG.debug("Including admin operations in API.") + LOG.debug(_("Including admin operations in API.")) server_members['pause'] = 'POST' server_members['unpause'] = 'POST' server_members["diagnostics"] = "GET" diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 3e0837ba8ad0..c8de20028e53 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -65,7 +65,6 @@ flags.DEFINE_string('ldap_netadmin', flags.DEFINE_string('ldap_developer', 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers') - LOG = logging.getLogger("nova.ldapdriver") @@ -506,7 +505,7 @@ class LdapDriver(object): self.conn.modify_s(group_dn, attr) except self.ldap.OBJECT_CLASS_VIOLATION: LOG.debug(_("Attempted to remove the last member of a group. " - "Deleting the group at %s instead."), group_dn) + "Deleting the group at %s instead."), group_dn) self.__delete_group(group_dn) def __remove_from_all(self, uid): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index bfac7fc2a7c8..5685ae5e2919 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -70,8 +70,7 @@ flags.DEFINE_string('credential_rc_file', '%src', flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', 'Driver that auth manager uses') - -LOG = logging.getLogger('nova.authmanager') +LOG = logging.getLogger('nova.auth.manager') class AuthBase(object): diff --git a/nova/compute/manager.py b/nova/compute/manager.py index cc5724346dce..0098ded74243 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -52,8 +52,7 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', flags.DEFINE_string('stub_network', False, 'Stub network related code') - -LOG = logging.getLogger('nova.computemanager') +LOG = logging.getLogger('nova.compute.manager') class ComputeManager(manager.Manager): @@ -114,7 +113,8 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) - LOG.debug(_("instance %s: starting..."), instance_id) + LOG.audit(_("instance %s//%s: starting..."), + instance_ref['internal_id'], instance_id, context=context) self.db.instance_update(context, instance_id, {'host': self.host}) @@ -152,8 +152,9 @@ class ComputeManager(manager.Manager): instance_id, {'launched_at': now}) except Exception: # pylint: disable-msg=W0702 - LOG.exception(_("instance %s: Failed to spawn"), - instance_ref['name']) + LOG.exception(_("instance %s//%s: Failed to spawn"), + instance_ref['internal_id'], instance_id, + context=context) self.db.instance_set_state(context, instance_id, power_state.SHUTDOWN) @@ -193,8 +194,6 @@ class 
ComputeManager(manager.Manager): self.network_manager.deallocate_fixed_ip(context.elevated(), address) - LOG.debug(_("instance %s: terminating"), instance_id, context=context) - volumes = instance_ref.get('volumes', []) or [] for volume in volumes: self.detach_volume(context, instance_id, volume['id']) @@ -217,14 +216,13 @@ class ComputeManager(manager.Manager): instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - LOG.warn(_('trying to reboot a non-running ' + .warn(_('trying to reboot a non-running ' 'instance: %s (state: %s excepted: %s)'), instance_ref['internal_id'], instance_ref['state'], power_state.RUNNING, context=context) - LOG.debug(_('instance %s: rebooting'), instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -244,13 +242,14 @@ class ComputeManager(manager.Manager): # potentially? self._update_state(context, instance_id) - logging.debug(_('instance %s: snapshotting'), instance_ref['name']) + LOG.audit(_('instance %s//%s: snapshotting'), + instance_ref['internal_id'], instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - logging.warn(_('trying to snapshot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_ref['internal_id'], - instance_ref['state'], - power_state.RUNNING) + LOG.warn(_('trying to snapshot a non-running ' + 'instance: %s//%s (state: %s excepted: %s)'), + instance_ref['internal_id'], instance_id, + instance_ref['state'], + power_state.RUNNING) self.driver.snapshot(instance_ref, name) @@ -259,9 +258,8 @@ class ComputeManager(manager.Manager): """Rescue an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - LOG.audit(_('instance %s: rescuing'), instance_ref['internal_id'], - context=context) + LOG.audit(_('instance %s//%s: rescuing'), instance_ref['internal_id'], + instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -276,8 +274,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: unrescuing'), instance_ref['internal_id'], - context=context) + LOG.audit(_('instance %s//%s: unrescuing'), + instance_ref['internal_id'], instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -295,9 +293,8 @@ class ComputeManager(manager.Manager): """Pause an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - LOG.debug(_('instance %s: pausing'), instance_ref['internal_id'], - context=context) + LOG.audit(_('instance %s//%s: pausing'), instance_ref['internal_id'], + instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -313,9 +310,8 @@ class ComputeManager(manager.Manager): """Unpause a paused instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - LOG.debug(_('instance %s: unpausing'), instance_ref['internal_id'], - context=context) + LOG.audit(_('instance %s//%s: unpausing'), + instance_ref['internal_id'], instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -332,8 +328,9 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref["state"] == power_state.RUNNING: - logging.debug(_("instance %s: retrieving diagnostics"), - 
instance_ref["internal_id"]) + LOG.audit(_("instance %s//%s: retrieving diagnostics"), + instance_ref["internal_id"], instance_id, + context=context) return self.driver.get_diagnostics(instance_ref) @exception.wrap_exception @@ -342,8 +339,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug(_('instance %s: suspending'), - instance_ref['internal_id']) + LOG.audit(_('instance %s//%s: suspending'), + instance_ref['internal_id'], instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, 'suspending') @@ -359,7 +356,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug(_('instance %s: resuming'), instance_ref['internal_id']) + LOG.audit(_('instance %s//%s: resuming'), instance_ref['internal_id'], + instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, 'resuming') @@ -374,18 +372,18 @@ class ComputeManager(manager.Manager): """Send the console output for an instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_("Get console output instance %s//%s"), + LOG.audit(_("Get console output for instance %s//%s"), instance_ref['internal_id'], instance_id, context=context) - return self.driver.get_console_output(instance_ref) @exception.wrap_exception def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" context = context.elevated() - LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id, - volume_id, mountpoint, context=context) instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("instance %s//%s: attaching volume %s to %s"), + instance_ref['internal_id'], instance_id, + volume_id, mountpoint, context=context) dev_path = self.volume_manager.setup_compute_volume(context, volume_id) try: @@ -400,8 +398,9 @@ class ComputeManager(manager.Manager): # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same # ecxception below. 
- LOG.exception(_("instance %s: attach failed %s, removing"), - instance_id, mountpoint, context=context) + LOG.exception(_("instance %s//%s: attach failed %s, removing"), + instance_ref['internal_id'], instance_id, + mountpoint, context=context) self.volume_manager.remove_compute_volume(context, volume_id) raise exc @@ -418,8 +417,8 @@ class ComputeManager(manager.Manager): volume_id, volume_ref['mountpoint'], instance_ref['internal_id'], instance_id, context=context) if instance_ref['name'] not in self.driver.list_instances(): - LOG.warn(_("Detaching volume from unknown instance %s"), - instance_ref['name'], context=context) + LOG.warn(_("Detaching volume from unknown instance %s//%s"), + instance_ref['internal_id'], instance_id, context=context) else: self.driver.detach_volume(instance_ref['name'], volume_ref['mountpoint']) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index cc94e44f440e..14d0e8ca1714 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -89,7 +89,7 @@ RRD_VALUES = { utcnow = datetime.datetime.utcnow -LOG = logging.getLogger('nova.instancemonitor') +LOG = logging.getLogger('nova.compute.monitor') def update_rrd(instance, name, data): diff --git a/nova/exception.py b/nova/exception.py index 7c6675e6216a..7680e534adee 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -24,6 +24,7 @@ Nova-type exceptions. SHOULD include dedicated exception logging. from nova import log as logging LOG = logging.getLogger('nova.exception') + class ProcessExecutionError(IOError): def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, diff --git a/nova/network/manager.py b/nova/network/manager.py index 2b7325fd0a79..fd286f2100ec 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -60,7 +60,7 @@ from nova import utils from nova import rpc -LOG = logging.getLogger("nova.networkmanager") +LOG = logging.getLogger("nova.network.manager") FLAGS = flags.FLAGS flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') @@ -132,7 +132,7 @@ class NetworkManager(manager.Manager): def set_network_host(self, context, network_id): """Safely sets the host of the network.""" - LOG.debug(_("setting network host")) + LOG.debug(_("setting network host"), context=context) host = self.db.network_set_host(context, network_id, self.host) @@ -187,7 +187,7 @@ class NetworkManager(manager.Manager): def lease_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is leased.""" - LOG.debug(_("Leasing IP %s"), address) + LOG.debug(_("Leasing IP %s"), address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: @@ -202,11 +202,12 @@ class NetworkManager(manager.Manager): {'leased': True, 'updated_at': now}) if not fixed_ip_ref['allocated']: - LOG.warn(_("IP %s leased that was already deallocated"), address) + LOG.warn(_("IP %s leased that was already deallocated"), address, + context=context) def release_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is released.""" - LOG.debug("Releasing IP %s", address) + LOG.debug("Releasing IP %s", address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: @@ -216,7 +217,8 @@ class NetworkManager(manager.Manager): raise exception.Error(_("IP %s released from bad mac %s vs %s") % (address, instance_ref['mac_address'], mac)) if not fixed_ip_ref['leased']: - 
LOG.warn(_("IP %s released that was not leased"), address) + LOG.warn(_("IP %s released that was not leased"), address, + context=context) self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': False}) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index 8e9235da4e42..bc26fd3c551f 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -134,7 +134,7 @@ def get_context(request): headers=request.getAllHeaders(), check_type='s3') rv = context.RequestContext(user, project) - LOG.audit("Authenticated request", context=rv) + LOG.audit(_("Authenticated request"), context=rv) return rv except exception.Error as ex: LOG.debug(_("Authentication Failure: %s"), ex) diff --git a/nova/rpc.py b/nova/rpc.py index 02052485dc85..bdf2f74b3a74 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -96,8 +96,9 @@ class Consumer(messaging.Consumer): FLAGS.rabbit_retry_interval)) self.failed_connection = True if self.failed_connection: - LOG.exception(_("Unable to connect to AMQP server" - " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries) + LOG.exception(_("Unable to connect to AMQP server " + "after %d tries. Shutting down."), + FLAGS.rabbit_max_retries) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): diff --git a/nova/service.py b/nova/service.py index fef7f0593f6b..a459f8df46cb 100644 --- a/nova/service.py +++ b/nova/service.py @@ -209,9 +209,6 @@ def serve(*services): FLAGS(sys.argv) logging.basicConfig() - # TODO(todd): make this pigggyback the flag-based level override method - logging.getLogger('amqplib').setLevel(logging.WARN) - if not services: services = [Service.create()] diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ac82fdadbf56..764ef6600002 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -62,9 +62,7 @@ libvirt = None libxml2 = None Template = None - -LOG = logging.getLogger('nova.virt.libvirt_conn') - +LOG = logging.getLogger('nova.virt.libvirt_conn') FLAGS = flags.FLAGS # TODO(vish): These flags should probably go into a shared location @@ -380,7 +378,7 @@ class LibvirtConnection(object): return timer.start(interval=0.5, now=True) def _flush_xen_console(self, virsh_output): - LOG.info('virsh said: %r', virsh_output) + LOG.info(_('virsh said: %r'), virsh_output) virsh_output = virsh_output[0].strip() if virsh_output.startswith('/dev/'): @@ -400,7 +398,7 @@ class LibvirtConnection(object): def _dump_file(self, fpath): fp = open(fpath, 'r+') contents = fp.read() - LOG.info('Contents: %r', contents) + LOG.info(_('Contents of file %s: %r'), fpath, contents) return contents @exception.wrap_exception @@ -529,7 +527,7 @@ class LibvirtConnection(object): def to_xml(self, instance, rescue=False): # TODO(termie): cache? 
- LOG.debug('instance %s: starting toXML method', instance['name']) + LOG.debug(_('instance %s: starting toXML method'), instance['name']) network = db.project_get_network(context.get_admin_context(), instance['project_id']) LOG.debug(_('instance %s: starting toXML method'), instance['name']) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index f2c3a34f6c74..96d8f5fc8c5e 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -244,7 +244,7 @@ class SessionBase(object): full_params = (self._session,) + params meth = getattr(self, methodname, None) if meth is None: - LOG.debug('Raising NotImplemented') + LOG.debug(_('Raising NotImplemented')) raise NotImplementedError( _('xenapi.fake does not have an implementation for %s') % methodname) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b35153f903b4..88350f91a25b 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -33,6 +33,7 @@ from nova.virt.xenapi.vm_utils import ImageType XenAPI = None LOG = logging.getLogger("nova.virt.xenapi.vmops") + class VMOps(object): """ Management class for VM-related tasks diff --git a/nova/wsgi.py b/nova/wsgi.py index 6336912e4e5d..9c333d3abeb9 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -36,11 +36,11 @@ import webob.exc from nova import log as logging - # TODO(todd): should this just piggyback the handler for root logger # since we usually log to syslog, but changes if not daemonzied? logging.getLogger("routes.middleware").addHandler(logging.StreamHandler()) + class Server(object): """Server class to manage multiple WSGI sockets and applications.""" From 45f2f563d1722d2f4d81d49de9d6a3cfd3d0fe3e Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 4 Jan 2011 00:37:47 -0500 Subject: [PATCH 090/147] Don't know where that LOG went... --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0098ded74243..d68801146cca 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -216,7 +216,7 @@ class ComputeManager(manager.Manager): instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - .warn(_('trying to reboot a non-running ' + LOG.warn(_('trying to reboot a non-running ' 'instance: %s (state: %s excepted: %s)'), instance_ref['internal_id'], instance_ref['state'], From 8bbcadafbc25c7ab478d6143293232f2cea24411 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 4 Jan 2011 09:44:15 -0800 Subject: [PATCH 091/147] Removed UUID keys for instance and volume. 
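Instance and volume rows now use plain autoincrement integers for their
primary keys, and the ec2-style identifier is just a base-36 rendering of
that integer. For reference, a quick round-trip through the two helpers
below (the values are illustrative):

    >>> id_to_ec2_id(46)
    'i-1a'
    >>> ec2_id_to_id('i-1a')
    46

Note that an id of 0 would encode as a bare 'i-' (the conversion loop never
runs), but autoincrement columns start handing out ids at 1, so that case
should not come up in practice.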
--- nova/api/ec2/cloud.py | 12 ++++-- nova/db/sqlalchemy/models.py | 64 +++++--------------------------- nova/virt/xenapi/volume_utils.py | 3 ++ 3 files changed, 21 insertions(+), 58 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index a6818855dcd2..b1eaafc8ba2a 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -72,13 +72,17 @@ def _gen_key(context, user_id, key_name): def ec2_id_to_id(ec2_id): - """Convert an ec2 ID and an instance ID.""" - return ec2_id[2:] + """Convert an ec2 ID (i-[base 36 number]) to an instance id (int)""" + return int(ec2_id[2:], 36) def id_to_ec2_id(instance_id): - """Convert an instance ID to an ec2 ID.""" - return "i-%s" % instance_id + """Convert an instance ID (int) to an ec2 ID (i-[base 36 number])""" + digits = [] + while instance_id != 0: + instance_id, remainder = divmod(instance_id, 36) + digits.append('0123456789abcdefghijklmnopqrstuvwxyz'[remainder]) + return "i-%s" % ''.join(reversed(digits)) class CloudController(object): diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 78ab1ba9b539..2acff8e6012f 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -20,17 +20,13 @@ SQLAlchemy models for nova data. """ import datetime -import uuid from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy import Column, Integer, String, schema from sqlalchemy import ForeignKey, DateTime, Boolean, Text -from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy.schema import ForeignKeyConstraint -from sqlalchemy.types import TypeDecorator, CHAR from nova.db.sqlalchemy.session import get_session @@ -43,46 +39,6 @@ FLAGS = flags.FLAGS BASE = declarative_base() -def make_uuid(): - return uuid.uuid1().hex - - -class GUID(TypeDecorator): - """Platform-independent GUID type. - - Uses Postgresql's UUID type, otherwise uses - CHAR(32), storing as stringified hex values. 
- - """ - impl = CHAR - - def load_dialect_impl(self, dialect): - if dialect.name == 'postgresql': - return dialect.type_descriptor(UUID()) - else: - return dialect.type_descriptor(CHAR(32)) - - def process_bind_param(self, value, dialect): - if value is None: - return value - elif dialect.name == 'postgresql': - return str(value) - else: - if isinstance(value, int): - return "%.32x" % uuid.UUID(int=value) - elif not isinstance(value, uuid.UUID): - return "%.32x" % uuid.UUID(value) - else: - # hexstring - return "%.32x" % value - - def process_result_value(self, value, dialect): - if value is None: - return value - else: - return uuid.UUID(value).hex - - class NovaBase(object): """Base class for Nova Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} @@ -208,11 +164,11 @@ class Certificate(BASE, NovaBase): class Instance(BASE, NovaBase): """Represents a guest vm.""" __tablename__ = 'instances' - id = Column(GUID, primary_key=True, default=make_uuid) + id = Column(Integer, primary_key=True, autoincrement=True) @property def name(self): - return "instance-%s" % self.id + return "instance-%08x" % self.id admin_pass = Column(String(255)) user_id = Column(String(255)) @@ -284,7 +240,7 @@ class InstanceActions(BASE, NovaBase): """Represents a guest VM's actions and results""" __tablename__ = "instance_actions" id = Column(Integer, primary_key=True) - instance_id = Column(GUID, ForeignKey('instances.id')) + instance_id = Column(Integer, ForeignKey('instances.id')) action = Column(String(255)) error = Column(Text) @@ -293,11 +249,11 @@ class InstanceActions(BASE, NovaBase): class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' - id = Column(GUID, primary_key=True, default=make_uuid) + id = Column(Integer, primary_key=True, autoincrement=True) @property def name(self): - return "vollume-%s" % self.id + return "volume-%08x" % self.id user_id = Column(String(255)) project_id = Column(String(255)) @@ -305,7 +261,7 @@ class Volume(BASE, NovaBase): host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? 
- instance_id = Column(GUID, ForeignKey('instances.id'), nullable=True) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('volumes'), foreign_keys=instance_id, @@ -346,7 +302,7 @@ class ExportDevice(BASE, NovaBase): id = Column(Integer, primary_key=True) shelf_id = Column(Integer) blade_id = Column(Integer) - volume_id = Column(GUID, ForeignKey('volumes.id'), nullable=True) + volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) volume = relationship(Volume, backref=backref('export_device', uselist=False), foreign_keys=volume_id, @@ -362,7 +318,7 @@ class IscsiTarget(BASE, NovaBase): id = Column(Integer, primary_key=True) target_num = Column(Integer) host = Column(String(255)) - volume_id = Column(GUID, ForeignKey('volumes.id'), nullable=True) + volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) volume = relationship(Volume, backref=backref('iscsi_target', uselist=False), foreign_keys=volume_id, @@ -374,7 +330,7 @@ class SecurityGroupInstanceAssociation(BASE, NovaBase): __tablename__ = 'security_group_instance_association' id = Column(Integer, primary_key=True) security_group_id = Column(Integer, ForeignKey('security_groups.id')) - instance_id = Column(GUID, ForeignKey('instances.id')) + instance_id = Column(Integer, ForeignKey('instances.id')) class SecurityGroup(BASE, NovaBase): @@ -494,7 +450,7 @@ class FixedIp(BASE, NovaBase): address = Column(String(255)) network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) network = relationship(Network, backref=backref('fixed_ips')) - instance_id = Column(GUID, ForeignKey('instances.id'), nullable=True) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('fixed_ip', uselist=False), foreign_keys=instance_id, diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 1ca813bcf8f2..0ad6fd450468 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -202,6 +202,9 @@ class VolumeHelper(HelperBase): def _get_volume_id(path): """Retrieve the volume id from device_path""" + # If we have the ID and not a path, just return it. + if isinstance(path, int): + return path # n must contain at least the volume_id # /vol- is for remote volumes # -vol- is for local volumes From 4b0509f014aa164273a0e544441838be5352b1eb Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 4 Jan 2011 12:16:43 -0800 Subject: [PATCH 092/147] Removed leftover UUID reference. --- nova/db/sqlalchemy/api.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index bb84883e7e6a..aaa07e3c9291 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -19,7 +19,6 @@ Implementation of SQLAlchemy backend. """ -import uuid import warnings from nova import db @@ -742,9 +741,6 @@ def instance_get_project_vpn(context, project_id): def instance_get_by_id(context, instance_id): session = get_session() - if type(instance_id) is int: - instance_id = uuid.UUID(int=instance_id).hex - if is_admin_context(context): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ From 2899896d1c7742ad59e2da2d2369bc2ff9526fed Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 4 Jan 2011 12:27:50 -0800 Subject: [PATCH 093/147] Renamed argument to represent possible types in volume_utils. 
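With the UUID columns gone, _get_volume_id can be handed either an integer
volume id directly or a device path string that embeds one, so the argument
is renamed to path_or_id to reflect both cases. Illustrative calls (the
device path here is invented):

    _get_volume_id(7)                    # an int is already the id
    _get_volume_id('/dev/sdb/vol-0007')  # a path: the id is sliced out of it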
--- nova/virt/xenapi/volume_utils.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 0ad6fd450468..4bbc41b0323a 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -200,18 +200,19 @@ class VolumeHelper(HelperBase): return -1 -def _get_volume_id(path): +def _get_volume_id(path_or_id): """Retrieve the volume id from device_path""" # If we have the ID and not a path, just return it. - if isinstance(path, int): - return path + if isinstance(path_or_id, int): + return path_or_id # n must contain at least the volume_id # /vol- is for remote volumes # -vol- is for local volumes # see compute/manager->setup_compute_volume - volume_id = path[path.find('/vol-') + 1:] - if volume_id == path: - volume_id = path[path.find('-vol-') + 1:].replace('--', '-') + volume_id = path_or_id[path_or_id.find('/vol-') + 1:] + if volume_id == path_or_id: + volume_id = path_or_id[path_or_id.find('-vol-') + 1:] + volume_id = volume_id.replace('--', '-') return volume_id From 02c86d1e1146c1162a36620560eb8116ce8d47f1 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Tue, 4 Jan 2011 15:20:10 -0600 Subject: [PATCH 094/147] Made the plugin output fully json-ified, so I could remove the exception handlers in vmops.py. Cleaned up some pep8 issues that weren't caught in earlier runs. --- nova/virt/xenapi/vmops.py | 19 +++------ .../etc/xapi.d/plugins/pluginlib_nova.py | 28 ++++++++----- .../xenapi/etc/xapi.d/plugins/xenstore.py | 39 ++++++++++++++----- 3 files changed, 55 insertions(+), 31 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index d539f9416f6f..b6d6207827c4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -289,11 +289,7 @@ class VMOps(object): found at that path, returns None. """ ret = self._make_xenstore_call('list_records', vm, path) - try: - return json.loads(ret) - except ValueError: - # Not a valid JSON value - return ret + return json.loads(ret) def read_from_xenstore(self, vm, path): """Returns the value stored in the xenstore record for the given VM @@ -305,14 +301,11 @@ class VMOps(object): {'ignore_missing_path': 'True'}) except self.XenAPI.Failure, e: return None - try: - return json.loads(ret) - except ValueError: - # Not a JSON object - if ret == "None": - # Can't marshall None over RPC calls. - return None - return ret + ret = json.loads(ret) + if ret == "None": + # Can't marshall None over RPC calls. + return None + return ret def write_to_xenstore(self, vm, path, value): """Writes the passed value to the xenstore record for the given VM diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py index 2d323a0167ba..8e7a829d5a62 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py @@ -45,6 +45,7 @@ class PluginError(Exception): def __init__(self, *args): Exception.__init__(self, *args) + class ArgumentError(PluginError): """Raised when required arguments are missing, argument values are invalid, or incompatible arguments are given. 
@@ -67,6 +68,7 @@ def ignore_failure(func, *args, **kwargs): ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$') + def validate_exists(args, key, default=None): """Validates that a string argument to a RPC method call is given, and matches the shell-safe regex, with an optional default value in case it @@ -76,20 +78,24 @@ def validate_exists(args, key, default=None): """ if key in args: if len(args[key]) == 0: - raise ArgumentError('Argument %r value %r is too short.' % (key, args[key])) + raise ArgumentError('Argument %r value %r is too short.' % + (key, args[key])) if not ARGUMENT_PATTERN.match(args[key]): - raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key])) + raise ArgumentError('Argument %r value %r contains invalid ' + 'characters.' % (key, args[key])) if args[key][0] == '-': - raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key])) + raise ArgumentError('Argument %r value %r starts with a hyphen.' + % (key, args[key])) return args[key] elif default is not None: return default else: raise ArgumentError('Argument %s is required.' % key) + def validate_bool(args, key, default=None): - """Validates that a string argument to a RPC method call is a boolean string, - with an optional default value in case it does not exist. + """Validates that a string argument to a RPC method call is a boolean + string, with an optional default value in case it does not exist. Returns the python boolean value. """ @@ -99,7 +105,9 @@ def validate_bool(args, key, default=None): elif value.lower() == 'false': return False else: - raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value)) + raise ArgumentError("Argument %s may not take value %r. " + "Valid values are ['true', 'false']." % (key, value)) + def exists(args, key): """Validates that a freeform string argument to a RPC method call is given. @@ -110,6 +118,7 @@ def exists(args, key): else: raise ArgumentError('Argument %s is required.' 
% key) + def optional(args, key): """If the given key is in args, return the corresponding value, otherwise return None""" @@ -122,13 +131,14 @@ def get_this_host(session): def get_domain_0(session): this_host_ref = get_this_host(session) - expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref + expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' + expr = expr % this_host_ref return session.xenapi.VM.get_all_records_where(expr).keys()[0] def create_vdi(session, sr_ref, name_label, virtual_size, read_only): vdi_ref = session.xenapi.VDI.create( - { 'name_label': name_label, + {'name_label': name_label, 'name_description': '', 'SR': sr_ref, 'virtual_size': str(virtual_size), @@ -138,7 +148,7 @@ def create_vdi(session, sr_ref, name_label, virtual_size, read_only): 'xenstore_data': {}, 'other_config': {}, 'sm_config': {}, - 'tags': [] }) + 'tags': []}) logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label, virtual_size, read_only, sr_ref) return vdi_ref diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py index 71ed82d6270f..695bf3448d69 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py @@ -34,10 +34,17 @@ import pluginlib_nova as pluginlib pluginlib.configure_logging("xenstore") +def jsonify(fnc): + def wrapper(*args, **kwargs): + return json.dumps(fnc(*args, **kwargs)) + return wrapper + + +@jsonify def read_record(self, arg_dict): """Returns the value stored at the given path for the given dom_id. - These must be encoded as key/value pairs in arg_dict. You can - optinally include a key 'ignore_missing_path'; if this is present + These must be encoded as key/value pairs in arg_dict. You can + optinally include a key 'ignore_missing_path'; if this is present and boolean True, attempting to read a non-existent path will return the string 'None' instead of raising an exception. """ @@ -46,16 +53,21 @@ def read_record(self, arg_dict): return _run_command(cmd).rstrip("\n") except pluginlib.PluginError, e: if arg_dict.get("ignore_missing_path", False): - cmd = "xenstore-exists /local/domain/%(dom_id)s/%(path)s; echo $?" % arg_dict + cmd = "xenstore-exists /local/domain/%(dom_id)s/%(path)s; echo $?" + cmd = cmd % arg_dict ret = _run_command(cmd).strip() # If the path exists, the cmd should return "0" if ret != "0": - # No such path, so ignore the error - return "None" + # No such path, so ignore the error and return the + # string 'None', since None can't be marshalled + # over RPC. + return "None" # Either we shouldn't ignore path errors, or another # error was hit. Re-raise. raise + +@jsonify def write_record(self, arg_dict): """Writes to xenstore at the specified path. If there is information already stored in that location, it is overwritten. As in read_record, @@ -63,10 +75,13 @@ def write_record(self, arg_dict): you must specify a 'value' key, whose value must be a string. Typically, you can json-ify more complex values and store the json output. """ - cmd = "xenstore-write /local/domain/%(dom_id)s/%(path)s '%(value)s'" % arg_dict + cmd = "xenstore-write /local/domain/%(dom_id)s/%(path)s '%(value)s'" + cmd = cmd % arg_dict _run_command(cmd) return arg_dict["value"] + +@jsonify def list_records(self, arg_dict): """Returns all the stored data at or below the given path for the given dom_id. 
The data is returned as a json-ified dict, with the @@ -80,7 +95,8 @@ def list_records(self, arg_dict): except pluginlib.PluginError, e: if "No such file or directory" in "%s" % e: # Path doesn't exist. - return json.dumps({}) + return {} + return str(e) raise base_path = arg_dict["path"] paths = _paths_from_ls(recs) @@ -96,8 +112,10 @@ def list_records(self, arg_dict): except ValueError: val = rec ret[path] = val - return json.dumps(ret) + return ret + +@jsonify def delete_record(self, arg_dict): """Just like it sounds: it removes the record for the specified VM and the specified path from xenstore. @@ -105,6 +123,7 @@ def delete_record(self, arg_dict): cmd = "xenstore-rm /local/domain/%(dom_id)s/%(path)s" % arg_dict return _run_command(cmd) + def _paths_from_ls(recs): """The xenstore-ls command returns a listing that isn't terribly useful. This method cleans that up into a dict with each path @@ -137,13 +156,15 @@ def _paths_from_ls(recs): last_nm = barename return ret + def _run_command(cmd): """Abstracts out the basics of issuing system commands. If the command returns anything in stderr, a PluginError is raised with that information. Otherwise, the output from stdout is returned. """ pipe = subprocess.PIPE - proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe, stderr=pipe, close_fds=True) + proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe, + stderr=pipe, close_fds=True) proc.wait() err = proc.stderr.read() if err: From e97cb0f19f66ee4d28685575cea57b1eb32c4ed3 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 4 Jan 2011 13:56:36 -0800 Subject: [PATCH 095/147] Moved __init__ api code to api.py and changed allowed_instances quota method argument to accept all type data, not just vcpu count. --- nova/compute/__init__.py | 369 +------------------------------------ nova/compute/api.py | 385 +++++++++++++++++++++++++++++++++++++++ nova/network/__init__.py | 70 +------ nova/network/api.py | 87 +++++++++ nova/quota.py | 6 +- nova/tests/test_quota.py | 10 +- nova/volume/__init__.py | 83 +-------- nova/volume/api.py | 100 ++++++++++ 8 files changed, 585 insertions(+), 525 deletions(-) create mode 100644 nova/compute/api.py create mode 100644 nova/network/api.py create mode 100644 nova/volume/api.py diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py index 9efd90909827..b94f971d1d93 100644 --- a/nova/compute/__init__.py +++ b/nova/compute/__init__.py @@ -16,371 +16,4 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Handles all requests relating to instances (guest vms). 
-""" - -import datetime -import logging -import time - -from nova import db -from nova import exception -from nova import flags -from nova import network -from nova import quota -from nova import rpc -from nova import utils -from nova import volume -from nova.compute import instance_types -from nova.db import base - -FLAGS = flags.FLAGS - - -def generate_default_hostname(instance_id): - """Default function to generate a hostname given an instance reference.""" - return str(instance_id) - - -class API(base.Base): - """API for interacting with the compute manager.""" - - def __init__(self, image_service=None, network_api=None, volume_api=None, - **kwargs): - if not image_service: - image_service = utils.import_object(FLAGS.image_service) - self.image_service = image_service - if not network_api: - network_api = network.API() - self.network_api = network_api - if not volume_api: - volume_api = volume.API() - self.volume_api = volume_api - super(API, self).__init__(**kwargs) - - def get_network_topic(self, context, instance_id): - try: - instance = self.get(context, instance_id) - except exception.NotFound as e: - logging.warning("Instance %d was not found in get_network_topic", - instance_id) - raise e - - host = instance['host'] - if not host: - raise exception.Error("Instance %d has no host" % instance_id) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) - return rpc.call(context, - topic, - {"method": "get_network_topic", "args": {'fake': 1}}) - - def create(self, context, instance_type, - image_id, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, - display_name='', display_description='', - key_name=None, key_data=None, security_group='default', - availability_zone=None, user_data=None, - generate_hostname=generate_default_hostname): - """Create the number of instances requested if quota and - other arguments check out ok.""" - - type_data = instance_types.INSTANCE_TYPES[instance_type] - num_instances = quota.allowed_instances(context, max_count, - type_data['vcpus']) - if num_instances < min_count: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_count) - raise quota.QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." 
% - num_instances, "InstanceLimitExceeded") - - is_vpn = image_id == FLAGS.vpn_image_id - if not is_vpn: - image = self.image_service.show(context, image_id) - if kernel_id is None: - kernel_id = image.get('kernelId', None) - if ramdisk_id is None: - ramdisk_id = image.get('ramdiskId', None) - # No kernel and ramdisk for raw images - if kernel_id == str(FLAGS.null_kernel): - kernel_id = None - ramdisk_id = None - logging.debug("Creating a raw instance") - # Make sure we have access to kernel and ramdisk (if not raw) - if kernel_id: - self.image_service.show(context, kernel_id) - if ramdisk_id: - self.image_service.show(context, ramdisk_id) - - if security_group is None: - security_group = ['default'] - if not type(security_group) is list: - security_group = [security_group] - - security_groups = [] - self.ensure_default_security_group(context) - for security_group_name in security_group: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) - security_groups.append(group['id']) - - if key_data is None and key_name: - key_pair = db.key_pair_get(context, context.user_id, key_name) - key_data = key_pair['public_key'] - - base_options = { - 'reservation_id': utils.generate_uid('r'), - 'image_id': image_id, - 'kernel_id': kernel_id or '', - 'ramdisk_id': ramdisk_id or '', - 'state_description': 'scheduling', - 'user_id': context.user_id, - 'project_id': context.project_id, - 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), - 'instance_type': instance_type, - 'memory_mb': type_data['memory_mb'], - 'vcpus': type_data['vcpus'], - 'local_gb': type_data['local_gb'], - 'display_name': display_name, - 'display_description': display_description, - 'user_data': user_data or '', - 'key_name': key_name, - 'key_data': key_data, - 'availability_zone': availability_zone} - - elevated = context.elevated() - instances = [] - logging.debug(_("Going to run %s instances..."), num_instances) - for num in range(num_instances): - instance = dict(mac_address=utils.generate_mac(), - launch_index=num, - **base_options) - instance = self.db.instance_create(context, instance) - instance_id = instance['id'] - - elevated = context.elevated() - if not security_groups: - security_groups = [] - for security_group_id in security_groups: - self.db.instance_add_security_group(elevated, - instance_id, - security_group_id) - - # Set sane defaults if not specified - updates = dict(hostname=generate_hostname(instance_id)) - if 'display_name' not in instance: - updates['display_name'] = "Server %s" % instance_id - - instance = self.update(context, instance_id, **updates) - instances.append(instance) - - logging.debug(_("Casting to scheduler for %s/%s's instance %s"), - context.project_id, context.user_id, instance_id) - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "run_instance", - "args": {"topic": FLAGS.compute_topic, - "instance_id": instance_id}}) - - return instances - - def ensure_default_security_group(self, context): - """ Create security group for the security context if it - does not already exist - - :param context: the security context - - """ - try: - db.security_group_get_by_name(context, context.project_id, - 'default') - except exception.NotFound: - values = {'name': 'default', - 'description': 'default', - 'user_id': context.user_id, - 'project_id': context.project_id} - db.security_group_create(context, values) - - def update(self, context, instance_id, **kwargs): - """Updates the instance in the datastore. 
- - :param context: The security context - :param instance_id: ID of the instance to update - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - updated - - :retval None - - """ - return self.db.instance_update(context, instance_id, kwargs) - - def delete(self, context, instance_id): - logging.debug("Going to try and terminate %s" % instance_id) - try: - instance = self.get(context, instance_id) - except exception.NotFound as e: - logging.warning(_("Instance %s was not found during terminate"), - instance_id) - raise e - - if (instance['state_description'] == 'terminating'): - logging.warning(_("Instance %s is already being terminated"), - instance_id) - return - - self.update(context, - instance['id'], - state_description='terminating', - state=0, - terminated_at=datetime.datetime.utcnow()) - - host = instance['host'] - if host: - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "terminate_instance", - "args": {"instance_id": instance_id}}) - else: - self.db.instance_destroy(context, instance_id) - - def get(self, context, instance_id=None, project_id=None, - reservation_id=None, fixed_ip=None): - """Get one or more instances, possibly filtered by one of the - given parameters. If there is no filter and the context is - an admin, it will retreive all instances in the system.""" - if instance_id is not None: - return self.db.instance_get_by_id(context, instance_id) - if reservation_id is not None: - return self.db.instance_get_all_by_reservation(context, - reservation_id) - if fixed_ip is not None: - return self.db.fixed_ip_get_instance(context, fixed_ip) - if project_id or not context.is_admin: - if not context.project: - return self.db.instance_get_all_by_user(context, - context.user_id) - if project_id is None: - project_id = context.project_id - return self.db.instance_get_all_by_project(context, - project_id) - return self.db.instance_get_all(context) - - def snapshot(self, context, instance_id, name): - """Snapshot the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "snapshot_instance", - "args": {"instance_id": instance_id, "name": name}}) - - def reboot(self, context, instance_id): - """Reboot the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "reboot_instance", - "args": {"instance_id": instance_id}}) - - def pause(self, context, instance_id): - """Pause the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "pause_instance", - "args": {"instance_id": instance_id}}) - - def unpause(self, context, instance_id): - """Unpause the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unpause_instance", - "args": {"instance_id": instance_id}}) - - def get_diagnostics(self, context, instance_id): - """Retrieve diagnostics for the given instance.""" - instance = self.get(context, instance_id) - host = instance["host"] - return rpc.call(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "get_diagnostics", - "args": {"instance_id": instance_id}}) - 
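The hunk above is one half of a move, not a deletion: the same code reappears below as the new nova/compute/api.py, and this package __init__.py shrinks to a one-line re-export so existing import sites keep working. A minimal sketch of that shim pattern; the call site shown is hypothetical:

    # nova/compute/__init__.py after this patch: nothing but a re-export.
    from nova.compute.api import API

    # Hypothetical call site, unaffected by the relocation:
    from nova import compute
    compute_api = compute.API()   # still resolves to nova.compute.api.API

The same treatment is applied to nova/network/__init__.py and nova/volume/__init__.py later in this patch.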
- def get_actions(self, context, instance_id): - """Retrieve actions for the given instance.""" - return self.db.instance_get_actions(context, instance_id) - - def suspend(self, context, instance_id): - """suspend the instance with instance_id""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "suspend_instance", - "args": {"instance_id": instance_id}}) - - def resume(self, context, instance_id): - """resume the instance with instance_id""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "resume_instance", - "args": {"instance_id": instance_id}}) - - def rescue(self, context, instance_id): - """Rescue the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "rescue_instance", - "args": {"instance_id": instance_id}}) - - def unrescue(self, context, instance_id): - """Unrescue the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unrescue_instance", - "args": {"instance_id": instance_id}}) - - def attach_volume(self, context, instance_id, volume_id, device): - if not re.match("^/dev/[a-z]d[a-z]+$", device): - raise exception.ApiError(_("Invalid device specified: %s. " - "Example device: /dev/vdb") % device) - self.volume_api.check_attach(context, volume_id) - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "attach_volume", - "args": {"volume_id": volume_id, - "instance_id": instance_id, - "mountpoint": device}}) - - def detach_volume(self, context, volume_id): - instance = self.db.volume_get_instance(context.elevated(), volume_id) - if not instance: - raise exception.ApiError(_("Volume isn't attached to anything!")) - self.volume_api.check_detach(context, volume_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "detach_volume", - "args": {"instance_id": instance['id'], - "volume_id": volume_id}}) - return instance - - def associate_floating_ip(self, context, instance_id, address): - instance = self.get(context, instance_id) - self.network_api.associate_floating_ip(context, address, - instance['fixed_ip']) +from nova.compute.api import API diff --git a/nova/compute/api.py b/nova/compute/api.py new file mode 100644 index 000000000000..74d030c4d8ce --- /dev/null +++ b/nova/compute/api.py @@ -0,0 +1,385 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to instances (guest vms). +""" + +import datetime +import logging +import time + +from nova import db +from nova import exception +from nova import flags +from nova import network +from nova import quota +from nova import rpc +from nova import utils +from nova import volume +from nova.compute import instance_types +from nova.db import base + +FLAGS = flags.FLAGS + + +def generate_default_hostname(instance_id): + """Default function to generate a hostname given an instance reference.""" + return str(instance_id) + + +class API(base.Base): + """API for interacting with the compute manager.""" + + def __init__(self, image_service=None, network_api=None, volume_api=None, + **kwargs): + if not image_service: + image_service = utils.import_object(FLAGS.image_service) + self.image_service = image_service + if not network_api: + network_api = network.API() + self.network_api = network_api + if not volume_api: + volume_api = volume.API() + self.volume_api = volume_api + super(API, self).__init__(**kwargs) + + def get_network_topic(self, context, instance_id): + try: + instance = self.get(context, instance_id) + except exception.NotFound as e: + logging.warning("Instance %d was not found in get_network_topic", + instance_id) + raise e + + host = instance['host'] + if not host: + raise exception.Error("Instance %d has no host" % instance_id) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) + return rpc.call(context, + topic, + {"method": "get_network_topic", "args": {'fake': 1}}) + + def create(self, context, instance_type, + image_id, kernel_id=None, ramdisk_id=None, + min_count=1, max_count=1, + display_name='', display_description='', + key_name=None, key_data=None, security_group='default', + availability_zone=None, user_data=None, + generate_hostname=generate_default_hostname): + """Create the number of instances requested if quota and + other arguments check out ok.""" + + type_data = instance_types.INSTANCE_TYPES[instance_type] + num_instances = quota.allowed_instances(context, max_count, type_data) + if num_instances < min_count: + logging.warn("Quota exceeeded for %s, tried to run %s instances", + context.project_id, min_count) + raise quota.QuotaError("Instance quota exceeded. You can only " + "run %s more instances of this type." 
% + num_instances, "InstanceLimitExceeded") + + is_vpn = image_id == FLAGS.vpn_image_id + if not is_vpn: + image = self.image_service.show(context, image_id) + if kernel_id is None: + kernel_id = image.get('kernelId', None) + if ramdisk_id is None: + ramdisk_id = image.get('ramdiskId', None) + # No kernel and ramdisk for raw images + if kernel_id == str(FLAGS.null_kernel): + kernel_id = None + ramdisk_id = None + logging.debug("Creating a raw instance") + # Make sure we have access to kernel and ramdisk (if not raw) + if kernel_id: + self.image_service.show(context, kernel_id) + if ramdisk_id: + self.image_service.show(context, ramdisk_id) + + if security_group is None: + security_group = ['default'] + if not type(security_group) is list: + security_group = [security_group] + + security_groups = [] + self.ensure_default_security_group(context) + for security_group_name in security_group: + group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + security_groups.append(group['id']) + + if key_data is None and key_name: + key_pair = db.key_pair_get(context, context.user_id, key_name) + key_data = key_pair['public_key'] + + base_options = { + 'reservation_id': utils.generate_uid('r'), + 'image_id': image_id, + 'kernel_id': kernel_id or '', + 'ramdisk_id': ramdisk_id or '', + 'state_description': 'scheduling', + 'user_id': context.user_id, + 'project_id': context.project_id, + 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), + 'instance_type': instance_type, + 'memory_mb': type_data['memory_mb'], + 'vcpus': type_data['vcpus'], + 'local_gb': type_data['local_gb'], + 'display_name': display_name, + 'display_description': display_description, + 'user_data': user_data or '', + 'key_name': key_name, + 'key_data': key_data, + 'availability_zone': availability_zone} + + elevated = context.elevated() + instances = [] + logging.debug(_("Going to run %s instances..."), num_instances) + for num in range(num_instances): + instance = dict(mac_address=utils.generate_mac(), + launch_index=num, + **base_options) + instance = self.db.instance_create(context, instance) + instance_id = instance['id'] + + elevated = context.elevated() + if not security_groups: + security_groups = [] + for security_group_id in security_groups: + self.db.instance_add_security_group(elevated, + instance_id, + security_group_id) + + # Set sane defaults if not specified + updates = dict(hostname=generate_hostname(instance_id)) + if 'display_name' not in instance: + updates['display_name'] = "Server %s" % instance_id + + instance = self.update(context, instance_id, **updates) + instances.append(instance) + + logging.debug(_("Casting to scheduler for %s/%s's instance %s"), + context.project_id, context.user_id, instance_id) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "run_instance", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id}}) + + return instances + + def ensure_default_security_group(self, context): + """ Create security group for the security context if it + does not already exist + + :param context: the security context + + """ + try: + db.security_group_get_by_name(context, context.project_id, + 'default') + except exception.NotFound: + values = {'name': 'default', + 'description': 'default', + 'user_id': context.user_id, + 'project_id': context.project_id} + db.security_group_create(context, values) + + def update(self, context, instance_id, **kwargs): + """Updates the instance in the datastore. 
+ + :param context: The security context + :param instance_id: ID of the instance to update + :param kwargs: All additional keyword args are treated + as data fields of the instance to be + updated + + :retval None + + """ + return self.db.instance_update(context, instance_id, kwargs) + + def delete(self, context, instance_id): + logging.debug("Going to try and terminate %s" % instance_id) + try: + instance = self.get(context, instance_id) + except exception.NotFound as e: + logging.warning(_("Instance %s was not found during terminate"), + instance_id) + raise e + + if (instance['state_description'] == 'terminating'): + logging.warning(_("Instance %s is already being terminated"), + instance_id) + return + + self.update(context, + instance['id'], + state_description='terminating', + state=0, + terminated_at=datetime.datetime.utcnow()) + + host = instance['host'] + if host: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "terminate_instance", + "args": {"instance_id": instance_id}}) + else: + self.db.instance_destroy(context, instance_id) + + def get(self, context, instance_id=None, project_id=None, + reservation_id=None, fixed_ip=None): + """Get one or more instances, possibly filtered by one of the + given parameters. If there is no filter and the context is + an admin, it will retreive all instances in the system.""" + if instance_id is not None: + return self.db.instance_get_by_id(context, instance_id) + if reservation_id is not None: + return self.db.instance_get_all_by_reservation(context, + reservation_id) + if fixed_ip is not None: + return self.db.fixed_ip_get_instance(context, fixed_ip) + if project_id or not context.is_admin: + if not context.project: + return self.db.instance_get_all_by_user(context, + context.user_id) + if project_id is None: + project_id = context.project_id + return self.db.instance_get_all_by_project(context, + project_id) + return self.db.instance_get_all(context) + + def snapshot(self, context, instance_id, name): + """Snapshot the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "snapshot_instance", + "args": {"instance_id": instance_id, "name": name}}) + + def reboot(self, context, instance_id): + """Reboot the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "reboot_instance", + "args": {"instance_id": instance_id}}) + + def pause(self, context, instance_id): + """Pause the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "pause_instance", + "args": {"instance_id": instance_id}}) + + def unpause(self, context, instance_id): + """Unpause the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unpause_instance", + "args": {"instance_id": instance_id}}) + + def get_diagnostics(self, context, instance_id): + """Retrieve diagnostics for the given instance.""" + instance = self.get(context, instance_id) + host = instance["host"] + return rpc.call(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "get_diagnostics", + "args": {"instance_id": instance_id}}) + 
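A pattern worth calling out in the methods above and below: anything that needs an answer (get_network_topic, get_diagnostics) goes through blocking rpc.call, while lifecycle operations (reboot, pause, terminate) use fire-and-forget rpc.cast on the per-host queue returned by db.queue_get_for. A sketch of how the recurring cast could be factored into one helper; the helper name and the queue-name format are assumptions, not part of this patch:

    def _cast_compute(self, context, instance_id, method, **extra_args):
        # Hypothetical helper: look up the instance's host, resolve its
        # per-host compute queue (roughly 'compute.<host>'), and cast.
        instance = self.get(context, instance_id)
        topic = self.db.queue_get_for(context, FLAGS.compute_topic,
                                      instance['host'])
        rpc.cast(context, topic,
                 {"method": method,
                  "args": dict(instance_id=instance_id, **extra_args)})

    # e.g. reboot() would then reduce to:
    #     self._cast_compute(context, instance_id, "reboot_instance")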
+ def get_actions(self, context, instance_id): + """Retrieve actions for the given instance.""" + return self.db.instance_get_actions(context, instance_id) + + def suspend(self, context, instance_id): + """suspend the instance with instance_id""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "suspend_instance", + "args": {"instance_id": instance_id}}) + + def resume(self, context, instance_id): + """resume the instance with instance_id""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "resume_instance", + "args": {"instance_id": instance_id}}) + + def rescue(self, context, instance_id): + """Rescue the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "rescue_instance", + "args": {"instance_id": instance_id}}) + + def unrescue(self, context, instance_id): + """Unrescue the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unrescue_instance", + "args": {"instance_id": instance_id}}) + + def attach_volume(self, context, instance_id, volume_id, device): + if not re.match("^/dev/[a-z]d[a-z]+$", device): + raise exception.ApiError(_("Invalid device specified: %s. " + "Example device: /dev/vdb") % device) + self.volume_api.check_attach(context, volume_id) + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "attach_volume", + "args": {"volume_id": volume_id, + "instance_id": instance_id, + "mountpoint": device}}) + + def detach_volume(self, context, volume_id): + instance = self.db.volume_get_instance(context.elevated(), volume_id) + if not instance: + raise exception.ApiError(_("Volume isn't attached to anything!")) + self.volume_api.check_detach(context, volume_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "detach_volume", + "args": {"instance_id": instance['id'], + "volume_id": volume_id}}) + return instance + + def associate_floating_ip(self, context, instance_id, address): + instance = self.get(context, instance_id) + self.network_api.associate_floating_ip(context, address, + instance['fixed_ip']) diff --git a/nova/network/__init__.py b/nova/network/__init__.py index cbd912047cab..6eb3e3ef6cbb 100644 --- a/nova/network/__init__.py +++ b/nova/network/__init__.py @@ -16,72 +16,4 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Handles all requests relating to instances (guest vms). -""" - -import logging - -from nova import db -from nova import flags -from nova import quota -from nova import rpc -from nova.db import base - -FLAGS = flags.FLAGS - - -class API(base.Base): - """API for interacting with the network manager.""" - - def allocate_floating_ip(self, context): - if quota.allowed_floating_ips(context, 1) < 1: - logging.warn(_("Quota exceeeded for %s, tried to allocate " - "address"), - context.project_id) - raise quota.QuotaError(_("Address quota exceeded. 
You cannot " - "allocate any more addresses")) - # NOTE(vish): We don't know which network host should get the ip - # when we allocate, so just send it to any one. This - # will probably need to move into a network supervisor - # at some point. - return rpc.call(context, - FLAGS.network_topic, - {"method": "allocate_floating_ip", - "args": {"project_id": context.project_id}}) - - def release_floating_ip(self, context, address): - floating_ip = self.db.floating_ip_get_by_address(context, address) - # NOTE(vish): We don't know which network host should get the ip - # when we deallocate, so just send it to any one. This - # will probably need to move into a network supervisor - # at some point. - rpc.cast(context, - FLAGS.network_topic, - {"method": "deallocate_floating_ip", - "args": {"floating_address": floating_ip['address']}}) - - def associate_floating_ip(self, context, floating_ip, fixed_ip): - if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode): - fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip) - floating_ip = self.db.floating_ip_get_by_address(context, floating_ip) - # NOTE(vish): Perhaps we should just pass this on to compute and - # let compute communicate with network. - host = fixed_ip['network']['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.network_topic, host), - {"method": "associate_floating_ip", - "args": {"floating_address": floating_ip['address'], - "fixed_address": fixed_ip['address']}}) - - def disassociate_floating_ip(self, context, address): - floating_ip = self.db.floating_ip_get_by_address(context, address) - if not floating_ip.get('fixed_ip'): - raise exception.ApiError('Address is not associated.') - # NOTE(vish): Get the topic from the host name of the network of - # the associated fixed ip. - host = floating_ip['fixed_ip']['network']['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.network_topic, host), - {"method": "disassociate_floating_ip", - "args": {"floating_address": floating_ip['address']}}) +from nova.network.api import API diff --git a/nova/network/api.py b/nova/network/api.py new file mode 100644 index 000000000000..cbd912047cab --- /dev/null +++ b/nova/network/api.py @@ -0,0 +1,87 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to instances (guest vms). +""" + +import logging + +from nova import db +from nova import flags +from nova import quota +from nova import rpc +from nova.db import base + +FLAGS = flags.FLAGS + + +class API(base.Base): + """API for interacting with the network manager.""" + + def allocate_floating_ip(self, context): + if quota.allowed_floating_ips(context, 1) < 1: + logging.warn(_("Quota exceeeded for %s, tried to allocate " + "address"), + context.project_id) + raise quota.QuotaError(_("Address quota exceeded. 
You cannot " + "allocate any more addresses")) + # NOTE(vish): We don't know which network host should get the ip + # when we allocate, so just send it to any one. This + # will probably need to move into a network supervisor + # at some point. + return rpc.call(context, + FLAGS.network_topic, + {"method": "allocate_floating_ip", + "args": {"project_id": context.project_id}}) + + def release_floating_ip(self, context, address): + floating_ip = self.db.floating_ip_get_by_address(context, address) + # NOTE(vish): We don't know which network host should get the ip + # when we deallocate, so just send it to any one. This + # will probably need to move into a network supervisor + # at some point. + rpc.cast(context, + FLAGS.network_topic, + {"method": "deallocate_floating_ip", + "args": {"floating_address": floating_ip['address']}}) + + def associate_floating_ip(self, context, floating_ip, fixed_ip): + if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode): + fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip) + floating_ip = self.db.floating_ip_get_by_address(context, floating_ip) + # NOTE(vish): Perhaps we should just pass this on to compute and + # let compute communicate with network. + host = fixed_ip['network']['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.network_topic, host), + {"method": "associate_floating_ip", + "args": {"floating_address": floating_ip['address'], + "fixed_address": fixed_ip['address']}}) + + def disassociate_floating_ip(self, context, address): + floating_ip = self.db.floating_ip_get_by_address(context, address) + if not floating_ip.get('fixed_ip'): + raise exception.ApiError('Address is not associated.') + # NOTE(vish): Get the topic from the host name of the network of + # the associated fixed ip. 
+ host = floating_ip['fixed_ip']['network']['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.network_topic, host), + {"method": "disassociate_floating_ip", + "args": {"floating_address": floating_ip['address']}}) diff --git a/nova/quota.py b/nova/quota.py index 12dbfcf43862..3884eb3081a0 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -53,7 +53,7 @@ def get_quota(context, project_id): return rval -def allowed_instances(context, num_instances, cores_per_instance): +def allowed_instances(context, num_instances, instance_type): """Check quota and return min(num_instances, allowed_instances)""" project_id = context.project_id context = context.elevated() @@ -62,9 +62,9 @@ def allowed_instances(context, num_instances, cores_per_instance): quota = get_quota(context, project_id) allowed_instances = quota['instances'] - used_instances allowed_cores = quota['cores'] - used_cores - num_cores = num_instances * cores_per_instance + num_cores = num_instances * instance_type['vcpus'] allowed_instances = min(allowed_instances, - int(allowed_cores // cores_per_instance)) + int(allowed_cores // instance_type['vcpus'])) return min(num_instances, allowed_instances) diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index c158187746b3..b5f9f30ef286 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -27,6 +27,7 @@ from nova import test from nova import utils from nova.auth import manager from nova.api.ec2 import cloud +from nova.compute import instance_types FLAGS = flags.FLAGS @@ -78,14 +79,17 @@ class QuotaTestCase(test.TestCase): def test_quota_overrides(self): """Make sure overriding a projects quotas works""" - num_instances = quota.allowed_instances(self.context, 100, 1) + num_instances = quota.allowed_instances(self.context, 100, + instance_types.INSTANCE_TYPES['m1.small']) self.assertEqual(num_instances, 2) db.quota_create(self.context, {'project_id': self.project.id, 'instances': 10}) - num_instances = quota.allowed_instances(self.context, 100, 1) + num_instances = quota.allowed_instances(self.context, 100, + instance_types.INSTANCE_TYPES['m1.small']) self.assertEqual(num_instances, 4) db.quota_update(self.context, self.project.id, {'cores': 100}) - num_instances = quota.allowed_instances(self.context, 100, 1) + num_instances = quota.allowed_instances(self.context, 100, + instance_types.INSTANCE_TYPES['m1.small']) self.assertEqual(num_instances, 10) db.quota_destroy(self.context, self.project.id) diff --git a/nova/volume/__init__.py b/nova/volume/__init__.py index 48ecdbe68b17..56ef9332e37e 100644 --- a/nova/volume/__init__.py +++ b/nova/volume/__init__.py @@ -16,85 +16,4 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Handles all requests relating to volumes. -""" - -import datetime -import logging - -from nova import db -from nova import exception -from nova import flags -from nova import quota -from nova import rpc -from nova.db import base - -FLAGS = flags.FLAGS -flags.DECLARE('storage_availability_zone', 'nova.volume.manager') - - -class API(base.Base): - """API for interacting with the volume manager.""" - - def create(self, context, size, name, description): - if quota.allowed_volumes(context, 1, size) < 1: - logging.warn("Quota exceeeded for %s, tried to create %sG volume", - context.project_id, size) - raise quota.QuotaError("Volume quota exceeded. 
You cannot " - "create a volume of size %s" % size) - - options = { - 'size': size, - 'user_id': context.user.id, - 'project_id': context.project_id, - 'availability_zone': FLAGS.storage_availability_zone, - 'status': "creating", - 'attach_status': "detached", - 'display_name': name, - 'display_description': description} - - volume = self.db.volume_create(context, options) - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "create_volume", - "args": {"topic": FLAGS.volume_topic, - "volume_id": volume['id']}}) - return volume - - def delete(self, context, volume_id): - volume = self.db.volume_get(context, volume_id) - if volume['status'] != "available": - raise exception.ApiError(_("Volume status must be available")) - now = datetime.datetime.utcnow() - self.db.volume_update(context, volume_id, {'status': 'deleting', - 'terminated_at': now}) - host = volume['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.volume_topic, host), - {"method": "delete_volume", - "args": {"volume_id": volume_id}}) - - def update(self, context, volume_id, fields): - self.db.volume_update(context, volume_id, fields) - - def get(self, context, volume_id=None): - if volume_id is not None: - return self.db.volume_get(context, volume_id) - if context.user.is_admin(): - return self.db.volume_get_all(context) - return self.db.volume_get_all_by_project(context, context.project_id) - - def check_attach(self, context, volume_id): - volume = self.db.volume_get(context, volume_id) - # TODO(vish): abstract status checking? - if volume['status'] != "available": - raise exception.ApiError(_("Volume status must be available")) - if volume['attach_status'] == "attached": - raise exception.ApiError(_("Volume is already attached")) - - def check_detach(self, context, volume_id): - volume = self.db.volume_get(context, volume_id) - # TODO(vish): abstract status checking? - if volume['status'] == "available": - raise exception.ApiError(_("Volume is already detached")) +from nova.volume.api import API diff --git a/nova/volume/api.py b/nova/volume/api.py new file mode 100644 index 000000000000..48ecdbe68b17 --- /dev/null +++ b/nova/volume/api.py @@ -0,0 +1,100 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to volumes. 
+""" + +import datetime +import logging + +from nova import db +from nova import exception +from nova import flags +from nova import quota +from nova import rpc +from nova.db import base + +FLAGS = flags.FLAGS +flags.DECLARE('storage_availability_zone', 'nova.volume.manager') + + +class API(base.Base): + """API for interacting with the volume manager.""" + + def create(self, context, size, name, description): + if quota.allowed_volumes(context, 1, size) < 1: + logging.warn("Quota exceeeded for %s, tried to create %sG volume", + context.project_id, size) + raise quota.QuotaError("Volume quota exceeded. You cannot " + "create a volume of size %s" % size) + + options = { + 'size': size, + 'user_id': context.user.id, + 'project_id': context.project_id, + 'availability_zone': FLAGS.storage_availability_zone, + 'status': "creating", + 'attach_status': "detached", + 'display_name': name, + 'display_description': description} + + volume = self.db.volume_create(context, options) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "create_volume", + "args": {"topic": FLAGS.volume_topic, + "volume_id": volume['id']}}) + return volume + + def delete(self, context, volume_id): + volume = self.db.volume_get(context, volume_id) + if volume['status'] != "available": + raise exception.ApiError(_("Volume status must be available")) + now = datetime.datetime.utcnow() + self.db.volume_update(context, volume_id, {'status': 'deleting', + 'terminated_at': now}) + host = volume['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.volume_topic, host), + {"method": "delete_volume", + "args": {"volume_id": volume_id}}) + + def update(self, context, volume_id, fields): + self.db.volume_update(context, volume_id, fields) + + def get(self, context, volume_id=None): + if volume_id is not None: + return self.db.volume_get(context, volume_id) + if context.user.is_admin(): + return self.db.volume_get_all(context) + return self.db.volume_get_all_by_project(context, context.project_id) + + def check_attach(self, context, volume_id): + volume = self.db.volume_get(context, volume_id) + # TODO(vish): abstract status checking? + if volume['status'] != "available": + raise exception.ApiError(_("Volume status must be available")) + if volume['attach_status'] == "attached": + raise exception.ApiError(_("Volume is already attached")) + + def check_detach(self, context, volume_id): + volume = self.db.volume_get(context, volume_id) + # TODO(vish): abstract status checking? 
+ if volume['status'] == "available": + raise exception.ApiError(_("Volume is already detached")) From 468bc4745f002b521f21c5d621bdcb596b8ddfcd Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Tue, 4 Jan 2011 15:18:28 -0800 Subject: [PATCH 096/147] Merge from trunk again -- get rid of twistd dependencies --- nova/twistd.py | 39 +++++++++++++++++++-------------------- nova/virt/hyperv.py | 30 +++++++++++++++--------------- nova/virt/images.py | 6 ++++-- 3 files changed, 38 insertions(+), 37 deletions(-) diff --git a/nova/twistd.py b/nova/twistd.py index 22e2d06d3ed8..1dd10dbb592f 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -237,26 +237,25 @@ def serve(filename): logging.getLogger('amqplib').setLevel(logging.WARN) FLAGS.python = filename FLAGS.no_save = True - if sys.platform != 'win32': - if not FLAGS.pidfile: - FLAGS.pidfile = '%s.pid' % name - elif FLAGS.pidfile.endswith('twistd.pid'): - FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', - '%s.pid' % name) - # NOTE(vish): if we're running nodaemon, redirect the log to stdout - if FLAGS.nodaemon and not FLAGS.logfile: - FLAGS.logfile = "-" - if not FLAGS.logfile: - FLAGS.logfile = '%s.log' % name - elif FLAGS.logfile.endswith('twistd.log'): - FLAGS.logfile = FLAGS.logfile.replace('twistd.log', - '%s.log' % name) - if FLAGS.logdir: - FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile) - if not FLAGS.prefix: - FLAGS.prefix = name - elif FLAGS.prefix.endswith('twisted'): - FLAGS.prefix = FLAGS.prefix.replace('twisted', name) + if not FLAGS.pidfile: + FLAGS.pidfile = '%s.pid' % name + elif FLAGS.pidfile.endswith('twistd.pid'): + FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', + '%s.pid' % name) + # NOTE(vish): if we're running nodaemon, redirect the log to stdout + if FLAGS.nodaemon and not FLAGS.logfile: + FLAGS.logfile = "-" + if not FLAGS.logfile: + FLAGS.logfile = '%s.log' % name + elif FLAGS.logfile.endswith('twistd.log'): + FLAGS.logfile = FLAGS.logfile.replace('twistd.log', + '%s.log' % name) + if FLAGS.logdir: + FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile) + if not FLAGS.prefix: + FLAGS.prefix = name + elif FLAGS.prefix.endswith('twisted'): + FLAGS.prefix = FLAGS.prefix.replace('twisted', name) action = 'start' if len(argv) > 1: diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index a254aaf8a1ae..6aeeb0837f23 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -64,8 +64,6 @@ import os import logging import time -from twisted.internet import defer - from nova import exception from nova import flags from nova.auth import manager @@ -112,16 +110,20 @@ class HyperVConnection(object): self._conn = wmi.WMI(moniker='//./root/virtualization') self._cim_conn = wmi.WMI(moniker='//./root/cimv2') + def init_host(self): + #FIXME(chiradeep): implement this + logging.debug('In init host') + pass + def list_instances(self): """ Return the names of all the instances known to Hyper-V. """ vms = [v.ElementName \ for v in self._conn.Msvm_ComputerSystem(['ElementName'])] return vms - @defer.inlineCallbacks def spawn(self, instance): """ Create a new VM and start it.""" - vm = yield self._lookup(instance.name) + vm = self._lookup(instance.name) if vm is not None: raise exception.Duplicate('Attempted to create duplicate name %s' % instance.name) @@ -130,18 +132,18 @@ class HyperVConnection(object): project = manager.AuthManager().get_project(instance['project_id']) #Fetch the file, assume it is a VHD file. 
base_vhd_filename = os.path.join(FLAGS.instances_path, - instance['str_id']) + instance.name) vhdfile = "%s.vhd" % (base_vhd_filename) - yield images.fetch(instance['image_id'], vhdfile, user, project) + images.fetch(instance['image_id'], vhdfile, user, project) try: - yield self._create_vm(instance) + self._create_vm(instance) - yield self._create_disk(instance['name'], vhdfile) - yield self._create_nic(instance['name'], instance['mac_address']) + self._create_disk(instance['name'], vhdfile) + self._create_nic(instance['name'], instance['mac_address']) logging.debug('Starting VM %s ', instance.name) - yield self._set_vm_state(instance['name'], 'Enabled') + self._set_vm_state(instance['name'], 'Enabled') logging.info('Started VM %s ', instance.name) except Exception as exn: logging.error('spawn vm failed: %s', exn) @@ -341,21 +343,19 @@ class HyperVConnection(object): wmi_obj.Properties_.Item(prop).Value return newinst - @defer.inlineCallbacks def reboot(self, instance): """Reboot the specified instance.""" - vm = yield self._lookup(instance.name) + vm = self._lookup(instance.name) if vm is None: raise exception.NotFound('instance not present %s' % instance.name) self._set_vm_state(instance.name, 'Reboot') - @defer.inlineCallbacks def destroy(self, instance): """Destroy the VM. Also destroy the associated VHD disk files""" logging.debug("Got request to destroy vm %s", instance.name) - vm = yield self._lookup(instance.name) + vm = self._lookup(instance.name) if vm is None: - defer.returnValue(None) + return vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0] vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] #Stop the VM first. diff --git a/nova/virt/images.py b/nova/virt/images.py index bd7426ad0416..41719753866c 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -60,7 +60,9 @@ def _fetch_image_no_curl(url, path, headers): while 1: data = urlfile.read(chunk) if not data: - break f.write(data) + break + f.write(data) + urlopened = urllib2.urlopen(request) urlretrieve(urlopened, path) logging.debug("Finished retreving %s -- placed in %s", url, path) @@ -68,7 +70,6 @@ def _fetch_image_no_curl(url, path, headers): def _fetch_s3_image(image, path, user, project): url = image_url(image) - # This should probably move somewhere else, like e.g. a download_as # method on User objects and at the same time get rewritten to use # a web client. 
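The first images.py hunk above untangles a line that had fused break with f.write(data), so the download loop exited before writing anything. For reference, a self-contained sketch of the corrected read loop under the same assumptions as this module (Python 2, urllib2; the function name and chunk size are illustrative):

    import urllib2

    def _fetch_to_path(url, path, headers=None, chunk=64 * 1024):
        request = urllib2.Request(url, headers=headers or {})
        urlfile = urllib2.urlopen(request)
        with open(path, 'wb') as f:
            while True:
                data = urlfile.read(chunk)
                if not data:
                    break         # EOF: stop before writing an empty chunk
                f.write(data)     # otherwise persist this chunk and loop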
@@ -88,6 +89,7 @@ def _fetch_s3_image(image, path, user, project): cmd = ['/usr/bin/curl', '--fail', '--silent', url] for (k, v) in headers.iteritems(): cmd += ['-H', '%s: %s' % (k, v)] + cmd += ['-o', path] cmd_out = ' '.join(cmd) return utils.execute(cmd_out) From 91e44607d1454a9c2e258910f009a034fb9cff1c Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Tue, 4 Jan 2011 15:42:29 -0800 Subject: [PATCH 097/147] i18n logging and exception strings --- nova/virt/hyperv.py | 62 ++++++++++++++++++++++----------------------- nova/virt/images.py | 2 +- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 6aeeb0837f23..4b9f6f946154 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -112,7 +112,7 @@ class HyperVConnection(object): def init_host(self): #FIXME(chiradeep): implement this - logging.debug('In init host') + logging.debug(_('In init host')) pass def list_instances(self): @@ -125,7 +125,7 @@ class HyperVConnection(object): """ Create a new VM and start it.""" vm = self._lookup(instance.name) if vm is not None: - raise exception.Duplicate('Attempted to create duplicate name %s' % + raise exception.Duplicate(_('Attempt to create duplicate vm %s') % instance.name) user = manager.AuthManager().get_user(instance['user_id']) @@ -142,11 +142,11 @@ class HyperVConnection(object): self._create_disk(instance['name'], vhdfile) self._create_nic(instance['name'], instance['mac_address']) - logging.debug('Starting VM %s ', instance.name) + logging.debug(_('Starting VM %s '), instance.name) self._set_vm_state(instance['name'], 'Enabled') - logging.info('Started VM %s ', instance.name) + logging.info(_('Started VM %s '), instance.name) except Exception as exn: - logging.error('spawn vm failed: %s', exn) + logging.error(_('spawn vm failed: %s'), exn) self.destroy(instance) def _create_vm(self, instance): @@ -163,9 +163,9 @@ class HyperVConnection(object): success = (ret_val == 0) if not success: - raise Exception('Failed to create VM %s', instance.name) + raise Exception(_('Failed to create VM %s'), instance.name) - logging.debug('Created VM %s...', instance.name) + logging.debug(_('Created VM %s...'), instance.name) vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0] vmsettings = vm.associators( @@ -182,7 +182,7 @@ class HyperVConnection(object): (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( vm.path_(), [memsetting.GetText_(1)]) - logging.debug('Set memory for vm %s...', instance.name) + logging.debug(_('Set memory for vm %s...'), instance.name) procsetting = vmsetting.associators( wmi_result_class='Msvm_ProcessorSettingData')[0] vcpus = long(instance['vcpus']) @@ -192,11 +192,11 @@ class HyperVConnection(object): (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( vm.path_(), [procsetting.GetText_(1)]) - logging.debug('Set vcpus for vm %s...', instance.name) + logging.debug(_('Set vcpus for vm %s...'), instance.name) def _create_disk(self, vm_name, vhdfile): """Create a disk and attach it to the vm""" - logging.debug("Creating disk for %s by attaching disk file %s", + logging.debug(_('Creating disk for %s by attaching disk file %s'), vm_name, vhdfile) #Find the IDE controller for the vm. vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name) @@ -221,10 +221,10 @@ class HyperVConnection(object): #Add the cloned disk drive object to the vm. 
new_resources = self._add_virt_resource(diskdrive, vm) if new_resources is None: - raise Exception('Failed to add diskdrive to VM %s', + raise Exception(_('Failed to add diskdrive to VM %s'), vm_name) diskdrive_path = new_resources[0] - logging.debug("New disk drive path is %s", diskdrive_path) + logging.debug(_('New disk drive path is %s'), diskdrive_path) #Find the default VHD disk object. vhddefault = self._conn.query( "SELECT * FROM Msvm_ResourceAllocationSettingData \ @@ -241,13 +241,13 @@ class HyperVConnection(object): #Add the new vhd object as a virtual hard disk to the vm. new_resources = self._add_virt_resource(vhddisk, vm) if new_resources is None: - raise Exception('Failed to add vhd file to VM %s', + raise Exception(_('Failed to add vhd file to VM %s'), vm_name) - logging.info("Created disk for %s ", vm_name) + logging.info(_('Created disk for %s'), vm_name) def _create_nic(self, vm_name, mac): """Create a (emulated) nic and attach it to the vm""" - logging.debug("Creating nic for %s ", vm_name) + logging.debug(_('Creating nic for %s '), vm_name) #Find the vswitch that is connected to the physical nic. vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) extswitch = self._find_external_network() @@ -266,10 +266,10 @@ class HyperVConnection(object): (new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name, "", extswitch.path_()) if ret_val != 0: - logging.error("Failed creating a new port on the external vswitch") - raise Exception('Failed creating port for %s', + logging.error(_('Failed creating a port on the external vswitch')) + raise Exception(_('Failed creating port for %s'), vm_name) - logging.debug("Created switch port %s on switch %s", + logging.debug(_("Created switch port %s on switch %s"), vm_name, extswitch.path_()) #Connect the new nic to the new port. new_nic_data.Connection = [new_port] @@ -279,9 +279,9 @@ class HyperVConnection(object): #Add the new nic to the vm. new_resources = self._add_virt_resource(new_nic_data, vm) if new_resources is None: - raise Exception('Failed to add nic to VM %s', + raise Exception(_('Failed to add nic to VM %s'), vm_name) - logging.info("Created nic for %s ", vm_name) + logging.info(_("Created nic for %s "), vm_name) def _add_virt_resource(self, res_setting_data, target_vm): """Add a new resource (disk/nic) to the VM""" @@ -314,9 +314,9 @@ class HyperVConnection(object): time.sleep(0.1) job = self._conn.Msvm_ConcreteJob(InstanceID=inst_id)[0] if job.JobState != WMI_JOB_STATE_COMPLETED: - logging.debug("WMI job failed: %s", job.ErrorSummaryDescription) + logging.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription) return False - logging.debug("WMI job succeeded: %s, Elapsed=%s ", job.Description, + logging.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description, job.ElapsedTime) return True @@ -352,7 +352,7 @@ class HyperVConnection(object): def destroy(self, instance): """Destroy the VM. Also destroy the associated VHD disk files""" - logging.debug("Got request to destroy vm %s", instance.name) + logging.debug(_("Got request to destroy vm %s"), instance.name) vm = self._lookup(instance.name) if vm is None: return @@ -377,13 +377,13 @@ class HyperVConnection(object): elif ret_val == 0: success = True if not success: - raise Exception('Failed to destroy vm %s' % instance.name) + raise Exception(_('Failed to destroy vm %s') % instance.name) #Delete associated vhd disk files. 
for disk in diskfiles: vhdfile = self._cim_conn.CIM_DataFile(Name=disk) for vf in vhdfile: vf.Delete() - logging.debug("Deleted disk %s vm %s", vhdfile, instance.name) + logging.debug(_("Del: disk %s vm %s"), vhdfile, instance.name) def get_info(self, instance_id): """Get information about the VM""" @@ -399,8 +399,8 @@ class HyperVConnection(object): summary_info = vs_man_svc.GetSummaryInformation( [4, 100, 103, 105], settings_paths)[1] info = summary_info[0] - logging.debug("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \ - cpu_time=%s", instance_id, + logging.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \ + cpu_time=%s"), instance_id, str(HYPERV_POWER_STATE[info.EnabledState]), str(info.MemoryUsage), str(info.NumberOfProcessors), @@ -418,7 +418,7 @@ class HyperVConnection(object): if n == 0: return None elif n > 1: - raise Exception('duplicate name found: %s' % i) + raise Exception(_('duplicate name found: %s') % i) else: return vms[0].ElementName @@ -438,12 +438,12 @@ class HyperVConnection(object): #already in the state requested success = True if success: - logging.info("Successfully changed vm state of %s to %s", + logging.info(_("Successfully changed vm state of %s to %s"), vm_name, req_state) else: - logging.error("Failed to change vm state of %s to %s", + logging.error(_("Failed to change vm state of %s to %s"), vm_name, req_state) - raise Exception("Failed to change vm state of %s to %s", + raise Exception(_("Failed to change vm state of %s to %s"), vm_name, req_state) def attach_volume(self, instance_name, device_path, mountpoint): diff --git a/nova/virt/images.py b/nova/virt/images.py index 41719753866c..be162b5b1317 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -65,7 +65,7 @@ def _fetch_image_no_curl(url, path, headers): urlopened = urllib2.urlopen(request) urlretrieve(urlopened, path) - logging.debug("Finished retreving %s -- placed in %s", url, path) + logging.debug(_("Finished retreving %s -- placed in %s"), url, path) def _fetch_s3_image(image, path, user, project): From 46a249eaa1db7d0f5b765cff701bb13005e3db49 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Tue, 4 Jan 2011 16:06:03 -0800 Subject: [PATCH 098/147] Revert some unneeded formatting since twistd is no longer used --- nova/twistd.py | 6 ++---- nova/virt/images.py | 1 + 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/nova/twistd.py b/nova/twistd.py index 1dd10dbb592f..29be9c4e1457 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -240,16 +240,14 @@ def serve(filename): if not FLAGS.pidfile: FLAGS.pidfile = '%s.pid' % name elif FLAGS.pidfile.endswith('twistd.pid'): - FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', - '%s.pid' % name) + FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name) # NOTE(vish): if we're running nodaemon, redirect the log to stdout if FLAGS.nodaemon and not FLAGS.logfile: FLAGS.logfile = "-" if not FLAGS.logfile: FLAGS.logfile = '%s.log' % name elif FLAGS.logfile.endswith('twistd.log'): - FLAGS.logfile = FLAGS.logfile.replace('twistd.log', - '%s.log' % name) + FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name) if FLAGS.logdir: FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile) if not FLAGS.prefix: diff --git a/nova/virt/images.py b/nova/virt/images.py index be162b5b1317..2d03da4b4fed 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -70,6 +70,7 @@ def _fetch_image_no_curl(url, path, headers): def _fetch_s3_image(image, path, user, project): url = image_url(image) + # This should 
probably move somewhere else, like e.g. a download_as # method on User objects and at the same time get rewritten to use # a web client. From 7924b211f23dcd687612b32341e2be0b57fd386e Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Tue, 4 Jan 2011 16:20:14 -0800 Subject: [PATCH 099/147] Redis dependency no longer needed --- nova/tests/hyperv_unittest.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/tests/hyperv_unittest.py b/nova/tests/hyperv_unittest.py index 6346ce4c75f3..27a41f19eaa2 100644 --- a/nova/tests/hyperv_unittest.py +++ b/nova/tests/hyperv_unittest.py @@ -27,10 +27,6 @@ from nova.virt import hyperv FLAGS = flags.FLAGS FLAGS.connection_type = 'hyperv' -# Redis is probably not running on Hyper-V host. -# Change this to the actual Redis host -FLAGS.redis_host = '127.0.0.1' - class HyperVTestCase(test.TrialTestCase): """Test cases for the Hyper-V driver""" From f7543cdf973f4ddb5718255e9671530fc98fc756 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Tue, 4 Jan 2011 16:21:27 -0800 Subject: [PATCH 100/147] need one more newline --- nova/tests/hyperv_unittest.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/hyperv_unittest.py b/nova/tests/hyperv_unittest.py index 27a41f19eaa2..7044db43c2f9 100644 --- a/nova/tests/hyperv_unittest.py +++ b/nova/tests/hyperv_unittest.py @@ -28,6 +28,7 @@ from nova.virt import hyperv FLAGS = flags.FLAGS FLAGS.connection_type = 'hyperv' + class HyperVTestCase(test.TrialTestCase): """Test cases for the Hyper-V driver""" def setUp(self): # pylint: disable-msg=C0103 From 472af7e750f369e3b999d2b1ac48f74369975ba6 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 4 Jan 2011 18:34:47 -0600 Subject: [PATCH 101/147] Recover from a lost data store connection --- nova/service.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/nova/service.py b/nova/service.py index f1f90742f845..7203430c6f82 100644 --- a/nova/service.py +++ b/nova/service.py @@ -24,17 +24,21 @@ import inspect import logging import os import sys +import time from eventlet import event from eventlet import greenthread from eventlet import greenpool +from sqlalchemy.exc import OperationalError + from nova import context from nova import db from nova import exception from nova import flags from nova import rpc from nova import utils +from nova.db.sqlalchemy import models FLAGS = flags.FLAGS @@ -204,6 +208,14 @@ class Service(object): self.model_disconnected = True logging.exception(_("model server went away")) + try: + models.register_models() + except OperationalError: + logging.exception(_("Data store is unreachable." + " Trying again in %d seconds.") % + FLAGS.sql_retry_interval) + time.sleep(FLAGS.sql_retry_interval) + def serve(*services): argv = FLAGS(sys.argv) From aab31f797b41feb5b9b8856dd2df4b46435ccdbc Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Wed, 5 Jan 2011 00:59:39 -0500 Subject: [PATCH 102/147] Silence eventlet.wsgi.server so it doesn't go to stdout and pollute our logs. --- nova/wsgi.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index 9c333d3abeb9..f85503149d83 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -36,9 +36,10 @@ import webob.exc from nova import log as logging -# TODO(todd): should this just piggyback the handler for root logger -# since we usually log to syslog, but changes if not daemonzied? 
-logging.getLogger("routes.middleware").addHandler(logging.StreamHandler()) + +class NullWsgiLogger(object): + def write(*args): + pass class Server(object): @@ -63,7 +64,8 @@ class Server(object): def _run(self, application, socket): """Start a WSGI server in a new green thread.""" - eventlet.wsgi.server(socket, application, custom_pool=self.pool) + eventlet.wsgi.server(socket, application, custom_pool=self.pool, + log=NullWsgiLogger()) class Application(object): From ada65e007e4511f63efcbfa94e51d2d41af1d315 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Wed, 5 Jan 2011 01:54:31 -0500 Subject: [PATCH 103/147] Better method for eventlet.wsgi.server logging. --- nova/log.py | 2 +- nova/wsgi.py | 15 +++++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/nova/log.py b/nova/log.py index 88a961e13c78..97472b20bdfc 100644 --- a/nova/log.py +++ b/nova/log.py @@ -67,7 +67,7 @@ flags.DEFINE_string('logging_exception_prefix', flags.DEFINE_list('default_log_levels', ['amqplib=WARN', 'sqlalchemy=WARN', - 'audit=INFO'], + 'eventlet.wsgi.server=WARN'], 'list of logger=LEVEL pairs') flags.DEFINE_bool('use_syslog', False, 'output to syslog') diff --git a/nova/wsgi.py b/nova/wsgi.py index f85503149d83..b48747be41ab 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -37,9 +37,15 @@ import webob.exc from nova import log as logging -class NullWsgiLogger(object): - def write(*args): - pass +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.DEBUG): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg) class Server(object): @@ -64,8 +70,9 @@ class Server(object): def _run(self, application, socket): """Start a WSGI server in a new green thread.""" + logger = logging.getLogger('eventlet.wsgi.server') eventlet.wsgi.server(socket, application, custom_pool=self.pool, - log=NullWsgiLogger()) + log=WritableLogger(logger)) class Application(object): From 5912176111ccb37d3d26dd7b415de83e544d9c54 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Wed, 5 Jan 2011 02:56:02 -0500 Subject: [PATCH 104/147] Remove stale doc files. 
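Before the doc cleanup below, a usage note on the WritableLogger adapter added in PATCH 103 above: eventlet.wsgi.server writes its per-request lines to whatever file-like object is passed as log (which PATCH 102 notes was polluting the logs via stdout), so wrapping a standard logger routes those lines through nova's normal logging configuration, and the new 'eventlet.wsgi.server=WARN' default level quiets them. A minimal self-contained sketch; the sample app and bind address are hypothetical:

    import logging

    import eventlet
    import eventlet.wsgi

    class WritableLogger(object):
        """File-like shim: write() forwards each message to a logger."""

        def __init__(self, logger, level=logging.DEBUG):
            self.logger = logger
            self.level = level

        def write(self, msg):
            self.logger.log(self.level, msg)

    def app(environ, start_response):
        # Trivial WSGI app, for illustration only.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['ok\n']

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger('eventlet.wsgi.server')
    eventlet.wsgi.server(eventlet.listen(('127.0.0.1', 8080)), app,
                         log=WritableLogger(logger))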
--- doc/.autogenerated | 97 ------------------ doc/source/api/autoindex.rst | 99 ------------------- doc/source/api/nova..adminclient.rst | 6 -- doc/source/api/nova..api.cloud.rst | 6 -- doc/source/api/nova..api.ec2.admin.rst | 6 -- doc/source/api/nova..api.ec2.apirequest.rst | 6 -- doc/source/api/nova..api.ec2.cloud.rst | 6 -- doc/source/api/nova..api.ec2.images.rst | 6 -- .../nova..api.ec2.metadatarequesthandler.rst | 6 -- doc/source/api/nova..api.openstack.auth.rst | 6 -- .../nova..api.openstack.backup_schedules.rst | 6 -- doc/source/api/nova..api.openstack.faults.rst | 6 -- .../api/nova..api.openstack.flavors.rst | 6 -- doc/source/api/nova..api.openstack.images.rst | 6 -- .../api/nova..api.openstack.servers.rst | 6 -- .../nova..api.openstack.sharedipgroups.rst | 6 -- doc/source/api/nova..auth.dbdriver.rst | 6 -- doc/source/api/nova..auth.fakeldap.rst | 6 -- doc/source/api/nova..auth.ldapdriver.rst | 6 -- doc/source/api/nova..auth.manager.rst | 6 -- doc/source/api/nova..auth.signer.rst | 6 -- doc/source/api/nova..cloudpipe.pipelib.rst | 6 -- doc/source/api/nova..compute.disk.rst | 6 -- .../api/nova..compute.instance_types.rst | 6 -- doc/source/api/nova..compute.manager.rst | 6 -- doc/source/api/nova..compute.monitor.rst | 6 -- doc/source/api/nova..compute.power_state.rst | 6 -- doc/source/api/nova..context.rst | 6 -- doc/source/api/nova..crypto.rst | 6 -- doc/source/api/nova..db.api.rst | 6 -- doc/source/api/nova..db.sqlalchemy.api.rst | 6 -- doc/source/api/nova..db.sqlalchemy.models.rst | 6 -- .../api/nova..db.sqlalchemy.session.rst | 6 -- doc/source/api/nova..exception.rst | 6 -- doc/source/api/nova..fakerabbit.rst | 6 -- doc/source/api/nova..flags.rst | 6 -- doc/source/api/nova..image.service.rst | 6 -- doc/source/api/nova..manager.rst | 6 -- doc/source/api/nova..network.linux_net.rst | 6 -- doc/source/api/nova..network.manager.rst | 6 -- doc/source/api/nova..objectstore.bucket.rst | 6 -- doc/source/api/nova..objectstore.handler.rst | 6 -- doc/source/api/nova..objectstore.image.rst | 6 -- doc/source/api/nova..objectstore.stored.rst | 6 -- doc/source/api/nova..process.rst | 6 -- doc/source/api/nova..quota.rst | 6 -- doc/source/api/nova..rpc.rst | 6 -- doc/source/api/nova..scheduler.chance.rst | 6 -- doc/source/api/nova..scheduler.driver.rst | 6 -- doc/source/api/nova..scheduler.manager.rst | 6 -- doc/source/api/nova..scheduler.simple.rst | 6 -- doc/source/api/nova..server.rst | 6 -- doc/source/api/nova..service.rst | 6 -- doc/source/api/nova..test.rst | 6 -- .../api/nova..tests.access_unittest.rst | 6 -- doc/source/api/nova..tests.api.fakes.rst | 6 -- .../api/nova..tests.api.openstack.fakes.rst | 6 -- .../nova..tests.api.openstack.test_api.rst | 6 -- .../nova..tests.api.openstack.test_auth.rst | 6 -- .../nova..tests.api.openstack.test_faults.rst | 6 -- ...nova..tests.api.openstack.test_flavors.rst | 6 -- .../nova..tests.api.openstack.test_images.rst | 6 -- ....tests.api.openstack.test_ratelimiting.rst | 6 -- ...nova..tests.api.openstack.test_servers.rst | 6 -- ...ests.api.openstack.test_sharedipgroups.rst | 6 -- doc/source/api/nova..tests.api.test_wsgi.rst | 6 -- .../api/nova..tests.api_integration.rst | 6 -- doc/source/api/nova..tests.api_unittest.rst | 6 -- doc/source/api/nova..tests.auth_unittest.rst | 6 -- doc/source/api/nova..tests.cloud_unittest.rst | 6 -- .../api/nova..tests.compute_unittest.rst | 6 -- doc/source/api/nova..tests.declare_flags.rst | 6 -- doc/source/api/nova..tests.fake_flags.rst | 6 -- doc/source/api/nova..tests.flags_unittest.rst | 6 -- 
.../api/nova..tests.network_unittest.rst | 6 -- .../api/nova..tests.objectstore_unittest.rst | 6 -- .../api/nova..tests.process_unittest.rst | 6 -- doc/source/api/nova..tests.quota_unittest.rst | 6 -- doc/source/api/nova..tests.real_flags.rst | 6 -- doc/source/api/nova..tests.rpc_unittest.rst | 6 -- doc/source/api/nova..tests.runtime_flags.rst | 6 -- .../api/nova..tests.scheduler_unittest.rst | 6 -- .../api/nova..tests.service_unittest.rst | 6 -- .../api/nova..tests.twistd_unittest.rst | 6 -- .../api/nova..tests.validator_unittest.rst | 6 -- doc/source/api/nova..tests.virt_unittest.rst | 6 -- .../api/nova..tests.volume_unittest.rst | 6 -- doc/source/api/nova..twistd.rst | 6 -- doc/source/api/nova..utils.rst | 6 -- doc/source/api/nova..validate.rst | 6 -- doc/source/api/nova..virt.connection.rst | 6 -- doc/source/api/nova..virt.fake.rst | 6 -- doc/source/api/nova..virt.images.rst | 6 -- doc/source/api/nova..virt.libvirt_conn.rst | 6 -- doc/source/api/nova..virt.xenapi.rst | 6 -- doc/source/api/nova..volume.driver.rst | 6 -- doc/source/api/nova..volume.manager.rst | 6 -- doc/source/api/nova..wsgi.rst | 6 -- 98 files changed, 772 deletions(-) delete mode 100644 doc/.autogenerated delete mode 100644 doc/source/api/autoindex.rst delete mode 100644 doc/source/api/nova..adminclient.rst delete mode 100644 doc/source/api/nova..api.cloud.rst delete mode 100644 doc/source/api/nova..api.ec2.admin.rst delete mode 100644 doc/source/api/nova..api.ec2.apirequest.rst delete mode 100644 doc/source/api/nova..api.ec2.cloud.rst delete mode 100644 doc/source/api/nova..api.ec2.images.rst delete mode 100644 doc/source/api/nova..api.ec2.metadatarequesthandler.rst delete mode 100644 doc/source/api/nova..api.openstack.auth.rst delete mode 100644 doc/source/api/nova..api.openstack.backup_schedules.rst delete mode 100644 doc/source/api/nova..api.openstack.faults.rst delete mode 100644 doc/source/api/nova..api.openstack.flavors.rst delete mode 100644 doc/source/api/nova..api.openstack.images.rst delete mode 100644 doc/source/api/nova..api.openstack.servers.rst delete mode 100644 doc/source/api/nova..api.openstack.sharedipgroups.rst delete mode 100644 doc/source/api/nova..auth.dbdriver.rst delete mode 100644 doc/source/api/nova..auth.fakeldap.rst delete mode 100644 doc/source/api/nova..auth.ldapdriver.rst delete mode 100644 doc/source/api/nova..auth.manager.rst delete mode 100644 doc/source/api/nova..auth.signer.rst delete mode 100644 doc/source/api/nova..cloudpipe.pipelib.rst delete mode 100644 doc/source/api/nova..compute.disk.rst delete mode 100644 doc/source/api/nova..compute.instance_types.rst delete mode 100644 doc/source/api/nova..compute.manager.rst delete mode 100644 doc/source/api/nova..compute.monitor.rst delete mode 100644 doc/source/api/nova..compute.power_state.rst delete mode 100644 doc/source/api/nova..context.rst delete mode 100644 doc/source/api/nova..crypto.rst delete mode 100644 doc/source/api/nova..db.api.rst delete mode 100644 doc/source/api/nova..db.sqlalchemy.api.rst delete mode 100644 doc/source/api/nova..db.sqlalchemy.models.rst delete mode 100644 doc/source/api/nova..db.sqlalchemy.session.rst delete mode 100644 doc/source/api/nova..exception.rst delete mode 100644 doc/source/api/nova..fakerabbit.rst delete mode 100644 doc/source/api/nova..flags.rst delete mode 100644 doc/source/api/nova..image.service.rst delete mode 100644 doc/source/api/nova..manager.rst delete mode 100644 doc/source/api/nova..network.linux_net.rst delete mode 100644 doc/source/api/nova..network.manager.rst delete mode 
100644 doc/source/api/nova..objectstore.bucket.rst delete mode 100644 doc/source/api/nova..objectstore.handler.rst delete mode 100644 doc/source/api/nova..objectstore.image.rst delete mode 100644 doc/source/api/nova..objectstore.stored.rst delete mode 100644 doc/source/api/nova..process.rst delete mode 100644 doc/source/api/nova..quota.rst delete mode 100644 doc/source/api/nova..rpc.rst delete mode 100644 doc/source/api/nova..scheduler.chance.rst delete mode 100644 doc/source/api/nova..scheduler.driver.rst delete mode 100644 doc/source/api/nova..scheduler.manager.rst delete mode 100644 doc/source/api/nova..scheduler.simple.rst delete mode 100644 doc/source/api/nova..server.rst delete mode 100644 doc/source/api/nova..service.rst delete mode 100644 doc/source/api/nova..test.rst delete mode 100644 doc/source/api/nova..tests.access_unittest.rst delete mode 100644 doc/source/api/nova..tests.api.fakes.rst delete mode 100644 doc/source/api/nova..tests.api.openstack.fakes.rst delete mode 100644 doc/source/api/nova..tests.api.openstack.test_api.rst delete mode 100644 doc/source/api/nova..tests.api.openstack.test_auth.rst delete mode 100644 doc/source/api/nova..tests.api.openstack.test_faults.rst delete mode 100644 doc/source/api/nova..tests.api.openstack.test_flavors.rst delete mode 100644 doc/source/api/nova..tests.api.openstack.test_images.rst delete mode 100644 doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst delete mode 100644 doc/source/api/nova..tests.api.openstack.test_servers.rst delete mode 100644 doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst delete mode 100644 doc/source/api/nova..tests.api.test_wsgi.rst delete mode 100644 doc/source/api/nova..tests.api_integration.rst delete mode 100644 doc/source/api/nova..tests.api_unittest.rst delete mode 100644 doc/source/api/nova..tests.auth_unittest.rst delete mode 100644 doc/source/api/nova..tests.cloud_unittest.rst delete mode 100644 doc/source/api/nova..tests.compute_unittest.rst delete mode 100644 doc/source/api/nova..tests.declare_flags.rst delete mode 100644 doc/source/api/nova..tests.fake_flags.rst delete mode 100644 doc/source/api/nova..tests.flags_unittest.rst delete mode 100644 doc/source/api/nova..tests.network_unittest.rst delete mode 100644 doc/source/api/nova..tests.objectstore_unittest.rst delete mode 100644 doc/source/api/nova..tests.process_unittest.rst delete mode 100644 doc/source/api/nova..tests.quota_unittest.rst delete mode 100644 doc/source/api/nova..tests.real_flags.rst delete mode 100644 doc/source/api/nova..tests.rpc_unittest.rst delete mode 100644 doc/source/api/nova..tests.runtime_flags.rst delete mode 100644 doc/source/api/nova..tests.scheduler_unittest.rst delete mode 100644 doc/source/api/nova..tests.service_unittest.rst delete mode 100644 doc/source/api/nova..tests.twistd_unittest.rst delete mode 100644 doc/source/api/nova..tests.validator_unittest.rst delete mode 100644 doc/source/api/nova..tests.virt_unittest.rst delete mode 100644 doc/source/api/nova..tests.volume_unittest.rst delete mode 100644 doc/source/api/nova..twistd.rst delete mode 100644 doc/source/api/nova..utils.rst delete mode 100644 doc/source/api/nova..validate.rst delete mode 100644 doc/source/api/nova..virt.connection.rst delete mode 100644 doc/source/api/nova..virt.fake.rst delete mode 100644 doc/source/api/nova..virt.images.rst delete mode 100644 doc/source/api/nova..virt.libvirt_conn.rst delete mode 100644 doc/source/api/nova..virt.xenapi.rst delete mode 100644 doc/source/api/nova..volume.driver.rst delete mode 
100644 doc/source/api/nova..volume.manager.rst delete mode 100644 doc/source/api/nova..wsgi.rst diff --git a/doc/.autogenerated b/doc/.autogenerated deleted file mode 100644 index 3a70f87808d1..000000000000 --- a/doc/.autogenerated +++ /dev/null @@ -1,97 +0,0 @@ -source/api/nova..adminclient.rst -source/api/nova..api.cloud.rst -source/api/nova..api.ec2.admin.rst -source/api/nova..api.ec2.apirequest.rst -source/api/nova..api.ec2.cloud.rst -source/api/nova..api.ec2.images.rst -source/api/nova..api.ec2.metadatarequesthandler.rst -source/api/nova..api.openstack.auth.rst -source/api/nova..api.openstack.backup_schedules.rst -source/api/nova..api.openstack.faults.rst -source/api/nova..api.openstack.flavors.rst -source/api/nova..api.openstack.images.rst -source/api/nova..api.openstack.servers.rst -source/api/nova..api.openstack.sharedipgroups.rst -source/api/nova..auth.dbdriver.rst -source/api/nova..auth.fakeldap.rst -source/api/nova..auth.ldapdriver.rst -source/api/nova..auth.manager.rst -source/api/nova..auth.signer.rst -source/api/nova..cloudpipe.pipelib.rst -source/api/nova..compute.disk.rst -source/api/nova..compute.instance_types.rst -source/api/nova..compute.manager.rst -source/api/nova..compute.monitor.rst -source/api/nova..compute.power_state.rst -source/api/nova..context.rst -source/api/nova..crypto.rst -source/api/nova..db.api.rst -source/api/nova..db.sqlalchemy.api.rst -source/api/nova..db.sqlalchemy.models.rst -source/api/nova..db.sqlalchemy.session.rst -source/api/nova..exception.rst -source/api/nova..fakerabbit.rst -source/api/nova..flags.rst -source/api/nova..image.service.rst -source/api/nova..manager.rst -source/api/nova..network.linux_net.rst -source/api/nova..network.manager.rst -source/api/nova..objectstore.bucket.rst -source/api/nova..objectstore.handler.rst -source/api/nova..objectstore.image.rst -source/api/nova..objectstore.stored.rst -source/api/nova..process.rst -source/api/nova..quota.rst -source/api/nova..rpc.rst -source/api/nova..scheduler.chance.rst -source/api/nova..scheduler.driver.rst -source/api/nova..scheduler.manager.rst -source/api/nova..scheduler.simple.rst -source/api/nova..server.rst -source/api/nova..service.rst -source/api/nova..test.rst -source/api/nova..tests.access_unittest.rst -source/api/nova..tests.api.fakes.rst -source/api/nova..tests.api.openstack.fakes.rst -source/api/nova..tests.api.openstack.test_api.rst -source/api/nova..tests.api.openstack.test_auth.rst -source/api/nova..tests.api.openstack.test_faults.rst -source/api/nova..tests.api.openstack.test_flavors.rst -source/api/nova..tests.api.openstack.test_images.rst -source/api/nova..tests.api.openstack.test_ratelimiting.rst -source/api/nova..tests.api.openstack.test_servers.rst -source/api/nova..tests.api.openstack.test_sharedipgroups.rst -source/api/nova..tests.api.test_wsgi.rst -source/api/nova..tests.api_integration.rst -source/api/nova..tests.api_unittest.rst -source/api/nova..tests.auth_unittest.rst -source/api/nova..tests.cloud_unittest.rst -source/api/nova..tests.compute_unittest.rst -source/api/nova..tests.declare_flags.rst -source/api/nova..tests.fake_flags.rst -source/api/nova..tests.flags_unittest.rst -source/api/nova..tests.network_unittest.rst -source/api/nova..tests.objectstore_unittest.rst -source/api/nova..tests.process_unittest.rst -source/api/nova..tests.quota_unittest.rst -source/api/nova..tests.real_flags.rst -source/api/nova..tests.rpc_unittest.rst -source/api/nova..tests.runtime_flags.rst -source/api/nova..tests.scheduler_unittest.rst 
-source/api/nova..tests.service_unittest.rst -source/api/nova..tests.twistd_unittest.rst -source/api/nova..tests.validator_unittest.rst -source/api/nova..tests.virt_unittest.rst -source/api/nova..tests.volume_unittest.rst -source/api/nova..twistd.rst -source/api/nova..utils.rst -source/api/nova..validate.rst -source/api/nova..virt.connection.rst -source/api/nova..virt.fake.rst -source/api/nova..virt.images.rst -source/api/nova..virt.libvirt_conn.rst -source/api/nova..virt.xenapi.rst -source/api/nova..volume.driver.rst -source/api/nova..volume.manager.rst -source/api/nova..wsgi.rst -source/api/autoindex.rst diff --git a/doc/source/api/autoindex.rst b/doc/source/api/autoindex.rst deleted file mode 100644 index 6265b082be52..000000000000 --- a/doc/source/api/autoindex.rst +++ /dev/null @@ -1,99 +0,0 @@ -.. toctree:: - :maxdepth: 1 - - nova..adminclient.rst - nova..api.cloud.rst - nova..api.ec2.admin.rst - nova..api.ec2.apirequest.rst - nova..api.ec2.cloud.rst - nova..api.ec2.images.rst - nova..api.ec2.metadatarequesthandler.rst - nova..api.openstack.auth.rst - nova..api.openstack.backup_schedules.rst - nova..api.openstack.faults.rst - nova..api.openstack.flavors.rst - nova..api.openstack.images.rst - nova..api.openstack.servers.rst - nova..api.openstack.sharedipgroups.rst - nova..auth.dbdriver.rst - nova..auth.fakeldap.rst - nova..auth.ldapdriver.rst - nova..auth.manager.rst - nova..auth.signer.rst - nova..cloudpipe.pipelib.rst - nova..compute.disk.rst - nova..compute.instance_types.rst - nova..compute.manager.rst - nova..compute.monitor.rst - nova..compute.power_state.rst - nova..context.rst - nova..crypto.rst - nova..db.api.rst - nova..db.sqlalchemy.api.rst - nova..db.sqlalchemy.models.rst - nova..db.sqlalchemy.session.rst - nova..exception.rst - nova..fakerabbit.rst - nova..flags.rst - nova..image.service.rst - nova..manager.rst - nova..network.linux_net.rst - nova..network.manager.rst - nova..objectstore.bucket.rst - nova..objectstore.handler.rst - nova..objectstore.image.rst - nova..objectstore.stored.rst - nova..process.rst - nova..quota.rst - nova..rpc.rst - nova..scheduler.chance.rst - nova..scheduler.driver.rst - nova..scheduler.manager.rst - nova..scheduler.simple.rst - nova..server.rst - nova..service.rst - nova..test.rst - nova..tests.access_unittest.rst - nova..tests.api.fakes.rst - nova..tests.api.openstack.fakes.rst - nova..tests.api.openstack.test_api.rst - nova..tests.api.openstack.test_auth.rst - nova..tests.api.openstack.test_faults.rst - nova..tests.api.openstack.test_flavors.rst - nova..tests.api.openstack.test_images.rst - nova..tests.api.openstack.test_ratelimiting.rst - nova..tests.api.openstack.test_servers.rst - nova..tests.api.openstack.test_sharedipgroups.rst - nova..tests.api.test_wsgi.rst - nova..tests.api_integration.rst - nova..tests.api_unittest.rst - nova..tests.auth_unittest.rst - nova..tests.cloud_unittest.rst - nova..tests.compute_unittest.rst - nova..tests.declare_flags.rst - nova..tests.fake_flags.rst - nova..tests.flags_unittest.rst - nova..tests.network_unittest.rst - nova..tests.objectstore_unittest.rst - nova..tests.process_unittest.rst - nova..tests.quota_unittest.rst - nova..tests.real_flags.rst - nova..tests.rpc_unittest.rst - nova..tests.runtime_flags.rst - nova..tests.scheduler_unittest.rst - nova..tests.service_unittest.rst - nova..tests.twistd_unittest.rst - nova..tests.validator_unittest.rst - nova..tests.virt_unittest.rst - nova..tests.volume_unittest.rst - nova..twistd.rst - nova..utils.rst - nova..validate.rst - nova..virt.connection.rst - 
nova..virt.fake.rst - nova..virt.images.rst - nova..virt.libvirt_conn.rst - nova..virt.xenapi.rst - nova..volume.driver.rst - nova..volume.manager.rst - nova..wsgi.rst diff --git a/doc/source/api/nova..adminclient.rst b/doc/source/api/nova..adminclient.rst deleted file mode 100644 index 35fa839e1b1b..000000000000 --- a/doc/source/api/nova..adminclient.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..adminclient` Module -============================================================================== -.. automodule:: nova..adminclient - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.cloud.rst b/doc/source/api/nova..api.cloud.rst deleted file mode 100644 index 4138401858ea..000000000000 --- a/doc/source/api/nova..api.cloud.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.cloud` Module -============================================================================== -.. automodule:: nova..api.cloud - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.admin.rst b/doc/source/api/nova..api.ec2.admin.rst deleted file mode 100644 index 4e9ab308b4dd..000000000000 --- a/doc/source/api/nova..api.ec2.admin.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.ec2.admin` Module -============================================================================== -.. automodule:: nova..api.ec2.admin - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.apirequest.rst b/doc/source/api/nova..api.ec2.apirequest.rst deleted file mode 100644 index c17a2ff3ad29..000000000000 --- a/doc/source/api/nova..api.ec2.apirequest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.ec2.apirequest` Module -============================================================================== -.. automodule:: nova..api.ec2.apirequest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.cloud.rst b/doc/source/api/nova..api.ec2.cloud.rst deleted file mode 100644 index f6145c217f8c..000000000000 --- a/doc/source/api/nova..api.ec2.cloud.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.ec2.cloud` Module -============================================================================== -.. automodule:: nova..api.ec2.cloud - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.images.rst b/doc/source/api/nova..api.ec2.images.rst deleted file mode 100644 index 012d800e47fb..000000000000 --- a/doc/source/api/nova..api.ec2.images.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.ec2.images` Module -============================================================================== -.. automodule:: nova..api.ec2.images - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.metadatarequesthandler.rst b/doc/source/api/nova..api.ec2.metadatarequesthandler.rst deleted file mode 100644 index 75f5169e5f27..000000000000 --- a/doc/source/api/nova..api.ec2.metadatarequesthandler.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.ec2.metadatarequesthandler` Module -============================================================================== -.. 
automodule:: nova..api.ec2.metadatarequesthandler - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.auth.rst b/doc/source/api/nova..api.openstack.auth.rst deleted file mode 100644 index 8c3f8f2da626..000000000000 --- a/doc/source/api/nova..api.openstack.auth.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.auth` Module -============================================================================== -.. automodule:: nova..api.openstack.auth - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.backup_schedules.rst b/doc/source/api/nova..api.openstack.backup_schedules.rst deleted file mode 100644 index 6b406f12db4b..000000000000 --- a/doc/source/api/nova..api.openstack.backup_schedules.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.backup_schedules` Module -============================================================================== -.. automodule:: nova..api.openstack.backup_schedules - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.faults.rst b/doc/source/api/nova..api.openstack.faults.rst deleted file mode 100644 index 7b25561f74b7..000000000000 --- a/doc/source/api/nova..api.openstack.faults.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.faults` Module -============================================================================== -.. automodule:: nova..api.openstack.faults - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.flavors.rst b/doc/source/api/nova..api.openstack.flavors.rst deleted file mode 100644 index 0deb724deba1..000000000000 --- a/doc/source/api/nova..api.openstack.flavors.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.flavors` Module -============================================================================== -.. automodule:: nova..api.openstack.flavors - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.images.rst b/doc/source/api/nova..api.openstack.images.rst deleted file mode 100644 index 82bd5f1e86a3..000000000000 --- a/doc/source/api/nova..api.openstack.images.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.images` Module -============================================================================== -.. automodule:: nova..api.openstack.images - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.servers.rst b/doc/source/api/nova..api.openstack.servers.rst deleted file mode 100644 index c36856ea2bc5..000000000000 --- a/doc/source/api/nova..api.openstack.servers.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.servers` Module -============================================================================== -.. automodule:: nova..api.openstack.servers - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.sharedipgroups.rst b/doc/source/api/nova..api.openstack.sharedipgroups.rst deleted file mode 100644 index 07632acc8e49..000000000000 --- a/doc/source/api/nova..api.openstack.sharedipgroups.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..api.openstack.sharedipgroups` Module -============================================================================== -.. 
automodule:: nova..api.openstack.sharedipgroups - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..auth.dbdriver.rst b/doc/source/api/nova..auth.dbdriver.rst deleted file mode 100644 index 7de68b6e08bc..000000000000 --- a/doc/source/api/nova..auth.dbdriver.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..auth.dbdriver` Module -============================================================================== -.. automodule:: nova..auth.dbdriver - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..auth.fakeldap.rst b/doc/source/api/nova..auth.fakeldap.rst deleted file mode 100644 index ca8a3ad4d560..000000000000 --- a/doc/source/api/nova..auth.fakeldap.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..auth.fakeldap` Module -============================================================================== -.. automodule:: nova..auth.fakeldap - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..auth.ldapdriver.rst b/doc/source/api/nova..auth.ldapdriver.rst deleted file mode 100644 index c444635228f8..000000000000 --- a/doc/source/api/nova..auth.ldapdriver.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..auth.ldapdriver` Module -============================================================================== -.. automodule:: nova..auth.ldapdriver - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..auth.manager.rst b/doc/source/api/nova..auth.manager.rst deleted file mode 100644 index bc5ce2ec31e8..000000000000 --- a/doc/source/api/nova..auth.manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..auth.manager` Module -============================================================================== -.. automodule:: nova..auth.manager - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..auth.signer.rst b/doc/source/api/nova..auth.signer.rst deleted file mode 100644 index aad824eada39..000000000000 --- a/doc/source/api/nova..auth.signer.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..auth.signer` Module -============================================================================== -.. automodule:: nova..auth.signer - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..cloudpipe.pipelib.rst b/doc/source/api/nova..cloudpipe.pipelib.rst deleted file mode 100644 index 054aaf484fe2..000000000000 --- a/doc/source/api/nova..cloudpipe.pipelib.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..cloudpipe.pipelib` Module -============================================================================== -.. automodule:: nova..cloudpipe.pipelib - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..compute.disk.rst b/doc/source/api/nova..compute.disk.rst deleted file mode 100644 index 6410af6f3899..000000000000 --- a/doc/source/api/nova..compute.disk.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..compute.disk` Module -============================================================================== -.. automodule:: nova..compute.disk - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..compute.instance_types.rst b/doc/source/api/nova..compute.instance_types.rst deleted file mode 100644 index d206ff3a4a1f..000000000000 --- a/doc/source/api/nova..compute.instance_types.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..compute.instance_types` Module -============================================================================== -.. 
automodule:: nova..compute.instance_types - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..compute.manager.rst b/doc/source/api/nova..compute.manager.rst deleted file mode 100644 index 33a337c39843..000000000000 --- a/doc/source/api/nova..compute.manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..compute.manager` Module -============================================================================== -.. automodule:: nova..compute.manager - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..compute.monitor.rst b/doc/source/api/nova..compute.monitor.rst deleted file mode 100644 index a91169ecd309..000000000000 --- a/doc/source/api/nova..compute.monitor.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..compute.monitor` Module -============================================================================== -.. automodule:: nova..compute.monitor - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..compute.power_state.rst b/doc/source/api/nova..compute.power_state.rst deleted file mode 100644 index 41b1080e5a0e..000000000000 --- a/doc/source/api/nova..compute.power_state.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..compute.power_state` Module -============================================================================== -.. automodule:: nova..compute.power_state - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..context.rst b/doc/source/api/nova..context.rst deleted file mode 100644 index 9de1adb24827..000000000000 --- a/doc/source/api/nova..context.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..context` Module -============================================================================== -.. automodule:: nova..context - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..crypto.rst b/doc/source/api/nova..crypto.rst deleted file mode 100644 index af9f636344ab..000000000000 --- a/doc/source/api/nova..crypto.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..crypto` Module -============================================================================== -.. automodule:: nova..crypto - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..db.api.rst b/doc/source/api/nova..db.api.rst deleted file mode 100644 index 6d998fbb2f90..000000000000 --- a/doc/source/api/nova..db.api.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..db.api` Module -============================================================================== -.. automodule:: nova..db.api - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.api.rst b/doc/source/api/nova..db.sqlalchemy.api.rst deleted file mode 100644 index 76d0c1bd376f..000000000000 --- a/doc/source/api/nova..db.sqlalchemy.api.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..db.sqlalchemy.api` Module -============================================================================== -.. automodule:: nova..db.sqlalchemy.api - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.models.rst b/doc/source/api/nova..db.sqlalchemy.models.rst deleted file mode 100644 index 9c795d7f51f5..000000000000 --- a/doc/source/api/nova..db.sqlalchemy.models.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..db.sqlalchemy.models` Module -============================================================================== -.. 
automodule:: nova..db.sqlalchemy.models - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.session.rst b/doc/source/api/nova..db.sqlalchemy.session.rst deleted file mode 100644 index cbfd6416a888..000000000000 --- a/doc/source/api/nova..db.sqlalchemy.session.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..db.sqlalchemy.session` Module -============================================================================== -.. automodule:: nova..db.sqlalchemy.session - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..exception.rst b/doc/source/api/nova..exception.rst deleted file mode 100644 index 97ac6b752d27..000000000000 --- a/doc/source/api/nova..exception.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..exception` Module -============================================================================== -.. automodule:: nova..exception - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..fakerabbit.rst b/doc/source/api/nova..fakerabbit.rst deleted file mode 100644 index f1e27c2664fb..000000000000 --- a/doc/source/api/nova..fakerabbit.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..fakerabbit` Module -============================================================================== -.. automodule:: nova..fakerabbit - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..flags.rst b/doc/source/api/nova..flags.rst deleted file mode 100644 index 08165be44f46..000000000000 --- a/doc/source/api/nova..flags.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..flags` Module -============================================================================== -.. automodule:: nova..flags - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..image.service.rst b/doc/source/api/nova..image.service.rst deleted file mode 100644 index 78ef1eccadac..000000000000 --- a/doc/source/api/nova..image.service.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..image.service` Module -============================================================================== -.. automodule:: nova..image.service - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..manager.rst b/doc/source/api/nova..manager.rst deleted file mode 100644 index 576902491c7e..000000000000 --- a/doc/source/api/nova..manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..manager` Module -============================================================================== -.. automodule:: nova..manager - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..network.linux_net.rst b/doc/source/api/nova..network.linux_net.rst deleted file mode 100644 index 7af78d5ade7f..000000000000 --- a/doc/source/api/nova..network.linux_net.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..network.linux_net` Module -============================================================================== -.. automodule:: nova..network.linux_net - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..network.manager.rst b/doc/source/api/nova..network.manager.rst deleted file mode 100644 index 0ea70553325e..000000000000 --- a/doc/source/api/nova..network.manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..network.manager` Module -============================================================================== -.. 
automodule:: nova..network.manager - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..objectstore.bucket.rst b/doc/source/api/nova..objectstore.bucket.rst deleted file mode 100644 index 3bfdf639ccd4..000000000000 --- a/doc/source/api/nova..objectstore.bucket.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..objectstore.bucket` Module -============================================================================== -.. automodule:: nova..objectstore.bucket - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..objectstore.handler.rst b/doc/source/api/nova..objectstore.handler.rst deleted file mode 100644 index 0eb8c4efb497..000000000000 --- a/doc/source/api/nova..objectstore.handler.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..objectstore.handler` Module -============================================================================== -.. automodule:: nova..objectstore.handler - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..objectstore.image.rst b/doc/source/api/nova..objectstore.image.rst deleted file mode 100644 index fa4c971f119a..000000000000 --- a/doc/source/api/nova..objectstore.image.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..objectstore.image` Module -============================================================================== -.. automodule:: nova..objectstore.image - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..objectstore.stored.rst b/doc/source/api/nova..objectstore.stored.rst deleted file mode 100644 index 2b1d997a3fd0..000000000000 --- a/doc/source/api/nova..objectstore.stored.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..objectstore.stored` Module -============================================================================== -.. automodule:: nova..objectstore.stored - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..process.rst b/doc/source/api/nova..process.rst deleted file mode 100644 index 91eff8379070..000000000000 --- a/doc/source/api/nova..process.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..process` Module -============================================================================== -.. automodule:: nova..process - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..quota.rst b/doc/source/api/nova..quota.rst deleted file mode 100644 index 4140d95d66ac..000000000000 --- a/doc/source/api/nova..quota.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..quota` Module -============================================================================== -.. automodule:: nova..quota - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..rpc.rst b/doc/source/api/nova..rpc.rst deleted file mode 100644 index 5b2a9b8e2333..000000000000 --- a/doc/source/api/nova..rpc.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..rpc` Module -============================================================================== -.. automodule:: nova..rpc - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..scheduler.chance.rst b/doc/source/api/nova..scheduler.chance.rst deleted file mode 100644 index 89c074c8f6c7..000000000000 --- a/doc/source/api/nova..scheduler.chance.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..scheduler.chance` Module -============================================================================== -.. 
automodule:: nova..scheduler.chance - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..scheduler.driver.rst b/doc/source/api/nova..scheduler.driver.rst deleted file mode 100644 index 793ed9c7b3ae..000000000000 --- a/doc/source/api/nova..scheduler.driver.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..scheduler.driver` Module -============================================================================== -.. automodule:: nova..scheduler.driver - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..scheduler.manager.rst b/doc/source/api/nova..scheduler.manager.rst deleted file mode 100644 index d0fc7c4230db..000000000000 --- a/doc/source/api/nova..scheduler.manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..scheduler.manager` Module -============================================================================== -.. automodule:: nova..scheduler.manager - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..scheduler.simple.rst b/doc/source/api/nova..scheduler.simple.rst deleted file mode 100644 index dacc2cf3088c..000000000000 --- a/doc/source/api/nova..scheduler.simple.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..scheduler.simple` Module -============================================================================== -.. automodule:: nova..scheduler.simple - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..server.rst b/doc/source/api/nova..server.rst deleted file mode 100644 index 7cb2cfa5465d..000000000000 --- a/doc/source/api/nova..server.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..server` Module -============================================================================== -.. automodule:: nova..server - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..service.rst b/doc/source/api/nova..service.rst deleted file mode 100644 index 2d2dfcf2e52a..000000000000 --- a/doc/source/api/nova..service.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..service` Module -============================================================================== -.. automodule:: nova..service - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..test.rst b/doc/source/api/nova..test.rst deleted file mode 100644 index a6bdb6f1fffa..000000000000 --- a/doc/source/api/nova..test.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..test` Module -============================================================================== -.. automodule:: nova..test - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.access_unittest.rst b/doc/source/api/nova..tests.access_unittest.rst deleted file mode 100644 index 89554e430a4f..000000000000 --- a/doc/source/api/nova..tests.access_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.access_unittest` Module -============================================================================== -.. automodule:: nova..tests.access_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.fakes.rst b/doc/source/api/nova..tests.api.fakes.rst deleted file mode 100644 index 5728b18f3616..000000000000 --- a/doc/source/api/nova..tests.api.fakes.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.fakes` Module -============================================================================== -.. 
automodule:: nova..tests.api.fakes - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.fakes.rst b/doc/source/api/nova..tests.api.openstack.fakes.rst deleted file mode 100644 index 4a9ff593899f..000000000000 --- a/doc/source/api/nova..tests.api.openstack.fakes.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.fakes` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.fakes - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_api.rst b/doc/source/api/nova..tests.api.openstack.test_api.rst deleted file mode 100644 index 68106d221f83..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_api.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_api` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_api - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_auth.rst b/doc/source/api/nova..tests.api.openstack.test_auth.rst deleted file mode 100644 index 9f0011669a88..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_auth.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_auth` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_auth - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_faults.rst b/doc/source/api/nova..tests.api.openstack.test_faults.rst deleted file mode 100644 index b839ae8a3c77..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_faults.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_faults` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_faults - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_flavors.rst b/doc/source/api/nova..tests.api.openstack.test_flavors.rst deleted file mode 100644 index 471fac56ec84..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_flavors.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_flavors` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_flavors - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_images.rst b/doc/source/api/nova..tests.api.openstack.test_images.rst deleted file mode 100644 index 57ae93c8c63e..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_images.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_images` Module -============================================================================== -.. 
automodule:: nova..tests.api.openstack.test_images - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst b/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst deleted file mode 100644 index 9a857f795941..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_ratelimiting` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_ratelimiting - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_servers.rst b/doc/source/api/nova..tests.api.openstack.test_servers.rst deleted file mode 100644 index ea602e6ab16c..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_servers.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_servers` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_servers - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst b/doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst deleted file mode 100644 index 1fad49147d62..000000000000 --- a/doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.openstack.test_sharedipgroups` Module -============================================================================== -.. automodule:: nova..tests.api.openstack.test_sharedipgroups - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api.test_wsgi.rst b/doc/source/api/nova..tests.api.test_wsgi.rst deleted file mode 100644 index 8e79caa4dd73..000000000000 --- a/doc/source/api/nova..tests.api.test_wsgi.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api.test_wsgi` Module -============================================================================== -.. automodule:: nova..tests.api.test_wsgi - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api_integration.rst b/doc/source/api/nova..tests.api_integration.rst deleted file mode 100644 index fd217acf73c0..000000000000 --- a/doc/source/api/nova..tests.api_integration.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api_integration` Module -============================================================================== -.. automodule:: nova..tests.api_integration - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.api_unittest.rst b/doc/source/api/nova..tests.api_unittest.rst deleted file mode 100644 index 44a65d48c463..000000000000 --- a/doc/source/api/nova..tests.api_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.api_unittest` Module -============================================================================== -.. automodule:: nova..tests.api_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.auth_unittest.rst b/doc/source/api/nova..tests.auth_unittest.rst deleted file mode 100644 index 5805dcf38b8e..000000000000 --- a/doc/source/api/nova..tests.auth_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.auth_unittest` Module -============================================================================== -.. 
automodule:: nova..tests.auth_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.cloud_unittest.rst b/doc/source/api/nova..tests.cloud_unittest.rst deleted file mode 100644 index d2ca3b013579..000000000000 --- a/doc/source/api/nova..tests.cloud_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.cloud_unittest` Module -============================================================================== -.. automodule:: nova..tests.cloud_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.compute_unittest.rst b/doc/source/api/nova..tests.compute_unittest.rst deleted file mode 100644 index 6a30bf7441f7..000000000000 --- a/doc/source/api/nova..tests.compute_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.compute_unittest` Module -============================================================================== -.. automodule:: nova..tests.compute_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.declare_flags.rst b/doc/source/api/nova..tests.declare_flags.rst deleted file mode 100644 index 524e72e9133d..000000000000 --- a/doc/source/api/nova..tests.declare_flags.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.declare_flags` Module -============================================================================== -.. automodule:: nova..tests.declare_flags - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.fake_flags.rst b/doc/source/api/nova..tests.fake_flags.rst deleted file mode 100644 index a8dc3df36efe..000000000000 --- a/doc/source/api/nova..tests.fake_flags.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.fake_flags` Module -============================================================================== -.. automodule:: nova..tests.fake_flags - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.flags_unittest.rst b/doc/source/api/nova..tests.flags_unittest.rst deleted file mode 100644 index 61087e683336..000000000000 --- a/doc/source/api/nova..tests.flags_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.flags_unittest` Module -============================================================================== -.. automodule:: nova..tests.flags_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.network_unittest.rst b/doc/source/api/nova..tests.network_unittest.rst deleted file mode 100644 index df057d813efb..000000000000 --- a/doc/source/api/nova..tests.network_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.network_unittest` Module -============================================================================== -.. automodule:: nova..tests.network_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.objectstore_unittest.rst b/doc/source/api/nova..tests.objectstore_unittest.rst deleted file mode 100644 index 0ae252f049c7..000000000000 --- a/doc/source/api/nova..tests.objectstore_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.objectstore_unittest` Module -============================================================================== -.. 
automodule:: nova..tests.objectstore_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.process_unittest.rst b/doc/source/api/nova..tests.process_unittest.rst deleted file mode 100644 index 30d1e129c942..000000000000 --- a/doc/source/api/nova..tests.process_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.process_unittest` Module -============================================================================== -.. automodule:: nova..tests.process_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.quota_unittest.rst b/doc/source/api/nova..tests.quota_unittest.rst deleted file mode 100644 index 6ab81310489d..000000000000 --- a/doc/source/api/nova..tests.quota_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.quota_unittest` Module -============================================================================== -.. automodule:: nova..tests.quota_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.real_flags.rst b/doc/source/api/nova..tests.real_flags.rst deleted file mode 100644 index e9c0d1abdc66..000000000000 --- a/doc/source/api/nova..tests.real_flags.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.real_flags` Module -============================================================================== -.. automodule:: nova..tests.real_flags - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.rpc_unittest.rst b/doc/source/api/nova..tests.rpc_unittest.rst deleted file mode 100644 index e6c7ceb2e04a..000000000000 --- a/doc/source/api/nova..tests.rpc_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.rpc_unittest` Module -============================================================================== -.. automodule:: nova..tests.rpc_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.runtime_flags.rst b/doc/source/api/nova..tests.runtime_flags.rst deleted file mode 100644 index 984e21199b4c..000000000000 --- a/doc/source/api/nova..tests.runtime_flags.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.runtime_flags` Module -============================================================================== -.. automodule:: nova..tests.runtime_flags - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.scheduler_unittest.rst b/doc/source/api/nova..tests.scheduler_unittest.rst deleted file mode 100644 index ae3a0661658d..000000000000 --- a/doc/source/api/nova..tests.scheduler_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.scheduler_unittest` Module -============================================================================== -.. automodule:: nova..tests.scheduler_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.service_unittest.rst b/doc/source/api/nova..tests.service_unittest.rst deleted file mode 100644 index c7c746d17e10..000000000000 --- a/doc/source/api/nova..tests.service_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.service_unittest` Module -============================================================================== -.. 
automodule:: nova..tests.service_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.twistd_unittest.rst b/doc/source/api/nova..tests.twistd_unittest.rst deleted file mode 100644 index ce88202e102a..000000000000 --- a/doc/source/api/nova..tests.twistd_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.twistd_unittest` Module -============================================================================== -.. automodule:: nova..tests.twistd_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.validator_unittest.rst b/doc/source/api/nova..tests.validator_unittest.rst deleted file mode 100644 index 980284327f89..000000000000 --- a/doc/source/api/nova..tests.validator_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.validator_unittest` Module -============================================================================== -.. automodule:: nova..tests.validator_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.virt_unittest.rst b/doc/source/api/nova..tests.virt_unittest.rst deleted file mode 100644 index 2189be41ef96..000000000000 --- a/doc/source/api/nova..tests.virt_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.virt_unittest` Module -============================================================================== -.. automodule:: nova..tests.virt_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.volume_unittest.rst b/doc/source/api/nova..tests.volume_unittest.rst deleted file mode 100644 index 791e192f5339..000000000000 --- a/doc/source/api/nova..tests.volume_unittest.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.volume_unittest` Module -============================================================================== -.. automodule:: nova..tests.volume_unittest - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..twistd.rst b/doc/source/api/nova..twistd.rst deleted file mode 100644 index d4145396db02..000000000000 --- a/doc/source/api/nova..twistd.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..twistd` Module -============================================================================== -.. automodule:: nova..twistd - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..utils.rst b/doc/source/api/nova..utils.rst deleted file mode 100644 index 1131d1080942..000000000000 --- a/doc/source/api/nova..utils.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..utils` Module -============================================================================== -.. automodule:: nova..utils - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..validate.rst b/doc/source/api/nova..validate.rst deleted file mode 100644 index 1d142f103259..000000000000 --- a/doc/source/api/nova..validate.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..validate` Module -============================================================================== -.. automodule:: nova..validate - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..virt.connection.rst b/doc/source/api/nova..virt.connection.rst deleted file mode 100644 index caf76676514e..000000000000 --- a/doc/source/api/nova..virt.connection.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..virt.connection` Module -============================================================================== -.. 
automodule:: nova..virt.connection - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..virt.fake.rst b/doc/source/api/nova..virt.fake.rst deleted file mode 100644 index 06ecdbf7d688..000000000000 --- a/doc/source/api/nova..virt.fake.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..virt.fake` Module -============================================================================== -.. automodule:: nova..virt.fake - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..virt.images.rst b/doc/source/api/nova..virt.images.rst deleted file mode 100644 index 4fdeb7af8dc7..000000000000 --- a/doc/source/api/nova..virt.images.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..virt.images` Module -============================================================================== -.. automodule:: nova..virt.images - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..virt.libvirt_conn.rst b/doc/source/api/nova..virt.libvirt_conn.rst deleted file mode 100644 index 7fb8aed5fe89..000000000000 --- a/doc/source/api/nova..virt.libvirt_conn.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..virt.libvirt_conn` Module -============================================================================== -.. automodule:: nova..virt.libvirt_conn - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..virt.xenapi.rst b/doc/source/api/nova..virt.xenapi.rst deleted file mode 100644 index 2e396bf06ccf..000000000000 --- a/doc/source/api/nova..virt.xenapi.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..virt.xenapi` Module -============================================================================== -.. automodule:: nova..virt.xenapi - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..volume.driver.rst b/doc/source/api/nova..volume.driver.rst deleted file mode 100644 index 51f5c07295c0..000000000000 --- a/doc/source/api/nova..volume.driver.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..volume.driver` Module -============================================================================== -.. automodule:: nova..volume.driver - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..volume.manager.rst b/doc/source/api/nova..volume.manager.rst deleted file mode 100644 index 91a192a8f5ef..000000000000 --- a/doc/source/api/nova..volume.manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..volume.manager` Module -============================================================================== -.. automodule:: nova..volume.manager - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..wsgi.rst b/doc/source/api/nova..wsgi.rst deleted file mode 100644 index 0bff1c332399..000000000000 --- a/doc/source/api/nova..wsgi.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..wsgi` Module -============================================================================== -.. automodule:: nova..wsgi - :members: - :undoc-members: - :show-inheritance: From def5583469bd265c9107ed54d461441bc6303151 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 5 Jan 2011 09:50:19 -0800 Subject: [PATCH 105/147] Split internal API get calls to get and get_all, where the former takes an ID and returns one resource, and the latter can optionally take a filter and return a list of resources. 
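A small sketch of the contract this split establishes, before the diff itself. The method shapes mirror the patch; the in-memory store is invented purely for illustration:

class FakeInstanceAPI(object):
    # Illustrative only -- mirrors the get/get_all split, not Nova's code.

    def __init__(self, instances):
        self._instances = instances   # list of dicts with 'id', 'project_id'

    def get(self, context, instance_id):
        # Takes an ID, returns exactly one resource.
        return next(i for i in self._instances if i['id'] == instance_id)

    def get_all(self, context, project_id=None):
        # Optionally takes a filter, always returns a list.
        if project_id is not None:
            return [i for i in self._instances
                    if i['project_id'] == project_id]
        return list(self._instances)

api = FakeInstanceAPI([{'id': 1, 'project_id': 'p1'},
                       {'id': 2, 'project_id': 'p2'}])
assert api.get(None, 1)['project_id'] == 'p1'
assert api.get_all(None, project_id='p2') == [{'id': 2, 'project_id': 'p2'}]
assert len(api.get_all(None)) == 2

Callers that previously passed an ID to get() keep working unchanged; callers that relied on get() doubling as a list query (as in cloud.py and servers.py above) move to get_all().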
--- nova/api/ec2/cloud.py | 9 +++++---- nova/api/openstack/servers.py | 2 +- nova/compute/api.py | 12 +++++++----- nova/volume/api.py | 13 +++++++------ 4 files changed, 20 insertions(+), 16 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index b1eaafc8ba2a..0c0027287228 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -119,7 +119,8 @@ class CloudController(object): def _get_mpi_data(self, context, project_id): result = {} - for instance in self.compute_api.get(context, project_id=project_id): + for instance in self.compute_api.get_all(context, + project_id=project_id): if instance['fixed_ip']: line = '%s slots=%d' % (instance['fixed_ip']['address'], instance['vcpus']) @@ -141,7 +142,7 @@ class CloudController(object): def get_metadata(self, address): ctxt = context.get_admin_context() - instance_ref = self.compute_api.get(ctxt, fixed_ip=address) + instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address) if instance_ref is None: return None mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) @@ -493,7 +494,7 @@ class CloudController(object): "output": base64.b64encode(output)} def describe_volumes(self, context, volume_id=None, **kwargs): - volumes = self.volume_api.get(context) + volumes = self.volume_api.get_all(context) # NOTE(vish): volume_id is an optional list of volume ids to filter by. volumes = [self._format_volume(context, v) for v in volumes if volume_id is None or v['id'] in volume_id] @@ -597,7 +598,7 @@ class CloudController(object): def _format_instances(self, context, **kwargs): reservations = {} - instances = self.compute_api.get(context, **kwargs) + instances = self.compute_api.get_all(context, **kwargs) for instance in instances: if not context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 6be24629a5b8..ce64ac7ad2fb 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -96,7 +96,7 @@ class Controller(wsgi.Controller): entity_maker - either _translate_detail_keys or _translate_keys """ - instance_list = self.compute_api.get(req.environ['nova.context']) + instance_list = self.compute_api.get_all(req.environ['nova.context']) limited_list = common.limited(instance_list, req) res = [entity_maker(inst)['server'] for inst in limited_list] return dict(servers=res) diff --git a/nova/compute/api.py b/nova/compute/api.py index 74d030c4d8ce..64d47b1ce093 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -245,13 +245,15 @@ class API(base.Base): else: self.db.instance_destroy(context, instance_id) - def get(self, context, instance_id=None, project_id=None, - reservation_id=None, fixed_ip=None): - """Get one or more instances, possibly filtered by one of the + def get(self, context, instance_id): + """Get a single instance with the given ID.""" + return self.db.instance_get_by_id(context, instance_id) + + def get_all(self, context, project_id=None, reservation_id=None, + fixed_ip=None): + """Get all instances, possibly filtered by one of the given parameters. 
If there is no filter and the context is an admin, it will retrieve all instances in the system.""" - if instance_id is not None: - return self.db.instance_get_by_id(context, instance_id) if reservation_id is not None: return self.db.instance_get_all_by_reservation(context, reservation_id) diff --git a/nova/volume/api.py b/nova/volume/api.py index 48ecdbe68b17..2d7fe3762fe3 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -63,7 +63,7 @@ class API(base.Base): return volume def delete(self, context, volume_id): - volume = self.db.volume_get(context, volume_id) + volume = self.get(context, volume_id) if volume['status'] != "available": raise exception.ApiError(_("Volume status must be available")) now = datetime.datetime.utcnow() @@ -78,15 +78,16 @@ class API(base.Base): def update(self, context, volume_id, fields): self.db.volume_update(context, volume_id, fields) - def get(self, context, volume_id=None): - if volume_id is not None: - return self.db.volume_get(context, volume_id) + def get(self, context, volume_id): + return self.db.volume_get(context, volume_id) + + def get_all(self, context): if context.user.is_admin(): return self.db.volume_get_all(context) return self.db.volume_get_all_by_project(context, context.project_id) def check_attach(self, context, volume_id): - volume = self.db.volume_get(context, volume_id) + volume = self.get(context, volume_id) # TODO(vish): abstract status checking? if volume['status'] != "available": raise exception.ApiError(_("Volume status must be available")) @@ -94,7 +95,7 @@ class API(base.Base): raise exception.ApiError(_("Volume is already attached")) def check_detach(self, context, volume_id): - volume = self.db.volume_get(context, volume_id) + volume = self.get(context, volume_id) # TODO(vish): abstract status checking? if volume['status'] == "available": raise exception.ApiError(_("Volume is already detached")) From 11d5e914044583882384ffd462991ef4f678b28e Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Wed, 5 Jan 2011 16:04:16 -0600 Subject: [PATCH 106/147] Updated register_models() docstring --- nova/db/sqlalchemy/models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 0c9c387fc711..6367d1ff8fa9 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -545,7 +545,8 @@ def register_models(): """Register Models and create metadata. Called from nova.db.sqlalchemy.__init__ as part of loading the driver, - it will never need to be called explicitly elsewhere. + it will never need to be called explicitly elsewhere unless the + connection is lost and needs to be reestablished.
""" from sqlalchemy import create_engine models = (Service, Instance, InstanceActions, From b4e57fe01778d7e3f115a369eebaeb9ee328895e Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Wed, 5 Jan 2011 15:02:09 -0800 Subject: [PATCH 107/147] Make test case work again --- nova/tests/hyperv_unittest.py | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/nova/tests/hyperv_unittest.py b/nova/tests/hyperv_unittest.py index 7044db43c2f9..3980ae3cb2f9 100644 --- a/nova/tests/hyperv_unittest.py +++ b/nova/tests/hyperv_unittest.py @@ -19,31 +19,36 @@ Tests For Hyper-V driver import random +from nova import context from nova import db from nova import flags from nova import test - +from nova.auth import manager from nova.virt import hyperv FLAGS = flags.FLAGS FLAGS.connection_type = 'hyperv' -class HyperVTestCase(test.TrialTestCase): +class HyperVTestCase(test.TestCase): """Test cases for the Hyper-V driver""" - def setUp(self): # pylint: disable-msg=C0103 - pass + def setUp(self): + super(HyperVTestCase, self).setUp() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.RequestContext(self.user, self.project) def test_create_destroy(self): """Create a VM and destroy it""" instance = {'internal_id': random.randint(1, 1000000), 'memory_mb': '1024', 'mac_address': '02:12:34:46:56:67', - 'vcpu': 2, + 'vcpus': 2, 'project_id': 'fake', 'instance_type': 'm1.small'} - - instance_ref = db.instance_create(None, instance) + instance_ref = db.instance_create(self.context, instance) conn = hyperv.get_connection(False) conn._create_vm(instance_ref) # pylint: disable-msg=W0212 @@ -60,5 +65,7 @@ class HyperVTestCase(test.TrialTestCase): if n == instance_ref['name']] self.assertTrue(len(found) == 0) - def tearDown(self): # pylint: disable-msg=C0103 - pass + def tearDown(self): + super(HyperVTestCase, self).tearDown() + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) From 80abf5306c7dcc08e63c9af182b31007b9de677c Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Wed, 5 Jan 2011 15:04:51 -0800 Subject: [PATCH 108/147] Add to Authors and mailmap --- .mailmap | 1 + Authors | 1 + 2 files changed, 2 insertions(+) diff --git a/.mailmap b/.mailmap index 010678569a24..2af2d7cd9b63 100644 --- a/.mailmap +++ b/.mailmap @@ -30,3 +30,4 @@ + diff --git a/Authors b/Authors index 639e68a59e10..8dfaf955740a 100644 --- a/Authors +++ b/Authors @@ -3,6 +3,7 @@ Anne Gentle Anthony Young Antony Messerli Armando Migliaccio +Chiradeep Vittal Chris Behrens Chmouel Boudjnah Cory Wright From 4a7105898e45cf5b6393f68d8d2d921dd218724b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 6 Jan 2011 18:35:18 +0000 Subject: [PATCH 109/147] Fix format_run_instances to pass in reservation id as a kwarg --- nova/api/ec2/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 0c0027287228..6619b545292c 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -592,7 +592,7 @@ class CloudController(object): return {'reservationSet': self._format_instances(context)} def _format_run_instances(self, context, reservation_id): - i = self._format_instances(context, reservation_id) + i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] From 6c01a842493079fdff9d5887562aec1a6fe8033b Mon Sep 17 00:00:00 
2001 From: Vishvananda Ishaya Date: Thu, 6 Jan 2011 18:46:28 +0000 Subject: [PATCH 110/147] fix the broken tests that allowed the breakage in format to happen --- nova/tests/test_cloud.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 42344af1cb06..ba58fab59532 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -140,15 +140,16 @@ class CloudTestCase(test.TestCase): kwargs = {'image_id': image_id, 'instance_type': instance_type, 'max_count': max_count} - rv = yield self.cloud.run_instances(self.context, **kwargs) + rv = self.cloud.run_instances(self.context, **kwargs) + print rv instance_id = rv['instancesSet'][0]['instanceId'] - output = yield self.cloud.get_console_output(context=self.context, + output = self.cloud.get_console_output(context=self.context, instance_id=[instance_id]) self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT') # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. greenthread.sleep(0.3) - rv = yield self.cloud.terminate_instances(self.context, [instance_id]) + rv = self.cloud.terminate_instances(self.context, [instance_id]) def test_key_generation(self): result = self._create_key('test') @@ -186,7 +187,7 @@ class CloudTestCase(test.TestCase): kwargs = {'image_id': image_id, 'instance_type': instance_type, 'max_count': max_count} - rv = yield self.cloud.run_instances(self.context, **kwargs) + rv = self.cloud.run_instances(self.context, **kwargs) # TODO: check for proper response instance_id = rv['reservationSet'][0].keys()[0] instance = rv['reservationSet'][0][instance_id][0] @@ -209,7 +210,7 @@ class CloudTestCase(test.TestCase): for instance in reservations[reservations.keys()[0]]: instance_id = instance['instance_id'] logging.debug("Terminating instance %s" % instance_id) - rv = yield self.compute.terminate_instance(instance_id) + rv = self.compute.terminate_instance(instance_id) def test_instance_update_state(self): def instance(num): From 59b3e0f2700d6a9067bffe045ea335b7abc35a27 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Thu, 6 Jan 2011 15:08:14 -0500 Subject: [PATCH 111/147] pep8 --- nova/log.py | 3 +-- nova/tests/test_log.py | 9 ++++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/log.py b/nova/log.py index 97472b20bdfc..6f5377e87ef3 100644 --- a/nova/log.py +++ b/nova/log.py @@ -74,7 +74,6 @@ flags.DEFINE_bool('use_syslog', False, 'output to syslog') flags.DEFINE_string('logfile', None, 'output to named file') - # A list of things we want to replicate from logging. 
# levels CRITICAL = logging.CRITICAL @@ -112,7 +111,7 @@ def _dictify_context(context): return None if not isinstance(context, dict) \ and getattr(context, 'to_dict', None): - context = context.to_dict() + context = context.to_dict() return context diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py index d8dd387082e8..beb1d97cf27f 100644 --- a/nova/tests/test_log.py +++ b/nova/tests/test_log.py @@ -35,15 +35,16 @@ class RootLoggerTestCase(test.TrialTestCase): def test_handles_context_kwarg(self): self.log.info("foo", context=_fake_context()) - self.assert_(True) # didn't raise exception + self.assert_(True) # didn't raise exception def test_module_level_methods_handle_context_arg(self): log.info("foo", context=_fake_context()) - self.assert_(True) # didn't raise exception + self.assert_(True) # didn't raise exception def test_module_level_audit_handles_context_arg(self): log.audit("foo", context=_fake_context()) - self.assert_(True) # didn't raise exception + self.assert_(True) # didn't raise exception + class NovaFormatterTestCase(test.TrialTestCase): def setUp(self): @@ -76,6 +77,7 @@ class NovaFormatterTestCase(test.TrialTestCase): self.log.debug("baz") self.assertEqual("NOCTXT: baz --DBG\n", self.stream.getvalue()) + class NovaLoggerTestCase(test.TrialTestCase): def setUp(self): super(NovaLoggerTestCase, self).setUp() @@ -93,6 +95,7 @@ class NovaLoggerTestCase(test.TrialTestCase): l = log.getLogger('nova-test.foo') self.assertEqual(log.AUDIT, l.level) + class VerboseLoggerTestCase(test.TrialTestCase): def setUp(self): super(VerboseLoggerTestCase, self).setUp() From 13b1374897c59c6e59fe5542ab71b0180aa6fc00 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Thu, 6 Jan 2011 15:08:26 -0500 Subject: [PATCH 112/147] Track version info, and make available for logging. 
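What this buys at runtime, sketched with the LOCALBRANCH placeholder values rather than real output (nova/version.py itself is added in the next patch; the logger name here is made up):

    from nova import log as logging
    from nova import version

    # NovaLogger._log() now injects nova_version into every record, so the
    # default format string renders a line roughly as:
    #   (nova.example 2011.1-LOCALBRANCH:LOCALREVISION): INFO [N/A] hello
    LOG = logging.getLogger('nova.example')
    LOG.info('hello')

    # the same string is available directly, e.g. for startup banners
    banner = version.string_with_vcs()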
--- nova/log.py | 14 ++++---------- setup.py | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 10 deletions(-) diff --git a/nova/log.py b/nova/log.py index 97472b20bdfc..aafc2e602d95 100644 --- a/nova/log.py +++ b/nova/log.py @@ -34,24 +34,19 @@ import logging.handlers import traceback from nova import flags -# TODO(todd): fix after version.py merge -# from nova import version +from nova import version FLAGS = flags.FLAGS -# TODO(todd): fix after version.py merge -# '(%(name)s %(nova_version)s): %(levelname)s ' flags.DEFINE_string('logging_context_format_string', - '(%(name)s): %(levelname)s ' + '(%(name)s %(nova_version)s): %(levelname)s ' '[%(request_id)s %(user)s ' '%(project)s] %(message)s', 'format string to use for log messages') -# TODO(todd): fix after version.py merge -# '(%(name)s %(nova_version)s): %(levelname)s [N/A] ' flags.DEFINE_string('logging_default_format_string', - '(%(name)s): %(levelname)s [N/A] ' + '(%(name)s %(nova_version)s): %(levelname)s [N/A] ' '%(message)s', 'format string to use for log messages') @@ -163,8 +158,7 @@ class NovaLogger(logging.Logger): extra = {} if context: extra.update(_dictify_context(context)) - # TODO(todd): fix after version.py merge - #extra.update({"nova_version": version.string_with_vcs()}) + extra.update({"nova_version": version.string_with_vcs()}) logging.Logger._log(self, level, msg, args, exc_info, extra) def addHandler(self, handler): diff --git a/setup.py b/setup.py index e00911099866..6d7cecb70ffe 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,45 @@ from sphinx.setup_command import BuildDoc from nova.utils import parse_mailmap, str_dict_replace +NOVA_VERSION = ['2011', '1'] + +VERSIONFILE_DEFAULT_VCS_VERSION = """ +version_info = {"branch_nick": "LOCALBRANCH", "revision_id": "LOCALREVISION"} +""" + +VERSIONFILE_DATA = """ +# below this line automatically generated by setup.py + +YEAR = %r +COUNT = %r +""" % (NOVA_VERSION[0], NOVA_VERSION[1]) + + +VERSIONFILE_DATA += """ + +def string(): + return '.'.join([YEAR, COUNT]) + + +def vcs_version_string(): + return "%s:%s" % (version_info['branch_nick'], version_info['revision_id']) + + +def string_with_vcs(): + return "%s-%s" % (string(), vcs_version_string()) +""" + +with open("nova/version.py", 'w') as version_file: + if os.path.isdir('.bzr'): + vcs_cmd = subprocess.Popen(["bzr", "version-info", "--python"], + stdout=subprocess.PIPE) + vcsversion = vcs_cmd.communicate()[0] + version_file.write(vcsversion) + else: + version_file.write(VERSIONFILE_DEFAULT_VCS_VERSION) + version_file.write(VERSIONFILE_DATA) + + class local_BuildDoc(BuildDoc): def run(self): for builder in ['html', 'man']: From f3ea4d876fe0d62dcf63cfdcaf7657949cc4dbcf Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Thu, 6 Jan 2011 15:20:04 -0500 Subject: [PATCH 113/147] Add default version file for developers. --- nova/version.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 nova/version.py diff --git a/nova/version.py b/nova/version.py new file mode 100644 index 000000000000..fc14b840119f --- /dev/null +++ b/nova/version.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +"""This file is automatically generated by generate_version_info +It uses the current working tree to determine the revision. +So don't edit it. 
:) +""" + +version_info = {'branch_nick': u'LOCALBRANCH', 'revision_id': 'LOCALREVISION', + 'revno': 0} + +revisions = {} + +file_revisions = {} + + +if __name__ == '__main__': + print 'revision: %(revno)d' % version_info + print 'nick: %(branch_nick)s' % version_info + print 'revision id: %(revision_id)s' % version_info + +# below this line automatically generated by setup.py + +YEAR = '2011' +COUNT = '1-dev' + + +def string(): + return '.'.join([YEAR, COUNT]) + + +def vcs_version_string(): + return "%s:%s" % (version_info['branch_nick'], version_info['revision_id']) + + +def string_with_vcs(): + return "%s-%s" % (string(), vcs_version_string()) From 8cdfdd14a03e1356cda4fcbdfbcc528bc7f397bd Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Thu, 6 Jan 2011 15:29:38 -0500 Subject: [PATCH 114/147] Let documentation get version from nova/version.py as well. --- doc/source/conf.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 8f1b370ccbab..61b3749d06f2 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -60,10 +60,12 @@ copyright = u'2010, United States Government as represented by the Administrator # |version| and |release|, also used in various other places throughout the # built documents. # -# The short X.Y version. -version = '2011.1' +import re +from nova import version as nova_version # The full version, including alpha/beta/rc tags. -release = '2011.1-prerelease' +release = nova_version.string() +# The short X.Y version. +version = re.sub(r'-dev$', '', nova_version.string()) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From 19db369868b2f4a200624cb67d72740eabaab699 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 6 Jan 2011 15:33:01 -0600 Subject: [PATCH 115/147] Include date in action query --- nova/db/sqlalchemy/api.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index aaa07e3c9291..003a96b75d31 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -840,12 +840,15 @@ def instance_action_create(context, values): def instance_get_actions(context, instance_id): """Return the actions associated to the given instance id""" session = get_session() - actions = {} + actions = [] for action in session.query(models.InstanceActions).\ filter_by(instance_id=instance_id).\ all(): - actions[action.action] = action.error - return actions + actions.append(dict( + date=action.created_at.ctime(), + action=action.action, + error=action.error)) + return dict(actions=actions) ################### From 006a3c43093ce3324173e0aed172a3be1396d5dc Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 6 Jan 2011 16:21:59 -0600 Subject: [PATCH 116/147] altered argument handling --- nova/compute/manager.py | 42 ++++++++++++++--------------------------- 1 file changed, 14 insertions(+), 28 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 2671d401c367..a5343ff221b8 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -62,39 +62,25 @@ def checks_instance_lock(function): """ @functools.wraps(function) - def decorated_function(*args, **kwargs): + def decorated_function(self, context, instance_id, *args, **kwargs): - logging.info(_("check_instance_locks decorating |%s|"), function) - logging.info(_("check_instance_locks: arguments: |%s| |%s|"), args, - kwargs) - # assume worst case (have to declare so they are in 
scope) - admin = False - locked = True - instance_id = False - - # grab args to function - try: - if 'context' in kwargs: - context = kwargs['context'] - else: - context = args[1] - if 'instance_id' in kwargs: - instance_id = kwargs['instance_id'] - else: - instance_id = args[2] - locked = args[0].get_lock(context, instance_id) - admin = context.is_admin - except Exception as e: - logging.error(_("check_instance_lock fail! args: |%s| |%s|"), args, - kwargs) - raise e + logging.info(_("check_instance_lock: decorating: |%s|"), function) + logging.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"), + self, + context, + instance_id) + unlocked = not self.get_lock(context, instance_id) + admin = context.is_admin + logging.info(_("check_instance_lock: locked: |%s|"), locked) + logging.info(_("check_instance_lock: admin: |%s|"), admin) # if admin or unlocked call function otherwise log error - if admin or not locked: + if admin or unlocked: + logging.info(_("check_instance_lock: executing: |%s|"), function) function(*args, **kwargs) else: - logging.error(_("Instance |%s| is locked, cannot execute |%s|"), - instance_id, function) + logging.error(_("check_instance_lock: not executing |%s|"), + function) return False return decorated_function From ba245da7a339cb769451b67f27cd801c0ce12120 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 6 Jan 2011 16:30:45 -0600 Subject: [PATCH 117/147] pep8 --- nova/tests/test_compute.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 993c4fd3cb42..062f37f27c0b 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -180,12 +180,12 @@ class ComputeTestCase(test.TestCase): # decorator should return False (fail) with locked nonadmin context self.compute.lock_instance(self.context, instance_id) - ret_val = self.compute.reboot_instance(non_admin_context,instance_id) + ret_val = self.compute.reboot_instance(non_admin_context, instance_id) self.assertEqual(ret_val, False) # decorator should return None (success) with unlocked nonadmin context self.compute.unlock_instance(self.context, instance_id) - ret_val = self.compute.reboot_instance(non_admin_context,instance_id) + ret_val = self.compute.reboot_instance(non_admin_context, instance_id) self.assertEqual(ret_val, None) self.compute.terminate_instance(self.context, instance_id) From 11d6e9d2f917d124946d0fa47c1512a1f8ab940d Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 6 Jan 2011 17:02:20 -0600 Subject: [PATCH 118/147] accidentally left unlocked in there, it should have been locked --- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 1ed1eb40aea4..3c20b7c73d0b 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -69,13 +69,13 @@ def checks_instance_lock(function): self, context, instance_id) - unlocked = not self.get_lock(context, instance_id) + locked = self.get_lock(context, instance_id) admin = context.is_admin logging.info(_("check_instance_lock: locked: |%s|"), locked) logging.info(_("check_instance_lock: admin: |%s|"), admin) # if admin or unlocked call function otherwise log error - if admin or unlocked: + if admin or not locked: logging.info(_("check_instance_lock: executing: |%s|"), function) function(*args, **kwargs) else: From 7450f84e0f491f8a24273135432e105677c4a589 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 6 Jan 2011 17:23:00 -0600 Subject: [PATCH 119/147] 
passing the correct parameters to decorated function --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3c20b7c73d0b..b7ec2e93b1d2 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -77,7 +77,7 @@ def checks_instance_lock(function): # if admin or unlocked call function otherwise log error if admin or not locked: logging.info(_("check_instance_lock: executing: |%s|"), function) - function(*args, **kwargs) + function(self, context, isntance_id, *args, **kwargs) else: logging.error(_("check_instance_lock: not executing |%s|"), function) From ebec92778bdaf4af58029f9977697865c53f881d Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 6 Jan 2011 17:25:17 -0600 Subject: [PATCH 120/147] refers to instance_id instead of instance_ref[instance_id] --- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index b7ec2e93b1d2..24c32113060d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -411,7 +411,7 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug(_('instance %s: locking'), instance_ref['internal_id']) + logging.debug(_('instance %s: locking'), instance_id) self.db.instance_update(context, instance_id, {'locked': True}) @exception.wrap_exception @@ -423,7 +423,7 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug(_('instance %s: unlocking'), instance_ref['internal_id']) + logging.debug(_('instance %s: unlocking'), instance_id) self.db.instance_update(context, instance_id, {'locked': False}) @exception.wrap_exception From 76e3923c40dff2f754b045847d8ad19ea9a7cef1 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 6 Jan 2011 17:27:57 -0600 Subject: [PATCH 121/147] typo --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 24c32113060d..d5136eb26088 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -77,7 +77,7 @@ def checks_instance_lock(function): # if admin or unlocked call function otherwise log error if admin or not locked: logging.info(_("check_instance_lock: executing: |%s|"), function) - function(self, context, isntance_id, *args, **kwargs) + function(self, context, instance_id, *args, **kwargs) else: logging.error(_("check_instance_lock: not executing |%s|"), function) From 3dd9c56477078114bcd9b20a49a3413615539103 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 6 Jan 2011 17:50:44 -0600 Subject: [PATCH 122/147] Review feedback --- nova/api/openstack/servers.py | 3 ++- nova/compute/api.py | 9 ++++++++- nova/db/sqlalchemy/api.py | 7 ++----- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ce64ac7ad2fb..ca2e5888be2a 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -222,4 +222,5 @@ class Controller(wsgi.Controller): def actions(self, req, id): """Permit Admins to retrieve server actions.""" ctxt = req.environ["nova.context"] - return self.compute_api.get_actions(ctxt, id) + items = self.compute_api.get_actions(ctxt, id) + return dict(actions=items) diff --git a/nova/compute/api.py b/nova/compute/api.py index 64d47b1ce093..46d921204f99 100644 --- 
a/nova/compute/api.py +++ b/nova/compute/api.py @@ -316,7 +316,14 @@ class API(base.Base): def get_actions(self, context, instance_id): """Retrieve actions for the given instance.""" - return self.db.instance_get_actions(context, instance_id) + items = self.db.instance_get_actions(context, instance_id) + actions = [] + for item in items: + actions.append(dict( + date=str(item[0]), + action=item[1], + error=item[2])) + return actions def suspend(self, context, instance_id): """suspend the instance with instance_id""" diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 003a96b75d31..0796a7f25676 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -844,11 +844,8 @@ def instance_get_actions(context, instance_id): for action in session.query(models.InstanceActions).\ filter_by(instance_id=instance_id).\ all(): - actions.append(dict( - date=action.created_at.ctime(), - action=action.action, - error=action.error)) - return dict(actions=actions) + actions.append((action.created_at, action.action, action.error)) + return actions ################### From 6a162512cac5eafdbe46ba4df6117bfed6f40e4b Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 6 Jan 2011 18:12:22 -0600 Subject: [PATCH 123/147] Review feedback --- nova/api/openstack/servers.py | 4 ++++ nova/compute/api.py | 9 +-------- nova/db/sqlalchemy/api.py | 5 ++++- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ca2e5888be2a..9a62f2b96415 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -223,4 +223,8 @@ class Controller(wsgi.Controller): """Permit Admins to retrieve server actions.""" ctxt = req.environ["nova.context"] items = self.compute_api.get_actions(ctxt, id) + # TODO(jk0): Do not do pre-serialization here once the default + # serializer is updated + for item in items: + item["date"] = str(item["date"]) return dict(actions=items) diff --git a/nova/compute/api.py b/nova/compute/api.py index 46d921204f99..64d47b1ce093 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -316,14 +316,7 @@ class API(base.Base): def get_actions(self, context, instance_id): """Retrieve actions for the given instance.""" - items = self.db.instance_get_actions(context, instance_id) - actions = [] - for item in items: - actions.append(dict( - date=str(item[0]), - action=item[1], - error=item[2])) - return actions + return self.db.instance_get_actions(context, instance_id) def suspend(self, context, instance_id): """suspend the instance with instance_id""" diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 0796a7f25676..797dacb73948 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -844,7 +844,10 @@ def instance_get_actions(context, instance_id): for action in session.query(models.InstanceActions).\ filter_by(instance_id=instance_id).\ all(): - actions.append((action.created_at, action.action, action.error)) + actions.append(dict( + date=action.created_at, + action=action.action, + error=action.error)) return actions From a0edf5a7ba372419ebfed987b8585171e7167e48 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 6 Jan 2011 18:18:28 -0600 Subject: [PATCH 124/147] Review feedback --- nova/api/openstack/servers.py | 2 +- nova/db/sqlalchemy/api.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 9a62f2b96415..3ee161502fcd 100644 --- a/nova/api/openstack/servers.py 
+++ b/nova/api/openstack/servers.py @@ -226,5 +226,5 @@ class Controller(wsgi.Controller): # TODO(jk0): Do not do pre-serialization here once the default # serializer is updated for item in items: - item["date"] = str(item["date"]) + item["created_at"] = str(item["created_at"]) return dict(actions=items) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 797dacb73948..1b6e89541f22 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -845,7 +845,7 @@ def instance_get_actions(context, instance_id): filter_by(instance_id=instance_id).\ all(): actions.append(dict( - date=action.created_at, + created_at=action.created_at, action=action.action, error=action.error)) return actions From 59f8986df4d78f61528162e65f560064febef7af Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 6 Jan 2011 18:59:15 -0600 Subject: [PATCH 125/147] Review feedback --- nova/api/openstack/servers.py | 8 ++++++-- nova/db/sqlalchemy/api.py | 10 ++-------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 3ee161502fcd..9a0c417ae28c 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -223,8 +223,12 @@ class Controller(wsgi.Controller): """Permit Admins to retrieve server actions.""" ctxt = req.environ["nova.context"] items = self.compute_api.get_actions(ctxt, id) + actions = [] # TODO(jk0): Do not do pre-serialization here once the default # serializer is updated for item in items: - item["created_at"] = str(item["created_at"]) - return dict(actions=items) + actions.append(dict( + created_at=str(item.created_at), + action=item.action, + error=item.error)) + return dict(actions=actions) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 1b6e89541f22..45427597a268 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -840,15 +840,9 @@ def instance_action_create(context, values): def instance_get_actions(context, instance_id): """Return the actions associated to the given instance id""" session = get_session() - actions = [] - for action in session.query(models.InstanceActions).\ + return session.query(models.InstanceActions).\ filter_by(instance_id=instance_id).\ - all(): - actions.append(dict( - created_at=action.created_at, - action=action.action, - error=action.error)) - return actions + all() ################### From 19ffc1275814a6c00f6ff19dd0c03060143d097a Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 7 Jan 2011 12:08:22 +0100 Subject: [PATCH 126/147] Remove redundant import of nova.context. Use db instance attribute rather than module directly. 
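The rule the patch applies, restated as a self-contained sketch; FakeDB and the stripped-down API class are hypothetical stand-ins, not code from the tree. Going through the driver stored on the instance keeps the class testable, because a stub can be injected in place of the real sqlalchemy layer:

    class FakeDB(object):
        # hypothetical stub standing in for the real database driver
        def security_group_get(self, context, group_id):
            return {'id': group_id, 'instances': []}


    class API(object):
        # simplified shape of nova.compute.api.API, for illustration only
        def __init__(self, db_driver=None):
            # base.Base keeps the chosen driver here
            self.db = db_driver or FakeDB()

        def refresh_rules(self, context, group_id):
            # self.db honors the injected driver; a module-level
            # db.security_group_get() call would silently bypass it
            return self.db.security_group_get(context, group_id)

    assert API(db_driver=FakeDB()).refresh_rules(None, 1)['id'] == 1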
--- nova/compute/api.py | 6 +++--- nova/db/sqlalchemy/api.py | 2 -- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 2c2937f480ef..0d04d344c3fa 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -24,7 +24,6 @@ import datetime import logging import time -from nova import context from nova import db from nova import exception from nova import flags @@ -210,7 +209,7 @@ class API(base.Base): def trigger_security_group_rules_refresh(self, context, security_group_id): """Called when a rule is added to or removed from a security_group""" - security_group = db.security_group_get(context, security_group_id) + security_group = self.db.security_group_get(context, security_group_id) hosts = set() for instance in security_group['instances']: @@ -232,7 +231,8 @@ class API(base.Base): # First, we get the security group rules that reference this group as # the grantee.. security_group_rules = \ - db.security_group_rule_get_by_security_group_grantee(context, + self.db.security_group_rule_get_by_security_group_grantee( + context, group_id) # ..then we distill the security groups to which they belong.. diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 14ccc989ffbc..eb87355b66dd 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -650,7 +650,6 @@ def instance_get(context, instance_id, session=None): if is_admin_context(context): result = session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload('security_groups')).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ filter_by(id=instance_id).\ @@ -659,7 +658,6 @@ def instance_get(context, instance_id, session=None): elif is_user_context(context): result = session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload('security_groups')).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ filter_by(project_id=context.project_id).\ From 8b3925e4d4b97dc28bfc903483ec4793fb38fed5 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 7 Jan 2011 15:17:03 +0100 Subject: [PATCH 127/147] Less code generation. --- .bzrignore | 1 + doc/source/conf.py | 5 ++--- nova/version.py | 42 +++++++++++++++++++----------------------- setup.py | 38 ++++---------------------------------- 4 files changed, 26 insertions(+), 60 deletions(-) diff --git a/.bzrignore b/.bzrignore index d81a7d829df7..b271561a3564 100644 --- a/.bzrignore +++ b/.bzrignore @@ -12,3 +12,4 @@ CA/openssl.cnf CA/serial* CA/newcerts/*.pem CA/private/cakey.pem +nova/vcsversion.py diff --git a/doc/source/conf.py b/doc/source/conf.py index 61b3749d06f2..20e9c88afc5e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -60,12 +60,11 @@ copyright = u'2010, United States Government as represented by the Administrator # |version| and |release|, also used in various other places throughout the # built documents. # -import re from nova import version as nova_version # The full version, including alpha/beta/rc tags. -release = nova_version.string() +release = nova_version.version() # The short X.Y version. -version = re.sub(r'-dev$', '', nova_version.string()) +version = nova_version.canonical_version() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/nova/version.py b/nova/version.py index fc14b840119f..1504a5c82fab 100644 --- a/nova/version.py +++ b/nova/version.py @@ -1,35 +1,31 @@ #!/usr/bin/env python -"""This file is automatically generated by generate_version_info -It uses the current working tree to determine the revision. -So don't edit it. :) -""" +try: + from nova.vcsversion import version_info +except ImportError: + version_info = {'branch_nick': u'LOCALBRANCH', + 'revision_id': 'LOCALREVISION', + 'revno': 0} -version_info = {'branch_nick': u'LOCALBRANCH', 'revision_id': 'LOCALREVISION', - 'revno': 0} +NOVA_VERSION = ['2011', '1'] +YEAR, COUNT = NOVA_VERSION -revisions = {} - -file_revisions = {} +FINAL = False # This becomes true at Release Candidate time -if __name__ == '__main__': - print 'revision: %(revno)d' % version_info - print 'nick: %(branch_nick)s' % version_info - print 'revision id: %(revision_id)s' % version_info - -# below this line automatically generated by setup.py - -YEAR = '2011' -COUNT = '1-dev' - - -def string(): +def canonical_version_string(): return '.'.join([YEAR, COUNT]) +def version_string(): + if FINAL: + return canonical_version_string() + else: + return '%s-dev' % (canonical_version_string(),) + + def vcs_version_string(): return "%s:%s" % (version_info['branch_nick'], version_info['revision_id']) -def string_with_vcs(): - return "%s-%s" % (string(), vcs_version_string()) +def version_string_with_vcs(): + return "%s-%s" % (canonical_version_string(), vcs_version_string()) diff --git a/setup.py b/setup.py index 6d7cecb70ffe..2ccece78965f 100644 --- a/setup.py +++ b/setup.py @@ -24,44 +24,14 @@ from setuptools.command.sdist import sdist from sphinx.setup_command import BuildDoc from nova.utils import parse_mailmap, str_dict_replace +from nova import version -NOVA_VERSION = ['2011', '1'] - -VERSIONFILE_DEFAULT_VCS_VERSION = """ -version_info = {"branch_nick": "LOCALBRANCH", "revision_id": "LOCALREVISION"} -""" - -VERSIONFILE_DATA = """ -# below this line automatically generated by setup.py - -YEAR = %r -COUNT = %r -""" % (NOVA_VERSION[0], NOVA_VERSION[1]) - - -VERSIONFILE_DATA += """ - -def string(): - return '.'.join([YEAR, COUNT]) - - -def vcs_version_string(): - return "%s:%s" % (version_info['branch_nick'], version_info['revision_id']) - - -def string_with_vcs(): - return "%s-%s" % (string(), vcs_version_string()) -""" - -with open("nova/version.py", 'w') as version_file: - if os.path.isdir('.bzr'): +if os.path.isdir('.bzr'): + with open("nova/vcsversion.py", 'w') as version_file: vcs_cmd = subprocess.Popen(["bzr", "version-info", "--python"], stdout=subprocess.PIPE) vcsversion = vcs_cmd.communicate()[0] version_file.write(vcsversion) - else: - version_file.write(VERSIONFILE_DEFAULT_VCS_VERSION) - version_file.write(VERSIONFILE_DATA) class local_BuildDoc(BuildDoc): @@ -88,7 +58,7 @@ class local_sdist(sdist): sdist.run(self) setup(name='nova', - version='2011.1', + version=version.canonical_version(), description='cloud computing fabric controller', author='OpenStack', author_email='nova@lists.launchpad.net', From 5e34b63b874b9c75215b9eeabc8e8e951a866fe7 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Fri, 7 Jan 2011 15:12:34 +0000 Subject: [PATCH 128/147] Fixing headers line by wrapping the headers in single quotes --- nova/virt/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/images.py b/nova/virt/images.py index 2d03da4b4fed..048608e3da96 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -89,7 +89,7 @@ def _fetch_s3_image(image, 
path, user, project): else: cmd = ['/usr/bin/curl', '--fail', '--silent', url] for (k, v) in headers.iteritems(): - cmd += ['-H', '%s: %s' % (k, v)] + cmd += ['-H', '\'%s: %s\'' % (k, v)] cmd += ['-o', path] cmd_out = ' '.join(cmd) From 2ea4af0f3059ef2aee6e25db8849a39248983d30 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 7 Jan 2011 18:11:04 +0000 Subject: [PATCH 129/147] Bug #699912: When failing to connect to a data store, Nova doesn't log which data store it tried to connect to Log FLAGS.sql_connection when failing to connect, to aid debugging. --- nova/db/sqlalchemy/__init__.py | 5 +++-- nova/service.py | 7 ++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py index 22aa1cfe6c8b..367fdda8b33f 100644 --- a/nova/db/sqlalchemy/__init__.py +++ b/nova/db/sqlalchemy/__init__.py @@ -39,5 +39,6 @@ for i in xrange(FLAGS.sql_max_retries): models.register_models() break except OperationalError: - logging.exception(_("Data store is unreachable." - " Trying again in %d seconds.") % FLAGS.sql_retry_interval) + logging.exception(_("Data store %s is unreachable." + " Trying again in %d seconds.") % + (FLAGS.sql_connection, FLAGS.sql_retry_interval)) diff --git a/nova/service.py b/nova/service.py index 7203430c6f82..a48d00e4502b 100644 --- a/nova/service.py +++ b/nova/service.py @@ -211,9 +211,10 @@ class Service(object): try: models.register_models() except OperationalError: - logging.exception(_("Data store is unreachable." - " Trying again in %d seconds.") % - FLAGS.sql_retry_interval) + logging.exception(_("Data store %s is unreachable." + " Trying again in %d seconds.") % + (FLAGS.sql_connection, + FLAGS.sql_retry_interval)) time.sleep(FLAGS.sql_retry_interval) From 01bb7d0c941c1cdb27e354c7f037fe0056a87770 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 7 Jan 2011 18:15:29 +0000 Subject: [PATCH 130/147] Bug #699910: Nova RPC layer silently swallows exceptions Log exceptions thrown during message handling. 
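The shape of the fix, as a simplified standalone sketch rather than the actual AdapterConsumer code; process_message and reply are stand-ins for the consumer's receive path and msg_reply:

    import logging
    import sys

    def process_message(msg_id, method, args, reply):
        try:
            rval = method(**args)
            if msg_id:
                reply(msg_id, rval, None)
        except Exception:
            # the one-line fix: record the traceback before replying
            # with the failure, instead of swallowing it
            logging.exception("Exception during message handling")
            if msg_id:
                reply(msg_id, None, sys.exc_info())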
--- nova/rpc.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/rpc.py b/nova/rpc.py index 844088348633..ae5e7792109e 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -193,6 +193,7 @@ class AdapterConsumer(TopicConsumer): if msg_id: msg_reply(msg_id, rval, None) except Exception as e: + logging.exception("Exception during message handling") if msg_id: msg_reply(msg_id, None, sys.exc_info()) return From 8952629c576498c3b576a1f9085a8d1b850e8639 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Fri, 7 Jan 2011 14:09:38 -0500 Subject: [PATCH 131/147] pep8 fixes --- bin/nova-logspool | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-logspool b/bin/nova-logspool index d5aef475660a..097459b12339 100644 --- a/bin/nova-logspool +++ b/bin/nova-logspool @@ -28,7 +28,6 @@ import sys class Request(object): - def __init__(self): self.time = "" self.host = "" @@ -67,6 +66,7 @@ class Request(object): 'host': self.host, 'env': self.env, 'logger': self.logger, 'request_id': self.request_id} + class LogReader(object): def __init__(self, filename): self.filename = filename From 509c3b02f171d47ff9bc8cbbb3f0ac7cd1e888b3 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 7 Jan 2011 21:44:27 +0100 Subject: [PATCH 132/147] Add copyright and license info to version.py --- nova/version.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/nova/version.py b/nova/version.py index 1504a5c82fab..7b27acb6a554 100644 --- a/nova/version.py +++ b/nova/version.py @@ -1,4 +1,19 @@ -#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + try: from nova.vcsversion import version_info except ImportError: From 09a8b83c5fca2ba6ad250b0224b2297bff2306a2 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 7 Jan 2011 23:44:47 +0100 Subject: [PATCH 133/147] s/string_with_vcs/version_string_with_vcs/g --- nova/log.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/log.py b/nova/log.py index 5851bd224b73..c1428c051d9d 100644 --- a/nova/log.py +++ b/nova/log.py @@ -157,7 +157,7 @@ class NovaLogger(logging.Logger): extra = {} if context: extra.update(_dictify_context(context)) - extra.update({"nova_version": version.string_with_vcs()}) + extra.update({"nova_version": version.version_string_with_vcs()}) logging.Logger._log(self, level, msg, args, exc_info, extra) def addHandler(self, handler): From a29bba7e9f57b97085902fa97d17de32da8044cb Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Sat, 8 Jan 2011 00:02:24 +0100 Subject: [PATCH 134/147] s/canonical_version/canonical_version_string/g --- doc/source/conf.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 20e9c88afc5e..368a686c3f38 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -64,7 +64,7 @@ from nova import version as nova_version # The full version, including alpha/beta/rc tags. 
release = nova_version.version() # The short X.Y version. -version = nova_version.canonical_version() +version = nova_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 2ccece78965f..5b58274c6300 100644 --- a/setup.py +++ b/setup.py @@ -58,7 +58,7 @@ class local_sdist(sdist): sdist.run(self) setup(name='nova', - version=version.canonical_version(), + version=version.canonical_version_string(), description='cloud computing fabric controller', author='OpenStack', author_email='nova@lists.launchpad.net', From af5af6155690baf55c30f6a70c0c9f829f107802 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 7 Jan 2011 23:11:41 +0000 Subject: [PATCH 135/147] Now that we aren't using twisted we can use vgs to check for the existence of the volume group --- nova/volume/driver.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 477e0abf4f57..dcddec92a260 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -80,7 +80,8 @@ class VolumeDriver(object): def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" - if not os.path.isdir("/dev/%s" % FLAGS.volume_group): + out, err = self._execute("sudo vgs") + if not FLAGS.volume_group in out: raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) From d757a1a10f0cbc5a3c0f5b1427d1d526584298ce Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 7 Jan 2011 23:22:52 +0000 Subject: [PATCH 136/147] Return region info in the proper format. --- nova/api/ec2/cloud.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ccce83b849e4..b34488731add 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -249,6 +249,7 @@ class CloudController(object): FLAGS.cc_host, FLAGS.cc_port, FLAGS.ec2_suffix)}] + return {'regionInfo': regions} def describe_snapshots(self, context, From 5eb5373af5dd8f062975b4c42e12f95569f7e41b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sat, 8 Jan 2011 10:04:22 -0800 Subject: [PATCH 137/147] use safer vgs call --- nova/volume/driver.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index dcddec92a260..6bc925f3eedf 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -20,7 +20,6 @@ Drivers for volumes.
""" -import os import time from nova import exception @@ -80,8 +79,9 @@ class VolumeDriver(object): def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" - out, err = self._execute("sudo vgs") - if not FLAGS.volume_group in out: + out, err = self._execute("sudo vgs --noheadings -o name") + volume_groups = out.split() + if not FLAGS.volume_group in volume_groups: raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) From 4a9a02575bacb493b57dd83744561a77516bd6ff Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sat, 8 Jan 2011 16:39:12 -0800 Subject: [PATCH 138/147] late import module for register_models() so it doesn't create the db before flags are loaded --- nova/service.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/service.py b/nova/service.py index 864a4246972b..523c1a8d7389 100644 --- a/nova/service.py +++ b/nova/service.py @@ -38,7 +38,6 @@ from nova import log as logging from nova import flags from nova import rpc from nova import utils -from nova.db.sqlalchemy import models FLAGS = flags.FLAGS @@ -209,6 +208,10 @@ class Service(object): logging.exception(_("model server went away")) try: + # NOTE(vish): This is late-loaded to make sure that the + # database is not created before flags have + # been loaded. + from nova.db.sqlalchemy import models models.register_models() except OperationalError: logging.exception(_("Data store %s is unreachable." From 16eeac71055ffa9fe0fc7a13032da4e6397121b1 Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sat, 8 Jan 2011 17:40:06 -0800 Subject: [PATCH 139/147] fixed doc make process for new nova version (rev530) machanism --- doc/source/conf.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 368a686c3f38..d09984b19e6c 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -60,11 +60,11 @@ copyright = u'2010, United States Government as represented by the Administrator # |version| and |release|, also used in various other places throughout the # built documents. # -from nova import version as nova_version +import nova.version # The full version, including alpha/beta/rc tags. -release = nova_version.version() +release = nova.version.version_string() # The short X.Y version. -version = nova_version.canonical_version_string() +version = nova,version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From 346bd0e7c93757f940f133179b36657302d4296f Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sat, 8 Jan 2011 17:46:22 -0800 Subject: [PATCH 140/147] typo correction --- doc/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index d09984b19e6c..34c4ec2881d4 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -64,7 +64,7 @@ import nova.version # The full version, including alpha/beta/rc tags. release = nova.version.version_string() # The short X.Y version. -version = nova,version.canonical_version_string() +version = nova.version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
From 5424690912a6edc2a64bfbbd44120e52a85c7f48 Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sun, 9 Jan 2011 09:25:27 -0800 Subject: [PATCH 141/147] added myself to authors and fixed typo to follow standard --- Authors | 2 +- doc/source/conf.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Authors b/Authors index 8dfaf955740a..99c493b0e510 100644 --- a/Authors +++ b/Authors @@ -40,4 +40,4 @@ Trey Morris Vishvananda Ishaya Youcef Laribi Zhixue Wu - +Ken Pepple diff --git a/doc/source/conf.py b/doc/source/conf.py index 34c4ec2881d4..996dfb0a7ff5 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -60,11 +60,12 @@ copyright = u'2010, United States Government as represented by the Administrator # |version| and |release|, also used in various other places throughout the # built documents. # -import nova.version +from nova import version as nova_version +#import nova.version # The full version, including alpha/beta/rc tags. -release = nova.version.version_string() +release = nova_version.version_string() # The short X.Y version. -version = nova.version.canonical_version_string() +version = nova_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From bc3d288abb6b1cae1465490b3df99a201be8bdc5 Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sun, 9 Jan 2011 11:13:19 -0800 Subject: [PATCH 142/147] alphabetized Authors --- Authors | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Authors b/Authors index 99c493b0e510..47101e272a48 100644 --- a/Authors +++ b/Authors @@ -23,6 +23,7 @@ Jonathan Bryce Josh Kearney Joshua McKenty Justin Santa Barbara +Ken Pepple Matt Dietz Michael Gundlach Monty Taylor @@ -40,4 +41,3 @@ Trey Morris Vishvananda Ishaya Youcef Laribi Zhixue Wu -Ken Pepple From 6d05c3e5d9112aead1db23e942f24605a3301af9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 9 Jan 2011 23:01:10 -0800 Subject: [PATCH 143/147] fix describe instances + test
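A hedged usage sketch of the behavior the new test pins down; cloud, ctxt and the EC2-style ID are stand-ins for a CloudController, a RequestContext and an ID produced by id_to_ec2_id:

    # unfiltered: the whole reservation comes back
    result = cloud.describe_instances(ctxt)
    # filtered: instance_id narrows the set to the matching instances
    result = cloud.describe_instances(ctxt, instance_id=['i-00000002'])
    assert len(result['reservationSet'][0]['instancesSet']) == 1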
import base64 import datetime import IPy -import re import os from nova import compute @@ -35,7 +34,6 @@ from nova import db from nova import exception from nova import flags from nova import log as logging -from nova import quota from nova import network from nova import rpc from nova import utils @@ -603,19 +601,23 @@ class CloudController(object): return [{label: x} for x in lst] def describe_instances(self, context, **kwargs): - return self._format_describe_instances(context) + return self._format_describe_instances(context, **kwargs) - def _format_describe_instances(self, context): - return {'reservationSet': self._format_instances(context)} + def _format_describe_instances(self, context, **kwargs): + return {'reservationSet': self._format_instances(context, **kwargs)} def _format_run_instances(self, context, reservation_id): i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] - def _format_instances(self, context, **kwargs): + def _format_instances(self, context, instance_id=None, **kwargs): reservations = {} instances = self.compute_api.get_all(context, **kwargs) + # NOTE(vish): instance_id is an optional list of ids to filter by + if instance_id: + instance_id = [ec2_id_to_id(x) for x in instance_id] + instances = [x for x in instances if x['id'] in instance_id] for instance in instances: if not context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index a645ef538089..b8a15c7b2360 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -133,6 +133,23 @@ class CloudTestCase(test.TestCase): db.volume_destroy(self.context, vol1['id']) db.volume_destroy(self.context, vol2['id']) + def test_describe_instances(self): + """Makes sure describe_instances works and filters results.""" + inst1 = db.instance_create(self.context, {'reservation_id': 'a'}) + inst2 = db.instance_create(self.context, {'reservation_id': 'a'}) + result = self.cloud.describe_instances(self.context) + result = result['reservationSet'][0] + self.assertEqual(len(result['instancesSet']), 2) + instance_id = cloud.id_to_ec2_id(inst2['id']) + result = self.cloud.describe_instances(self.context, + instance_id=[instance_id]) + result = result['reservationSet'][0] + self.assertEqual(len(result['instancesSet']), 1) + self.assertEqual(result['instancesSet'][0]['instanceId'], + instance_id) + db.instance_destroy(self.context, inst1['id']) + db.instance_destroy(self.context, inst2['id']) + def test_console_output(self): image_id = FLAGS.default_image instance_type = FLAGS.default_instance_type @@ -141,7 +158,6 @@ class CloudTestCase(test.TestCase): 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) - print rv instance_id = rv['instancesSet'][0]['instanceId'] output = self.cloud.get_console_output(context=self.context, instance_id=[instance_id]) From c8566628d4c15bcaf16baf8fca2a31528e7eac13 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 9 Jan 2011 23:53:51 -0800 Subject: [PATCH 144/147] optimize to call get if instance_id is specified since most of the time people will just be requesting one id --- nova/api/ec2/cloud.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index fd3141a976d5..9166301c7074 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -613,11 +613,12 @@ class CloudController(object): def _format_instances(self, context, 
instance_id=None, **kwargs): reservations = {} - instances = self.compute_api.get_all(context, **kwargs) # NOTE(vish): instance_id is an optional list of ids to filter by if instance_id: instance_id = [ec2_id_to_id(x) for x in instance_id] - instances = [x for x in instances if x['id'] in instance_id] + instances = [self.compute_api.get(context, x) for x in instance_id] + else: + instances = self.compute_api.get_all(context, **kwargs) for instance in instances: if not context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: From 15b81abbd23f033fc9e35a7d49b8f65d2ae76586 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 10 Jan 2011 11:32:17 +0100 Subject: [PATCH 145/147] Create LibvirtConnection directly, rather than going through libvirt_conn.get_connection. This should remove the dependency on libvirt for tests. --- nova/tests/test_virt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 2f418bd5d644..59053f4d0368 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -171,7 +171,7 @@ class LibvirtConnTestCase(test.TestCase): for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.get_connection(True) + conn = libvirt_conn.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, expected_uri) From ec8e7773b79ed52aa2950db185ead881c77632f7 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Mon, 10 Jan 2011 10:57:13 -0500 Subject: [PATCH 146/147] Fix describe_availability_zones verbose output. --- nova/api/ec2/cloud.py | 3 ++- nova/db/api.py | 5 +++++ nova/db/sqlalchemy/api.py | 12 ++++++++++++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 5360f2de77e0..359498d11762 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -44,6 +44,7 @@ from nova.compute import instance_types FLAGS = flags.FLAGS +flags.DECLARE('service_down_time', 'nova.scheduler.driver') LOG = logging.getLogger("nova.api.cloud") @@ -200,7 +201,7 @@ class CloudController(object): 'zoneState': 'available'}]} services = db.service_get_all(context) - now = db.get_time() + now = datetime.datetime.utcnow() hosts = [] for host in [service['host'] for service in services]: if not host in hosts: diff --git a/nova/db/api.py b/nova/db/api.py index 8b0242c9a046..a4d26ec85f72 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -81,6 +81,11 @@ def service_get(context, service_id): return IMPL.service_get(context, service_id) +def service_get_all(context): + """Get a list of all services on any machine on any topic of any type.""" + return IMPL.service_get_all(context) + + def service_get_all_by_topic(context, topic): """Get all compute services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index eb87355b66dd..e475b4d8cba7 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -134,6 +134,18 @@ def service_get(context, service_id, session=None): return result + +@require_admin_context +def service_get_all(context, session=None): + if not session: + session = get_session() + + result = session.query(models.Service).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + return result + + @require_admin_context def service_get_all_by_topic(context, topic): session = get_session() From 00808c08c6935872ad8842ce093b515b68bdf3de Mon Sep 17 00:00:00 2001 From: Monty Taylor
Date: Mon, 10 Jan 2011 11:26:38 -0800 Subject: [PATCH 147/147] Added babel/gettext build support. --- babel.cfg | 2 + locale/nova.pot | 2130 +++++++++++++++++++++++++++++++++++++++++++++++ setup.cfg | 14 + setup.py | 15 +- 4 files changed, 2159 insertions(+), 2 deletions(-) create mode 100644 babel.cfg create mode 100644 locale/nova.pot diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 000000000000..15cd6cb76b93 --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/locale/nova.pot b/locale/nova.pot new file mode 100644 index 000000000000..a96411e33922 --- /dev/null +++ b/locale/nova.pot @@ -0,0 +1,2130 @@ +# Translations template for nova. +# Copyright (C) 2011 ORGANIZATION +# This file is distributed under the same license as the nova project. +# FIRST AUTHOR , 2011. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: nova 2011.1\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.4\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "" + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." 
+msgstr "" + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "" + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out " +"for %d minutes." 
+msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." 
+msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "Instance quota exceeded. You can only run %s more instances of this type." 
+msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: 
nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." 
+msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: 
nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + diff --git a/setup.cfg b/setup.cfg index 14dcb5c8edde..9c0a331e35e2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -8,3 +8,17 @@ tag_build = tag_date = 0 tag_svn_revision = 0 +[compile_catalog] +directory = locale +domain = nova + +[update_catalog] +domain = nova +output_dir = locale +input_file = locale/nova.pot + +[extract_messages] +keywords = _ l_ lazy_gettext +mapping_file = babel.cfg +output_file = locale/nova.pot + diff --git a/setup.py b/setup.py index 5b58274c6300..3608ff805a32 100644 --- a/setup.py +++ b/setup.py @@ -57,14 +57,25 @@ class local_sdist(sdist): changelog_file.write(str_dict_replace(changelog, mailmap)) sdist.run(self) +nova_cmdclass = {'sdist': local_sdist, + 'build_sphinx': local_BuildDoc} + +try: + from babel.messages import frontend as babel + nova_cmdclass['compile_catalog'] = babel.compile_catalog + nova_cmdclass['extract_messages'] = babel.extract_messages + nova_cmdclass['init_catalog'] = babel.init_catalog + nova_cmdclass['update_catalog'] = babel.update_catalog +except ImportError: + pass + setup(name='nova', version=version.canonical_version_string(), description='cloud computing fabric controller', author='OpenStack', author_email='nova@lists.launchpad.net', url='http://www.openstack.org/', - cmdclass={ 'sdist': local_sdist, - 'build_sphinx' : local_BuildDoc }, + cmdclass=nova_cmdclass, packages=find_packages(exclude=['bin', 'smoketests']), include_package_data=True, test_suite='nose.collector',
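With the [compile_catalog], [update_catalog], and [extract_messages] sections in setup.cfg and the Babel commands registered in setup.py, the translation catalogs can be managed through setup.py. A minimal usage sketch, assuming Babel is installed; the 'es' locale is only an example, and init_catalog needs explicit paths since setup.cfg carries no section for it:

    python setup.py extract_messages                                  # regenerate locale/nova.pot from **.py per babel.cfg
    python setup.py init_catalog -l es -i locale/nova.pot -d locale   # start a new catalog for one locale
    python setup.py update_catalog                                    # merge template changes into existing .po catalogs
    python setup.py compile_catalog                                   # compile .po catalogs into the .mo files gettext loads

If Babel is not installed, the try/except in setup.py leaves these commands unregistered and the rest of the build continues to work.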