From 5e82d6cf6edfb478f18b03547ce285d55566b042 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 20 Jul 2010 00:32:42 -0700 Subject: [PATCH 01/55] Able to boot without kernel or ramdisk. libvirt.xml.template is now a Cheetah template --- nova/endpoint/cloud.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 3b7b4804..b7a4fe20 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -544,8 +544,8 @@ class CloudController(object): for num in range(int(kwargs['max_count'])): inst = self.instdir.new() inst['image_id'] = image_id - inst['kernel_id'] = kernel_id - inst['ramdisk_id'] = ramdisk_id + inst['kernel_id'] = kernel_id or '' + inst['ramdisk_id'] = ramdisk_id or '' inst['user_data'] = kwargs.get('user_data', '') inst['instance_type'] = kwargs.get('instance_type', 'm1.small') inst['reservation_id'] = reservation_id From 6b54713dcdc84295c0835cf5811058daa1fed473 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sat, 31 Jul 2010 20:49:21 -0700 Subject: [PATCH 02/55] Recognize 'magic' kernel value that means "don't use a kernel" - currently aki-00000000 --- nova/endpoint/cloud.py | 10 ++++++++-- nova/flags.py | 3 +++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index b4157e48..344352a9 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -523,9 +523,15 @@ class CloudController(object): kernel_id = kwargs.get('kernel_id', kernel_id) ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) + if kernel_id == str(FLAGS.null_kernel): + kernel_id = None + ramdisk_id = None + # make sure we have access to kernel and ramdisk - self._get_image(context, kernel_id) - self._get_image(context, ramdisk_id) + if kernel_id: + self._get_image(context, kernel_id) + if ramdisk_id: + self._get_image(context, ramdisk_id) logging.debug("Going to run instances...") reservation_id = utils.generate_uid('r') diff --git a/nova/flags.py b/nova/flags.py index f35f5fa1..caae33e1 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -57,6 +57,9 @@ DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', 'Url to ec2 api server') +DEFINE_string('null_kernel', + 'aki-00000000', + 'Kernel image that indicates not to use a kernel (use a raw disk image instead)') DEFINE_string('default_image', 'ami-11111', 'default image to use, testing only') From f642a771892e345cc708f47db335810150480e4c Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 14 Oct 2010 13:38:35 -0700 Subject: [PATCH 03/55] Minimized diff, fixed formatting --- nova/flags.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index 2b96a15f..d2c22e46 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -202,7 +202,8 @@ DEFINE_string('default_ramdisk', 'ari-11111', DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') DEFINE_string('null_kernel', 'aki-00000000', - 'kernel image that indicates not to use a kernel, to use a raw disk image instead') + 'kernel image that indicates not to use a kernel, ' + ' but to use a raw disk image instead') DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server') DEFINE_string('vpn_key_suffix', From 613a4ac96f4c5b8ede78d1a5a5910e69139e9bee Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sat, 6 Nov 2010 00:02:36 +0000 Subject: [PATCH 04/55] Per-project vpns, certificates, and revocation --- CA/geninter.sh | 26 ++++++---- 
CA/genrootca.sh | 1 + CA/openssl.cnf.tmpl | 3 +- CA/{INTER => projects}/.gitignore | 0 CA/{INTER => projects}/.placeholder | 0 bin/nova-manage | 75 ++++++++++++++++++++--------- nova/auth/manager.py | 73 +++++++++++++++------------- nova/tests/auth_unittest.py | 17 +++---- 8 files changed, 118 insertions(+), 77 deletions(-) rename CA/{INTER => projects}/.gitignore (100%) rename CA/{INTER => projects}/.placeholder (100%) diff --git a/CA/geninter.sh b/CA/geninter.sh index 7d6c280d..1fbcc9e7 100755 --- a/CA/geninter.sh +++ b/CA/geninter.sh @@ -16,16 +16,24 @@ # License for the specific language governing permissions and limitations # under the License. -# ARG is the id of the user -export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-intCA-$1" -mkdir INTER/$1 -cd INTER/$1 +# $1 is the id of the project and $2 is the subject of the cert +NAME=$1 +SUBJ=$2 +mkdir -p projects/$NAME +cd projects/$NAME cp ../../openssl.cnf.tmpl openssl.cnf -sed -i -e s/%USERNAME%/$1/g openssl.cnf +sed -i -e s/%USERNAME%/$NAME/g openssl.cnf mkdir certs crl newcerts private +openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes echo "10" > serial touch index.txt -openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes -openssl req -new -sha2 -key private/cakey.pem -out ../../reqs/inter$1.csr -batch -subj "$SUBJ" -cd ../../ -openssl ca -extensions v3_ca -days 365 -out INTER/$1/cacert.pem -in reqs/inter$1.csr -config openssl.cnf -batch +# NOTE(vish): Disabling intermediate ca's because we don't actually need them. +# It makes more sense to have each project have its own root ca. +# openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes +# openssl req -new -sha256 -key private/cakey.pem -out ../../reqs/inter$NAME.csr -batch -subj "$SUBJ" +openssl ca -gencrl -config ./openssl.cnf -out crl.pem +if [ "`id -u`" != "`grep nova /etc/passwd | cut -d':' -f3`" ]; then + sudo chown -R nova:nogroup . +fi +# cd ../../ +# openssl ca -extensions v3_ca -days 365 -out INTER/$NAME/cacert.pem -in reqs/inter$NAME.csr -config openssl.cnf -batch diff --git a/CA/genrootca.sh b/CA/genrootca.sh index 31976092..8f2c3ee3 100755 --- a/CA/genrootca.sh +++ b/CA/genrootca.sh @@ -25,4 +25,5 @@ else openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes touch index.txt echo "10" > serial + openssl ca -gencrl -config ./openssl.cnf -out crl.pem fi diff --git a/CA/openssl.cnf.tmpl b/CA/openssl.cnf.tmpl index 639b8e80..dd81f1c2 100644 --- a/CA/openssl.cnf.tmpl +++ b/CA/openssl.cnf.tmpl @@ -24,7 +24,6 @@ dir = . 
[ ca ] default_ca = CA_default -unique_subject = no [ CA_default ] serial = $dir/serial @@ -32,6 +31,8 @@ database = $dir/index.txt new_certs_dir = $dir/newcerts certificate = $dir/cacert.pem private_key = $dir/private/cakey.pem +unique_subject = no +default_crl_days = 365 default_days = 365 default_md = md5 preserve = no diff --git a/CA/INTER/.gitignore b/CA/projects/.gitignore similarity index 100% rename from CA/INTER/.gitignore rename to CA/projects/.gitignore diff --git a/CA/INTER/.placeholder b/CA/projects/.placeholder similarity index 100% rename from CA/INTER/.placeholder rename to CA/projects/.placeholder diff --git a/bin/nova-manage b/bin/nova-manage index 08b3da12..b788ee62 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -69,6 +69,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) from nova import context +from nova import crypto from nova import db from nova import exception from nova import flags @@ -93,32 +94,36 @@ class VpnCommands(object): self.manager = manager.AuthManager() self.pipe = pipelib.CloudPipe() - def list(self): - """Print a listing of the VPNs for all projects.""" + def list(self, project=None): + """Print a listing of the VPN data for one or all projects. + + args: [project=all]""" print "%-12s\t" % 'project', print "%-20s\t" % 'ip:port', + print "%-20s\t" % 'private_ip', print "%s" % 'state' - for project in self.manager.get_projects(): + if project: + projects = [self.manager.get_project(project)] + else: + projects = self.manager.get_projects() + for project in projects: print "%-12s\t" % project.name, - - try: - s = "%s:%s" % (project.vpn_ip, project.vpn_port) - except exception.NotFound: - s = "None" - print "%-20s\t" % s, - + ipport = "%s:%s" % (project.vpn_ip, project.vpn_port) + print "%-20s\t" % ipport, vpn = self._vpn_for(project.id) if vpn: - command = "ping -c1 -w1 %s > /dev/null; echo $?" - out, _err = utils.execute(command % vpn['private_dns_name'], - check_exit_code=False) - if out.strip() == '0': - net = 'up' - else: - net = 'down' - print vpn['private_dns_name'], - print vpn['node_name'], - print vpn['instance_id'], + net = 'down' + address = None + if vpn.get('fixed_ip', None): + address = vpn['fixed_ip']['address'] + command = "ping -c1 -w1 %s > /dev/null; echo $?" + out, _err = utils.execute(command % address, + check_exit_code=False) + if out.strip() == '0': + net = 'up' + print address, + print vpn['host'], + print vpn['ec2_id'], print vpn['state_description'], print net @@ -127,11 +132,11 @@ class VpnCommands(object): def _vpn_for(self, project_id): """Get the VPN instance for a project ID.""" - for instance in db.instance_get_all(context.get_admin_context()): + ctxt = context.get_admin_context() + for instance in db.instance_get_all_by_project(ctxt, project_id): if (instance['image_id'] == FLAGS.vpn_image_id and not instance['state_description'] in - ['shutting_down', 'shutdown'] - and instance['project_id'] == project_id): + ['shutting_down', 'shutdown']): return instance def spawn(self): @@ -146,6 +151,22 @@ class VpnCommands(object): """Start the VPN for a given project.""" self.pipe.launch_vpn_instance(project_id) + def change(self, project_id, ip, port): + """Change the ip and port for a vpn. 
+ + args: project, ip, port""" + project = self.manager.get_project(project_id) + if not project: + print 'No project %s' % (project_id) + return + admin = context.get_admin_context() + network_ref = db.project_get_network(admin, project_id) + db.network_update(admin, + network_ref['id'], + {'vpn_public_address': ip, + 'vpn_public_port': int(port)}) + + class ShellCommands(object): def bpython(self): @@ -292,6 +313,14 @@ class UserCommands(object): is_admin = False self.manager.modify_user(name, access_key, secret_key, is_admin) + def revoke(self, user_id, project_id=None): + """revoke certs for a user + arguments: user_id [project_id]""" + if project_id: + crypto.revoke_certs_by_user_and_project(user_id, project_id) + else: + crypto.revoke_certs_by_user(user_id) + class ProjectCommands(object): """Class for managing projects.""" diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 001a9687..c6d4b6e5 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -64,12 +64,8 @@ flags.DEFINE_string('credential_key_file', 'pk.pem', 'Filename of private key in credentials zip') flags.DEFINE_string('credential_cert_file', 'cert.pem', 'Filename of certificate in credentials zip') -flags.DEFINE_string('credential_rc_file', 'novarc', +flags.DEFINE_string('credential_rc_file', '%src', 'Filename of rc in credentials zip') -flags.DEFINE_string('credential_cert_subject', - '/C=US/ST=California/L=MountainView/O=AnsoLabs/' - 'OU=NovaDev/CN=%s-%s', - 'Subject for certificate for users') flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', 'Driver that auth manager uses') @@ -625,27 +621,37 @@ class AuthManager(object): with self.driver() as drv: drv.modify_user(uid, access_key, secret_key, admin) - def get_credentials(self, user, project=None): + def get_credentials(self, user, project=None, use_dmz=True): """Get credential zip for user in project""" if not isinstance(user, User): user = self.get_user(user) if project is None: project = user.id pid = Project.safe_id(project) - rc = self.__generate_rc(user.access, user.secret, pid) - private_key, signed_cert = self._generate_x509_cert(user.id, pid) + private_key, signed_cert = crypto.generate_x509_cert(user.id, pid) tmpdir = tempfile.mkdtemp() zf = os.path.join(tmpdir, "temp.zip") zippy = zipfile.ZipFile(zf, 'w') - zippy.writestr(FLAGS.credential_rc_file, rc) + if use_dmz and FLAGS.region_list: + regions = {} + for item in FLAGS.region_list: + region, _sep, region_host = item.partition("=") + regions[region] = region_host + else: + regions = {'nova': FLAGS.cc_host} + for region, host in regions.iteritems(): + rc = self.__generate_rc(user.access, + user.secret, + pid, + use_dmz, + host) + zippy.writestr(FLAGS.credential_rc_file % region, rc) + zippy.writestr(FLAGS.credential_key_file, private_key) zippy.writestr(FLAGS.credential_cert_file, signed_cert) - try: - (vpn_ip, vpn_port) = self.get_project_vpn_data(project) - except exception.NotFound: - vpn_ip = None + (vpn_ip, vpn_port) = self.get_project_vpn_data(project) if vpn_ip: configfile = open(FLAGS.vpn_client_template, "r") s = string.Template(configfile.read()) @@ -656,10 +662,9 @@ class AuthManager(object): port=vpn_port) zippy.writestr(FLAGS.credential_vpn_file, config) else: - logging.warn("No vpn data for project %s" % - pid) + LOG.warn("No vpn data for project %s", pid) - zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id)) + zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid)) zippy.close() with open(zf, 'rb') as f: read_buffer = f.read() @@ -667,38 +672,38 @@ class 
AuthManager(object): shutil.rmtree(tmpdir) return read_buffer - def get_environment_rc(self, user, project=None): + def get_environment_rc(self, user, project=None, use_dmz=True): """Get credential zip for user in project""" if not isinstance(user, User): user = self.get_user(user) if project is None: project = user.id pid = Project.safe_id(project) - return self.__generate_rc(user.access, user.secret, pid) + return self.__generate_rc(user.access, user.secret, pid, use_dmz) @staticmethod - def __generate_rc(access, secret, pid): + def __generate_rc(access, secret, pid, use_dmz=True, host=None): """Generate rc file for user""" + if use_dmz: + cc_host = FLAGS.cc_dmz + else: + cc_host = FLAGS.cc_host + # NOTE(vish): Always use the dmz since it is used from inside the + # instance + s3_host = FLAGS.s3_dmz + if host: + s3_host = host + cc_host = host rc = open(FLAGS.credentials_template).read() rc = rc % {'access': access, 'project': pid, 'secret': secret, - 'ec2': FLAGS.ec2_url, - 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port), + 'ec2': '%s://%s:%s%s' % (FLAGS.ec2_prefix, + cc_host, + FLAGS.cc_port, + FLAGS.ec2_suffix), + 's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port), 'nova': FLAGS.ca_file, 'cert': FLAGS.credential_cert_file, 'key': FLAGS.credential_key_file} return rc - - def _generate_x509_cert(self, uid, pid): - """Generate x509 cert for user""" - (private_key, csr) = crypto.generate_x509_cert( - self.__cert_subject(uid)) - # TODO(joshua): This should be async call back to the cloud controller - signed_cert = crypto.sign_csr(csr, pid) - return (private_key, signed_cert) - - @staticmethod - def __cert_subject(uid): - """Helper to generate cert subject""" - return FLAGS.credential_cert_subject % (uid, utils.isotime()) diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index fe891bee..0d2082bd 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -208,17 +208,13 @@ class AuthManagerTestCase(object): # so it probably belongs in crypto_unittest # but I'm leaving it where I found it. 
with user_and_project_generator(self.manager) as (user, project): - # NOTE(todd): Should mention why we must setup controller first - # (somebody please clue me in) - cloud_controller = cloud.CloudController() - cloud_controller.setup() - _key, cert_str = self.manager._generate_x509_cert('test1', - 'testproj') + # NOTE(vish): Setup runs genroot.sh if it hasn't been run + cloud.CloudController().setup() + _key, cert_str = crypto.generate_x509_cert(user.id, project.id) logging.debug(cert_str) - # Need to verify that it's signed by the right intermediate CA - full_chain = crypto.fetch_ca(project_id='testproj', chain=True) - int_cert = crypto.fetch_ca(project_id='testproj', chain=False) + full_chain = crypto.fetch_ca(project_id=project.id, chain=True) + int_cert = crypto.fetch_ca(project_id=project.id, chain=False) cloud_cert = crypto.fetch_ca() logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) signed_cert = X509.load_cert_string(cert_str) @@ -227,7 +223,8 @@ class AuthManagerTestCase(object): cloud_cert = X509.load_cert_string(cloud_cert) self.assertTrue(signed_cert.verify(chain_cert.get_pubkey())) self.assertTrue(signed_cert.verify(int_cert.get_pubkey())) - if not FLAGS.use_intermediate_ca: + + if not FLAGS.use_project_ca: self.assertTrue(signed_cert.verify(cloud_cert.get_pubkey())) else: self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey())) From 645fe4a8b825cb0cb4dc72836fed51a2939a0422 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sat, 6 Nov 2010 00:58:05 +0000 Subject: [PATCH 05/55] add dmz to flags and change a couple defaults --- nova/flags.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 4ae86d9b..acdfc6f6 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -180,7 +180,8 @@ DEFINE_list('region_list', 'list of region=url pairs separated by commas') DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_integer('s3_port', 3333, 's3 port') -DEFINE_string('s3_host', '127.0.0.1', 's3 host') +DEFINE_string('s3_host', '127.0.0.1', 's3 host (for infrastructure)') +DEFINE_string('s3_dmz', '127.0.0.1', 's3 dmz ip (for instances)') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on') @@ -197,7 +198,8 @@ DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') -DEFINE_string('cc_host', '127.0.0.1', 'ip of api server') +DEFINE_string('cc_host', '127.0.0.1', 'ip of api server (for infrastructure') +DEFINE_string('cc_dmz', '127.0.0.1', 'ip of api server (for instances)') DEFINE_integer('cc_port', 8773, 'cloud controller port') DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', 'Url to ec2 api server') @@ -211,10 +213,10 @@ DEFINE_string('default_ramdisk', 'ari-11111', DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') -DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server') +DEFINE_string('vpn_image_id', 'ami-cloudpipe', 'AMI for cloudpipe vpn server') DEFINE_string('vpn_key_suffix', - '-key', - 'Suffix to add to project name for vpn key') + '-vpn', + 'Suffix to add to project name for vpn key and secgroups') DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') From 
0bcf74f055ecf41c9dc4129bb435169a51ecc170 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 17 Nov 2010 02:33:09 +0000 Subject: [PATCH 06/55] remove extra line and ref. to LOG that doesn't exist --- bin/nova-manage | 1 - nova/auth/manager.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index b788ee62..4ab2e983 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -167,7 +167,6 @@ class VpnCommands(object): 'vpn_public_port': int(port)}) - class ShellCommands(object): def bpython(self): """Runs a bpython shell. diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c6d4b6e5..252c5e65 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -662,7 +662,7 @@ class AuthManager(object): port=vpn_port) zippy.writestr(FLAGS.credential_vpn_file, config) else: - LOG.warn("No vpn data for project %s", pid) + logging.warn("No vpn data for project %s", pid) zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid)) zippy.close() From 3f0365eb87fbe777620df2f79fd928275c0b62b6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 23 Nov 2010 21:48:32 +0000 Subject: [PATCH 07/55] add vpn ping and optimize vpn list --- bin/nova-manage | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 4ab2e983..3d012263 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -106,39 +106,30 @@ class VpnCommands(object): projects = [self.manager.get_project(project)] else: projects = self.manager.get_projects() + # NOTE(vish): This hits the database a lot. We could optimize + # by getting all networks in one query and all vpns + # in aother query, then doing lookups by project for project in projects: print "%-12s\t" % project.name, ipport = "%s:%s" % (project.vpn_ip, project.vpn_port) print "%-20s\t" % ipport, - vpn = self._vpn_for(project.id) + ctxt = context.get_admin_context() + vpn = db.instance_get_project_vpn(ctxt, project.id) if vpn: - net = 'down' address = None + state = 'down' if vpn.get('fixed_ip', None): address = vpn['fixed_ip']['address'] - command = "ping -c1 -w1 %s > /dev/null; echo $?" 
- out, _err = utils.execute(command % address, - check_exit_code=False) - if out.strip() == '0': - net = 'up' + if utils.vpn_ping(project.vpn_ip, project.vpn_port): + state = 'up' print address, print vpn['host'], print vpn['ec2_id'], print vpn['state_description'], - print net - + print state else: print None - def _vpn_for(self, project_id): - """Get the VPN instance for a project ID.""" - ctxt = context.get_admin_context() - for instance in db.instance_get_all_by_project(ctxt, project_id): - if (instance['image_id'] == FLAGS.vpn_image_id - and not instance['state_description'] in - ['shutting_down', 'shutdown']): - return instance - def spawn(self): """Run all VPNs.""" for p in reversed(self.manager.get_projects()): From 58c765cfc60276bff7520bb373f43ead42e01832 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 24 Nov 2010 21:40:41 +0000 Subject: [PATCH 08/55] don't error on edge case where vpn has been launched but fails to get a network --- bin/nova-manage | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 3d012263..7c07ce3f 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -120,7 +120,8 @@ class VpnCommands(object): state = 'down' if vpn.get('fixed_ip', None): address = vpn['fixed_ip']['address'] - if utils.vpn_ping(project.vpn_ip, project.vpn_port): + if project.vpn_ip and utils.vpn_ping(project.vpn_ip, + project.vpn_port): state = 'up' print address, print vpn['host'], From e555d68745023a5ec054deec6d9748c03d1c6d42 Mon Sep 17 00:00:00 2001 From: Rick Clark Date: Tue, 30 Nov 2010 16:09:31 -0600 Subject: [PATCH 09/55] Fixed termie's tiny bits from the prior merge request --- nova/flags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index cf481b55..a6103320 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -237,7 +237,7 @@ DEFINE_string('default_ramdisk', 'ari-11111', DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') DEFINE_string('null_kernel', 'aki-00000000', - 'kernel image that indicates not to use a kernel, ' + 'kernel image that indicates not to use a kernel,' ' but to use a raw disk image instead') DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server') From 8d70f55ce0611e4234e39552f0ec6a90a83cc188 Mon Sep 17 00:00:00 2001 From: Rick Clark Date: Wed, 1 Dec 2010 16:23:34 -0600 Subject: [PATCH 10/55] Changed null_kernel flag from aki-00000000 to nokernel --- nova/flags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index a6103320..be81fd7e 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -236,7 +236,7 @@ DEFINE_string('default_ramdisk', 'ari-11111', 'default ramdisk to use, testing only') DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') -DEFINE_string('null_kernel', 'aki-00000000', +DEFINE_string('null_kernel', 'nokernel', 'kernel image that indicates not to use a kernel,' ' but to use a raw disk image instead') From 47ec3fa70e6cda4eecc165c189a735db424c45b4 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Fri, 3 Dec 2010 00:01:21 +0000 Subject: [PATCH 11/55] * Removes unused schema * Removes MUST uid from novaUser * Changes isAdmin to isNovaAdmin * Adds two new configuration options: ** ldap_user_id_attribute, with a default of uid ** ldap_user_name_attribute, with a default of cn * ldapdriver.py has been modified to use these changes Rationale: Removing uid from novaUser: Requiring uid makes 
the schema very posix specific. Other schemas don't use uid for identifiers at all. This change makes the schema more interoperable. Changing isAdmin to isNovaAdmin: This attribute is too generic. It doesn't describe what the user is an admin of, and in a pre-existing directory is out of place. This change is to make the attribute more specific to the software. Adding config options for id and name: This is another interoperability change. This change makes the driver more compatible with directories like AD, where sAMAccountName is used instead of uid. Also, some directory admins prefer to use displayName rather than CN for full names of users. --- nova/auth/ldapdriver.py | 21 ++++++++++++--------- nova/auth/nova_openldap.schema | 26 +++----------------------- nova/auth/nova_sun.schema | 6 ++---- nova/auth/openssh-lpk_openldap.schema | 19 ------------------- nova/auth/openssh-lpk_sun.schema | 10 ---------- 5 files changed, 17 insertions(+), 65 deletions(-) delete mode 100644 nova/auth/openssh-lpk_openldap.schema delete mode 100644 nova/auth/openssh-lpk_sun.schema diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index ceade1d6..e4c36c28 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -37,6 +37,8 @@ flags.DEFINE_string('ldap_url', 'ldap://localhost', flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', 'DN of admin user') +flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id') +flags.DEFINE_string('ldap_user_name_attribute', 'cn', 'Attribute to use as name') flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users') @@ -131,12 +133,12 @@ class LdapDriver(object): 'inetOrgPerson', 'novaUser']), ('ou', [FLAGS.ldap_user_unit]), - ('uid', [name]), + (FLAGS.ldap_user_id_attribute, [name]), ('sn', [name]), - ('cn', [name]), + (FLAGS.ldap_user_name_attribute, [name]), ('secretKey', [secret_key]), ('accessKey', [access_key]), - ('isAdmin', [str(is_admin).upper()]), + ('isNovaAdmin', [str(is_admin).upper()]), ] self.conn.add_s(self.__uid_to_dn(name), attr) return self.__to_user(dict(attr)) @@ -274,7 +276,7 @@ class LdapDriver(object): if secret_key: attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key)) if admin is not None: - attr.append((self.ldap.MOD_REPLACE, 'isAdmin', str(admin).upper())) + attr.append((self.ldap.MOD_REPLACE, 'isNovaAdmin', str(admin).upper())) self.conn.modify_s(self.__uid_to_dn(uid), attr) def __user_exists(self, uid): @@ -450,11 +452,11 @@ class LdapDriver(object): if attr == None: return None return { - 'id': attr['uid'][0], - 'name': attr['cn'][0], + 'id': attr[FLAGS.ldap_user_id_attribute][0], + 'name': attr[FLAGS.ldap_user_name_attribute][0], 'access': attr['accessKey'][0], 'secret': attr['secretKey'][0], - 'admin': (attr['isAdmin'][0] == 'TRUE')} + 'admin': (attr['isNovaAdmin'][0] == 'TRUE')} def __to_project(self, attr): """Convert ldap attributes to Project object""" @@ -474,9 +476,10 @@ class LdapDriver(object): return dn.split(',')[0].split('=')[1] @staticmethod - def __uid_to_dn(dn): + def __uid_to_dn(uid): """Convert uid to dn""" - return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree) + return FLAGS.ldap_user_id_attribute + '=%s,%s' \ + % (uid, FLAGS.ldap_user_subtree) class FakeLdapDriver(LdapDriver): diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index 4047361d..9e528f58 100644 --- 
a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -30,20 +30,10 @@ attributetype ( SINGLE-VALUE ) -attributetype ( - novaAttrs:3 - NAME 'keyFingerprint' - DESC 'Fingerprint of private key' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - attributetype ( novaAttrs:4 - NAME 'isAdmin' - DESC 'Is user an administrator?' + NAME 'isNovaAdmin' + DESC 'Is user an nova administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE @@ -61,17 +51,7 @@ objectClass ( NAME 'novaUser' DESC 'access and secret keys' AUXILIARY - MUST ( uid ) - MAY ( accessKey $ secretKey $ isAdmin ) - ) - -objectClass ( - novaOCs:2 - NAME 'novaKeyPair' - DESC 'Key pair for User' - SUP top - STRUCTURAL - MUST ( cn $ sshPublicKey $ keyFingerprint ) + MAY ( accessKey $ secretKey $ isNovaAdmin ) ) objectClass ( diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index e925e05e..decf10f0 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -8,9 +8,7 @@ dn: cn=schema attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) -attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE) -attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) +attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isNovaAdmin' DESC 'Is user a nova administrator?' 
EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) -objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) ) -objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) ) +objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) ) objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) ) diff --git a/nova/auth/openssh-lpk_openldap.schema b/nova/auth/openssh-lpk_openldap.schema deleted file mode 100644 index 93351da6..00000000 --- a/nova/auth/openssh-lpk_openldap.schema +++ /dev/null @@ -1,19 +0,0 @@ -# -# LDAP Public Key Patch schema for use with openssh-ldappubkey -# Author: Eric AUGE -# -# Based on the proposal of : Mark Ruijter -# - - -# octetString SYNTAX -attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' - DESC 'MANDATORY: OpenSSH Public key' - EQUALITY octetStringMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) - -# printableString SYNTAX yes|no -objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY - DESC 'MANDATORY: OpenSSH LPK objectclass' - MAY ( sshPublicKey $ uid ) - ) diff --git a/nova/auth/openssh-lpk_sun.schema b/nova/auth/openssh-lpk_sun.schema deleted file mode 100644 index 5f52db3b..00000000 --- a/nova/auth/openssh-lpk_sun.schema +++ /dev/null @@ -1,10 +0,0 @@ -# -# LDAP Public Key Patch schema for use with openssh-ldappubkey -# Author: Eric AUGE -# -# Schema for Sun Directory Server. -# Based on the original schema, modified by Stefan Fischer. -# -dn: cn=schema -attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) -objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) ) From fc4776319f9cd7e1c89a4210af2401eb416c0f4b Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 8 Dec 2010 10:22:29 +0000 Subject: [PATCH 12/55] Removing novaProject from the schema. This change may look odd at first; here's how it works: Both roles are projects are groupOfNames. Previously, we were differentiating projects from project roles by using the novaProject objectclass on the project, and not on the roles. This change removes novaProject, and uses the owner attribute instead of the projectManager attribute. Only projects should have an owner. We can differentiate projects from project roles by checking for the existence of this attribute. To check for the existence of an attribute in LDAP, a wildcard search is used. The fake LDAP driver did not support wildcard searches, so I put in "all or nothing" support for it. The wildcard search support doesn't work exactly like wildcard searches in LDAP, but will work for the case that's required. 
--- nova/auth/fakeldap.py | 3 +++ nova/auth/ldapdriver.py | 16 ++++++++-------- nova/auth/nova_openldap.schema | 16 ---------------- nova/auth/nova_sun.schema | 2 -- nova/auth/opendj.sh | 2 -- nova/auth/slap.sh | 4 +--- 6 files changed, 12 insertions(+), 31 deletions(-) diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index 46e0135b..2dcb6926 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -119,6 +119,9 @@ def _match(key, value, attrs): """Match a given key and value against an attribute list.""" if key not in attrs: return False + # This is a wild card search. Implemented as all or nothing for now. + if value == "*": + return True if key != "objectclass": return value in attrs[key] # it is an objectclass check, so check subclasses diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 87151566..705e89ee 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -106,7 +106,7 @@ class LdapDriver(object): """Retrieve project by id""" dn = 'cn=%s,%s' % (pid, FLAGS.ldap_project_subtree) - attr = self.__find_object(dn, '(objectclass=novaProject)') + attr = self.__find_object(dn, '(owner=*)') return self.__to_project(attr) def get_users(self): @@ -122,7 +122,7 @@ class LdapDriver(object): def get_projects(self, uid=None): """Retrieve list of projects""" - pattern = '(objectclass=novaProject)' + pattern = '(owner=*)' if uid: pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid)) attrs = self.__find_objects(FLAGS.ldap_project_subtree, @@ -205,10 +205,10 @@ class LdapDriver(object): if not manager_dn in members: members.append(manager_dn) attr = [ - ('objectclass', ['novaProject']), + ('objectclass', ['groupOfNames']), ('cn', [name]), ('description', [description]), - ('projectManager', [manager_dn]), + ('owner', [manager_dn]), ('member', members)] self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr) return self.__to_project(dict(attr)) @@ -224,7 +224,7 @@ class LdapDriver(object): "manager %s doesn't exist" % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) - attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn)) + attr.append((self.ldap.MOD_REPLACE, 'owner', manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) self.conn.modify_s('cn=%s,%s' % (project_id, @@ -286,7 +286,7 @@ class LdapDriver(object): project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) roles = self.__find_objects(project_dn, '(&(&(objectclass=groupOfNames)' - '(!(objectclass=novaProject)))' + '(!(owner=*)))' '(member=%s))' % self.__uid_to_dn(uid)) return [role['cn'][0] for role in roles] @@ -385,7 +385,7 @@ class LdapDriver(object): def __find_role_dns(self, tree): """Find dns of role objects in given tree""" return self.__find_dns(tree, - '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))') + '(&(objectclass=groupOfNames)(!(owner=*)))') def __find_group_dns_with_member(self, tree, uid): """Find dns of group objects in a given tree that contain member""" @@ -534,7 +534,7 @@ class LdapDriver(object): return { 'id': attr['cn'][0], 'name': attr['cn'][0], - 'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]), + 'project_manager_id': self.__dn_to_uid(attr['owner'][0]), 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index 9e528f58..1a10a445 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ 
-39,13 +39,6 @@ attributetype ( SINGLE-VALUE ) -attributetype ( - novaAttrs:5 - NAME 'projectManager' - DESC 'Project Managers of a project' - SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 - ) - objectClass ( novaOCs:1 NAME 'novaUser' @@ -53,12 +46,3 @@ objectClass ( AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) ) - -objectClass ( - novaOCs:3 - NAME 'novaProject' - DESC 'Container for project' - SUP groupOfNames - STRUCTURAL - MUST ( cn $ projectManager ) - ) diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index decf10f0..1a04601b 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -9,6 +9,4 @@ dn: cn=schema attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isNovaAdmin' DESC 'Is user a nova administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) -attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) ) -objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) ) diff --git a/nova/auth/opendj.sh b/nova/auth/opendj.sh index 8052c077..9a960034 100755 --- a/nova/auth/opendj.sh +++ b/nova/auth/opendj.sh @@ -30,9 +30,7 @@ fi abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` schemapath='/var/opendj/instance/config/schema' -cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif -chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif chown opendj:opendj $schemapath/98-nova_sun.ldif cat >/etc/ldap/ldap.conf </dev/null; echo "$PWD"/"${0##*/}")"` -cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema -cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema +cp $abspath/nova_openldap.schema /etc/ldap/schema/nova.schema mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig cat >/etc/ldap/slapd.conf </etc/ldap/slapd.conf < Date: Wed, 8 Dec 2010 16:23:59 +0000 Subject: [PATCH 13/55] Adding support for choosing a schema version, so that users can more easily migrate from an old schema to the new schema. --- nova/auth/ldapdriver.py | 79 ++++++++++++++++++++-------------- nova/auth/nova_openldap.schema | 4 +- nova/auth/nova_sun.schema | 5 ++- 3 files changed, 52 insertions(+), 36 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 705e89ee..21d8f806 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -32,6 +32,8 @@ from nova import flags FLAGS = flags.FLAGS +flags.DEFINE_integer('ldap_schema_version', 1, + 'Current version of the LDAP schema') flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') @@ -75,10 +77,20 @@ class LdapDriver(object): Defines enter and exit and therefore supports the with/as syntax. 
""" + project_pattern = '(owner=*)' + isadmin_attribute = 'isNovaAdmin' + project_attribute = 'owner' + project_objectclass = 'groupOfNames' + def __init__(self): """Imports the LDAP module""" self.ldap = __import__('ldap') self.conn = None + if FLAGS.ldap_schema_version == 1: + LdapDriver.project_pattern = '(objectclass=novaProject)' + LdapDriver.isadmin_attribute = 'isAdmin' + LdapDriver.project_attribute = 'projectManager' + LdapDriver.project_objectclass = 'novaProject' def __enter__(self): """Creates the connection to LDAP""" @@ -106,7 +118,7 @@ class LdapDriver(object): """Retrieve project by id""" dn = 'cn=%s,%s' % (pid, FLAGS.ldap_project_subtree) - attr = self.__find_object(dn, '(owner=*)') + attr = self.__find_object(dn, LdapDriver.project_pattern) return self.__to_project(attr) def get_users(self): @@ -122,7 +134,7 @@ class LdapDriver(object): def get_projects(self, uid=None): """Retrieve list of projects""" - pattern = '(owner=*)' + pattern = LdapDriver.project_pattern if uid: pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid)) attrs = self.__find_objects(FLAGS.ldap_project_subtree, @@ -152,11 +164,11 @@ class LdapDriver(object): else: attr.append((self.ldap.MOD_ADD, 'accessKey', \ [access_key])) - if 'isNovaAdmin' in user.keys(): - attr.append((self.ldap.MOD_REPLACE, 'isNovaAdmin', \ + if LdapDriver.isadmin_attribute in user.keys(): + attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, \ [str(is_admin).upper()])) else: - attr.append((self.ldap.MOD_ADD, 'isNovaAdmin', \ + attr.append((self.ldap.MOD_ADD, LdapDriver.isadmin_attribute, \ [str(is_admin).upper()])) self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) @@ -175,7 +187,7 @@ class LdapDriver(object): (FLAGS.ldap_user_name_attribute, [name]), ('secretKey', [secret_key]), ('accessKey', [access_key]), - ('isNovaAdmin', [str(is_admin).upper()]), + (LdapDriver.isadmin_attribute, [str(is_admin).upper()]), ] self.conn.add_s(self.__uid_to_dn(name), attr) return self.__to_user(dict(attr)) @@ -205,10 +217,10 @@ class LdapDriver(object): if not manager_dn in members: members.append(manager_dn) attr = [ - ('objectclass', ['groupOfNames']), + ('objectclass', [LdapDriver.project_objectclass]), ('cn', [name]), ('description', [description]), - ('owner', [manager_dn]), + (LdapDriver.project_attribute, [manager_dn]), ('member', members)] self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr) return self.__to_project(dict(attr)) @@ -224,7 +236,7 @@ class LdapDriver(object): "manager %s doesn't exist" % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) - attr.append((self.ldap.MOD_REPLACE, 'owner', manager_dn)) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute, manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) self.conn.modify_s('cn=%s,%s' % (project_id, @@ -284,10 +296,9 @@ class LdapDriver(object): return roles else: project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) - roles = self.__find_objects(project_dn, - '(&(&(objectclass=groupOfNames)' - '(!(owner=*)))' - '(member=%s))' % self.__uid_to_dn(uid)) + query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' % + (LdapDriver.project_pattern, self.__uid_to_dn(uid))) + roles = self.__find_objects(project_dn, query) return [role['cn'][0] for role in roles] def delete_user(self, uid): @@ -306,9 +317,9 @@ class LdapDriver(object): if 'accessKey' in user.keys(): attr.append((self.ldap.MOD_DELETE, 'accessKey', \ user['accessKey'])) - if 
'isNovaAdmin' in user.keys(): - attr.append((self.ldap.MOD_DELETE, 'isNovaAdmin', \ - user['isNovaAdmin'])) + if LdapDriver.isadmin_attribute in user.keys(): + attr.append((self.ldap.MOD_DELETE, LdapDriver.isadmin_attribute, \ + user[LdapDriver.isadmin_attribute])) self.conn.modify_s(self.__uid_to_dn(uid), attr) else: # Delete entry @@ -330,7 +341,7 @@ class LdapDriver(object): if secret_key: attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key)) if admin is not None: - attr.append((self.ldap.MOD_REPLACE, 'isNovaAdmin', str(admin).upper())) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, str(admin).upper())) self.conn.modify_s(self.__uid_to_dn(uid), attr) def __user_exists(self, uid): @@ -384,19 +395,20 @@ class LdapDriver(object): def __find_role_dns(self, tree): """Find dns of role objects in given tree""" - return self.__find_dns(tree, - '(&(objectclass=groupOfNames)(!(owner=*)))') + query = '(&(objectclass=groupOfNames)(!%s))' % LdapDriver.project_pattern + return self.__find_dns(tree, query) def __find_group_dns_with_member(self, tree, uid): """Find dns of group objects in a given tree that contain member""" - dns = self.__find_dns(tree, - '(&(objectclass=groupOfNames)(member=%s))' % - self.__uid_to_dn(uid)) + query = ('(&(objectclass=groupOfNames)(member=%s))' % + self.__uid_to_dn(uid)) + dns = self.__find_dns(tree, query) return dns def __group_exists(self, dn): """Check if group exists""" - return self.__find_object(dn, '(objectclass=groupOfNames)') is not None + query = '(objectclass=groupOfNames)' + return self.__find_object(dn, query) is not None @staticmethod def __role_to_dn(role, project_id=None): @@ -435,7 +447,7 @@ class LdapDriver(object): """Check if user is in group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be searched in group " - "becuase the user doesn't exist" % (uid,)) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -447,10 +459,10 @@ class LdapDriver(object): """Add user to group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be added to the group " - "becuase the user doesn't exist" % (uid,)) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + group_dn) if self.__is_in_group(uid, group_dn): raise exception.Duplicate("User %s is already a member of " "the group %s" % (uid, group_dn)) @@ -461,13 +473,13 @@ class LdapDriver(object): """Remove user from group""" if not self.__group_exists(group_dn): raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + group_dn) if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % (uid,)) + "group because the user doesn't exist" % uid) if not self.__is_in_group(uid, group_dn): raise exception.NotFound("User %s is not a member of the group" % - (uid,)) + uid) # NOTE(vish): remove user from group and any sub_groups sub_dns = self.__find_group_dns_with_member( group_dn, uid) @@ -489,7 +501,7 @@ class LdapDriver(object): """Remove user from all roles and projects""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % (uid,)) + "because the user doesn't exist" % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -516,13 
+528,13 @@ class LdapDriver(object): if attr is None: return None if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \ - and 'isNovaAdmin' in attr.keys()): + and LdapDriver.isadmin_attribute in attr.keys()): return { 'id': attr[FLAGS.ldap_user_id_attribute][0], 'name': attr[FLAGS.ldap_user_name_attribute][0], 'access': attr['accessKey'][0], 'secret': attr['secretKey'][0], - 'admin': (attr['isNovaAdmin'][0] == 'TRUE')} + 'admin': (attr[LdapDriver.isadmin_attribute][0] == 'TRUE')} else: return None @@ -534,7 +546,8 @@ class LdapDriver(object): return { 'id': attr['cn'][0], 'name': attr['cn'][0], - 'project_manager_id': self.__dn_to_uid(attr['owner'][0]), + 'project_manager_id': + self.__dn_to_uid(attr[LdapDriver.project_attribute][0]), 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index 1a10a445..daa3a844 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -1,7 +1,9 @@ # # Person object for Nova # inetorgperson with extra attributes -# Author: Vishvananda Ishaya +# Schema version: 2 +# Authors: Vishvananda Ishaya +# Ryan Lane # # diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index 1a04601b..8e9052de 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -1,8 +1,9 @@ # # Person object for Nova # inetorgperson with extra attributes -# Author: Vishvananda Ishaya -# Modified for strict RFC 4512 compatibility by: Ryan Lane +# Schema version: 2 +# Authors: Vishvananda Ishaya +# Ryan Lane # # using internet experimental oid arc as per BP64 3.1 dn: cn=schema From e39d984a88eb9796de0a834a3a17a2c2fd683db8 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 8 Dec 2010 16:26:12 +0000 Subject: [PATCH 14/55] Setting the default schema version to the new schema --- nova/auth/ldapdriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 21d8f806..eac1db54 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -32,7 +32,7 @@ from nova import flags FLAGS = flags.FLAGS -flags.DEFINE_integer('ldap_schema_version', 1, +flags.DEFINE_integer('ldap_schema_version', 2, 'Current version of the LDAP schema') flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') From 2a4c32b57cf73dda36fc9d74dd4c99f227a0ba1e Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 8 Dec 2010 16:38:35 +0000 Subject: [PATCH 15/55] PEP8 fixes --- nova/auth/ldapdriver.py | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index eac1db54..870262a1 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -40,7 +40,8 @@ flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', 'DN of admin user') flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id') -flags.DEFINE_string('ldap_user_name_attribute', 'cn', 'Attribute to use as name') +flags.DEFINE_string('ldap_user_name_attribute', 'cn', + 'Attribute to use as name') flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users') @@ -153,23 +154,23 @@ class LdapDriver(object): # Malformed entries are useless, replace attributes found. 
attr = [] if 'secretKey' in user.keys(): - attr.append((self.ldap.MOD_REPLACE, 'secretKey', \ + attr.append((self.ldap.MOD_REPLACE, 'secretKey', [secret_key])) else: - attr.append((self.ldap.MOD_ADD, 'secretKey', \ + attr.append((self.ldap.MOD_ADD, 'secretKey', [secret_key])) if 'accessKey' in user.keys(): - attr.append((self.ldap.MOD_REPLACE, 'accessKey', \ + attr.append((self.ldap.MOD_REPLACE, 'accessKey', [access_key])) else: - attr.append((self.ldap.MOD_ADD, 'accessKey', \ + attr.append((self.ldap.MOD_ADD, 'accessKey', [access_key])) if LdapDriver.isadmin_attribute in user.keys(): - attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, \ - [str(is_admin).upper()])) + attr.append((self.ldap.MOD_REPLACE, + LdapDriver.isadmin_attribute, [str(is_admin).upper()])) else: - attr.append((self.ldap.MOD_ADD, LdapDriver.isadmin_attribute, \ - [str(is_admin).upper()])) + attr.append((self.ldap.MOD_ADD, + LdapDriver.isadmin_attribute, [str(is_admin).upper()])) self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: @@ -236,7 +237,8 @@ class LdapDriver(object): "manager %s doesn't exist" % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) - attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute, manager_dn)) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute, + manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) self.conn.modify_s('cn=%s,%s' % (project_id, @@ -312,14 +314,15 @@ class LdapDriver(object): # Retrieve user by name user = self.__get_ldap_user(uid) if 'secretKey' in user.keys(): - attr.append((self.ldap.MOD_DELETE, 'secretKey', \ - user['secretKey'])) + attr.append((self.ldap.MOD_DELETE, 'secretKey', + user['secretKey'])) if 'accessKey' in user.keys(): - attr.append((self.ldap.MOD_DELETE, 'accessKey', \ - user['accessKey'])) + attr.append((self.ldap.MOD_DELETE, 'accessKey', + user['accessKey'])) if LdapDriver.isadmin_attribute in user.keys(): - attr.append((self.ldap.MOD_DELETE, LdapDriver.isadmin_attribute, \ - user[LdapDriver.isadmin_attribute])) + attr.append((self.ldap.MOD_DELETE, + LdapDriver.isadmin_attribute, + user[LdapDriver.isadmin_attribute])) self.conn.modify_s(self.__uid_to_dn(uid), attr) else: # Delete entry @@ -341,7 +344,8 @@ class LdapDriver(object): if secret_key: attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key)) if admin is not None: - attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, str(admin).upper())) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, + str(admin).upper())) self.conn.modify_s(self.__uid_to_dn(uid), attr) def __user_exists(self, uid): @@ -395,7 +399,8 @@ class LdapDriver(object): def __find_role_dns(self, tree): """Find dns of role objects in given tree""" - query = '(&(objectclass=groupOfNames)(!%s))' % LdapDriver.project_pattern + query = ('(&(objectclass=groupOfNames)(!%s))' % + LdapDriver.project_pattern) return self.__find_dns(tree, query) def __find_group_dns_with_member(self, tree, uid): From 5e40e99b4ac2ec97fe227d16f65fae8305e143ce Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 10 Dec 2010 16:28:23 +0000 Subject: [PATCH 16/55] added unittest for pause --- nova/tests/compute_unittest.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 6f3ef96c..ad191795 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -138,6 +138,15 @@ class ComputeTestCase(test.TrialTestCase): yield 
self.compute.reboot_instance(self.context, instance_id) yield self.compute.terminate_instance(self.context, instance_id) + @defer.inlineCallbacks + def test_pause(self): + """Ensure instance can be paused""" + instance_id = self._create_instance() + yield self.compute.run_instance(self.context, instance_id) + yield self.compute.pause_instance(self.context, instance_id) + yield self.compute.unpause_instance(self.context, instance_id) + yield self.compute.terminate_instance(self.context, instance_id) + @defer.inlineCallbacks def test_console_output(self): """Make sure we can get console output from instance""" From 8444d0e5824f85701d7c819ed409bc66e4df5191 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Fri, 10 Dec 2010 18:49:54 +0000 Subject: [PATCH 17/55] Format fixes and modification of Vish's email address. --- nova/auth/ldapdriver.py | 41 ++++++++++++++++++---------------- nova/auth/nova_openldap.schema | 2 +- nova/auth/nova_sun.schema | 2 +- 3 files changed, 24 insertions(+), 21 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 870262a1..1b928e7d 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -125,7 +125,7 @@ class LdapDriver(object): def get_users(self): """Retrieve list of users""" attrs = self.__find_objects(FLAGS.ldap_user_subtree, - '(objectclass=novaUser)') + '(objectclass=novaUser)') users = [] for attr in attrs: user = self.__to_user(attr) @@ -155,22 +155,24 @@ class LdapDriver(object): attr = [] if 'secretKey' in user.keys(): attr.append((self.ldap.MOD_REPLACE, 'secretKey', - [secret_key])) + [secret_key])) else: attr.append((self.ldap.MOD_ADD, 'secretKey', - [secret_key])) + [secret_key])) if 'accessKey' in user.keys(): attr.append((self.ldap.MOD_REPLACE, 'accessKey', - [access_key])) + [access_key])) else: attr.append((self.ldap.MOD_ADD, 'accessKey', - [access_key])) + [access_key])) if LdapDriver.isadmin_attribute in user.keys(): attr.append((self.ldap.MOD_REPLACE, - LdapDriver.isadmin_attribute, [str(is_admin).upper()])) + LdapDriver.isadmin_attribute, + [str(is_admin).upper()])) else: attr.append((self.ldap.MOD_ADD, - LdapDriver.isadmin_attribute, [str(is_admin).upper()])) + LdapDriver.isadmin_attribute, + [str(is_admin).upper()])) self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: @@ -299,7 +301,7 @@ class LdapDriver(object): else: project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' % - (LdapDriver.project_pattern, self.__uid_to_dn(uid))) + (LdapDriver.project_pattern, self.__uid_to_dn(uid))) roles = self.__find_objects(project_dn, query) return [role['cn'][0] for role in roles] @@ -363,7 +365,7 @@ class LdapDriver(object): def __get_ldap_user(self, uid): """Retrieve LDAP user entry by id""" attr = self.__find_object(self.__uid_to_dn(uid), - '(objectclass=novaUser)') + '(objectclass=novaUser)') return attr def __find_object(self, dn, query=None, scope=None): @@ -406,7 +408,7 @@ class LdapDriver(object): def __find_group_dns_with_member(self, tree, uid): """Find dns of group objects in a given tree that contain member""" query = ('(&(objectclass=groupOfNames)(member=%s))' % - self.__uid_to_dn(uid)) + self.__uid_to_dn(uid)) dns = self.__find_dns(tree, query) return dns @@ -436,7 +438,8 @@ class LdapDriver(object): for member_uid in member_uids: if not self.__user_exists(member_uid): raise exception.NotFound("Group can't be created " - "because user %s doesn't exist" % member_uid) + "because user %s doesn't exist" % + 
member_uid) members.append(self.__uid_to_dn(member_uid)) dn = self.__uid_to_dn(uid) if not dn in members: @@ -452,7 +455,7 @@ class LdapDriver(object): """Check if user is in group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be searched in group " - "because the user doesn't exist" % uid) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -464,7 +467,7 @@ class LdapDriver(object): """Add user to group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be added to the group " - "because the user doesn't exist" % uid) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): raise exception.NotFound("The group at dn %s doesn't exist" % group_dn) @@ -481,13 +484,13 @@ class LdapDriver(object): group_dn) if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % uid) + "group because the user doesn't exist" % + uid) if not self.__is_in_group(uid, group_dn): raise exception.NotFound("User %s is not a member of the group" % uid) # NOTE(vish): remove user from group and any sub_groups - sub_dns = self.__find_group_dns_with_member( - group_dn, uid) + sub_dns = self.__find_group_dns_with_member(group_dn, uid) for sub_dn in sub_dns: self.__safe_remove_from_group(uid, sub_dn) @@ -506,7 +509,7 @@ class LdapDriver(object): """Remove user from all roles and projects""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % uid) + "because the user doesn't exist" % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -564,8 +567,8 @@ class LdapDriver(object): @staticmethod def __uid_to_dn(uid): """Convert uid to dn""" - return FLAGS.ldap_user_id_attribute + '=%s,%s' \ - % (uid, FLAGS.ldap_user_subtree) + return (FLAGS.ldap_user_id_attribute + '=%s,%s' + % (uid, FLAGS.ldap_user_subtree)) class FakeLdapDriver(LdapDriver): diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index daa3a844..539a5c42 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -2,7 +2,7 @@ # Person object for Nova # inetorgperson with extra attributes # Schema version: 2 -# Authors: Vishvananda Ishaya +# Authors: Vishvananda Ishaya # Ryan Lane # # diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index 8e9052de..4a6a7883 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -2,7 +2,7 @@ # Person object for Nova # inetorgperson with extra attributes # Schema version: 2 -# Authors: Vishvananda Ishaya +# Authors: Vishvananda Ishaya # Ryan Lane # # using internet experimental oid arc as per BP64 3.1 From d2b316f8c624770860c4928aa56b54c13ad6f7d2 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Sat, 11 Dec 2010 15:23:40 -0500 Subject: [PATCH 18/55] First round of i18n-ifying strings in Nova --- nova/auth/dbdriver.py | 20 ++++++------ nova/auth/fakeldap.py | 2 +- nova/auth/ldapdriver.py | 69 ++++++++++++++++++++++------------------ nova/auth/manager.py | 30 ++++++++--------- nova/fakerabbit.py | 12 +++---- nova/process.py | 2 +- nova/rpc.py | 36 ++++++++++----------- nova/scheduler/simple.py | 13 ++++---- nova/server.py | 4 +-- nova/twistd.py | 6 ++-- nova/validate.py | 12 +++---- 11 files changed, 107 insertions(+), 99 deletions(-) diff --git a/nova/auth/dbdriver.py 
b/nova/auth/dbdriver.py index a1584322..47e435cb 100644 --- a/nova/auth/dbdriver.py +++ b/nova/auth/dbdriver.py @@ -37,7 +37,6 @@ class DbDriver(object): def __init__(self): """Imports the LDAP module""" pass - db def __enter__(self): return self @@ -83,7 +82,7 @@ class DbDriver(object): user_ref = db.user_create(context.get_admin_context(), values) return self._db_user_to_auth_user(user_ref) except exception.Duplicate, e: - raise exception.Duplicate('User %s already exists' % name) + raise exception.Duplicate(_('User %s already exists') % name) def _db_user_to_auth_user(self, user_ref): return {'id': user_ref['id'], @@ -105,8 +104,9 @@ class DbDriver(object): """Create a project""" manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: - raise exception.NotFound("Project can't be created because " - "manager %s doesn't exist" % manager_uid) + raise exception.NotFound(_("Project can't be created because " + "manager %s doesn't exist") + % manager_uid) # description is a required attribute if description is None: @@ -133,8 +133,8 @@ class DbDriver(object): try: project = db.project_create(context.get_admin_context(), values) except exception.Duplicate: - raise exception.Duplicate("Project can't be created because " - "project %s already exists" % name) + raise exception.Duplicate(_("Project can't be created because " + "project %s already exists") % name) for member in members: db.project_add_member(context.get_admin_context(), @@ -155,8 +155,8 @@ class DbDriver(object): if manager_uid: manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: - raise exception.NotFound("Project can't be modified because " - "manager %s doesn't exist" % + raise exception.NotFound(_("Project can't be modified because " + "manager %s doesn't exist") % manager_uid) values['project_manager'] = manager['id'] if description: @@ -243,8 +243,8 @@ class DbDriver(object): def _validate_user_and_project(self, user_id, project_id): user = db.user_get(context.get_admin_context(), user_id) if not user: - raise exception.NotFound('User "%s" not found' % user_id) + raise exception.NotFound(_('User "%s" not found') % user_id) project = db.project_get(context.get_admin_context(), project_id) if not project: - raise exception.NotFound('Project "%s" not found' % project_id) + raise exception.NotFound(_('Project "%s" not found') % project_id) return user, project diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index 46e0135b..cdab96b7 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -39,7 +39,7 @@ flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') class Redis(object): def __init__(self): if hasattr(self.__class__, '_instance'): - raise Exception('Attempted to instantiate singleton') + raise Exception(_('Attempted to instantiate singleton')) @classmethod def instance(cls): diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index c10939d7..e289ea5a 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -159,7 +159,7 @@ class LdapDriver(object): self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: - raise exception.NotFound("LDAP object for %s doesn't exist" + raise exception.NotFound(_("LDAP object for %s doesn't exist") % name) else: attr = [ @@ -182,11 +182,12 @@ class LdapDriver(object): description=None, member_uids=None): """Create a project""" if self.__project_exists(name): - raise exception.Duplicate("Project can't be created because " - "project %s already exists" % name) + 
raise exception.Duplicate(_("Project can't be created because " + "project %s already exists") % name) if not self.__user_exists(manager_uid): - raise exception.NotFound("Project can't be created because " - "manager %s doesn't exist" % manager_uid) + raise exception.NotFound(_("Project can't be created because " + "manager %s doesn't exist") + % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) # description is a required attribute if description is None: @@ -195,8 +196,8 @@ class LdapDriver(object): if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Project can't be created " - "because user %s doesn't exist" + raise exception.NotFound(_("Project can't be created " + "because user %s doesn't exist") % member_uid) members.append(self.__uid_to_dn(member_uid)) # always add the manager as a member because members is required @@ -218,9 +219,9 @@ class LdapDriver(object): attr = [] if manager_uid: if not self.__user_exists(manager_uid): - raise exception.NotFound("Project can't be modified because " - "manager %s doesn't exist" % - manager_uid) + raise exception.NotFound(_("Project can't be modified because " + "manager %s doesn't exist") + % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn)) if description: @@ -416,8 +417,9 @@ class LdapDriver(object): if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Group can't be created " - "because user %s doesn't exist" % member_uid) + raise exception.NotFound(_("Group can't be created " + "because user %s doesn't exist") + % member_uid) members.append(self.__uid_to_dn(member_uid)) dn = self.__uid_to_dn(uid) if not dn in members: @@ -432,8 +434,9 @@ class LdapDriver(object): def __is_in_group(self, uid, group_dn): """Check if user is in group""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be searched in group " - "becuase the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be searched in group " + "because the user doesn't exist") + % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -444,28 +447,30 @@ class LdapDriver(object): def __add_to_group(self, uid, group_dn): """Add user to group""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be added to the group " - "becuase the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be added to the group " + "because the user doesn't exist") + % uid) if not self.__group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + raise exception.NotFound(_("The group at dn %s doesn't exist") + % group_dn) if self.__is_in_group(uid, group_dn): - raise exception.Duplicate("User %s is already a member of " - "the group %s" % (uid, group_dn)) + raise exception.Duplicate(_("User %s is already a member of " + "the group %s") % (uid, group_dn)) attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))] self.conn.modify_s(group_dn, attr) def __remove_from_group(self, uid, group_dn): """Remove user from group""" if not self.__group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + raise exception.NotFound(_("The group at dn %s doesn't exist") + % group_dn) if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be removed from the " - "group 
because the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be removed from the " + "group because the user doesn't exist") + % uid) if not self.__is_in_group(uid, group_dn): - raise exception.NotFound("User %s is not a member of the group" % - (uid,)) + raise exception.NotFound(_("User %s is not a member of the group") + % uid) # NOTE(vish): remove user from group and any sub_groups sub_dns = self.__find_group_dns_with_member( group_dn, uid) @@ -479,15 +484,16 @@ class LdapDriver(object): try: self.conn.modify_s(group_dn, attr) except self.ldap.OBJECT_CLASS_VIOLATION: - logging.debug("Attempted to remove the last member of a group. " - "Deleting the group at %s instead.", group_dn) + logging.debug(_("Attempted to remove the last member of a group. " + "Deleting the group at %s instead."), group_dn) self.__delete_group(group_dn) def __remove_from_all(self, uid): """Remove user from all roles and projects""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be removed from all " + "because the user doesn't exist") + % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -500,7 +506,8 @@ class LdapDriver(object): def __delete_group(self, group_dn): """Delete Group""" if not self.__group_exists(group_dn): - raise exception.NotFound("Group at dn %s doesn't exist" % group_dn) + raise exception.NotFound(_("Group at dn %s doesn't exist") + % group_dn) self.conn.delete_s(group_dn) def __delete_roles(self, project_dn): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 11c3bd6d..417f2b76 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -257,12 +257,12 @@ class AuthManager(object): # TODO(vish): check for valid timestamp (access_key, _sep, project_id) = access.partition(':') - logging.info('Looking up user: %r', access_key) + logging.info(_('Looking up user: %r'), access_key) user = self.get_user_from_access_key(access_key) logging.info('user: %r', user) if user == None: - raise exception.NotFound('No user found for access key %s' % - access_key) + raise exception.NotFound(_('No user found for access key %s') + % access_key) # NOTE(vish): if we stop using project name as id we need better # logic to find a default project for user @@ -271,12 +271,12 @@ class AuthManager(object): project = self.get_project(project_id) if project == None: - raise exception.NotFound('No project called %s could be found' % - project_id) + raise exception.NotFound(_('No project called %s could be found') + % project_id) if not self.is_admin(user) and not self.is_project_member(user, project): - raise exception.NotFound('User %s is not a member of project %s' % - (user.id, project.id)) + raise exception.NotFound(_('User %s is not a member of project %s') + % (user.id, project.id)) if check_type == 's3': sign = signer.Signer(user.secret.encode()) expected_signature = sign.s3_authorization(headers, verb, path) @@ -284,7 +284,7 @@ class AuthManager(object): logging.debug('expected_signature: %s', expected_signature) logging.debug('signature: %s', signature) if signature != expected_signature: - raise exception.NotAuthorized('Signature does not match') + raise exception.NotAuthorized(_('Signature does not match')) elif check_type == 'ec2': # NOTE(vish): hmac can't handle unicode, so encode ensures that # secret isn't unicode @@ -294,7 +294,7 @@ class AuthManager(object): 
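# A minimal illustrative sketch (not taken from this patch): the _() wrappers
# introduced throughout this change rely on a gettext translation function
# having been installed into builtins before these modules run. One common way
# to do that; the 'nova' domain name here is an assumption for illustration:

import gettext

gettext.install('nova', unicode=1)    # installs _() into builtins
print _('No hosts found')             # translated if a catalog exists,
                                      # otherwise the original string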
logging.debug('expected_signature: %s', expected_signature) logging.debug('signature: %s', signature) if signature != expected_signature: - raise exception.NotAuthorized('Signature does not match') + raise exception.NotAuthorized(_('Signature does not match')) return (user, project) def get_access_key(self, user, project): @@ -364,7 +364,7 @@ class AuthManager(object): with self.driver() as drv: if role == 'projectmanager': if not project: - raise exception.Error("Must specify project") + raise exception.Error(_("Must specify project")) return self.is_project_manager(user, project) global_role = drv.has_role(User.safe_id(user), @@ -398,9 +398,9 @@ class AuthManager(object): @param project: Project in which to add local role. """ if role not in FLAGS.allowed_roles: - raise exception.NotFound("The %s role can not be found" % role) + raise exception.NotFound(_("The %s role can not be found") % role) if project is not None and role in FLAGS.global_roles: - raise exception.NotFound("The %s role is global only" % role) + raise exception.NotFound(_("The %s role is global only") % role) with self.driver() as drv: drv.add_role(User.safe_id(user), role, Project.safe_id(project)) @@ -546,7 +546,8 @@ class AuthManager(object): Project.safe_id(project)) if not network_ref['vpn_public_port']: - raise exception.NotFound('project network data has not been set') + raise exception.NotFound(_('project network data has not ' + 'been set')) return (network_ref['vpn_public_address'], network_ref['vpn_public_port']) @@ -659,8 +660,7 @@ class AuthManager(object): port=vpn_port) zippy.writestr(FLAGS.credential_vpn_file, config) else: - logging.warn("No vpn data for project %s" % - pid) + logging.warn(_("No vpn data for project %s"), pid) zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id)) zippy.close() diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index c6461793..41e686cf 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -37,12 +37,12 @@ class Exchange(object): self._routes = {} def publish(self, message, routing_key=None): - logging.debug('(%s) publish (key: %s) %s', + logging.debug(_('(%s) publish (key: %s) %s'), self.name, routing_key, message) routing_key = routing_key.split('.')[0] if routing_key in self._routes: for f in self._routes[routing_key]: - logging.debug('Publishing to route %s', f) + logging.debug(_('Publishing to route %s'), f) f(message, routing_key=routing_key) def bind(self, callback, routing_key): @@ -82,16 +82,16 @@ class Backend(object): def queue_declare(self, queue, **kwargs): if queue not in self._queues: - logging.debug('Declaring queue %s', queue) + logging.debug(_('Declaring queue %s'), queue) self._queues[queue] = Queue(queue) def exchange_declare(self, exchange, type, *args, **kwargs): if exchange not in self._exchanges: - logging.debug('Declaring exchange %s', exchange) + logging.debug(_('Declaring exchange %s'), exchange) self._exchanges[exchange] = Exchange(exchange, type) def queue_bind(self, queue, exchange, routing_key, **kwargs): - logging.debug('Binding %s to %s with key %s', + logging.debug(_('Binding %s to %s with key %s'), queue, exchange, routing_key) self._exchanges[exchange].bind(self._queues[queue].push, routing_key) @@ -117,7 +117,7 @@ class Backend(object): content_type=content_type, content_encoding=content_encoding) message.result = True - logging.debug('Getting from %s: %s', queue, message) + logging.debug(_('Getting from %s: %s'), queue, message) return message def prepare_message(self, message_data, delivery_mode, diff --git 
a/nova/process.py b/nova/process.py index b33df048..25b6723e 100644 --- a/nova/process.py +++ b/nova/process.py @@ -131,7 +131,7 @@ def get_process_output(executable, args=None, env=None, path=None, cmd = executable if args: cmd = " ".join([cmd] + args) - logging.debug("Running cmd: %s", cmd) + logging.debug(_("Running cmd: %s"), cmd) process_handler = BackRelayWithInput( deferred, cmd, diff --git a/nova/rpc.py b/nova/rpc.py index 86a29574..cc3c7dfc 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -91,15 +91,15 @@ class Consumer(messaging.Consumer): self.failed_connection = False break except: # Catching all because carrot sucks - logging.exception("AMQP server on %s:%d is unreachable." \ - " Trying again in %d seconds." % ( + logging.exception(_("AMQP server on %s:%d is unreachable." + " Trying again in %d seconds.") % ( FLAGS.rabbit_host, FLAGS.rabbit_port, FLAGS.rabbit_retry_interval)) self.failed_connection = True if self.failed_connection: - logging.exception("Unable to connect to AMQP server" \ - " after %d tries. Shutting down." % FLAGS.rabbit_max_retries) + logging.exception(_("Unable to connect to AMQP server" + " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): @@ -116,14 +116,14 @@ class Consumer(messaging.Consumer): self.declare() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) if self.failed_connection: - logging.error("Reconnected to queue") + logging.error(_("Reconnected to queue")) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't # exceptions to be logged 10 times a second if some # persistent failure occurs. except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: - logging.exception("Failed to fetch message from queue") + logging.exception(_("Failed to fetch message from queue")) self.failed_connection = True def attach_to_eventlet(self): @@ -161,7 +161,7 @@ class TopicConsumer(Consumer): class AdapterConsumer(TopicConsumer): """Calls methods on a proxy object based on method and args""" def __init__(self, connection=None, topic="broadcast", proxy=None): - LOG.debug('Initing the Adapter Consumer for %s' % (topic)) + LOG.debug(_('Initing the Adapter Consumer for %s') % (topic)) self.proxy = proxy super(AdapterConsumer, self).__init__(connection=connection, topic=topic) @@ -176,7 +176,7 @@ class AdapterConsumer(TopicConsumer): Example: {'method': 'echo', 'args': {'value': 42}} """ - LOG.debug('received %s' % (message_data)) + LOG.debug(_('received %s') % (message_data)) msg_id = message_data.pop('_msg_id', None) ctxt = _unpack_context(message_data) @@ -189,8 +189,8 @@ class AdapterConsumer(TopicConsumer): # messages stay in the queue indefinitely, so for now # we just log the message and send an error string # back to the caller - LOG.warn('no method for message: %s' % (message_data)) - msg_reply(msg_id, 'No method for message: %s' % message_data) + LOG.warn(_('no method for message: %s') % (message_data)) + msg_reply(msg_id, _('No method for message: %s') % message_data) return node_func = getattr(self.proxy, str(method)) @@ -246,7 +246,7 @@ def msg_reply(msg_id, reply=None, failure=None): if failure: message = failure.getErrorMessage() traceback = failure.getTraceback() - logging.error("Returning exception %s to caller", message) + logging.error(_("Returning exception %s to caller"), message) logging.error(traceback) failure = (failure.type.__name__, str(failure.value), traceback) conn = 
Connection.instance() @@ -287,7 +287,7 @@ def _unpack_context(msg): if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value - LOG.debug('unpacked context: %s', context_dict) + LOG.debug(_('unpacked context: %s'), context_dict) return context.RequestContext.from_dict(context_dict) @@ -306,10 +306,10 @@ def _pack_context(msg, context): def call(context, topic, msg): """Sends a message on a topic and wait for a response""" - LOG.debug("Making asynchronous call...") + LOG.debug(_("Making asynchronous call...")) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) - LOG.debug("MSG_ID is %s" % (msg_id)) + LOG.debug(_("MSG_ID is %s") % (msg_id)) _pack_context(msg, context) class WaitMessage(object): @@ -345,7 +345,7 @@ def call_twisted(context, topic, msg): LOG.debug("Making asynchronous call...") msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) - LOG.debug("MSG_ID is %s" % (msg_id)) + LOG.debug(_("MSG_ID is %s") % (msg_id)) _pack_context(msg, context) conn = Connection.instance() @@ -384,7 +384,7 @@ def cast(context, topic, msg): def generic_response(message_data, message): """Logs a result and exits""" - LOG.debug('response %s', message_data) + LOG.debug(_('response %s'), message_data) message.ack() sys.exit(0) @@ -393,8 +393,8 @@ def send_message(topic, message, wait=True): """Sends a message for testing""" msg_id = uuid.uuid4().hex message.update({'_msg_id': msg_id}) - LOG.debug('topic is %s', topic) - LOG.debug('message %s', message) + LOG.debug(_('topic is %s'), topic) + LOG.debug(_('message %s'), message) if wait: consumer = messaging.Consumer(connection=Connection.instance(), diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index 7f509365..f9171ab3 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -47,7 +47,7 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, instance_cores) = result if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores: - raise driver.NoValidHost("All hosts have too many cores") + raise driver.NoValidHost(_("All hosts have too many cores")) if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow @@ -57,7 +57,7 @@ class SimpleScheduler(chance.ChanceScheduler): {'host': service['host'], 'scheduled_at': now}) return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): """Picks a host that is up and has the fewest volumes.""" @@ -66,7 +66,8 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, volume_gigabytes) = result if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes: - raise driver.NoValidHost("All hosts have too many gigabytes") + raise driver.NoValidHost(_("All hosts have too many " + "gigabytes")) if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow @@ -76,7 +77,7 @@ class SimpleScheduler(chance.ChanceScheduler): {'host': service['host'], 'scheduled_at': now}) return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) def schedule_set_network_host(self, context, *_args, **_kwargs): """Picks a host that is up and has the fewest networks.""" @@ -85,7 +86,7 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, instance_count) = result if instance_count >= FLAGS.max_networks: - 
raise driver.NoValidHost("All hosts have too many networks") + raise driver.NoValidHost(_("All hosts have too many networks")) if self.service_is_up(service): return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) diff --git a/nova/server.py b/nova/server.py index a0ee5468..e5ce4475 100644 --- a/nova/server.py +++ b/nova/server.py @@ -58,7 +58,7 @@ def stop(pidfile): try: pid = int(open(pidfile, 'r').read().strip()) except IOError: - message = "pidfile %s does not exist. Daemon not running?\n" + message = _("pidfile %s does not exist. Daemon not running?\n") sys.stderr.write(message % pidfile) return @@ -84,7 +84,7 @@ def serve(name, main): if not FLAGS.pidfile: FLAGS.pidfile = '%s.pid' % name - logging.debug("Full set of FLAGS: \n\n\n") + logging.debug(_("Full set of FLAGS: \n\n\n")) for flag in FLAGS: logging.debug("%s : %s", flag, FLAGS.get(flag, None)) diff --git a/nova/twistd.py b/nova/twistd.py index cb5648ce..c5b7fed8 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -208,7 +208,7 @@ def stop(pidfile): pid = None if not pid: - message = "pidfile %s does not exist. Daemon not running?\n" + message = _("pidfile %s does not exist. Daemon not running?\n") sys.stderr.write(message % pidfile) # Not an error in a restart return @@ -229,7 +229,7 @@ def stop(pidfile): def serve(filename): - logging.debug("Serving %s" % filename) + logging.debug(_("Serving %s") % filename) name = os.path.basename(filename) OptionsClass = WrapTwistedOptions(TwistdServerOptions) options = OptionsClass() @@ -281,7 +281,7 @@ def serve(filename): else: logging.getLogger().setLevel(logging.WARNING) - logging.debug("Full set of FLAGS:") + logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) diff --git a/nova/validate.py b/nova/validate.py index 7ea27daa..49578a24 100644 --- a/nova/validate.py +++ b/nova/validate.py @@ -42,7 +42,7 @@ def rangetest(**argchecks): # was passed by name if float(kargs[argname]) < low or \ float(kargs[argname]) > high: - errmsg = '{0} argument "{1}" not in {2}..{3}' + errmsg = _('{0} argument "{1}" not in {2}..{3}') errmsg = errmsg.format(funcname, argname, low, high) raise TypeError(errmsg) @@ -51,8 +51,8 @@ def rangetest(**argchecks): position = positionals.index(argname) if float(pargs[position]) < low or \ float(pargs[position]) > high: - errmsg = '{0} argument "{1}" with value of {4} ' \ - 'not in {2}..{3}' + errmsg = _('{0} argument "{1}" with value of {4} ' + 'not in {2}..{3}') errmsg = errmsg.format(funcname, argname, low, high, pargs[position]) raise TypeError(errmsg) @@ -76,14 +76,14 @@ def typetest(**argchecks): for (argname, typeof) in argchecks.items(): if argname in kargs: if not isinstance(kargs[argname], typeof): - errmsg = '{0} argument "{1}" not of type {2}' + errmsg = _('{0} argument "{1}" not of type {2}') errmsg = errmsg.format(funcname, argname, typeof) raise TypeError(errmsg) elif argname in positionals: position = positionals.index(argname) if not isinstance(pargs[position], typeof): - errmsg = '{0} argument "{1}" with value of {2} ' \ - 'not of type {3}' + errmsg = _('{0} argument "{1}" with value of {2} ' + 'not of type {3}') errmsg = errmsg.format(funcname, argname, pargs[position], typeof) raise TypeError(errmsg) From 2a751f29a5e4b1146e5be9f2ff8f2144d85a8790 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 18:22:56 +0000 Subject: [PATCH 19/55] second round for unit testing framework --- nova/tests/virt_unittest.py | 
65 +++++++++++++++++++++++++++---------- 1 file changed, 47 insertions(+), 18 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index ba3fba83..61102263 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -30,6 +30,9 @@ from nova.virt.xenapi import volume_utils FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') +# Those are XenAPI related +flags.DECLARE('target_host', 'nova.virt.xenapi_conn') +FLAGS.target_host = '127.0.0.1' class LibvirtConnTestCase(test.TrialTestCase): @@ -270,36 +273,62 @@ class XenAPIVolumeTestCase(test.TrialTestCase): self.helper = volume_utils.VolumeHelper self.helper.late_import() + def _create_volume(self, size='0'): + """Create a volume object.""" + vol = {} + vol['size'] = size + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['host'] = 'localhost' + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = "creating" + vol['attach_status'] = "detached" + return db.volume_create(context.get_admin_context(), vol) + def test_create_iscsi_storage_raise_no_exception(self): - info = self.helper.parse_volume_info(None, None) - label = 'SR-' - description = '' - self.helper.create_iscsi_storage_blocking(self.session, - info, - label, - description) + vol = self._create_volume() + info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') + label = None # For testing new SRs + description = 'Test-SR' + self.session.fail_next_call = False + sr_ref = self.helper.create_iscsi_storage_blocking(self.session, + info, + label, + description) + self.assertEqual(sr_ref, self.session.SR.FAKE_REF) + db.volume_destroy(context.get_admin_context(), vol['id']) def test_create_iscsi_storage_raise_unable_to_create_sr_exception(self): - info = self.helper.parse_volume_info(None, None) - label = None + vol = self._create_volume() + info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') + label = None # For testing new SRs description = None - self.assertFailure(self.helper.create_iscsi_storage_blocking(self.session, - info, - label, - description), - StorageError) + self.session.fail_next_call = True + self.assertRaises(volume_utils.StorageError, + self.helper.create_iscsi_storage_blocking, + self.session, + info, + label, + description) def test_find_sr_from_vbd_raise_no_exception(self): - pass + sr_ref = yield self.helper.find_sr_from_vbd(self.session, + self.session.VBD.FAKE_REF) + self.assertEqual(sr_ref, self.session.SR.FAKE_REF) - def test_destroy_iscsi_storage_raise_no_exception(self): + def test_destroy_iscsi_storage(self): pass def test_introduce_vdi_raise_no_exception(self): - pass + sr_ref = self.session.SR.FAKE_REF + self.helper.introduce_vdi_blocking(self.session, sr_ref) def test_introduce_vdi_raise_unable_get_vdi_record_exception(self): - pass + sr_ref = self.session.SR.FAKE_REF + self.session.fail_next_call = True + self.assertRaises(volume_utils.StorageError, + self.helper.introduce_vdi_blocking, + self.session, sr_ref) def tearDown(self): super(XenAPIVolumeTestCase, self).tearDown() From 478fb2844eaf0f04c578fd4d109b712797f9251a Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 18:43:24 +0000 Subject: [PATCH 20/55] removing imports that should have not been there --- nova/tests/virt_unittest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 61102263..1095662c 100644 --- a/nova/tests/virt_unittest.py +++ 
b/nova/tests/virt_unittest.py @@ -317,7 +317,8 @@ class XenAPIVolumeTestCase(test.TrialTestCase): self.assertEqual(sr_ref, self.session.SR.FAKE_REF) def test_destroy_iscsi_storage(self): - pass + sr_ref = self.session.SR.FAKE_REF + self.helper.destroy_iscsi_storage_blocking(self.session, sr_ref) def test_introduce_vdi_raise_no_exception(self): sr_ref = self.session.SR.FAKE_REF From 9ec2c03b2220fa07f6ed7297d35712a47b4b791b Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 20:31:33 +0000 Subject: [PATCH 21/55] moving xenapi unittests changes into another branch --- nova/tests/virt_unittest.py | 76 ------------------------------------- 1 file changed, 76 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 1095662c..d49383fb 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -25,14 +25,9 @@ from nova import utils from nova.api.ec2 import cloud from nova.auth import manager from nova.virt import libvirt_conn -from nova.virt.xenapi import fake -from nova.virt.xenapi import volume_utils FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') -# Those are XenAPI related -flags.DECLARE('target_host', 'nova.virt.xenapi_conn') -FLAGS.target_host = '127.0.0.1' class LibvirtConnTestCase(test.TrialTestCase): @@ -262,74 +257,3 @@ class NWFilterTestCase(test.TrialTestCase): d.addCallback(lambda _: self.teardown_security_group()) return d - - -class XenAPIVolumeTestCase(test.TrialTestCase): - - def setUp(self): - super(XenAPIVolumeTestCase, self).setUp() - self.flags(xenapi_use_fake_session=True) - self.session = fake.FakeXenAPISession() - self.helper = volume_utils.VolumeHelper - self.helper.late_import() - - def _create_volume(self, size='0'): - """Create a volume object.""" - vol = {} - vol['size'] = size - vol['user_id'] = 'fake' - vol['project_id'] = 'fake' - vol['host'] = 'localhost' - vol['availability_zone'] = FLAGS.storage_availability_zone - vol['status'] = "creating" - vol['attach_status'] = "detached" - return db.volume_create(context.get_admin_context(), vol) - - def test_create_iscsi_storage_raise_no_exception(self): - vol = self._create_volume() - info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') - label = None # For testing new SRs - description = 'Test-SR' - self.session.fail_next_call = False - sr_ref = self.helper.create_iscsi_storage_blocking(self.session, - info, - label, - description) - self.assertEqual(sr_ref, self.session.SR.FAKE_REF) - db.volume_destroy(context.get_admin_context(), vol['id']) - - def test_create_iscsi_storage_raise_unable_to_create_sr_exception(self): - vol = self._create_volume() - info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') - label = None # For testing new SRs - description = None - self.session.fail_next_call = True - self.assertRaises(volume_utils.StorageError, - self.helper.create_iscsi_storage_blocking, - self.session, - info, - label, - description) - - def test_find_sr_from_vbd_raise_no_exception(self): - sr_ref = yield self.helper.find_sr_from_vbd(self.session, - self.session.VBD.FAKE_REF) - self.assertEqual(sr_ref, self.session.SR.FAKE_REF) - - def test_destroy_iscsi_storage(self): - sr_ref = self.session.SR.FAKE_REF - self.helper.destroy_iscsi_storage_blocking(self.session, sr_ref) - - def test_introduce_vdi_raise_no_exception(self): - sr_ref = self.session.SR.FAKE_REF - self.helper.introduce_vdi_blocking(self.session, sr_ref) - - def test_introduce_vdi_raise_unable_get_vdi_record_exception(self): - 
sr_ref = self.session.SR.FAKE_REF - self.session.fail_next_call = True - self.assertRaises(volume_utils.StorageError, - self.helper.introduce_vdi_blocking, - self.session, sr_ref) - - def tearDown(self): - super(XenAPIVolumeTestCase, self).tearDown() From ff17b400bb700d5ce587448a126e76bb25607efa Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 14 Dec 2010 15:33:18 +0100 Subject: [PATCH 22/55] Now that we have a templating engine, let's use it. Consolidate all the libvirt templates into one, extending the unit tests to make sure I didn't mess up. --- nova/tests/virt_unittest.py | 131 +++++++++++++++++++++++++++++------- 1 file changed, 108 insertions(+), 23 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index d49383fb..bcc995a5 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -40,19 +40,53 @@ class LibvirtConnTestCase(test.TrialTestCase): self.network = utils.import_object(FLAGS.network_manager) FLAGS.instances_path = '' - def test_get_uri_and_template(self): - ip = '10.11.12.13' + test_ip = '10.11.12.13' + test_instance = { + 'memory_kb' : '1024000', + 'basepath' : '/some/path', + 'bridge_name' : 'br100', + 'mac_address' : '02:12:34:46:56:67', + 'vcpus' : 2, + 'project_id' : 'fake', + 'bridge' : 'br101', + 'instance_type' : 'm1.small'} - instance = {'internal_id': 1, - 'memory_kb': '1024000', - 'basepath': '/some/path', - 'bridge_name': 'br100', - 'mac_address': '02:12:34:46:56:67', - 'vcpus': 2, - 'project_id': 'fake', - 'bridge': 'br101', - 'instance_type': 'm1.small'} + def test_xml_and_uri_no_ramdisk_no_kernel(self): + instance_data = dict(self.test_instance) + self.do_test_xml_and_uri(instance_data, + expect_kernel=False, expect_ramdisk=False) + def test_xml_and_uri_no_ramdisk(self): + instance_data = dict(self.test_instance) + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=False) + + def test_xml_and_uri_no_kernel(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=False, expect_ramdisk=False) + + def test_xml_and_uri(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=True) + + def test_xml_and_uri_rescue(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=True, + rescue=True) + + + def do_test_xml_and_uri(self, instance, + expect_ramdisk, expect_kernel, + rescue=False): user_context = context.RequestContext(project=self.project, user=self.user) instance_ref = db.instance_create(user_context, instance) @@ -60,13 +94,14 @@ class LibvirtConnTestCase(test.TrialTestCase): self.network.set_network_host(context.get_admin_context(), network_ref['id']) - fixed_ip = {'address': ip, - 'network_id': network_ref['id']} + fixed_ip = { 'address' : self.test_ip, + 'network_id' : network_ref['id'] } ctxt = context.get_admin_context() fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) - db.fixed_ip_update(ctxt, ip, {'allocated': True, - 'instance_id': instance_ref['id']}) + db.fixed_ip_update(ctxt, self.test_ip, + { 'allocated': True, + 'instance_id': instance_ref['id'] }) type_uri_map = {'qemu': ('qemu:///system', 
[(lambda t: t.find('.').get('type'), 'qemu'), @@ -78,23 +113,71 @@ class LibvirtConnTestCase(test.TrialTestCase): (lambda t: t.find('./devices/emulator'), None)]), 'uml': ('uml:///system', [(lambda t: t.find('.').get('type'), 'uml'), - (lambda t: t.find('./os/type').text, 'uml')])} + (lambda t: t.find('./os/type').text, 'uml')]), + 'xen': ('xen:///', + [(lambda t: t.find('.').get('type'), 'xen'), + (lambda t: t.find('./os/type').text, 'linux')]), + } + + for hypervisor_type in ['qemu', 'kvm', 'xen']: + check_list = type_uri_map[hypervisor_type][1] + + if rescue: + check = (lambda t: t.find('./os/kernel').text.split('/')[1], + 'rescue-kernel') + check_list.append(check) + check = (lambda t: t.find('./os/initrd').text.split('/')[1], + 'rescue-ramdisk') + check_list.append(check) + else: + if expect_kernel: + check = (lambda t: t.find('./os/kernel').text.split('/')[1], + 'kernel') + else: + check = (lambda t: t.find('./os/kernel'), None) + check_list.append(check) + + if expect_ramdisk: + check = (lambda t: t.find('./os/initrd').text.split('/')[1], + 'ramdisk') + else: + check = (lambda t: t.find('./os/initrd'), None) + check_list.append(check) common_checks = [ (lambda t: t.find('.').tag, 'domain'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('name'), 'IP'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('value'), '10.11.12.13')] + (lambda t: t.find('./devices/interface/filterref/parameter' + ).get('name'), 'IP'), + (lambda t: t.find('./devices/interface/filterref/parameter' + ).get('value'), '10.11.12.13'), + (lambda t: t.findall('./devices/interface/filterref/parameter' + )[1].get('name'), 'DHCPSERVER'), + (lambda t: t.findall('./devices/interface/filterref/parameter' + )[1].get('value'), '10.0.0.1'), + (lambda t: t.find('./devices/serial/source').get('path' + ).split('/')[1], 'console.log'), + (lambda t: t.find('./memory').text, '2097152')] + + if rescue: + common_checks += [(lambda t: t.findall('./devices/disk/source' + )[0].get('file').split('/')[1], + 'rescue-disk'), + (lambda t: t.findall('./devices/disk/source' + )[1].get('file').split('/')[1], + 'disk')] + else: + common_checks += [(lambda t: t.findall('./devices/disk/source' + )[0].get('file').split('/')[1], + 'disk')] for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type conn = libvirt_conn.LibvirtConnection(True) - uri, _template, _rescue = conn.get_uri_and_templates() + uri = conn.get_uri() self.assertEquals(uri, expected_uri) - xml = conn.to_xml(instance_ref) + xml = conn.to_xml(instance_ref, rescue) tree = xml_to_tree(xml) for i, (check, expected_result) in enumerate(checks): self.assertEqual(check(tree), @@ -106,6 +189,8 @@ class LibvirtConnTestCase(test.TrialTestCase): expected_result, '%s failed common check %d' % (xml, i)) + # This test is supposed to make sure we don't override a specifically set uri + # # Deliberately not just assigning this string to FLAGS.libvirt_uri and # checking against that later on. This way we make sure the # implementation doesn't fiddle around with the FLAGS. 
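# A small self-contained sketch (with invented XML, not taken from the templates
# under test) of what the (check, expected_result) pairs above evaluate: each
# check is just an ElementTree lookup applied to the parsed domain XML.

from xml.etree import ElementTree

sample_xml = ("<domain type='kvm'>"
              "<os><type>hvm</type><kernel>/some/path/kernel</kernel></os>"
              "</domain>")
root = ElementTree.fromstring(sample_xml)
assert root.get('type') == 'kvm'
assert root.find('./os/type').text == 'hvm'
assert root.find('./os/kernel').text.split('/')[-1] == 'kernel'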
@@ -114,7 +199,7 @@ class LibvirtConnTestCase(test.TrialTestCase): for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type conn = libvirt_conn.LibvirtConnection(True) - uri, _template, _rescue = conn.get_uri_and_templates() + uri = conn.get_uri() self.assertEquals(uri, testuri) def tearDown(self): From 39b82d7804d6a7f1e65172f3c898982d5b0e7360 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 14 Dec 2010 17:56:42 -0400 Subject: [PATCH 23/55] pep8 --- nova/adminclient.py | 1 + nova/process.py | 2 ++ nova/server.py | 2 +- nova/twistd.py | 2 +- 4 files changed, 5 insertions(+), 2 deletions(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index 5a62cce7..6ae9f0c0 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -194,6 +194,7 @@ class HostInfo(object): class NovaAdminClient(object): + def __init__( self, clc_url=DEFAULT_CLC_URL, diff --git a/nova/process.py b/nova/process.py index b33df048..39fddef6 100644 --- a/nova/process.py +++ b/nova/process.py @@ -40,6 +40,8 @@ flags.DEFINE_integer('process_pool_size', 4, # This is based on _BackRelay from twister.internal.utils, but modified to # capture both stdout and stderr, without odd stderr handling, and also to # handle stdin + + class BackRelayWithInput(protocol.ProcessProtocol): """ Trivial protocol for communicating with a process and turning its output diff --git a/nova/server.py b/nova/server.py index a0ee5468..3b908617 100644 --- a/nova/server.py +++ b/nova/server.py @@ -42,7 +42,7 @@ flags.DEFINE_bool('daemonize', False, 'daemonize this process') # clutter. flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing') flags.DEFINE_string('logfile', None, 'log file to output to') -flags.DEFINE_string('logdir', None, 'directory to keep log files in ' +flags.DEFINE_string('logdir', None, 'directory to keep log files in ' '(will be prepended to $logfile)') flags.DEFINE_string('pidfile', None, 'pid file to output to') flags.DEFINE_string('working_directory', './', 'working directory...') diff --git a/nova/twistd.py b/nova/twistd.py index cb5648ce..e6c3101f 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -43,7 +43,7 @@ else: FLAGS = flags.FLAGS -flags.DEFINE_string('logdir', None, 'directory to keep log files in ' +flags.DEFINE_string('logdir', None, 'directory to keep log files in ' '(will be prepended to $logfile)') From 2aab09ab60a8209bc5c26d905a2fd15befb83b4d Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 14 Dec 2010 23:35:54 +0100 Subject: [PATCH 24/55] Remove default_{kernel,ramdisk} flags. They are not used anymore. 
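For context, a minimal sketch of how these gflags-style definitions are declared
and read elsewhere in the tree; the flag name below is made up purely for
illustration and assumes the process entry point has already parsed FLAGS:

    from nova import flags

    FLAGS = flags.FLAGS
    flags.DEFINE_string('example_image', 'ami-00000',
                        'illustrative default, not a real nova flag')

    def pick_image(requested=None):
        # fall back to the flag's default when the caller passes nothing
        return requested or FLAGS.example_image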
--- nova/flags.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 45f5d746..5c265f4e 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -228,10 +228,6 @@ DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', DEFINE_string('default_image', 'ami-11111', 'default image to use, testing only') -DEFINE_string('default_kernel', 'aki-11111', - 'default kernel to use, testing only') -DEFINE_string('default_ramdisk', 'ari-11111', - 'default ramdisk to use, testing only') DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') DEFINE_string('null_kernel', 'nokernel', From d655e89908fa167a612b78b2768c3f972ef632d3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 15 Dec 2010 00:25:04 +0000 Subject: [PATCH 25/55] Lockout middleware for ec2 api --- nova/fakememcache.py | 50 +++++++++++++++++++ nova/tests/middleware_unittest.py | 82 +++++++++++++++++++++++++++++++ run_tests.py | 1 + 3 files changed, 133 insertions(+) create mode 100644 nova/fakememcache.py create mode 100644 nova/tests/middleware_unittest.py diff --git a/nova/fakememcache.py b/nova/fakememcache.py new file mode 100644 index 00000000..0b2e3b6c --- /dev/null +++ b/nova/fakememcache.py @@ -0,0 +1,50 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Super simple fake memcache client.""" + + +class Client(object): + """Replicates a tiny subset of memcached client interface.""" + __cache = {} + + def __init__(self, *args, **kwargs): + """Ignores all constructor params.""" + pass + + def get(self, key): + """Retrieves the value for a key or None.""" + return self.__cache.get(key, None) + + def set(self, key, value): + """Sets the value for a key.""" + self.__cache[key] = value + return True + + def add(self, key, value): + """Sets the value for a key if it doesn't exist.""" + if key in self.__cache: + return False + return self.set(key, value) + + def incr(self, key, delta=1): + """Increments the value for a key.""" + if not key in self.__cache: + return 0 + self.__cache[key] = str(int(self.__cache[key]) + 1) + return self.__cache[key] diff --git a/nova/tests/middleware_unittest.py b/nova/tests/middleware_unittest.py new file mode 100644 index 00000000..bbbd4a5a --- /dev/null +++ b/nova/tests/middleware_unittest.py @@ -0,0 +1,82 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob +import webob.dec +import webob.exc + +from nova.api import ec2 +from nova import flags +from nova import test + + +FLAGS = flags.FLAGS + + +@webob.dec.wsgify +def conditional_forbid(req): + """Helper wsgi app returns 403 if param 'die' is 1.""" + if 'die' in req.params and req.params['die'] == '1': + raise webob.exc.HTTPForbidden() + return 'OK' + + +class LockoutTestCase(test.TrialTestCase): + """Test case for the Lockout middleware.""" + def setUp(self): # pylint: disable-msg=C0103 + self.local_time = 0 + self.lockout = ec2.Lockout(conditional_forbid, + time_fn=self._constant_time) + super(LockoutTestCase, self).setUp() + + def _constant_time(self): + """Helper method to force timeouts.""" + return self.local_time + + def _trigger_lockout(self, access_key): + """Send x failed requests where x = lockout_attempts.""" + for i in xrange(FLAGS.lockout_attempts): + req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key) + self.assertEqual(req.get_response(self.lockout).status_int, 403) + + def _is_locked_out(self, access_key): + """Sends a test request to see if key is locked out.""" + req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key) + return (req.get_response(self.lockout).status_int == 403) + + def _timeout(self): + """Increment time to 1 second past the lockout.""" + self.local_time = 1 + self.local_time + FLAGS.lockout_minutes * 60 + + def test_lockout(self): + self._trigger_lockout('test') + self.assertTrue(self._is_locked_out('test')) + + def test_timeout(self): + self._trigger_lockout('test') + self.assertTrue(self._is_locked_out('test')) + self._timeout() + self.assertFalse(self._is_locked_out('test')) + + def test_multiple_keys(self): + self._trigger_lockout('test1') + self.assertTrue(self._is_locked_out('test1')) + self.assertFalse(self._is_locked_out('test2')) + self._timeout() + self.assertFalse(self._is_locked_out('test1')) + self.assertFalse(self._is_locked_out('test2')) diff --git a/run_tests.py b/run_tests.py index 37a548e4..a0ef3fd9 100644 --- a/run_tests.py +++ b/run_tests.py @@ -57,6 +57,7 @@ from nova.tests.auth_unittest import * from nova.tests.cloud_unittest import * from nova.tests.compute_unittest import * from nova.tests.flags_unittest import * +from nova.tests.middleware_unittest import * from nova.tests.misc_unittest import * from nova.tests.network_unittest import * from nova.tests.objectstore_unittest import * From e086f8c0b18cb6c42062a52a5cf31bfead8d7494 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 15 Dec 2010 13:15:19 +0100 Subject: [PATCH 26/55] Make sure the new, consolidated template gets included. 
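The one-character change below matters because MANIFEST.in include patterns are
shell-style globs: the old pattern only matches names with something between
"libvirt." and ".xml.template", so a single consolidated template named plainly
would be skipped. A rough check with fnmatch (the file names here are
assumptions for illustration, and fnmatch only approximates distutils' matching):

    import fnmatch

    names = ['nova/virt/libvirt.xml.template',
             'nova/virt/libvirt.qemu.xml.template']
    old = fnmatch.filter(names, 'nova/virt/libvirt.*.xml.template')
    new = fnmatch.filter(names, 'nova/virt/libvirt*.xml.template')
    print old   # only the per-hypervisor name matches
    print new   # both names match, including the consolidated template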
--- MANIFEST.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index 982b727a..199ce30b 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -13,7 +13,7 @@ include nova/cloudpipe/client.ovpn.template include nova/compute/fakevirtinstance.xml include nova/compute/interfaces.template include nova/virt/interfaces.template -include nova/virt/libvirt.*.xml.template +include nova/virt/libvirt*.xml.template include nova/tests/CA/ include nova/tests/CA/cacert.pem include nova/tests/CA/private/ From 6db5d941947b224d1b56234d4eb697bf20510cca Mon Sep 17 00:00:00 2001 From: root Date: Wed, 15 Dec 2010 09:38:38 -0800 Subject: [PATCH 27/55] clean up code to use timeout instead of two keys --- nova/fakememcache.py | 38 +++++++++++++++++++------------ nova/tests/middleware_unittest.py | 27 ++++++++++++++-------- 2 files changed, 41 insertions(+), 24 deletions(-) diff --git a/nova/fakememcache.py b/nova/fakememcache.py index 0b2e3b6c..0b4037ef 100644 --- a/nova/fakememcache.py +++ b/nova/fakememcache.py @@ -18,33 +18,43 @@ """Super simple fake memcache client.""" +import time + class Client(object): """Replicates a tiny subset of memcached client interface.""" - __cache = {} - def __init__(self, *args, **kwargs): - """Ignores all constructor params.""" - pass + def __init__(self, time_fn=time.time, *args, **kwargs): + """Time fn is to allow testing through a custom function""" + self.time_fn = time_fn + self.cache = {} def get(self, key): """Retrieves the value for a key or None.""" - return self.__cache.get(key, None) + (timeout, value) = self.cache.get(key, (0, None)) + if timeout == 0 or self.time_fn() < timeout: + return value + return None - def set(self, key, value): + def set(self, key, value, time=0, min_compress_len=0): """Sets the value for a key.""" - self.__cache[key] = value + timeout = 0 + if time != 0: + timeout = self.time_fn() + time + self.cache[key] = (timeout, value) return True - def add(self, key, value): + def add(self, key, value, time=0, min_compress_len=0): """Sets the value for a key if it doesn't exist.""" - if key in self.__cache: + if not self.get(key) is None: return False - return self.set(key, value) + return self.set(key, value, time, min_compress_len) def incr(self, key, delta=1): """Increments the value for a key.""" - if not key in self.__cache: - return 0 - self.__cache[key] = str(int(self.__cache[key]) + 1) - return self.__cache[key] + value = self.get(key) + if value is None: + return None + new_value = int(value) + delta + self.cache[key] = (self.cache[key][0], str(new_value)) + return new_value diff --git a/nova/tests/middleware_unittest.py b/nova/tests/middleware_unittest.py index bbbd4a5a..61a790c1 100644 --- a/nova/tests/middleware_unittest.py +++ b/nova/tests/middleware_unittest.py @@ -48,9 +48,9 @@ class LockoutTestCase(test.TrialTestCase): """Helper method to force timeouts.""" return self.local_time - def _trigger_lockout(self, access_key): - """Send x failed requests where x = lockout_attempts.""" - for i in xrange(FLAGS.lockout_attempts): + def _send_bad_attempts(self, access_key, num_attempts=1): + """Fail x.""" + for i in xrange(num_attempts): req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key) self.assertEqual(req.get_response(self.lockout).status_int, 403) @@ -59,24 +59,31 @@ class LockoutTestCase(test.TrialTestCase): req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key) return (req.get_response(self.lockout).status_int == 403) - def _timeout(self): + def _advance_time(self, time): 
"""Increment time to 1 second past the lockout.""" - self.local_time = 1 + self.local_time + FLAGS.lockout_minutes * 60 + self.local_time = self.local_time + time def test_lockout(self): - self._trigger_lockout('test') + self._send_bad_attempts('test', FLAGS.lockout_attempts) self.assertTrue(self._is_locked_out('test')) def test_timeout(self): - self._trigger_lockout('test') + self._send_bad_attempts('test', FLAGS.lockout_attempts) self.assertTrue(self._is_locked_out('test')) - self._timeout() + self._advance_time(FLAGS.lockout_minutes * 60) self.assertFalse(self._is_locked_out('test')) def test_multiple_keys(self): - self._trigger_lockout('test1') + self._send_bad_attempts('test1', FLAGS.lockout_attempts) self.assertTrue(self._is_locked_out('test1')) self.assertFalse(self._is_locked_out('test2')) - self._timeout() + self._advance_time(FLAGS.lockout_minutes * 60) self.assertFalse(self._is_locked_out('test1')) self.assertFalse(self._is_locked_out('test2')) + + def test_window_timeout(self): + self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) + self.assertFalse(self._is_locked_out('test')) + self._advance_time(FLAGS.lockout_window * 60) + self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) + self.assertFalse(self._is_locked_out('test')) From cad11d8ad02121df6375f0a15fb39ca30925098d Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 15 Dec 2010 17:50:05 +0000 Subject: [PATCH 28/55] * code cleanup * revised unittest approach * added stubout and a number of tests --- nova/tests/xenapi_unittest.py | 136 +++++++++++++++++++++++++++++++--- 1 file changed, 126 insertions(+), 10 deletions(-) diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index c9be1732..b9955a94 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -31,7 +31,7 @@ # under the License. 
-import mox +import stubout import uuid from twisted.internet import defer @@ -51,20 +51,34 @@ from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils from nova.virt.xenapi import vm_utils from nova.virt.xenapi import volumeops +from boto.ec2.volume import Volume FLAGS = flags.FLAGS +def stubout_session(stubs, cls): + def fake_import(self): + fake_module = 'nova.virt.xenapi.fake' + from_list = ['fake'] + return __import__(fake_module, globals(), locals(), from_list, -1) + + stubs.Set(xenapi_conn.XenAPISession, '_create_session', + lambda s, url: cls(url)) + stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi', + fake_import) + + class XenAPIVolumeTestCase(test.TrialTestCase): """ - This uses Ewan's fake session approach + Unit tests for VM operations """ def setUp(self): super(XenAPIVolumeTestCase, self).setUp() - FLAGS.xenapi_use_fake_session = True + self.stubs = stubout.StubOutForTesting() FLAGS.target_host = '127.0.0.1' FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' + fake.reset() def _create_volume(self, size='0'): """Create a volume object.""" @@ -78,11 +92,12 @@ class XenAPIVolumeTestCase(test.TrialTestCase): vol['attach_status'] = "detached" return db.volume_create(context.get_admin_context(), vol) - def test_create_iscsi_storage_raise_no_exception(self): - fake.reset() + def test_create_iscsi_storage(self): + """ This shows how to test helper classes' methods """ + stubout_session(self.stubs, FakeSessionForVolumeTests) session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') helper = volume_utils.VolumeHelper - helper.late_import(FLAGS) + helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() info = yield helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') label = 'SR-%s' % vol['ec2_id'] @@ -93,8 +108,25 @@ class XenAPIVolumeTestCase(test.TrialTestCase): description) db.volume_destroy(context.get_admin_context(), vol['id']) + def test_parse_volume_info_raise_exception(self): + """ This shows how to test helper classes' methods """ + stubout_session(self.stubs, FakeSessionForVolumeTests) + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') + helper = volume_utils.VolumeHelper + helper.XenAPI = session.get_imported_xenapi() + vol = self._create_volume() + # oops, wrong mount point! 
+ info = helper.parse_volume_info(vol['ec2_id'], '/dev/sd') + + def check(exc): + self.assertIsInstance(exc.value, volume_utils.StorageError) + + info.addErrback(check) + db.volume_destroy(context.get_admin_context(), vol['id']) + def test_attach_volume(self): - fake.reset() + """ This shows how to test Ops classes' methods """ + stubout_session(self.stubs, FakeSessionForVolumeTests) conn = xenapi_conn.get_connection(False) volume = self._create_volume() instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, @@ -116,13 +148,34 @@ class XenAPIVolumeTestCase(test.TrialTestCase): result.addCallback(check) return result + def test_attach_volume_raise_exception(self): + """ This shows how to test when exceptions are raised """ + stubout_session(self.stubs, FakeSessionForVolumeFailedTests) + conn = xenapi_conn.get_connection(False) + volume = self._create_volume() + instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, + 'm1.large', 'aa:bb:cc:dd:ee:ff') + fake.create_vm(instance.name, 'Running') + result = conn.attach_volume(instance.name, volume['ec2_id'], + '/dev/sdc') + + def check(exc): + if exc: + pass + else: + self.fail('Oops, no exception has been raised!') + + result.addErrback(check) + return result + def tearDown(self): super(XenAPIVolumeTestCase, self).tearDown() + self.stubs.UnsetAll() class XenAPIVMTestCase(test.TrialTestCase): """ - This uses Ewan's fake session approach + Unit tests for VM operations """ def setUp(self): super(XenAPIVMTestCase, self).setUp() @@ -131,19 +184,20 @@ class XenAPIVMTestCase(test.TrialTestCase): admin=True) self.project = self.manager.create_project('fake', 'fake', 'fake') self.network = utils.import_object(FLAGS.network_manager) - FLAGS.xenapi_use_fake_session = True + self.stubs = stubout.StubOutForTesting() FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' fake.reset() fake.create_network('fake', FLAGS.flat_network_bridge) def test_list_instances_0(self): + stubout_session(self.stubs, FakeSessionForVMTests) conn = xenapi_conn.get_connection(False) instances = conn.list_instances() self.assertEquals(instances, []) - #test_list_instances_0.skip = "E" def test_spawn(self): + stubout_session(self.stubs, FakeSessionForVMTests) conn = xenapi_conn.get_connection(False) instance = FakeInstance(1, self.project.id, self.user.id, 1, 2, 3, 'm1.large', 'aa:bb:cc:dd:ee:ff') @@ -186,6 +240,7 @@ class XenAPIVMTestCase(test.TrialTestCase): super(XenAPIVMTestCase, self).tearDown() self.manager.delete_project(self.project) self.manager.delete_user(self.user) + self.stubs.UnsetAll() class FakeInstance(): @@ -199,3 +254,64 @@ class FakeInstance(): self.ramdisk_id = ramdisk_id self.instance_type = instance_type self.mac_address = mac_address + + +class FakeSessionForVMTests(fake.SessionBase): + def __init__(self, uri): + super(FakeSessionForVMTests, self).__init__(uri) + + def network_get_all_records_where(self, _1, _2): + return self.xenapi.network.get_all_records() + + def host_call_plugin(self, _1, _2, _3, _4, _5): + return '' + + def VM_start(self, _1, ref, _2, _3): + vm = fake.get_record('VM', ref) + if vm['power_state'] != 'Halted': + raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', + vm['power_state']]) + vm['power_state'] = 'Running' + + +class FakeSessionForVolumeTests(fake.SessionBase): + def __init__(self, uri): + super(FakeSessionForVolumeTests, self).__init__(uri) + + def VBD_plug(self, _1, _2): + #FIXME(armando):make proper plug + pass + + def PBD_unplug(self, _1, _2): + #FIXME(armando):make proper unplug + pass + + 
def SR_forget(self, _1, _2): + #FIXME(armando):make proper forget + pass + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + #FIXME(armando):make proper introduce + valid_vdi = False + refs = fake.get_all('VDI') + for ref in refs: + rec = fake.get_record('VDI', ref) + if rec['uuid'] == uuid: + valid_vdi = True + if not valid_vdi: + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + +class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): + def __init__(self, uri): + super(FakeSessionForVolumeFailedTests, self).__init__(uri) + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + # test failure + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + def VBD_plug(self, _1, _2): + # test failure + raise fake.Failure([['INVALID_VBD', 'session', self._session]]) From 0534153594ace6d3c0080c26dfb4ee0486977212 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 15 Dec 2010 18:28:00 +0000 Subject: [PATCH 29/55] Adding back in openssh-lpk schema, as keys will likely be stored in LDAP again. --- nova/auth/opendj.sh | 1 + nova/auth/openssh-lpk_openldap.schema | 19 +++++++++++++++++++ nova/auth/openssh-lpk_sun.schema | 10 ++++++++++ nova/auth/slap.sh | 1 + 4 files changed, 31 insertions(+) create mode 100644 nova/auth/openssh-lpk_openldap.schema create mode 100644 nova/auth/openssh-lpk_sun.schema diff --git a/nova/auth/opendj.sh b/nova/auth/opendj.sh index 9a960034..1a280e5a 100755 --- a/nova/auth/opendj.sh +++ b/nova/auth/opendj.sh @@ -30,6 +30,7 @@ fi abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` schemapath='/var/opendj/instance/config/schema' +cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif chown opendj:opendj $schemapath/98-nova_sun.ldif diff --git a/nova/auth/openssh-lpk_openldap.schema b/nova/auth/openssh-lpk_openldap.schema new file mode 100644 index 00000000..93351da6 --- /dev/null +++ b/nova/auth/openssh-lpk_openldap.schema @@ -0,0 +1,19 @@ +# +# LDAP Public Key Patch schema for use with openssh-ldappubkey +# Author: Eric AUGE +# +# Based on the proposal of : Mark Ruijter +# + + +# octetString SYNTAX +attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' + DESC 'MANDATORY: OpenSSH Public key' + EQUALITY octetStringMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) + +# printableString SYNTAX yes|no +objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY + DESC 'MANDATORY: OpenSSH LPK objectclass' + MAY ( sshPublicKey $ uid ) + ) diff --git a/nova/auth/openssh-lpk_sun.schema b/nova/auth/openssh-lpk_sun.schema new file mode 100644 index 00000000..5f52db3b --- /dev/null +++ b/nova/auth/openssh-lpk_sun.schema @@ -0,0 +1,10 @@ +# +# LDAP Public Key Patch schema for use with openssh-ldappubkey +# Author: Eric AUGE +# +# Schema for Sun Directory Server. +# Based on the original schema, modified by Stefan Fischer. 
+# +dn: cn=schema +attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) +objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) ) diff --git a/nova/auth/slap.sh b/nova/auth/slap.sh index 36c4ba37..95c61daf 100755 --- a/nova/auth/slap.sh +++ b/nova/auth/slap.sh @@ -21,6 +21,7 @@ apt-get install -y slapd ldap-utils python-ldap abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` +cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema cp $abspath/nova_openldap.schema /etc/ldap/schema/nova.schema mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig From 8c70162a5c05f3ac1ac9b2d03df82dd4a9583569 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 15 Dec 2010 11:23:33 -0800 Subject: [PATCH 30/55] clean up tests and add overriden time method to utils --- nova/fakememcache.py | 11 +++++------ nova/tests/middleware_unittest.py | 23 ++++++++++------------- 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/nova/fakememcache.py b/nova/fakememcache.py index 0b4037ef..67f46dbd 100644 --- a/nova/fakememcache.py +++ b/nova/fakememcache.py @@ -18,21 +18,20 @@ """Super simple fake memcache client.""" -import time +import utils class Client(object): """Replicates a tiny subset of memcached client interface.""" - def __init__(self, time_fn=time.time, *args, **kwargs): - """Time fn is to allow testing through a custom function""" - self.time_fn = time_fn + def __init__(self, *args, **kwargs): + """Ignores the passed in args""" self.cache = {} def get(self, key): """Retrieves the value for a key or None.""" (timeout, value) = self.cache.get(key, (0, None)) - if timeout == 0 or self.time_fn() < timeout: + if timeout == 0 or utils.utcnow_ts() < timeout: return value return None @@ -40,7 +39,7 @@ class Client(object): """Sets the value for a key.""" timeout = 0 if time != 0: - timeout = self.time_fn() + time + timeout = utils.utcnow_ts() + time self.cache[key] = (timeout, value) return True diff --git a/nova/tests/middleware_unittest.py b/nova/tests/middleware_unittest.py index 61a790c1..0febf52d 100644 --- a/nova/tests/middleware_unittest.py +++ b/nova/tests/middleware_unittest.py @@ -16,6 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import datetime import webob import webob.dec import webob.exc @@ -23,6 +24,7 @@ import webob.exc from nova.api import ec2 from nova import flags from nova import test +from nova import utils FLAGS = flags.FLAGS @@ -39,14 +41,13 @@ def conditional_forbid(req): class LockoutTestCase(test.TrialTestCase): """Test case for the Lockout middleware.""" def setUp(self): # pylint: disable-msg=C0103 - self.local_time = 0 - self.lockout = ec2.Lockout(conditional_forbid, - time_fn=self._constant_time) super(LockoutTestCase, self).setUp() + utils.set_time_override() + self.lockout = ec2.Lockout(conditional_forbid) - def _constant_time(self): - """Helper method to force timeouts.""" - return self.local_time + def tearDown(self): # pylint: disable-msg=C0103 + utils.clear_time_override() + super(LockoutTestCase, self).tearDown() def _send_bad_attempts(self, access_key, num_attempts=1): """Fail x.""" @@ -59,10 +60,6 @@ class LockoutTestCase(test.TrialTestCase): req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key) return (req.get_response(self.lockout).status_int == 403) - def _advance_time(self, time): - """Increment time to 1 second past the lockout.""" - self.local_time = self.local_time + time - def test_lockout(self): self._send_bad_attempts('test', FLAGS.lockout_attempts) self.assertTrue(self._is_locked_out('test')) @@ -70,20 +67,20 @@ class LockoutTestCase(test.TrialTestCase): def test_timeout(self): self._send_bad_attempts('test', FLAGS.lockout_attempts) self.assertTrue(self._is_locked_out('test')) - self._advance_time(FLAGS.lockout_minutes * 60) + utils.advance_time_seconds(FLAGS.lockout_minutes * 60) self.assertFalse(self._is_locked_out('test')) def test_multiple_keys(self): self._send_bad_attempts('test1', FLAGS.lockout_attempts) self.assertTrue(self._is_locked_out('test1')) self.assertFalse(self._is_locked_out('test2')) - self._advance_time(FLAGS.lockout_minutes * 60) + utils.advance_time_seconds(FLAGS.lockout_minutes * 60) self.assertFalse(self._is_locked_out('test1')) self.assertFalse(self._is_locked_out('test2')) def test_window_timeout(self): self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) self.assertFalse(self._is_locked_out('test')) - self._advance_time(FLAGS.lockout_window * 60) + utils.advance_time_seconds(FLAGS.lockout_window * 60) self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) self.assertFalse(self._is_locked_out('test')) From 331d83b5a2f01a35cda49bb976ce6f0efd851a9e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 16:33:38 +0000 Subject: [PATCH 31/55] Removed FakeInstance and introduced stubout for DB. Code clean-up --- nova/tests/xenapi/__init__.py | 33 +++++++ nova/tests/xenapi/stubs.py | 98 ++++++++++++++++++++ nova/tests/xenapi_unittest.py | 165 +++++++++------------------------- 3 files changed, 173 insertions(+), 123 deletions(-) create mode 100644 nova/tests/xenapi/__init__.py create mode 100644 nova/tests/xenapi/stubs.py diff --git a/nova/tests/xenapi/__init__.py b/nova/tests/xenapi/__init__.py new file mode 100644 index 00000000..a157d759 --- /dev/null +++ b/nova/tests/xenapi/__init__.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite""" diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py new file mode 100644 index 00000000..52518938 --- /dev/null +++ b/nova/tests/xenapi/stubs.py @@ -0,0 +1,98 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Stubouts, mocks and fixtures for the test suite""" + +from nova.virt import xenapi_conn +from nova.virt.xenapi import fake + + +def stubout_session(stubs, cls): + """ Stubs out two methods from XenAPISession """ + def fake_import(self): + """ Stubs out get_imported_xenapi of XenAPISession """ + fake_module = 'nova.virt.xenapi.fake' + from_list = ['fake'] + return __import__(fake_module, globals(), locals(), from_list, -1) + + stubs.Set(xenapi_conn.XenAPISession, '_create_session', + lambda s, url: cls(url)) + stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi', + fake_import) + + +class FakeSessionForVMTests(fake.SessionBase): + """ Stubs out a XenAPISession for VM tests """ + def __init__(self, uri): + super(FakeSessionForVMTests, self).__init__(uri) + + def network_get_all_records_where(self, _1, _2): + return self.xenapi.network.get_all_records() + + def host_call_plugin(self, _1, _2, _3, _4, _5): + return '' + + def VM_start(self, _1, ref, _2, _3): + vm = fake.get_record('VM', ref) + if vm['power_state'] != 'Halted': + raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', + vm['power_state']]) + vm['power_state'] = 'Running' + + +class FakeSessionForVolumeTests(fake.SessionBase): + """ Stubs out a XenAPISession for Volume tests """ + def __init__(self, uri): + super(FakeSessionForVolumeTests, self).__init__(uri) + + def VBD_plug(self, _1, _2): + #FIXME(armando):make proper plug + pass + + def PBD_unplug(self, _1, _2): + #FIXME(armando):make proper unplug + pass + + def SR_forget(self, _1, _2): + #FIXME(armando):make proper forget + pass + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + #FIXME(armando):make proper introduce + valid_vdi = False + refs = fake.get_all('VDI') + for ref in refs: + rec = fake.get_record('VDI', ref) + if rec['uuid'] == uuid: + valid_vdi = True + if not valid_vdi: + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + +class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): + """ Stubs out a XenAPISession for Volume tests: it injects failures """ + def __init__(self, uri): + super(FakeSessionForVolumeFailedTests, self).__init__(uri) + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + # This is for testing failure + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + def VBD_plug(self, _1, _2): + # This is for testing failure + raise fake.Failure([['INVALID_VBD', 'session', self._session]]) diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index b9955a94..c2612a4c 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -1,21 +1,5 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# # Copyright (c) 2010 Citrix Systems, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -30,16 +14,14 @@ # License for the specific language governing permissions and limitations # under the License. +""" +Test suite for XenAPI +""" import stubout -import uuid - -from twisted.internet import defer -from twisted.internet import threads from nova import db from nova import context -from nova import exception from nova import flags from nova import test from nova import utils @@ -49,28 +31,15 @@ from nova.compute import power_state from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils -from nova.virt.xenapi import vm_utils -from nova.virt.xenapi import volumeops -from boto.ec2.volume import Volume +from nova.tests.db import fakes +from nova.tests.xenapi import stubs FLAGS = flags.FLAGS -def stubout_session(stubs, cls): - def fake_import(self): - fake_module = 'nova.virt.xenapi.fake' - from_list = ['fake'] - return __import__(fake_module, globals(), locals(), from_list, -1) - - stubs.Set(xenapi_conn.XenAPISession, '_create_session', - lambda s, url: cls(url)) - stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi', - fake_import) - - class XenAPIVolumeTestCase(test.TrialTestCase): """ - Unit tests for VM operations + Unit tests for Volume operations """ def setUp(self): super(XenAPIVolumeTestCase, self).setUp() @@ -78,7 +47,17 @@ class XenAPIVolumeTestCase(test.TrialTestCase): FLAGS.target_host = '127.0.0.1' FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' + fakes.stub_out_db_instance_api(self.stubs) fake.reset() + self.values = {'name': 1, + 'project_id': 'fake', + 'user_id': 'fake', + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } def _create_volume(self, size='0'): """Create a volume object.""" @@ -94,7 +73,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): def test_create_iscsi_storage(self): """ This shows how to test helper classes' methods """ - stubout_session(self.stubs, FakeSessionForVolumeTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() @@ -106,11 +85,13 @@ class XenAPIVolumeTestCase(test.TrialTestCase): info, label, description) + srs = fake.get_all('SR') + self.assertEqual(sr_ref, srs[0]) db.volume_destroy(context.get_admin_context(), vol['id']) def test_parse_volume_info_raise_exception(self): """ This shows how to test helper classes' methods """ - stubout_session(self.stubs, FakeSessionForVolumeTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() @@ -119,6 +100,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): info = helper.parse_volume_info(vol['ec2_id'], '/dev/sd') def check(exc): + """ handler """ self.assertIsInstance(exc.value, volume_utils.StorageError) info.addErrback(check) @@ -126,16 +108,16 @@ class XenAPIVolumeTestCase(test.TrialTestCase): def test_attach_volume(self): """ This shows how to test Ops classes' methods """ - stubout_session(self.stubs, FakeSessionForVolumeTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) conn = xenapi_conn.get_connection(False) volume = self._create_volume() - instance = FakeInstance(1, 'fake', 
'fake', 1, 2, 3, - 'm1.large', 'aa:bb:cc:dd:ee:ff') + instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') def check(_): + """ handler """ # check that the VM has a VBD attached to it # Get XenAPI reference for the VM vms = fake.get_all('VM') @@ -150,16 +132,17 @@ class XenAPIVolumeTestCase(test.TrialTestCase): def test_attach_volume_raise_exception(self): """ This shows how to test when exceptions are raised """ - stubout_session(self.stubs, FakeSessionForVolumeFailedTests) + stubs.stubout_session(self.stubs, + stubs.FakeSessionForVolumeFailedTests) conn = xenapi_conn.get_connection(False) volume = self._create_volume() - instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, - 'm1.large', 'aa:bb:cc:dd:ee:ff') + instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') def check(exc): + """ handler """ if exc: pass else: @@ -188,22 +171,32 @@ class XenAPIVMTestCase(test.TrialTestCase): FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' fake.reset() + fakes.stub_out_db_instance_api(self.stubs) fake.create_network('fake', FLAGS.flat_network_bridge) def test_list_instances_0(self): - stubout_session(self.stubs, FakeSessionForVMTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) conn = xenapi_conn.get_connection(False) instances = conn.list_instances() self.assertEquals(instances, []) def test_spawn(self): - stubout_session(self.stubs, FakeSessionForVMTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + values = {'name': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } conn = xenapi_conn.get_connection(False) - instance = FakeInstance(1, self.project.id, self.user.id, 1, 2, 3, - 'm1.large', 'aa:bb:cc:dd:ee:ff') + instance = db.instance_create(values) result = conn.spawn(instance) def check(_): + """ handler """ instances = conn.list_instances() self.assertEquals(instances, [1]) @@ -241,77 +234,3 @@ class XenAPIVMTestCase(test.TrialTestCase): self.manager.delete_project(self.project) self.manager.delete_user(self.user) self.stubs.UnsetAll() - - -class FakeInstance(): - def __init__(self, name, project_id, user_id, image_id, kernel_id, - ramdisk_id, instance_type, mac_address): - self.name = name - self.project_id = project_id - self.user_id = user_id - self.image_id = image_id - self.kernel_id = kernel_id - self.ramdisk_id = ramdisk_id - self.instance_type = instance_type - self.mac_address = mac_address - - -class FakeSessionForVMTests(fake.SessionBase): - def __init__(self, uri): - super(FakeSessionForVMTests, self).__init__(uri) - - def network_get_all_records_where(self, _1, _2): - return self.xenapi.network.get_all_records() - - def host_call_plugin(self, _1, _2, _3, _4, _5): - return '' - - def VM_start(self, _1, ref, _2, _3): - vm = fake.get_record('VM', ref) - if vm['power_state'] != 'Halted': - raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', - vm['power_state']]) - vm['power_state'] = 'Running' - - -class FakeSessionForVolumeTests(fake.SessionBase): - def __init__(self, uri): - super(FakeSessionForVolumeTests, self).__init__(uri) - - def VBD_plug(self, _1, _2): - #FIXME(armando):make proper plug - pass - - def PBD_unplug(self, _1, _2): - #FIXME(armando):make proper 
unplug - pass - - def SR_forget(self, _1, _2): - #FIXME(armando):make proper forget - pass - - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - #FIXME(armando):make proper introduce - valid_vdi = False - refs = fake.get_all('VDI') - for ref in refs: - rec = fake.get_record('VDI', ref) - if rec['uuid'] == uuid: - valid_vdi = True - if not valid_vdi: - raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - - -class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): - def __init__(self, uri): - super(FakeSessionForVolumeFailedTests, self).__init__(uri) - - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - # test failure - raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - - def VBD_plug(self, _1, _2): - # test failure - raise fake.Failure([['INVALID_VBD', 'session', self._session]]) From e0b5318e54373ac1d53ad12e237ca0cb6795ed3f Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 17:47:48 +0000 Subject: [PATCH 32/55] fake session clean-up --- nova/tests/xenapi/__init__.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/nova/tests/xenapi/__init__.py b/nova/tests/xenapi/__init__.py index a157d759..dcd81b74 100644 --- a/nova/tests/xenapi/__init__.py +++ b/nova/tests/xenapi/__init__.py @@ -1,21 +1,5 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may From 73e1126bc05e22688ff9ccfa62ebaa5a1c6412ea Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 18:44:42 +0000 Subject: [PATCH 33/55] reviewed the FIXMEs, and spotted an uncaught exception in volume_utils...yay! 
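
A minimal, self-contained sketch of the stubout pattern that the xenapi
test patches above converge on: swap a class attribute for a fake in
setUp(), restore it in tearDown(). The Session class, fake_create() and
the test case below are illustrative stand-ins rather than code from the
Nova tree; only the stubout calls (StubOutForTesting, Set, UnsetAll) are
the ones these tests actually use.

    import unittest

    import stubout


    class Session(object):
        """Hypothetical stand-in for a class that talks to a live service."""

        def create(self, url):
            raise RuntimeError('would contact %s over the network' % url)


    def fake_create(self, url):
        """Replacement that keeps the test entirely in memory."""
        return 'fake-session-for-%s' % url


    class StubbedTestCase(unittest.TestCase):
        def setUp(self):
            # Swap the method on the class itself, the same way these
            # patches replace XenAPISession._create_session with a fake.
            self.stubs = stubout.StubOutForTesting()
            self.stubs.Set(Session, 'create', fake_create)

        def tearDown(self):
            # Restore every stubbed attribute so later tests see the
            # real implementation again.
            self.stubs.UnsetAll()

        def test_create_uses_fake(self):
            self.assertEqual(Session().create('test_url'),
                             'fake-session-for-test_url')

In the patches above the same bracketing is used with fake.reset() in
setUp() and self.stubs.UnsetAll() in tearDown(), which is what keeps
stubbed sessions and fake XenAPI records from leaking between test cases.
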
--- nova/tests/virt_unittest.py | 2 -- nova/tests/xenapi/stubs.py | 24 +++++++++--------------- nova/tests/xenapi_unittest.py | 5 ++--- 3 files changed, 11 insertions(+), 20 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 52843b70..d49383fb 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -25,8 +25,6 @@ from nova import utils from nova.api.ec2 import cloud from nova.auth import manager from nova.virt import libvirt_conn -from nova.virt.xenapi import fake -from nova.virt.xenapi import volume_utils FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 52518938..11dd535d 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -58,21 +58,12 @@ class FakeSessionForVolumeTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForVolumeTests, self).__init__(uri) - def VBD_plug(self, _1, _2): - #FIXME(armando):make proper plug - pass - - def PBD_unplug(self, _1, _2): - #FIXME(armando):make proper unplug - pass - - def SR_forget(self, _1, _2): - #FIXME(armando):make proper forget - pass + def VBD_plug(self, _1, ref): + rec = fake.get_record('VBD', ref) + rec['currently-attached'] = True def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11): - #FIXME(armando):make proper introduce valid_vdi = False refs = fake.get_all('VDI') for ref in refs: @@ -93,6 +84,9 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): # This is for testing failure raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - def VBD_plug(self, _1, _2): - # This is for testing failure - raise fake.Failure([['INVALID_VBD', 'session', self._session]]) + def PBD_unplug(self, _1, ref): + rec = fake.get_record('PBD', ref) + rec['currently-attached'] = False + + def SR_forget(self, _1, ref): + pass diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index c2612a4c..839d6aa4 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -141,14 +141,13 @@ class XenAPIVolumeTestCase(test.TrialTestCase): result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') - def check(exc): + def check_exception(exc): """ handler """ if exc: pass else: self.fail('Oops, no exception has been raised!') - - result.addErrback(check) + result.addErrback(check_exception) return result def tearDown(self): From 65499d5d668e1a3204b646824d3474f73648f412 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 16 Dec 2010 19:13:37 +0000 Subject: [PATCH 34/55] use getent, update docstring --- CA/genvpn.sh | 36 ++++++++++++++++++++++++++++++++++++ nova/auth/manager.py | 3 ++- 2 files changed, 38 insertions(+), 1 deletion(-) create mode 100755 CA/genvpn.sh diff --git a/CA/genvpn.sh b/CA/genvpn.sh new file mode 100755 index 00000000..7e7db185 --- /dev/null +++ b/CA/genvpn.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This gets zipped and run on the cloudpipe-managed OpenVPN server +NAME=$1 +SUBJ=$2 + +mkdir -p projects/$NAME +cd projects/$NAME + +# generate a server priv key +openssl genrsa -out server.key 2048 + +# generate a server CSR +openssl req -new -key server.key -out server.csr -batch -subj "$SUBJ" + +novauid=`getent passwd nova | awk -F: '{print $3}'` +if [ ! -z "${novauid}" ] && [ "`id -u`" != "${novauid}" ]; then + sudo chown -R nova:nogroup . +fi diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 783ef51a..73547302 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -65,7 +65,8 @@ flags.DEFINE_string('credential_key_file', 'pk.pem', flags.DEFINE_string('credential_cert_file', 'cert.pem', 'Filename of certificate in credentials zip') flags.DEFINE_string('credential_rc_file', '%src', - 'Filename of rc in credentials zip') + 'Filename of rc in credentials zip, %s will be ' + 'replaced by name of the region (nova by default)') flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', 'Driver that auth manager uses') From 96d42b1404e93f9d6d8d4a8e8a632717a19bd8cb Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 17 Dec 2010 00:43:18 +0000 Subject: [PATCH 36/55] move some flags around --- nova/flags.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 9e99ffb5..95bfd377 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -29,6 +29,7 @@ import sys import gflags +from nova import utils class FlagValues(gflags.FlagValues): """Extension of gflags.FlagValues that allows undefined and runtime flags. 
@@ -211,8 +212,8 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID') DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key') DEFINE_integer('s3_port', 3333, 's3 port') -DEFINE_string('s3_host', '127.0.0.1', 's3 host (for infrastructure)') -DEFINE_string('s3_dmz', '127.0.0.1', 's3 dmz ip (for instances)') +DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)') +DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on') @@ -231,10 +232,11 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') -DEFINE_string('cc_dmz', '127.0.0.1', 'ip of api server (for instances)') +DEFINE_string('ec2_prefix', 'http', 'prefix for ec2') +DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server') +DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server') DEFINE_integer('cc_port', 8773, 'cloud controller port') -DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', - 'Url to ec2 api server') +DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2') DEFINE_string('default_image', 'ami-11111', 'default image to use, testing only') From 7f651bc3b57011097a3b8e2f945f5f8f5a04362a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 17 Dec 2010 00:52:17 +0000 Subject: [PATCH 37/55] pep8 --- nova/flags.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/flags.py b/nova/flags.py index 95bfd377..74badf6f 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -31,6 +31,7 @@ import gflags from nova import utils + class FlagValues(gflags.FlagValues): """Extension of gflags.FlagValues that allows undefined and runtime flags. From e9031d57e3d9805c2eabfe4b344cfd902540d628 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 17 Dec 2010 11:07:59 -0600 Subject: [PATCH 38/55] Replaced redis with a modified dict class --- nova/auth/fakeldap.py | 102 +++++++++++++++++++++++------------- nova/auth/manager.py | 10 +++- nova/tests/auth_unittest.py | 5 +- 3 files changed, 77 insertions(+), 40 deletions(-) diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index 46e0135b..e46bb91a 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -15,7 +15,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -"""Fake LDAP server for test harness, backs to ReDIS. +"""Fake LDAP server for test harness. This class does very little error checking, and knows nothing about ldap class definitions. It implements the minimum emulation of the python ldap @@ -23,20 +23,11 @@ library to work with nova. 
""" +import fnmatch import json -import redis - -from nova import flags - -FLAGS = flags.FLAGS -flags.DEFINE_string('redis_host', '127.0.0.1', - 'Host that redis is running on.') -flags.DEFINE_integer('redis_port', 6379, - 'Port that redis is running on.') -flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') -class Redis(object): +class Store(object): def __init__(self): if hasattr(self.__class__, '_instance'): raise Exception('Attempted to instantiate singleton') @@ -44,13 +35,55 @@ class Redis(object): @classmethod def instance(cls): if not hasattr(cls, '_instance'): - inst = redis.Redis(host=FLAGS.redis_host, - port=FLAGS.redis_port, - db=FLAGS.redis_db) - cls._instance = inst + cls._instance = _StorageDict() return cls._instance +class _StorageDict(dict): + def keys(self, pat=None): + ret = super(_StorageDict, self).keys() + if pat is not None: + ret = fnmatch.filter(ret, pat) + return ret + + def delete(self, key): + try: + del self[key] + except KeyError: + pass + + def flushdb(self): + self.clear() + + def hgetall(self, key): + """Returns the hash for the given key; creates + the hash if the key doesn't exist.""" + try: + return self[key] + except KeyError: + self[key] = {} + return self[key] + + def hget(self, key, field): + hashdict = self.hgetall(key) + try: + return hashdict[field] + except KeyError: + hashdict[field] = {} + return hashdict[field] + + def hset(self, key, field, val): + hashdict = self.hgetall(key) + hashdict[field] = val + + def hmset(self, key, value_dict): + hashdict = self.hgetall(key) + for field, val in value_dict.items(): + hashdict[field] = val + + + + SCOPE_BASE = 0 SCOPE_ONELEVEL = 1 # Not implemented SCOPE_SUBTREE = 2 @@ -169,8 +202,6 @@ def _to_json(unencoded): class FakeLDAP(object): - #TODO(vish): refactor this class to use a wrapper instead of accessing - # redis directly """Fake LDAP connection.""" def simple_bind_s(self, dn, password): @@ -183,14 +214,14 @@ class FakeLDAP(object): def add_s(self, dn, attr): """Add an object with the specified attributes at dn.""" - key = "%s%s" % (self.__redis_prefix, dn) - + key = "%s%s" % (self.__prefix, dn) value_dict = dict([(k, _to_json(v)) for k, v in attr]) - Redis.instance().hmset(key, value_dict) + Store.instance().hmset(key, value_dict) + def delete_s(self, dn): """Remove the ldap object at specified dn.""" - Redis.instance().delete("%s%s" % (self.__redis_prefix, dn)) + Store.instance().delete("%s%s" % (self.__prefix, dn)) def modify_s(self, dn, attrs): """Modify the object at dn using the attribute list. @@ -201,18 +232,18 @@ class FakeLDAP(object): ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value) """ - redis = Redis.instance() - key = "%s%s" % (self.__redis_prefix, dn) + store = Store.instance() + key = "%s%s" % (self.__prefix, dn) for cmd, k, v in attrs: - values = _from_json(redis.hget(key, k)) + values = _from_json(store.hget(key, k)) if cmd == MOD_ADD: values.append(v) elif cmd == MOD_REPLACE: values = [v] else: values.remove(v) - values = redis.hset(key, k, _to_json(values)) + values = store.hset(key, k, _to_json(values)) def search_s(self, dn, scope, query=None, fields=None): """Search for all matching objects under dn using the query. 
@@ -226,16 +257,17 @@ class FakeLDAP(object): """ if scope != SCOPE_BASE and scope != SCOPE_SUBTREE: raise NotImplementedError(str(scope)) - redis = Redis.instance() + store = Store.instance() if scope == SCOPE_BASE: - keys = ["%s%s" % (self.__redis_prefix, dn)] + keys = ["%s%s" % (self.__prefix, dn)] else: - keys = redis.keys("%s*%s" % (self.__redis_prefix, dn)) + keys = store.keys("%s*%s" % (self.__prefix, dn)) + objects = [] for key in keys: - # get the attributes from redis - attrs = redis.hgetall(key) - # turn the values from redis into lists + # get the attributes from the store + attrs = store.hgetall(key) + # turn the values from the store into lists # pylint: disable-msg=E1103 attrs = dict([(k, _from_json(v)) for k, v in attrs.iteritems()]) @@ -244,13 +276,13 @@ class FakeLDAP(object): # filter the attributes by fields attrs = dict([(k, v) for k, v in attrs.iteritems() if not fields or k in fields]) - objects.append((key[len(self.__redis_prefix):], attrs)) + objects.append((key[len(self.__prefix):], attrs)) # pylint: enable-msg=E1103 if objects == []: raise NO_SUCH_OBJECT() return objects @property - def __redis_prefix(self): # pylint: disable-msg=R0201 - """Get the prefix to use for all redis keys.""" + def __prefix(self): # pylint: disable-msg=R0201 + """Get the prefix to use for all keys.""" return 'ldap:' diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 11c3bd6d..5a7020a9 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -478,10 +478,13 @@ class AuthManager(object): if member_users: member_users = [User.safe_id(u) for u in member_users] with self.driver() as drv: - project_dict = drv.create_project(name, + try: + project_dict = drv.create_project(name, User.safe_id(manager_user), description, member_users) + except: + project_dict = drv.get_project(name) if project_dict: project = Project(**project_dict) return project @@ -604,7 +607,10 @@ class AuthManager(object): if secret == None: secret = str(uuid.uuid4()) with self.driver() as drv: - user_dict = drv.create_user(name, access, secret, admin) + try: + user_dict = drv.create_user(name, access, secret, admin) + except: + user_dict = drv.get_user(name) if user_dict: return User(**user_dict) diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 4508d672..32cb2c54 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -333,11 +333,10 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): AuthManagerTestCase.__init__(self) test.TestCase.__init__(self, *args, **kwargs) import nova.auth.fakeldap as fakeldap - FLAGS.redis_db = 8 if FLAGS.flush_db: - logging.info("Flushing redis datastore") + logging.info("Flushing datastore") try: - r = fakeldap.Redis.instance() + r = fakeldap.Store.instance() r.flushdb() except: self.skip = True From 6900eddc304bb59f715814499f100f162a82b8b3 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 17 Dec 2010 11:14:32 -0600 Subject: [PATCH 39/55] Fixed some old code that was merged incorrectly --- nova/auth/manager.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 5a7020a9..11c3bd6d 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -478,13 +478,10 @@ class AuthManager(object): if member_users: member_users = [User.safe_id(u) for u in member_users] with self.driver() as drv: - try: - project_dict = drv.create_project(name, + project_dict = drv.create_project(name, User.safe_id(manager_user), description, member_users) - 
except: - project_dict = drv.get_project(name) if project_dict: project = Project(**project_dict) return project @@ -607,10 +604,7 @@ class AuthManager(object): if secret == None: secret = str(uuid.uuid4()) with self.driver() as drv: - try: - user_dict = drv.create_user(name, access, secret, admin) - except: - user_dict = drv.get_user(name) + user_dict = drv.create_user(name, access, secret, admin) if user_dict: return User(**user_dict) From 20936a664d267f88557d213bc36e25282af67d4b Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 17 Dec 2010 11:24:06 -0600 Subject: [PATCH 40/55] pep8 cleanup --- nova/auth/fakeldap.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index e46bb91a..1ac579db 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -82,8 +82,6 @@ class _StorageDict(dict): hashdict[field] = val - - SCOPE_BASE = 0 SCOPE_ONELEVEL = 1 # Not implemented SCOPE_SUBTREE = 2 @@ -218,7 +216,6 @@ class FakeLDAP(object): value_dict = dict([(k, _to_json(v)) for k, v in attr]) Store.instance().hmset(key, value_dict) - def delete_s(self, dn): """Remove the ldap object at specified dn.""" Store.instance().delete("%s%s" % (self.__prefix, dn)) From 0e70277b39db47f165349f7afdd4bd9740c22c69 Mon Sep 17 00:00:00 2001 From: Jonathan Bryce Date: Fri, 17 Dec 2010 15:25:44 -0600 Subject: [PATCH 41/55] Adding in Ed Leafe so we can land his remove-redis test branch --- Authors | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Authors b/Authors index 565444ee..fa38ef0b 100644 --- a/Authors +++ b/Authors @@ -6,6 +6,7 @@ Chris Behrens Chmouel Boudjnah Dean Troyer Devin Carlen +Ed Leafe Eldar Nugaev Eric Day Ewan Mellor @@ -14,6 +15,7 @@ Jay Pipes Jesse Andrews Joe Heck Joel Moore +Jonathan Bryce Josh Kearney Joshua McKenty Justin Santa Barbara From 27ee813969e6f2cddb505d0f63daeb5853770a77 Mon Sep 17 00:00:00 2001 From: Jonathan Bryce Date: Fri, 17 Dec 2010 16:29:55 -0600 Subject: [PATCH 42/55] Removing unneeded Trial specific code --- nova/tests/auth_unittest.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 32cb2c54..61ae43fb 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -335,11 +335,8 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): import nova.auth.fakeldap as fakeldap if FLAGS.flush_db: logging.info("Flushing datastore") - try: - r = fakeldap.Store.instance() - r.flushdb() - except: - self.skip = True + r = fakeldap.Store.instance() + r.flushdb() class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase): From 9c8255b2cc824c298cb6bdf15bbf4574691309fd Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Sat, 18 Dec 2010 00:50:49 +0000 Subject: [PATCH 43/55] fixed unittests and further clean-up post-eventlet merge --- nova/tests/xenapi_unittest.py | 48 ++++++++++++----------------------- 1 file changed, 16 insertions(+), 32 deletions(-) diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index 839d6aa4..74401ce5 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -78,13 +78,10 @@ class XenAPIVolumeTestCase(test.TrialTestCase): helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() - info = yield helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') + info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') label = 'SR-%s' % vol['ec2_id'] description = 'Test-SR' - sr_ref = 
helper.create_iscsi_storage_blocking(session, - info, - label, - description) + sr_ref = helper.create_iscsi_storage(session, info, label, description) srs = fake.get_all('SR') self.assertEqual(sr_ref, srs[0]) db.volume_destroy(context.get_admin_context(), vol['id']) @@ -97,13 +94,10 @@ class XenAPIVolumeTestCase(test.TrialTestCase): helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() # oops, wrong mount point! - info = helper.parse_volume_info(vol['ec2_id'], '/dev/sd') - - def check(exc): - """ handler """ - self.assertIsInstance(exc.value, volume_utils.StorageError) - - info.addErrback(check) + self.assertRaises(volume_utils.StorageError, + helper.parse_volume_info, + vol['ec2_id'], + '/dev/sd') db.volume_destroy(context.get_admin_context(), vol['id']) def test_attach_volume(self): @@ -116,8 +110,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') - def check(_): - """ handler """ + def check(): # check that the VM has a VBD attached to it # Get XenAPI reference for the VM vms = fake.get_all('VM') @@ -127,8 +120,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): vm_ref = vbd['VM'] self.assertEqual(vm_ref, vms[0]) - result.addCallback(check) - return result + check() def test_attach_volume_raise_exception(self): """ This shows how to test when exceptions are raised """ @@ -138,17 +130,11 @@ class XenAPIVolumeTestCase(test.TrialTestCase): volume = self._create_volume() instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') - result = conn.attach_volume(instance.name, volume['ec2_id'], - '/dev/sdc') - - def check_exception(exc): - """ handler """ - if exc: - pass - else: - self.fail('Oops, no exception has been raised!') - result.addErrback(check_exception) - return result + self.assertRaises(Exception, + conn.attach_volume, + instance.name, + volume['ec2_id'], + '/dev/sdc') def tearDown(self): super(XenAPIVolumeTestCase, self).tearDown() @@ -192,10 +178,9 @@ class XenAPIVMTestCase(test.TrialTestCase): } conn = xenapi_conn.get_connection(False) instance = db.instance_create(values) - result = conn.spawn(instance) + conn.spawn(instance) - def check(_): - """ handler """ + def check(): instances = conn.list_instances() self.assertEquals(instances, [1]) @@ -225,8 +210,7 @@ class XenAPIVMTestCase(test.TrialTestCase): # Check that the VM is running according to XenAPI. 
self.assertEquals(vm['power_state'], 'Running') - result.addCallback(check) - return result + check() def tearDown(self): super(XenAPIVMTestCase, self).tearDown() From b46386c82dad1d6760833e01872ba0f34589af5a Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 20 Dec 2010 22:53:07 +0100 Subject: [PATCH 44/55] Add my @linux2go.dk address to .mailmap --- .mailmap | 1 + 1 file changed, 1 insertion(+) diff --git a/.mailmap b/.mailmap index 2a6eb8d7..8041e234 100644 --- a/.mailmap +++ b/.mailmap @@ -19,6 +19,7 @@ + From 3d0d75b7917d770c7247bcf87a8713d196e6379e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 03:43:47 +0000 Subject: [PATCH 45/55] pep8 and removed extra imports --- nova/tests/cloud_unittest.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 185e4b4e..af544e3c 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -22,12 +22,10 @@ import logging from M2Crypto import BIO from M2Crypto import RSA import os -import StringIO import tempfile import time from eventlet import greenthread -from xml.etree import ElementTree from nova import context from nova import crypto @@ -36,7 +34,6 @@ from nova import flags from nova import rpc from nova import service from nova import test -from nova import utils from nova.auth import manager from nova.compute import power_state from nova.api.ec2 import cloud @@ -75,7 +72,8 @@ class CloudTestCase(test.TestCase): self.user = self.manager.create_user('admin', 'admin', 'admin', True) self.project = self.manager.create_project('proj', 'admin', 'proj') self.context = context.RequestContext(user=self.user, - project=self.project) + project=self.project) + def tearDown(self): self.manager.delete_project(self.project) self.manager.delete_user(self.user) From de7850f078f1d7eb4fbf36c2c40e3cadb1f8a738 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 18:43:41 +0000 Subject: [PATCH 46/55] don't allocate networks when getting vpn info --- nova/auth/manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 74da8e04..f9a7dd00 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -540,10 +540,10 @@ class AuthManager(object): """ network_ref = db.project_get_network(context.get_admin_context(), - Project.safe_id(project)) + Project.safe_id(project), False) - if not network_ref['vpn_public_port']: - raise exception.NotFound('project network data has not been set') + if not network_ref: + return (None, None) return (network_ref['vpn_public_address'], network_ref['vpn_public_port']) From f0a3f63219114f1dc1dfc9ba161d2051ba8b4209 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 19:24:12 +0000 Subject: [PATCH 47/55] activate fake rabbit for debugging --- nova/tests/rpc_unittest.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py index 8c3e3103..6ea2edca 100644 --- a/nova/tests/rpc_unittest.py +++ b/nova/tests/rpc_unittest.py @@ -33,7 +33,6 @@ class RpcTestCase(test.TestCase): """Test cases for rpc""" def setUp(self): super(RpcTestCase, self).setUp() - self.flags(fake_rabbit=False) self.conn = rpc.Connection.instance(True) self.receiver = TestReceiver() self.consumer = rpc.AdapterConsumer(connection=self.conn, From 7bcca56ec640aa23d6455aec84ec767408504ec7 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 21 Dec 2010 14:28:20 -0600 Subject: 
[PATCH 48/55] PEP8 cleanup --- nova/tests/virt_unittest.py | 46 +++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 9bbba4ba..0cf01638 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -129,43 +129,45 @@ class LibvirtConnTestCase(test.TestCase): check_list.append(check) else: if expect_kernel: - check = (lambda t: t.find('./os/kernel').text.split('/' - )[1], 'kernel') + check = (lambda t: t.find('./os/kernel').text.split( + '/')[1], 'kernel') else: check = (lambda t: t.find('./os/kernel'), None) check_list.append(check) if expect_ramdisk: - check = (lambda t: t.find('./os/initrd').text.split('/' - )[1], 'ramdisk') + check = (lambda t: t.find('./os/initrd').text.split( + '/')[1], 'ramdisk') else: check = (lambda t: t.find('./os/initrd'), None) check_list.append(check) common_checks = [ (lambda t: t.find('.').tag, 'domain'), - (lambda t: t.find('./devices/interface/filterref/parameter' - ).get('name'), 'IP'), - (lambda t: t.find('./devices/interface/filterref/parameter' - ).get('value'), '10.11.12.13'), - (lambda t: t.findall('./devices/interface/filterref/parameter' - )[1].get('name'), 'DHCPSERVER'), - (lambda t: t.findall('./devices/interface/filterref/parameter' - )[1].get('value'), '10.0.0.1'), - (lambda t: t.find('./devices/serial/source').get('path' - ).split('/')[1], 'console.log'), + (lambda t: t.find( + './devices/interface/filterref/parameter').get('name'), 'IP'), + (lambda t: t.find( + './devices/interface/filterref/parameter').get( + 'value'), '10.11.12.13'), + (lambda t: t.findall( + './devices/interface/filterref/parameter')[1].get( + 'name'), 'DHCPSERVER'), + (lambda t: t.findall( + './devices/interface/filterref/parameter')[1].get( + 'value'), '10.0.0.1'), + (lambda t: t.find('./devices/serial/source').get( + 'path').split('/')[1], 'console.log'), (lambda t: t.find('./memory').text, '2097152')] if rescue: - common_checks += [(lambda t: t.findall('./devices/disk/source' - )[0].get('file').split('/')[1], - 'rescue-disk'), - (lambda t: t.findall('./devices/disk/source' - )[1].get('file').split('/')[1], - 'disk')] + common_checks += [(lambda t: t.findall( + './devices/disk/source')[0].get('file').split('/')[1], + 'rescue-disk'), (lambda t: t.findall( + './devices/disk/source')[1].get( + 'file').split('/')[1], 'disk')] else: - common_checks += [(lambda t: t.findall('./devices/disk/source' - )[0].get('file').split('/')[1], + common_checks += [(lambda t: t.findall( + './devices/disk/source')[0].get('file').split('/')[1], 'disk')] for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): From f8e96282d5f876e9c183d2049edb8d0b68b53a1f Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 21 Dec 2010 13:00:30 -0800 Subject: [PATCH 49/55] Reworked fakerabbit backend so each connection has it's own. Moved queues and exchanges to be globals. 
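
A stripped-down sketch of the shared-state design this change adopts: the
"broker" lives in module-level dictionaries, so every Backend that a
connection creates publishes into and reads from the same queues, and
resetting between tests is just a matter of clearing those dictionaries.
FakeBackend and its method signatures below are illustrative only (the
real class subclasses carrot's base.BaseBackend, wraps payloads in
Message objects and drives consumer callbacks); the sketch only
demonstrates the shared-globals idea.

    EXCHANGES = {}
    QUEUES = {}


    class FakeBackend(object):
        """Illustrative stand-in, not the carrot-compatible Backend."""

        def queue_declare(self, queue):
            QUEUES.setdefault(queue, [])

        def queue_bind(self, queue, exchange, routing_key):
            EXCHANGES.setdefault(exchange, {})[routing_key] = queue

        def publish(self, exchange, routing_key, message):
            # Route through the shared exchange into the shared queue.
            QUEUES[EXCHANGES[exchange][routing_key]].append(message)

        def get(self, queue):
            if QUEUES.get(queue):
                return QUEUES[queue].pop(0)
            return None


    def reset_all():
        """Clear the shared state between test cases."""
        EXCHANGES.clear()
        QUEUES.clear()


    # Two independently created backends still see the same fake broker,
    # which is the point of moving the state to module level.
    sender, receiver = FakeBackend(), FakeBackend()
    receiver.queue_declare('compute')
    receiver.queue_bind('compute', 'nova', 'compute')
    sender.publish('nova', 'compute', {'method': 'run_instance'})
    assert receiver.get('compute') == {'method': 'run_instance'}
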
--- nova/fakerabbit.py | 121 ++++++++++++++++++++------------------------- 1 file changed, 53 insertions(+), 68 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index c6461793..792e4c34 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -25,6 +25,9 @@ from carrot.backends import base from eventlet import greenthread +EXCHANGES = {} +QUEUES = {} + class Message(base.BaseMessage): pass @@ -68,81 +71,63 @@ class Queue(object): return self._queue.get() -class Backend(object): - """ Singleton backend for testing """ - class __impl(base.BaseBackend): - def __init__(self, *args, **kwargs): - #super(__impl, self).__init__(*args, **kwargs) - self._exchanges = {} - self._queues = {} +class Backend(base.BaseBackend): + def queue_declare(self, queue, **kwargs): + global QUEUES + if queue not in QUEUES: + logging.debug('Declaring queue %s', queue) + QUEUES[queue] = Queue(queue) - def _reset_all(self): - self._exchanges = {} - self._queues = {} + def exchange_declare(self, exchange, type, *args, **kwargs): + global EXCHANGES + if exchange not in EXCHANGES: + logging.debug('Declaring exchange %s', exchange) + EXCHANGES[exchange] = Exchange(exchange, type) - def queue_declare(self, queue, **kwargs): - if queue not in self._queues: - logging.debug('Declaring queue %s', queue) - self._queues[queue] = Queue(queue) + def queue_bind(self, queue, exchange, routing_key, **kwargs): + global EXCHANGES + global QUEUES + logging.debug('Binding %s to %s with key %s', + queue, exchange, routing_key) + EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) - def exchange_declare(self, exchange, type, *args, **kwargs): - if exchange not in self._exchanges: - logging.debug('Declaring exchange %s', exchange) - self._exchanges[exchange] = Exchange(exchange, type) + def declare_consumer(self, queue, callback, *args, **kwargs): + self.current_queue = queue + self.current_callback = callback - def queue_bind(self, queue, exchange, routing_key, **kwargs): - logging.debug('Binding %s to %s with key %s', - queue, exchange, routing_key) - self._exchanges[exchange].bind(self._queues[queue].push, - routing_key) + def consume(self, limit=None): + while True: + item = self.get(self.current_queue) + if item: + self.current_callback(item) + raise StopIteration() + greenthread.sleep(0) - def declare_consumer(self, queue, callback, *args, **kwargs): - self.current_queue = queue - self.current_callback = callback + def get(self, queue, no_ack=False): + global QUEUES + if not queue in QUEUES or not QUEUES[queue].size(): + return None + (message_data, content_type, content_encoding) = QUEUES[queue].pop() + message = Message(backend=self, body=message_data, + content_type=content_type, + content_encoding=content_encoding) + message.result = True + logging.debug('Getting from %s: %s', queue, message) + return message - def consume(self, *args, **kwargs): - while True: - item = self.get(self.current_queue) - if item: - self.current_callback(item) - raise StopIteration() - greenthread.sleep(0) + def prepare_message(self, message_data, delivery_mode, + content_type, content_encoding, **kwargs): + """Prepare message for sending.""" + return (message_data, content_type, content_encoding) - def get(self, queue, no_ack=False): - if not queue in self._queues or not self._queues[queue].size(): - return None - (message_data, content_type, content_encoding) = \ - self._queues[queue].pop() - message = Message(backend=self, body=message_data, - content_type=content_type, - content_encoding=content_encoding) - message.result = 
True - logging.debug('Getting from %s: %s', queue, message) - return message - - def prepare_message(self, message_data, delivery_mode, - content_type, content_encoding, **kwargs): - """Prepare message for sending.""" - return (message_data, content_type, content_encoding) - - def publish(self, message, exchange, routing_key, **kwargs): - if exchange in self._exchanges: - self._exchanges[exchange].publish( - message, routing_key=routing_key) - - __instance = None - - def __init__(self, *args, **kwargs): - if Backend.__instance is None: - Backend.__instance = Backend.__impl(*args, **kwargs) - self.__dict__['_Backend__instance'] = Backend.__instance - - def __getattr__(self, attr): - return getattr(self.__instance, attr) - - def __setattr__(self, attr, value): - return setattr(self.__instance, attr, value) + def publish(self, message, exchange, routing_key, **kwargs): + global EXCHANGES + if exchange in EXCHANGES: + EXCHANGES[exchange].publish(message, routing_key=routing_key) def reset_all(): - Backend()._reset_all() + global EXCHANGES + global QUEUES + EXCHANGES = {} + QUEUES = {} From 0a2cad0e788fa72ab14f8db35d02791277a7bbdc Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 21 Dec 2010 15:56:12 -0600 Subject: [PATCH 50/55] Style correction --- nova/tests/virt_unittest.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 0cf01638..cb35db1e 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -160,11 +160,11 @@ class LibvirtConnTestCase(test.TestCase): (lambda t: t.find('./memory').text, '2097152')] if rescue: - common_checks += [(lambda t: t.findall( - './devices/disk/source')[0].get('file').split('/')[1], - 'rescue-disk'), (lambda t: t.findall( - './devices/disk/source')[1].get( - 'file').split('/')[1], 'disk')] + common_checks += [ + (lambda t: t.findall('./devices/disk/source')[0].get( + 'file').split('/')[1], 'rescue-disk'), + (lambda t: t.findall('./devices/disk/source')[1].get( + 'file').split('/')[1], 'disk')] else: common_checks += [(lambda t: t.findall( './devices/disk/source')[0].get('file').split('/')[1], From 3bfd33ec0db77a35ab8ff5e5d3299a1e090719ad Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Wed, 22 Dec 2010 13:52:44 +0100 Subject: [PATCH 51/55] Adding me in the Authors file --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index fa38ef0b..0b048bec 100644 --- a/Authors +++ b/Authors @@ -27,6 +27,7 @@ Rick Clark Ryan Lucio Sandy Walsh Soren Hansen +Thierry Carrez Todd Willey Trey Morris Vishvananda Ishaya From 219c736d656a43992b21481b31d81621731383b0 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 22 Dec 2010 20:59:53 +0000 Subject: [PATCH 52/55] merge trunk and upgrade to cheetah templating --- .mailmap | 1 + Authors | 2 + MANIFEST.in | 2 +- nova/adminclient.py | 1 + nova/auth/dbdriver.py | 20 ++--- nova/auth/fakeldap.py | 101 ++++++++++++++++--------- nova/auth/ldapdriver.py | 69 +++++++++-------- nova/auth/manager.py | 30 ++++---- nova/fakerabbit.py | 12 +-- nova/flags.py | 7 +- nova/rpc.py | 34 ++++----- nova/scheduler/simple.py | 13 ++-- nova/tests/auth_unittest.py | 10 +-- nova/tests/compute_unittest.py | 8 ++ nova/tests/virt_unittest.py | 130 +++++++++++++++++++++++++++------ nova/twistd.py | 8 +- 16 files changed, 289 insertions(+), 159 deletions(-) diff --git a/.mailmap b/.mailmap index 2a6eb8d7..8041e234 100644 --- a/.mailmap +++ b/.mailmap @@ -19,6 +19,7 @@ + diff --git a/Authors b/Authors index 
565444ee..fa38ef0b 100644 --- a/Authors +++ b/Authors @@ -6,6 +6,7 @@ Chris Behrens Chmouel Boudjnah Dean Troyer Devin Carlen +Ed Leafe Eldar Nugaev Eric Day Ewan Mellor @@ -14,6 +15,7 @@ Jay Pipes Jesse Andrews Joe Heck Joel Moore +Jonathan Bryce Josh Kearney Joshua McKenty Justin Santa Barbara diff --git a/MANIFEST.in b/MANIFEST.in index 982b727a..199ce30b 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -13,7 +13,7 @@ include nova/cloudpipe/client.ovpn.template include nova/compute/fakevirtinstance.xml include nova/compute/interfaces.template include nova/virt/interfaces.template -include nova/virt/libvirt.*.xml.template +include nova/virt/libvirt*.xml.template include nova/tests/CA/ include nova/tests/CA/cacert.pem include nova/tests/CA/private/ diff --git a/nova/adminclient.py b/nova/adminclient.py index 5a62cce7..6ae9f0c0 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -194,6 +194,7 @@ class HostInfo(object): class NovaAdminClient(object): + def __init__( self, clc_url=DEFAULT_CLC_URL, diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py index a1584322..47e435cb 100644 --- a/nova/auth/dbdriver.py +++ b/nova/auth/dbdriver.py @@ -37,7 +37,6 @@ class DbDriver(object): def __init__(self): """Imports the LDAP module""" pass - db def __enter__(self): return self @@ -83,7 +82,7 @@ class DbDriver(object): user_ref = db.user_create(context.get_admin_context(), values) return self._db_user_to_auth_user(user_ref) except exception.Duplicate, e: - raise exception.Duplicate('User %s already exists' % name) + raise exception.Duplicate(_('User %s already exists') % name) def _db_user_to_auth_user(self, user_ref): return {'id': user_ref['id'], @@ -105,8 +104,9 @@ class DbDriver(object): """Create a project""" manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: - raise exception.NotFound("Project can't be created because " - "manager %s doesn't exist" % manager_uid) + raise exception.NotFound(_("Project can't be created because " + "manager %s doesn't exist") + % manager_uid) # description is a required attribute if description is None: @@ -133,8 +133,8 @@ class DbDriver(object): try: project = db.project_create(context.get_admin_context(), values) except exception.Duplicate: - raise exception.Duplicate("Project can't be created because " - "project %s already exists" % name) + raise exception.Duplicate(_("Project can't be created because " + "project %s already exists") % name) for member in members: db.project_add_member(context.get_admin_context(), @@ -155,8 +155,8 @@ class DbDriver(object): if manager_uid: manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: - raise exception.NotFound("Project can't be modified because " - "manager %s doesn't exist" % + raise exception.NotFound(_("Project can't be modified because " + "manager %s doesn't exist") % manager_uid) values['project_manager'] = manager['id'] if description: @@ -243,8 +243,8 @@ class DbDriver(object): def _validate_user_and_project(self, user_id, project_id): user = db.user_get(context.get_admin_context(), user_id) if not user: - raise exception.NotFound('User "%s" not found' % user_id) + raise exception.NotFound(_('User "%s" not found') % user_id) project = db.project_get(context.get_admin_context(), project_id) if not project: - raise exception.NotFound('Project "%s" not found' % project_id) + raise exception.NotFound(_('Project "%s" not found') % project_id) return user, project diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index 46e0135b..33cd0343 
100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -15,7 +15,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -"""Fake LDAP server for test harness, backs to ReDIS. +"""Fake LDAP server for test harness. This class does very little error checking, and knows nothing about ldap class definitions. It implements the minimum emulation of the python ldap @@ -23,34 +23,65 @@ library to work with nova. """ +import fnmatch import json -import redis - -from nova import flags - -FLAGS = flags.FLAGS -flags.DEFINE_string('redis_host', '127.0.0.1', - 'Host that redis is running on.') -flags.DEFINE_integer('redis_port', 6379, - 'Port that redis is running on.') -flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') -class Redis(object): +class Store(object): def __init__(self): if hasattr(self.__class__, '_instance'): - raise Exception('Attempted to instantiate singleton') + raise Exception(_('Attempted to instantiate singleton')) @classmethod def instance(cls): if not hasattr(cls, '_instance'): - inst = redis.Redis(host=FLAGS.redis_host, - port=FLAGS.redis_port, - db=FLAGS.redis_db) - cls._instance = inst + cls._instance = _StorageDict() return cls._instance +class _StorageDict(dict): + def keys(self, pat=None): + ret = super(_StorageDict, self).keys() + if pat is not None: + ret = fnmatch.filter(ret, pat) + return ret + + def delete(self, key): + try: + del self[key] + except KeyError: + pass + + def flushdb(self): + self.clear() + + def hgetall(self, key): + """Returns the hash for the given key; creates + the hash if the key doesn't exist.""" + try: + return self[key] + except KeyError: + self[key] = {} + return self[key] + + def hget(self, key, field): + hashdict = self.hgetall(key) + try: + return hashdict[field] + except KeyError: + hashdict[field] = {} + return hashdict[field] + + def hset(self, key, field, val): + hashdict = self.hgetall(key) + hashdict[field] = val + + def hmset(self, key, value_dict): + hashdict = self.hgetall(key) + for field, val in value_dict.items(): + hashdict[field] = val + + SCOPE_BASE = 0 SCOPE_ONELEVEL = 1 # Not implemented SCOPE_SUBTREE = 2 @@ -169,8 +200,6 @@ def _to_json(unencoded): class FakeLDAP(object): - #TODO(vish): refactor this class to use a wrapper instead of accessing - # redis directly """Fake LDAP connection.""" def simple_bind_s(self, dn, password): @@ -183,14 +212,13 @@ class FakeLDAP(object): def add_s(self, dn, attr): """Add an object with the specified attributes at dn.""" - key = "%s%s" % (self.__redis_prefix, dn) - + key = "%s%s" % (self.__prefix, dn) value_dict = dict([(k, _to_json(v)) for k, v in attr]) - Redis.instance().hmset(key, value_dict) + Store.instance().hmset(key, value_dict) def delete_s(self, dn): """Remove the ldap object at specified dn.""" - Redis.instance().delete("%s%s" % (self.__redis_prefix, dn)) + Store.instance().delete("%s%s" % (self.__prefix, dn)) def modify_s(self, dn, attrs): """Modify the object at dn using the attribute list. 
@@ -201,18 +229,18 @@ class FakeLDAP(object): ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value) """ - redis = Redis.instance() - key = "%s%s" % (self.__redis_prefix, dn) + store = Store.instance() + key = "%s%s" % (self.__prefix, dn) for cmd, k, v in attrs: - values = _from_json(redis.hget(key, k)) + values = _from_json(store.hget(key, k)) if cmd == MOD_ADD: values.append(v) elif cmd == MOD_REPLACE: values = [v] else: values.remove(v) - values = redis.hset(key, k, _to_json(values)) + values = store.hset(key, k, _to_json(values)) def search_s(self, dn, scope, query=None, fields=None): """Search for all matching objects under dn using the query. @@ -226,16 +254,17 @@ class FakeLDAP(object): """ if scope != SCOPE_BASE and scope != SCOPE_SUBTREE: raise NotImplementedError(str(scope)) - redis = Redis.instance() + store = Store.instance() if scope == SCOPE_BASE: - keys = ["%s%s" % (self.__redis_prefix, dn)] + keys = ["%s%s" % (self.__prefix, dn)] else: - keys = redis.keys("%s*%s" % (self.__redis_prefix, dn)) + keys = store.keys("%s*%s" % (self.__prefix, dn)) + objects = [] for key in keys: - # get the attributes from redis - attrs = redis.hgetall(key) - # turn the values from redis into lists + # get the attributes from the store + attrs = store.hgetall(key) + # turn the values from the store into lists # pylint: disable-msg=E1103 attrs = dict([(k, _from_json(v)) for k, v in attrs.iteritems()]) @@ -244,13 +273,13 @@ class FakeLDAP(object): # filter the attributes by fields attrs = dict([(k, v) for k, v in attrs.iteritems() if not fields or k in fields]) - objects.append((key[len(self.__redis_prefix):], attrs)) + objects.append((key[len(self.__prefix):], attrs)) # pylint: enable-msg=E1103 if objects == []: raise NO_SUCH_OBJECT() return objects @property - def __redis_prefix(self): # pylint: disable-msg=R0201 - """Get the prefix to use for all redis keys.""" + def __prefix(self): # pylint: disable-msg=R0201 + """Get the prefix to use for all keys.""" return 'ldap:' diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index c10939d7..e289ea5a 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -159,7 +159,7 @@ class LdapDriver(object): self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: - raise exception.NotFound("LDAP object for %s doesn't exist" + raise exception.NotFound(_("LDAP object for %s doesn't exist") % name) else: attr = [ @@ -182,11 +182,12 @@ class LdapDriver(object): description=None, member_uids=None): """Create a project""" if self.__project_exists(name): - raise exception.Duplicate("Project can't be created because " - "project %s already exists" % name) + raise exception.Duplicate(_("Project can't be created because " + "project %s already exists") % name) if not self.__user_exists(manager_uid): - raise exception.NotFound("Project can't be created because " - "manager %s doesn't exist" % manager_uid) + raise exception.NotFound(_("Project can't be created because " + "manager %s doesn't exist") + % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) # description is a required attribute if description is None: @@ -195,8 +196,8 @@ class LdapDriver(object): if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Project can't be created " - "because user %s doesn't exist" + raise exception.NotFound(_("Project can't be created " + "because user %s doesn't exist") % member_uid) members.append(self.__uid_to_dn(member_uid)) # always add the manager as 
a member because members is required @@ -218,9 +219,9 @@ class LdapDriver(object): attr = [] if manager_uid: if not self.__user_exists(manager_uid): - raise exception.NotFound("Project can't be modified because " - "manager %s doesn't exist" % - manager_uid) + raise exception.NotFound(_("Project can't be modified because " + "manager %s doesn't exist") + % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn)) if description: @@ -416,8 +417,9 @@ class LdapDriver(object): if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Group can't be created " - "because user %s doesn't exist" % member_uid) + raise exception.NotFound(_("Group can't be created " + "because user %s doesn't exist") + % member_uid) members.append(self.__uid_to_dn(member_uid)) dn = self.__uid_to_dn(uid) if not dn in members: @@ -432,8 +434,9 @@ class LdapDriver(object): def __is_in_group(self, uid, group_dn): """Check if user is in group""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be searched in group " - "becuase the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be searched in group " + "because the user doesn't exist") + % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -444,28 +447,30 @@ class LdapDriver(object): def __add_to_group(self, uid, group_dn): """Add user to group""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be added to the group " - "becuase the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be added to the group " + "because the user doesn't exist") + % uid) if not self.__group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + raise exception.NotFound(_("The group at dn %s doesn't exist") + % group_dn) if self.__is_in_group(uid, group_dn): - raise exception.Duplicate("User %s is already a member of " - "the group %s" % (uid, group_dn)) + raise exception.Duplicate(_("User %s is already a member of " + "the group %s") % (uid, group_dn)) attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))] self.conn.modify_s(group_dn, attr) def __remove_from_group(self, uid, group_dn): """Remove user from group""" if not self.__group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + raise exception.NotFound(_("The group at dn %s doesn't exist") + % group_dn) if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be removed from the " + "group because the user doesn't exist") + % uid) if not self.__is_in_group(uid, group_dn): - raise exception.NotFound("User %s is not a member of the group" % - (uid,)) + raise exception.NotFound(_("User %s is not a member of the group") + % uid) # NOTE(vish): remove user from group and any sub_groups sub_dns = self.__find_group_dns_with_member( group_dn, uid) @@ -479,15 +484,16 @@ class LdapDriver(object): try: self.conn.modify_s(group_dn, attr) except self.ldap.OBJECT_CLASS_VIOLATION: - logging.debug("Attempted to remove the last member of a group. " - "Deleting the group at %s instead.", group_dn) + logging.debug(_("Attempted to remove the last member of a group. 
" + "Deleting the group at %s instead."), group_dn) self.__delete_group(group_dn) def __remove_from_all(self, uid): """Remove user from all roles and projects""" if not self.__user_exists(uid): - raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % (uid,)) + raise exception.NotFound(_("User %s can't be removed from all " + "because the user doesn't exist") + % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -500,7 +506,8 @@ class LdapDriver(object): def __delete_group(self, group_dn): """Delete Group""" if not self.__group_exists(group_dn): - raise exception.NotFound("Group at dn %s doesn't exist" % group_dn) + raise exception.NotFound(_("Group at dn %s doesn't exist") + % group_dn) self.conn.delete_s(group_dn) def __delete_roles(self, project_dn): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 11c3bd6d..417f2b76 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -257,12 +257,12 @@ class AuthManager(object): # TODO(vish): check for valid timestamp (access_key, _sep, project_id) = access.partition(':') - logging.info('Looking up user: %r', access_key) + logging.info(_('Looking up user: %r'), access_key) user = self.get_user_from_access_key(access_key) logging.info('user: %r', user) if user == None: - raise exception.NotFound('No user found for access key %s' % - access_key) + raise exception.NotFound(_('No user found for access key %s') + % access_key) # NOTE(vish): if we stop using project name as id we need better # logic to find a default project for user @@ -271,12 +271,12 @@ class AuthManager(object): project = self.get_project(project_id) if project == None: - raise exception.NotFound('No project called %s could be found' % - project_id) + raise exception.NotFound(_('No project called %s could be found') + % project_id) if not self.is_admin(user) and not self.is_project_member(user, project): - raise exception.NotFound('User %s is not a member of project %s' % - (user.id, project.id)) + raise exception.NotFound(_('User %s is not a member of project %s') + % (user.id, project.id)) if check_type == 's3': sign = signer.Signer(user.secret.encode()) expected_signature = sign.s3_authorization(headers, verb, path) @@ -284,7 +284,7 @@ class AuthManager(object): logging.debug('expected_signature: %s', expected_signature) logging.debug('signature: %s', signature) if signature != expected_signature: - raise exception.NotAuthorized('Signature does not match') + raise exception.NotAuthorized(_('Signature does not match')) elif check_type == 'ec2': # NOTE(vish): hmac can't handle unicode, so encode ensures that # secret isn't unicode @@ -294,7 +294,7 @@ class AuthManager(object): logging.debug('expected_signature: %s', expected_signature) logging.debug('signature: %s', signature) if signature != expected_signature: - raise exception.NotAuthorized('Signature does not match') + raise exception.NotAuthorized(_('Signature does not match')) return (user, project) def get_access_key(self, user, project): @@ -364,7 +364,7 @@ class AuthManager(object): with self.driver() as drv: if role == 'projectmanager': if not project: - raise exception.Error("Must specify project") + raise exception.Error(_("Must specify project")) return self.is_project_manager(user, project) global_role = drv.has_role(User.safe_id(user), @@ -398,9 +398,9 @@ class AuthManager(object): @param project: Project in which to add local role. 
""" if role not in FLAGS.allowed_roles: - raise exception.NotFound("The %s role can not be found" % role) + raise exception.NotFound(_("The %s role can not be found") % role) if project is not None and role in FLAGS.global_roles: - raise exception.NotFound("The %s role is global only" % role) + raise exception.NotFound(_("The %s role is global only") % role) with self.driver() as drv: drv.add_role(User.safe_id(user), role, Project.safe_id(project)) @@ -546,7 +546,8 @@ class AuthManager(object): Project.safe_id(project)) if not network_ref['vpn_public_port']: - raise exception.NotFound('project network data has not been set') + raise exception.NotFound(_('project network data has not ' + 'been set')) return (network_ref['vpn_public_address'], network_ref['vpn_public_port']) @@ -659,8 +660,7 @@ class AuthManager(object): port=vpn_port) zippy.writestr(FLAGS.credential_vpn_file, config) else: - logging.warn("No vpn data for project %s" % - pid) + logging.warn(_("No vpn data for project %s"), pid) zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id)) zippy.close() diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index c6461793..41e686cf 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -37,12 +37,12 @@ class Exchange(object): self._routes = {} def publish(self, message, routing_key=None): - logging.debug('(%s) publish (key: %s) %s', + logging.debug(_('(%s) publish (key: %s) %s'), self.name, routing_key, message) routing_key = routing_key.split('.')[0] if routing_key in self._routes: for f in self._routes[routing_key]: - logging.debug('Publishing to route %s', f) + logging.debug(_('Publishing to route %s'), f) f(message, routing_key=routing_key) def bind(self, callback, routing_key): @@ -82,16 +82,16 @@ class Backend(object): def queue_declare(self, queue, **kwargs): if queue not in self._queues: - logging.debug('Declaring queue %s', queue) + logging.debug(_('Declaring queue %s'), queue) self._queues[queue] = Queue(queue) def exchange_declare(self, exchange, type, *args, **kwargs): if exchange not in self._exchanges: - logging.debug('Declaring exchange %s', exchange) + logging.debug(_('Declaring exchange %s'), exchange) self._exchanges[exchange] = Exchange(exchange, type) def queue_bind(self, queue, exchange, routing_key, **kwargs): - logging.debug('Binding %s to %s with key %s', + logging.debug(_('Binding %s to %s with key %s'), queue, exchange, routing_key) self._exchanges[exchange].bind(self._queues[queue].push, routing_key) @@ -117,7 +117,7 @@ class Backend(object): content_type=content_type, content_encoding=content_encoding) message.result = True - logging.debug('Getting from %s: %s', queue, message) + logging.debug(_('Getting from %s: %s'), queue, message) return message def prepare_message(self, message_data, delivery_mode, diff --git a/nova/flags.py b/nova/flags.py index 87444565..8fa0beb7 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -235,12 +235,11 @@ DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', DEFINE_string('default_image', 'ami-11111', 'default image to use, testing only') -DEFINE_string('default_kernel', 'aki-11111', - 'default kernel to use, testing only') -DEFINE_string('default_ramdisk', 'ari-11111', - 'default ramdisk to use, testing only') DEFINE_string('default_instance_type', 'm1.small', 'default instance type to use, testing only') +DEFINE_string('null_kernel', 'nokernel', + 'kernel image that indicates not to use a kernel,' + ' but to use a raw disk image instead') DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe 
vpn server') DEFINE_string('vpn_key_suffix', diff --git a/nova/rpc.py b/nova/rpc.py index 6a3f552d..6e2cf051 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -91,15 +91,15 @@ class Consumer(messaging.Consumer): self.failed_connection = False break except: # Catching all because carrot sucks - logging.exception("AMQP server on %s:%d is unreachable." \ - " Trying again in %d seconds." % ( + logging.exception(_("AMQP server on %s:%d is unreachable." + " Trying again in %d seconds.") % ( FLAGS.rabbit_host, FLAGS.rabbit_port, FLAGS.rabbit_retry_interval)) self.failed_connection = True if self.failed_connection: - logging.exception("Unable to connect to AMQP server" \ - " after %d tries. Shutting down." % FLAGS.rabbit_max_retries) + logging.exception(_("Unable to connect to AMQP server" + " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): @@ -116,14 +116,14 @@ class Consumer(messaging.Consumer): self.declare() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) if self.failed_connection: - logging.error("Reconnected to queue") + logging.error(_("Reconnected to queue")) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't # exceptions to be logged 10 times a second if some # persistent failure occurs. except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: - logging.exception("Failed to fetch message from queue") + logging.exception(_("Failed to fetch message from queue")) self.failed_connection = True def attach_to_eventlet(self): @@ -153,7 +153,7 @@ class TopicConsumer(Consumer): class AdapterConsumer(TopicConsumer): """Calls methods on a proxy object based on method and args""" def __init__(self, connection=None, topic="broadcast", proxy=None): - LOG.debug('Initing the Adapter Consumer for %s' % (topic)) + LOG.debug(_('Initing the Adapter Consumer for %s') % (topic)) self.proxy = proxy super(AdapterConsumer, self).__init__(connection=connection, topic=topic) @@ -168,7 +168,7 @@ class AdapterConsumer(TopicConsumer): Example: {'method': 'echo', 'args': {'value': 42}} """ - LOG.debug('received %s' % (message_data)) + LOG.debug(_('received %s') % (message_data)) msg_id = message_data.pop('_msg_id', None) ctxt = _unpack_context(message_data) @@ -181,8 +181,8 @@ class AdapterConsumer(TopicConsumer): # messages stay in the queue indefinitely, so for now # we just log the message and send an error string # back to the caller - LOG.warn('no method for message: %s' % (message_data)) - msg_reply(msg_id, 'No method for message: %s' % message_data) + LOG.warn(_('no method for message: %s') % (message_data)) + msg_reply(msg_id, _('No method for message: %s') % message_data) return node_func = getattr(self.proxy, str(method)) @@ -242,7 +242,7 @@ def msg_reply(msg_id, reply=None, failure=None): if failure: message = str(failure[1]) tb = traceback.format_exception(*failure) - logging.error("Returning exception %s to caller", message) + logging.error(_("Returning exception %s to caller"), message) logging.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) conn = Connection.instance() @@ -283,7 +283,7 @@ def _unpack_context(msg): if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value - LOG.debug('unpacked context: %s', context_dict) + LOG.debug(_('unpacked context: %s'), context_dict) return context.RequestContext.from_dict(context_dict) @@ -302,10 +302,10 @@ def _pack_context(msg, context): def call(context, 
topic, msg): """Sends a message on a topic and wait for a response""" - LOG.debug("Making asynchronous call...") + LOG.debug(_("Making asynchronous call...")) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) - LOG.debug("MSG_ID is %s" % (msg_id)) + LOG.debug(_("MSG_ID is %s") % (msg_id)) _pack_context(msg, context) class WaitMessage(object): @@ -353,7 +353,7 @@ def cast(context, topic, msg): def generic_response(message_data, message): """Logs a result and exits""" - LOG.debug('response %s', message_data) + LOG.debug(_('response %s'), message_data) message.ack() sys.exit(0) @@ -362,8 +362,8 @@ def send_message(topic, message, wait=True): """Sends a message for testing""" msg_id = uuid.uuid4().hex message.update({'_msg_id': msg_id}) - LOG.debug('topic is %s', topic) - LOG.debug('message %s', message) + LOG.debug(_('topic is %s'), topic) + LOG.debug(_('message %s'), message) if wait: consumer = messaging.Consumer(connection=Connection.instance(), diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index 7f509365..f9171ab3 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -47,7 +47,7 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, instance_cores) = result if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores: - raise driver.NoValidHost("All hosts have too many cores") + raise driver.NoValidHost(_("All hosts have too many cores")) if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow @@ -57,7 +57,7 @@ class SimpleScheduler(chance.ChanceScheduler): {'host': service['host'], 'scheduled_at': now}) return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): """Picks a host that is up and has the fewest volumes.""" @@ -66,7 +66,8 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, volume_gigabytes) = result if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes: - raise driver.NoValidHost("All hosts have too many gigabytes") + raise driver.NoValidHost(_("All hosts have too many " + "gigabytes")) if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow @@ -76,7 +77,7 @@ class SimpleScheduler(chance.ChanceScheduler): {'host': service['host'], 'scheduled_at': now}) return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) def schedule_set_network_host(self, context, *_args, **_kwargs): """Picks a host that is up and has the fewest networks.""" @@ -85,7 +86,7 @@ class SimpleScheduler(chance.ChanceScheduler): for result in results: (service, instance_count) = result if instance_count >= FLAGS.max_networks: - raise driver.NoValidHost("All hosts have too many networks") + raise driver.NoValidHost(_("All hosts have too many networks")) if self.service_is_up(service): return service['host'] - raise driver.NoValidHost("No hosts found") + raise driver.NoValidHost(_("No hosts found")) diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 4508d672..61ae43fb 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -333,14 +333,10 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): AuthManagerTestCase.__init__(self) test.TestCase.__init__(self, *args, **kwargs) import nova.auth.fakeldap as fakeldap - FLAGS.redis_db = 8 
if FLAGS.flush_db: - logging.info("Flushing redis datastore") - try: - r = fakeldap.Redis.instance() - r.flushdb() - except: - self.skip = True + logging.info("Flushing datastore") + r = fakeldap.Store.instance() + r.flushdb() class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase): diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index c6353d35..187ca31d 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -127,6 +127,14 @@ class ComputeTestCase(test.TestCase): self.assert_(instance_ref['launched_at'] < terminate) self.assert_(instance_ref['deleted_at'] > terminate) + def test_pause(self): + """Ensure instance can be paused""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.compute.pause_instance(self.context, instance_id) + self.compute.unpause_instance(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) + def test_reboot(self): """Ensure instance can be rebooted""" instance_id = self._create_instance() diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index d190cdab..7682f966 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -40,19 +40,51 @@ class LibvirtConnTestCase(test.TestCase): self.network = utils.import_object(FLAGS.network_manager) FLAGS.instances_path = '' - def test_get_uri_and_template(self): - ip = '10.11.12.13' + test_ip = '10.11.12.13' + test_instance = {'memory_kb': '1024000', + 'basepath': '/some/path', + 'bridge_name': 'br100', + 'mac_address': '02:12:34:46:56:67', + 'vcpus': 2, + 'project_id': 'fake', + 'bridge': 'br101', + 'instance_type': 'm1.small'} - instance = {'internal_id': 1, - 'memory_kb': '1024000', - 'basepath': '/some/path', - 'bridge_name': 'br100', - 'mac_address': '02:12:34:46:56:67', - 'vcpus': 2, - 'project_id': 'fake', - 'bridge': 'br101', - 'instance_type': 'm1.small'} + def test_xml_and_uri_no_ramdisk_no_kernel(self): + instance_data = dict(self.test_instance) + self.do_test_xml_and_uri(instance_data, + expect_kernel=False, expect_ramdisk=False) + def test_xml_and_uri_no_ramdisk(self): + instance_data = dict(self.test_instance) + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=False) + + def test_xml_and_uri_no_kernel(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=False, expect_ramdisk=False) + + def test_xml_and_uri(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=True) + + def test_xml_and_uri_rescue(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + instance_data['kernel_id'] = 'aki-deadbeef' + self.do_test_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=True, + rescue=True) + + def do_test_xml_and_uri(self, instance, + expect_ramdisk, expect_kernel, + rescue=False): user_context = context.RequestContext(project=self.project, user=self.user) instance_ref = db.instance_create(user_context, instance) @@ -60,13 +92,14 @@ class LibvirtConnTestCase(test.TestCase): self.network.set_network_host(context.get_admin_context(), network_ref['id']) - fixed_ip = {'address': ip, + fixed_ip = {'address': self.test_ip, 'network_id': 
network_ref['id']} ctxt = context.get_admin_context() fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) - db.fixed_ip_update(ctxt, ip, {'allocated': True, - 'instance_id': instance_ref['id']}) + db.fixed_ip_update(ctxt, self.test_ip, + {'allocated': True, + 'instance_id': instance_ref['id']}) type_uri_map = {'qemu': ('qemu:///system', [(lambda t: t.find('.').get('type'), 'qemu'), @@ -78,23 +111,73 @@ class LibvirtConnTestCase(test.TestCase): (lambda t: t.find('./devices/emulator'), None)]), 'uml': ('uml:///system', [(lambda t: t.find('.').get('type'), 'uml'), - (lambda t: t.find('./os/type').text, 'uml')])} + (lambda t: t.find('./os/type').text, 'uml')]), + 'xen': ('xen:///', + [(lambda t: t.find('.').get('type'), 'xen'), + (lambda t: t.find('./os/type').text, 'linux')]), + } + + for hypervisor_type in ['qemu', 'kvm', 'xen']: + check_list = type_uri_map[hypervisor_type][1] + + if rescue: + check = (lambda t: t.find('./os/kernel').text.split('/')[1], + 'rescue-kernel') + check_list.append(check) + check = (lambda t: t.find('./os/initrd').text.split('/')[1], + 'rescue-ramdisk') + check_list.append(check) + else: + if expect_kernel: + check = (lambda t: t.find('./os/kernel').text.split( + '/')[1], 'kernel') + else: + check = (lambda t: t.find('./os/kernel'), None) + check_list.append(check) + + if expect_ramdisk: + check = (lambda t: t.find('./os/initrd').text.split( + '/')[1], 'ramdisk') + else: + check = (lambda t: t.find('./os/initrd'), None) + check_list.append(check) common_checks = [ (lambda t: t.find('.').tag, 'domain'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('name'), 'IP'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('value'), '10.11.12.13')] + (lambda t: t.find( + './devices/interface/filterref/parameter').get('name'), 'IP'), + (lambda t: t.find( + './devices/interface/filterref/parameter').get( + 'value'), '10.11.12.13'), + (lambda t: t.findall( + './devices/interface/filterref/parameter')[1].get( + 'name'), 'DHCPSERVER'), + (lambda t: t.findall( + './devices/interface/filterref/parameter')[1].get( + 'value'), '10.0.0.1'), + (lambda t: t.find('./devices/serial/source').get( + 'path').split('/')[1], 'console.log'), + (lambda t: t.find('./memory').text, '2097152')] + + if rescue: + common_checks += [ + (lambda t: t.findall('./devices/disk/source')[0].get( + 'file').split('/')[1], 'rescue-disk'), + (lambda t: t.findall('./devices/disk/source')[1].get( + 'file').split('/')[1], 'disk')] + else: + common_checks += [(lambda t: t.findall( + './devices/disk/source')[0].get('file').split('/')[1], + 'disk')] for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type conn = libvirt_conn.LibvirtConnection(True) - uri, _template, _rescue = conn.get_uri_and_templates() + uri = conn.get_uri() self.assertEquals(uri, expected_uri) - xml = conn.to_xml(instance_ref) + xml = conn.to_xml(instance_ref, rescue) tree = xml_to_tree(xml) for i, (check, expected_result) in enumerate(checks): self.assertEqual(check(tree), @@ -106,6 +189,9 @@ class LibvirtConnTestCase(test.TestCase): expected_result, '%s failed common check %d' % (xml, i)) + # This test is supposed to make sure we don't override a specifically + # set uri + # # Deliberately not just assigning this string to FLAGS.libvirt_uri and # checking against that later on. This way we make sure the # implementation doesn't fiddle around with the FLAGS. 
@@ -114,7 +200,7 @@ class LibvirtConnTestCase(test.TestCase): for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type conn = libvirt_conn.LibvirtConnection(True) - uri, _template, _rescue = conn.get_uri_and_templates() + uri = conn.get_uri() self.assertEquals(uri, testuri) def tearDown(self): diff --git a/nova/twistd.py b/nova/twistd.py index cb5648ce..29be9c4e 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -43,7 +43,7 @@ else: FLAGS = flags.FLAGS -flags.DEFINE_string('logdir', None, 'directory to keep log files in ' +flags.DEFINE_string('logdir', None, 'directory to keep log files in ' '(will be prepended to $logfile)') @@ -208,7 +208,7 @@ def stop(pidfile): pid = None if not pid: - message = "pidfile %s does not exist. Daemon not running?\n" + message = _("pidfile %s does not exist. Daemon not running?\n") sys.stderr.write(message % pidfile) # Not an error in a restart return @@ -229,7 +229,7 @@ def stop(pidfile): def serve(filename): - logging.debug("Serving %s" % filename) + logging.debug(_("Serving %s") % filename) name = os.path.basename(filename) OptionsClass = WrapTwistedOptions(TwistdServerOptions) options = OptionsClass() @@ -281,7 +281,7 @@ def serve(filename): else: logging.getLogger().setLevel(logging.WARNING) - logging.debug("Full set of FLAGS:") + logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) From 03a7164df99268d574665b8d7c737d24b33c2dc6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 22 Dec 2010 21:55:11 +0000 Subject: [PATCH 53/55] pep8 fix --- nova/fakerabbit.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 42daa976..79d8b894 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -28,6 +28,7 @@ from eventlet import greenthread EXCHANGES = {} QUEUES = {} + class Message(base.BaseMessage): pass From 7e9e072cbec3a8af79895f02a3dc645df699eb8b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 23 Dec 2010 01:32:57 +0000 Subject: [PATCH 54/55] fix commits from Anthony and Vish that were committed with the wrong email --- .mailmap | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.mailmap b/.mailmap index 8041e234..c01f964d 100644 --- a/.mailmap +++ b/.mailmap @@ -24,7 +24,7 @@ + + # These are from people who failed to set a proper committer -. . -. From dfaffd1d21f93304fb9be76e2d27a504f1305cd5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 23 Dec 2010 02:03:39 +0000 Subject: [PATCH 55/55] Add Ryan Lane as well --- .mailmap | 3 +-- Authors | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.mailmap b/.mailmap index c01f964d..9ab7db74 100644 --- a/.mailmap +++ b/.mailmap @@ -26,5 +26,4 @@ -# These are from people who failed to set a proper committer -. + diff --git a/Authors b/Authors index 0b048bec..250b3b2a 100644 --- a/Authors +++ b/Authors @@ -24,6 +24,7 @@ Michael Gundlach Monty Taylor Paul Voccio Rick Clark +Ryan Lane Ryan Lucio Sandy Walsh Soren Hansen
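
The fakerabbit change above (patch 49, carried through the trunk merge in patch 52) drops the singleton Backend.__impl wrapper in favour of module-level EXCHANGES and QUEUES dictionaries: every Backend instance reads the same registries, and reset_all() simply rebinds the globals between tests. The following sketch is not the nova module itself, only a minimal, simplified illustration of that shared-registry pattern (FakeBackend and its method bodies are stand-ins):

    # Simplified stand-in for nova.fakerabbit after patch 49; the shared
    # state lives at module level rather than inside a singleton object.
    EXCHANGES = {}   # exchange name -> {routing_key: [callbacks]}
    QUEUES = {}      # queue name -> list of pending messages


    class FakeBackend(object):
        def exchange_declare(self, exchange, type, *args, **kwargs):
            EXCHANGES.setdefault(exchange, {})

        def queue_declare(self, queue, **kwargs):
            QUEUES.setdefault(queue, [])

        def queue_bind(self, queue, exchange, routing_key, **kwargs):
            # Bind by registering the queue's append as a route callback.
            EXCHANGES[exchange].setdefault(routing_key, []).append(
                QUEUES[queue].append)

        def publish(self, message, exchange, routing_key, **kwargs):
            for push in EXCHANGES.get(exchange, {}).get(routing_key, []):
                push(message)

        def get(self, queue):
            return QUEUES[queue].pop(0) if QUEUES.get(queue) else None


    def reset_all():
        # Rebinding the globals gives each test a clean broker.
        global EXCHANGES, QUEUES
        EXCHANGES = {}
        QUEUES = {}

Two independent FakeBackend() objects (one on the producer side, one on the consumer side) now observe the same queues, which is the behaviour the carrot test double needs without the __getattr__/__setattr__ forwarding of the old singleton.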
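
The fakeldap rework in patch 52 moves in the same direction for the auth tests: the ReDIS client is gone, replaced by a Store singleton backed by a dict subclass that fakes only the handful of redis-py operations the fake LDAP driver actually calls (glob-filtered keys(), hget/hset/hmset/hgetall, delete, flushdb). A rough, self-contained version of that shim, written here from the operations visible in the diff rather than copied from nova, looks like:

    import fnmatch


    class StorageDict(dict):
        """Emulates the small slice of the redis API that fakeldap uses."""

        def keys(self, pat=None):
            keys = list(super(StorageDict, self).keys())
            return fnmatch.filter(keys, pat) if pat is not None else keys

        def hgetall(self, key):
            return self.setdefault(key, {})      # auto-create missing hashes

        def hget(self, key, field):
            return self.hgetall(key).get(field)

        def hset(self, key, field, val):
            self.hgetall(key)[field] = val

        def hmset(self, key, value_dict):
            self.hgetall(key).update(value_dict)

        def delete(self, key):
            self.pop(key, None)

        def flushdb(self):
            self.clear()


    store = StorageDict()
    store.hmset('ldap:uid=fake,ou=Users', {'uid': '["fake"]'})
    assert store.hget('ldap:uid=fake,ou=Users', 'uid') == '["fake"]'
    assert store.keys('ldap:*') == ['ldap:uid=fake,ou=Users']

Keeping the data in an ordinary in-process dict is also what allows the redis_host/redis_port/redis_db flags to be deleted and lets AuthManagerLdapTestCase flush the datastore without the old try/except fallback that skipped the test when redis was unreachable.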
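
Finally, the expanded LibvirtConnTestCase in patch 52 drives a single do_test_xml_and_uri() helper from lists of (extractor, expected) tuples applied to the parsed libvirt XML, instead of hand-writing one assertion per hypervisor type. The toy fragment below only demonstrates that checking idiom; the XML document and the particular checks are invented for the example and are far smaller than the real template output:

    from xml.etree import ElementTree

    xml = """
    <domain type='kvm'>
      <os><type>hvm</type><kernel>/somepath/kernel</kernel></os>
      <memory>2097152</memory>
    </domain>
    """

    tree = ElementTree.fromstring(xml)

    checks = [
        (lambda t: t.find('.').get('type'), 'kvm'),
        (lambda t: t.find('./os/type').text, 'hvm'),
        (lambda t: t.find('./os/kernel').text.split('/')[-1], 'kernel'),
        (lambda t: t.find('./memory').text, '2097152'),
    ]

    for i, (check, expected) in enumerate(checks):
        assert check(tree) == expected, 'check %d failed' % i

Because each check is just a callable plus an expected value, the kernel/ramdisk/rescue variations in the test reduce to appending or swapping tuples in check_list and common_checks rather than duplicating the parsing and assertion code.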