Use shade for all OpenStack interactions
We wrote shade as an extraction of the logic we had in nodepool, and have
since expanded it to support more clouds. It's time to start using it in
nodepool, since that will allow us to add more clouds and also to handle a
wider variety of them.

Making a patch series was too tricky because of the way fakes and threading
work, so this is everything in one stab.

Depends-On: I557694b3931d81a3524c781ab5dabfb5995557f5
Change-Id: I423716d619aafb2eca5c1748bc65b38603a97b6a
Co-Authored-By: James E. Blair <jeblair@linux.vnet.ibm.com>
Co-Authored-By: David Shrewsbury <shrewsbury.dave@gmail.com>
Co-Authored-By: Yolanda Robla <yolanda.robla-mota@hpe.com>

Committed by: Yolanda Robla
Parent: afdd58c10a
Commit: e1f4a12949
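
This change replaces nodepool's hand-rolled novaclient Task classes with
direct calls on a shade.OpenStackCloud client. As a rough sketch of the new
call pattern (illustrative only, not code from this patch; assumes a cloud
named 'mycloud' defined in clouds.yaml):

    import shade

    # Operations that used to be Task subclasses submitted through the
    # TaskManager become plain method calls on the shade client, which
    # does its own caching and wraps errors in OpenStackCloudException.
    client = shade.openstack_cloud(cloud='mycloud')
    for server in client.list_servers():
        print(server['id'], server['status'])
    image = client.get_image('Fake Precise')  # shade returns None when not found
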
@@ -97,7 +97,7 @@ function nodepool_write_config {
 keys=simple
 
 [loggers]
-keys=root,nodepool
+keys=root,nodepool,shade
 
 [handlers]
 keys=console
@@ -112,6 +112,12 @@ handlers=console
 qualname=nodepool
 propagate=0
 
+[logger_shade]
+level=DEBUG
+handlers=console
+qualname=shade
+propagate=0
+
 [handler_console]
 level=DEBUG
 class=StreamHandler
@@ -347,7 +347,7 @@ class NodePoolBuilder(object):
 
         image_file = image_files[0]
         filename = image_file.to_path(self._config.imagesdir,
-                                      with_extension=False)
+                                      with_extension=True)
 
         dummy_image = type('obj', (object,),
                            {'name': image_name})
@@ -364,13 +364,11 @@ class NodePoolBuilder(object):
                 "Could not find matching provider image for %s", image_name
             )
         image_meta = provider_image.meta
-        external_id = manager.uploadImage(ext_image_name, filename,
-                                          image_file.extension, 'bare',
-                                          image_meta)
-        self.log.info("Saving image id: %s with external id %s" %
-                      (image_id, external_id))
-        # It can take a _very_ long time for Rackspace 1.0 to save an image
-        manager.waitForImage(external_id, IMAGE_TIMEOUT)
+        # uploadImage is synchronous
+        external_id = manager.uploadImage(
+            ext_image_name, filename,
+            image_type=image_file.extension,
+            meta=image_meta)
 
         if self.statsd:
             dt = int((time.time() - start_time) * 1000)
@@ -145,7 +145,7 @@ def loadConfig(config_path):
         p.region_name = provider.get('region-name')
         p.max_servers = provider['max-servers']
         p.keypair = provider.get('keypair', None)
-        p.pool = provider.get('pool')
+        p.pool = provider.get('pool', None)
         p.rate = provider.get('rate', 1.0)
         p.api_timeout = provider.get('api-timeout')
         p.boot_timeout = provider.get('boot-timeout', 60)
@@ -171,7 +171,8 @@ def loadConfig(config_path):
             'template-hostname',
             'template-{image.name}-{timestamp}'
         )
-        p.image_type = provider.get('image-type', 'qcow2')
+        p.image_type = provider.get(
+            'image-type', p.cloud_config.config['image_format'])
         p.images = {}
         for image in provider['images']:
             i = ProviderImage()
@@ -195,7 +196,7 @@ def loadConfig(config_path):
             # custom properties when the image is uploaded.
             i.meta = image.get('meta', {})
             # 5 elements, and no key or value can be > 255 chars
-            # per novaclient.servers.create() rules
+            # per Nova API rules
             if i.meta:
                 if len(i.meta) > 5 or \
                     any([len(k) > 255 or len(v) > 255
@@ -39,3 +39,7 @@ class IPAddTimeoutException(TimeoutException):
 
 class ServerDeleteException(TimeoutException):
     statsd_key = 'error.serverdelete'
+
+
+class ImageCreateException(TimeoutException):
+    statsd_key = 'error.imagetimeout'
@@ -16,27 +16,33 @@
 
 import StringIO
 import logging
-import novaclient
-import requests.exceptions
 import threading
 import time
 import uuid
 
-import exceptions
 from jenkins import JenkinsException
 import shade
 
+import exceptions
+
 
 class Dummy(object):
     IMAGE = 'Image'
     INSTANCE = 'Instance'
     FLAVOR = 'Flavor'
+    KEYPAIR = 'Keypair'
 
     def __init__(self, kind, **kw):
         self.__kind = kind
         self.__kw = kw
         for k, v in kw.items():
             setattr(self, k, v)
+        try:
+            if self.should_fail:
+                raise shade.OpenStackCloudException('This image has '
+                                                    'SHOULD_FAIL set to True.')
+        except AttributeError:
+            pass
 
     def __repr__(self):
         args = []
@@ -45,16 +51,17 @@ class Dummy(object):
         args = ' '.join(args)
         return '<%s %s %s>' % (self.__kind, id(self), args)
 
-    def delete(self):
-        self.manager.delete(self)
+    def __getitem__(self, key, default=None):
+        return getattr(self, key, default)
 
-    def update(self, data):
-        try:
-            if self.should_fail:
-                raise shade.OpenStackCloudException('This image has '
-                                                    'SHOULD_FAIL set to True.')
-        except AttributeError:
-            pass
+    def __setitem__(self, key, value):
+        setattr(self, key, value)
+
+    def get(self, key, default=None):
+        return getattr(self, key, default)
+
+    def set(self, key, value):
+        setattr(self, key, value)
 
 
 def fake_get_one_cloud(cloud_config, cloud_kwargs):
@@ -62,47 +69,47 @@ def fake_get_one_cloud(cloud_config, cloud_kwargs):
     return cloud_config.get_one_cloud(**cloud_kwargs)
 
 
-class FakeList(object):
-    log = logging.getLogger("nodepool.FakeList")
+class FakeOpenStackCloud(object):
+    log = logging.getLogger("nodepool.FakeOpenStackCloud")
 
-    def __init__(self, l):
-        self._list = l
+    def __init__(self, images=None, networks=None):
+        self._image_list = images
+        if self._image_list is None:
+            self._image_list = [
+                Dummy(
+                    Dummy.IMAGE,
+                    id='fake-image-id',
+                    status='READY',
+                    name='Fake Precise',
+                    metadata={})
+            ]
+        if networks is None:
+            networks = [dict(id='fake-public-network-uuid',
+                             name='fake-public-network-name'),
+                        dict(id='fake-private-network-uuid',
+                             name='fake-private-network-name')]
+        self.networks = networks
+        self._flavor_list = [
+            Dummy(Dummy.FLAVOR, id='f1', ram=8192, name='Fake Flavor'),
+            Dummy(Dummy.FLAVOR, id='f2', ram=8192, name='Unreal Flavor'),
+        ]
+        self._server_list = []
+        self._keypair_list = []
 
-    def list(self):
-        self.log.debug("List %s" % repr(self._list))
-        return self._list
+    def _get(self, name_or_id, instance_list):
+        self.log.debug("Get %s in %s" % (name_or_id, repr(instance_list)))
+        for instance in instance_list:
+            if instance.name == name_or_id or instance.id == name_or_id:
+                return instance
+        return None
 
-    def find(self, name):
-        for x in self._list:
-            if x.name == name:
-                return x
+    def get_network(self, name_or_id, filters=None):
+        return dict(id='fake-network-uuid',
+                    name='fake-network-name')
 
-    def get(self, image=None):
-        if image:
-            id = image
-        self.log.debug("Get %s in %s" % (id, repr(self._list)))
-        for x in self._list:
-            if x.id == id:
-                return x
-        raise novaclient.exceptions.NotFound(404)
-
-    def _finish(self, obj, delay, status):
-        time.sleep(delay)
-        obj.status = status
-
-    def delete(self, *args, **kw):
-        self.log.debug("Delete from %s" % repr(self._list))
-        if 'image' in kw:
-            self._list.remove(self.get(kw['image']))
-        else:
-            obj = args[0]
-            if hasattr(obj, 'id'):
-                self._list.remove(obj)
-            else:
-                self._list.remove(self.get(obj))
-        self.log.debug("Deleted from %s" % repr(self._list))
-
-    def create(self, **kw):
+    def _create(
+            self, instance_list, instance_type=Dummy.INSTANCE,
+            done_status='ACTIVE', **kw):
         should_fail = kw.get('SHOULD_FAIL', '').lower() == 'true'
         nics = kw.get('nics', [])
         addresses = None
@@ -116,112 +123,114 @@ class FakeList(object):
                          dict(version=6, addr='fake_v6')],
                 private=[dict(version=4, addr='fake')]
             )
+            public_v6 = 'fake_v6'
+            public_v4 = 'fake'
+            private_v4 = 'fake'
             break
         if not addresses:
             addresses = dict(
                 public=[dict(version=4, addr='fake')],
                 private=[dict(version=4, addr='fake')]
             )
-        s = Dummy(Dummy.INSTANCE,
+            public_v6 = ''
+            public_v4 = 'fake'
+            private_v4 = 'fake'
+
+        s = Dummy(instance_type,
                   id=uuid.uuid4().hex,
                   name=kw['name'],
                   status='BUILD',
                   adminPass='fake',
                   addresses=addresses,
+                  public_v4=public_v4,
+                  public_v6=public_v6,
+                  private_v4=private_v4,
                   metadata=kw.get('meta', {}),
                   manager=self,
+                  key_name=kw.get('key_name', None),
                   should_fail=should_fail)
-        self._list.append(s)
+        instance_list.append(s)
         t = threading.Thread(target=self._finish,
                              name='FakeProvider create',
-                             args=(s, 0.1, 'ACTIVE'))
+                             args=(s, 0.1, done_status))
         t.start()
         return s
 
-    def create_image(self, server, image_name, metadata):
+    def _delete(self, name_or_id, instance_list):
+        self.log.debug("Delete from %s" % (repr(instance_list),))
+        instance = None
+        for maybe in instance_list:
+            if maybe.name == name_or_id or maybe.id == name_or_id:
+                instance = maybe
+        if instance:
+            instance_list.remove(instance)
+        self.log.debug("Deleted from %s" % (repr(instance_list),))
+
+    def _finish(self, obj, delay, status):
+        time.sleep(delay)
+        obj.status = status
+
+    def create_image(self, **kwargs):
+        return self._create(
+            self._image_list, instance_type=Dummy.IMAGE,
+            done_status='READY', **kwargs)
+
+    def get_image(self, name_or_id):
+        return self._get(name_or_id, self._image_list)
+
+    def list_images(self):
+        return self._image_list
+
+    def delete_image(self, name_or_id):
+        self._delete(name_or_id, self._image_list)
+
+    def create_image_snapshot(self, server, image_name, **metadata):
         # XXX : validate metadata?
-        x = self.api.images.create(name=image_name)
-        return x.id
+        return self._create(
+            self._image_list, instance_type=Dummy.IMAGE,
+            name=image_name, **metadata)
 
+    def list_flavors(self):
+        return self._flavor_list
 
-class FakeHTTPClient(object):
-    def get(self, path):
-        if path == '/extensions':
-            return None, dict(extensions=dict())
+    def create_keypair(self, name, public_key):
+        return self._create(
+            self._image_list, instance_type=Dummy.KEYPAIR,
+            name=name, public_key=public_key)
 
+    def list_keypairs(self):
+        return self._keypair_list
 
-class BadHTTPClient(object):
-    '''Always raises a ProxyError'''
-    def get(self, path):
-        raise requests.exceptions.ProxyError
+    def delete_keypair(self, name):
+        self._delete(name, self._keypair_list)
 
+    def get_openstack_vars(self, server):
+        server.public_v4 = 'fake'
+        server.public_v6 = 'fake'
+        server.private_v4 = 'fake'
+        return server
 
-class FakeClient(object):
-    def __init__(self, images, *args, **kwargs):
-        self.flavors = FakeList([
-            Dummy(Dummy.FLAVOR, id='f1', ram=8192, name='Fake Flavor'),
-            Dummy(Dummy.FLAVOR, id='f2', ram=8192, name='Unreal Flavor'),
-        ])
-        self.images = images
-        self.client = FakeHTTPClient()
-        self.servers = FakeList([])
-        self.servers.api = self
+    def create_server(self, **kw):
+        return self._create(self._server_list, **kw)
 
+    def get_server(self, name_or_id):
+        result = self._get(name_or_id, self._server_list)
+        return result
 
-class BadClient(FakeClient):
-    def __init__(self, images):
-        super(BadClient, self).__init__(images)
-        self.client = BadHTTPClient()
+    def wait_for_server(self, server, **kwargs):
+        server.status = 'ACTIVE'
+        return server
 
+    def list_servers(self):
+        return self._server_list
 
-class BadOpenstackCloud(object):
-    def __init__(self, images=None):
-        if images is None:
-            images = FakeList([Dummy(Dummy.IMAGE,
-                                     id='fake-image-id',
-                                     status='READY',
-                                     name='Fake Precise',
-                                     metadata={})])
-        self.nova_client = BadClient(images)
-
-
-class FakeGlanceClient(object):
-    def __init__(self, images, **kwargs):
-        self.kwargs = kwargs
-        self.images = images
-
-
-class FakeNeutronClient(object):
-    def __init__(self, networks=None):
-        if networks is None:
-            networks = [dict(id='fake-public-network-uuid',
-                             name='fake-public-network-name'),
-                        dict(id='fake-private-network-uuid',
-                             name='fake-private-network-name')]
-        self.networks = networks
+    def delete_server(self, name_or_id, delete_ips=True):
+        self._delete(name_or_id, self._server_list)
 
     def list_networks(self):
         return dict(networks=self.networks)
 
 
-class FakeOpenStackCloud(object):
-    def __init__(self, images=None):
-        if images is None:
-            images = FakeList([Dummy(Dummy.IMAGE,
-                                     id='fake-image-id',
-                                     status='READY',
-                                     name='Fake Precise',
-                                     metadata={})])
-        self.nova_client = FakeClient(images)
-        self._glance_client = FakeGlanceClient(images)
-        self.neutron_client = FakeNeutronClient()
-
-    def create_image(self, **kwargs):
-        image = self._glance_client.images.create(**kwargs)
-        image.update('fake data')
-        return image
-
 
 class FakeUploadFailCloud(FakeOpenStackCloud):
     def create_image(self, **kwargs):
         raise exceptions.BuilderError("Test fail image upload.")
@@ -470,18 +470,19 @@ class NodeLauncher(threading.Thread):
         self.log.info("Creating server with hostname %s in %s from image %s "
                       "for node id: %s" % (hostname, self.provider.name,
                                            self.image.name, self.node_id))
-        server_id = self.manager.createServer(
+        server = self.manager.createServer(
             hostname, self.image.min_ram, snap_image.external_id,
             name_filter=self.image.name_filter, az=self.node.az,
            config_drive=self.image.config_drive,
            nodepool_node_id=self.node_id,
            nodepool_image_name=self.image.name)
+        server_id = server['id']
         self.node.external_id = server_id
         session.commit()
 
         self.log.debug("Waiting for server %s for node id: %s" %
                        (server_id, self.node.id))
-        server = self.manager.waitForServer(server_id, self.launch_timeout)
+        server = self.manager.waitForServer(server, self.launch_timeout)
         if server['status'] != 'ACTIVE':
             raise LaunchStatusException("Server %s for node id: %s "
                                         "status: %s" %
@@ -496,9 +497,6 @@ class NodeLauncher(threading.Thread):
             else:
                 self.log.warning('Preferred ipv6 not available, '
                                  'falling back to ipv4.')
-        if not ip and self.manager.hasExtension('os-floating-ips'):
-            ip = self.manager.addPublicIP(server_id,
-                                          pool=self.provider.pool)
         if not ip:
             self.log.debug(
                 "Server data for failed IP: %s" % pprint.pformat(
@@ -763,19 +761,20 @@ class SubNodeLauncher(threading.Thread):
                       "for subnode id: %s for node id: %s"
                       % (hostname, self.provider.name,
                          self.image.name, self.subnode_id, self.node_id))
-        server_id = self.manager.createServer(
+        server = self.manager.createServer(
            hostname, self.image.min_ram, snap_image.external_id,
            name_filter=self.image.name_filter, az=self.node_az,
            config_drive=self.image.config_drive,
            nodepool_node_id=self.node_id,
            nodepool_image_name=self.image.name)
+        server_id = server['id']
         self.subnode.external_id = server_id
         session.commit()
 
         self.log.debug("Waiting for server %s for subnode id: %s for "
                        "node id: %s" %
                        (server_id, self.subnode_id, self.node_id))
-        server = self.manager.waitForServer(server_id, self.launch_timeout)
+        server = self.manager.waitForServer(server, self.launch_timeout)
         if server['status'] != 'ACTIVE':
             raise LaunchStatusException("Server %s for subnode id: "
                                         "%s for node id: %s "
@@ -791,9 +790,6 @@ class SubNodeLauncher(threading.Thread):
             else:
                 self.log.warning('Preferred ipv6 not available, '
                                  'falling back to ipv4.')
-        if not ip and self.manager.hasExtension('os-floating-ips'):
-            ip = self.manager.addPublicIP(server_id,
-                                          pool=self.provider.pool)
         if not ip:
             raise LaunchNetworkException("Unable to find public IP of server")
 
@@ -908,11 +904,12 @@ class SnapshotImageUpdater(ImageUpdater):
             key_name = self.provider.keypair
             key = None
             use_password = False
-        elif self.manager.hasExtension('os-keypairs'):
+        else:
+            try:
                 key_name = hostname.split('.')[0]
                 key = self.manager.addKeypair(key_name)
                 use_password = False
-        else:
+            except Exception:
                 key_name = None
                 key = None
                 use_password = True
@@ -926,20 +923,18 @@ class SnapshotImageUpdater(ImageUpdater):
         image_name = self.image.base_image
         image_id = None
         try:
-            server_id = self.manager.createServer(
+            server = self.manager.createServer(
                 hostname, self.image.min_ram, image_name=image_name,
                 key_name=key_name, name_filter=self.image.name_filter,
                 image_id=image_id, config_drive=self.image.config_drive,
                 nodepool_snapshot_image_id=self.snap_image.id)
+            server_id = server['id']
         except Exception:
-            if (self.manager.hasExtension('os-keypairs') and
-                not self.provider.keypair):
-                for kp in self.manager.listKeypairs():
-                    if kp['name'] == key_name:
-                        self.log.debug(
-                            'Deleting keypair for failed image build %s' %
-                            self.snap_image.id)
-                        self.manager.deleteKeypair(kp['name'])
+            if self.manager.deleteKeypair(key_name):
+                # Only log success - failure is logged inside of shade
+                self.log.debug(
+                    'Deleted keypair for failed image build %s' %
+                    self.snap_image.id)
             raise
 
         self.snap_image.hostname = hostname
@@ -949,7 +944,7 @@ class SnapshotImageUpdater(ImageUpdater):
 
         self.log.debug("Image id: %s waiting for server %s" %
                        (self.snap_image.id, server_id))
-        server = self.manager.waitForServer(server_id)
+        server = self.manager.waitForServer(server)
         if server['status'] != 'ACTIVE':
             raise Exception("Server %s for image id: %s status: %s" %
                             (server_id, self.snap_image.id, server['status']))
@@ -962,24 +957,25 @@ class SnapshotImageUpdater(ImageUpdater):
             else:
                 self.log.warning('Preferred ipv6 not available, '
                                  'falling back to ipv4.')
-        if not ip and self.manager.hasExtension('os-floating-ips'):
-            ip = self.manager.addPublicIP(server_id,
-                                          pool=self.provider.pool)
         if not ip:
+            self.log.error("Server dict {server}".format(
+                server=pprint.pformat(dict(server))))
             raise Exception("Unable to find public IP of server")
         server['public_ip'] = ip
 
         self.bootstrapServer(server, key, use_password=use_password)
 
-        image_id = self.manager.createImage(server_id, hostname,
-                                            self.image.meta)
+        image_id = self.manager.createImage(server, hostname,
+                                            self.image.meta)['id']
         self.snap_image.external_id = image_id
         session.commit()
         self.log.debug("Image id: %s building image %s" %
                        (self.snap_image.id, image_id))
         # It can take a _very_ long time for Rackspace 1.0 to save an image
         image = self.manager.waitForImage(image_id, IMAGE_TIMEOUT)
-        if image['status'] != 'ACTIVE':
+        # Throw exception here and not in waitForImage so that we can log
+        # the snap_image.id as well, which waitForImage does not know
+        if image['status'].lower() != 'active':
             raise Exception("Image %s for image id: %s status: %s" %
                             (image_id, self.snap_image.id, image['status']))
 
@@ -1865,23 +1861,26 @@ class NodePool(threading.Thread):
         if snap_image.server_external_id:
             try:
                 server = manager.getServer(snap_image.server_external_id)
+                if server:
                     self.log.debug('Deleting server %s for image id: %s' %
                                    (snap_image.server_external_id,
                                     snap_image.id))
                     manager.cleanupServer(server['id'])
                     manager.waitForServerDeletion(server['id'])
+                else:
+                    raise provider_manager.NotFound
             except provider_manager.NotFound:
                 self.log.warning('Image server id %s not found' %
                                  snap_image.server_external_id)
 
         if snap_image.external_id:
-            try:
-                remote_image = manager.getImage(snap_image.external_id)
-                self.log.debug('Deleting image %s' % remote_image['id'])
-                manager.deleteImage(remote_image['id'])
-            except provider_manager.NotFound:
+            remote_image = manager.getImage(snap_image.external_id)
+            if remote_image is None:
                 self.log.warning('Image id %s not found' %
                                  snap_image.external_id)
+            else:
+                self.log.debug('Deleting image %s' % remote_image['id'])
+                manager.deleteImage(remote_image['id'])
 
         snap_image.delete()
         self.log.info("Deleted image id: %s" % snap_image.id)
@@ -21,31 +21,16 @@ import logging
 import paramiko
 from contextlib import contextmanager
 
-import threading
-import time
-import requests.exceptions
-import sys
-
 import shade
-import novaclient
 
 import exceptions
 from nodeutils import iterate_timeout
-from task_manager import Task, TaskManager, ManagerStoppedException
+from task_manager import TaskManager, ManagerStoppedException
 
 
-SERVER_LIST_AGE = 5  # How long to keep a cached copy of the server list
 IPS_LIST_AGE = 5  # How long to keep a cached copy of the ip list
 
 
-class ServerCreateException(exceptions.TimeoutException):
-    statsd_key = 'error.servertimeout'
-
-
-class ImageCreateException(exceptions.TimeoutException):
-    statsd_key = 'error.imagetimeout'
-
-
 def get_public_ip(server, provider, version=4):
     for addr in server.addresses.get('public', []):
         if type(addr) == type(u''):  # Rackspace/openstack 1.0
@@ -99,25 +84,6 @@ def get_private_ip(server):
     return ret[0]
 
 
-def make_server_dict(server, provider):
-    d = dict(id=str(server.id),
-             name=server.name,
-             status=server.status,
-             addresses=server.addresses)
-    if hasattr(server, 'adminPass'):
-        d['admin_pass'] = server.adminPass
-    if hasattr(server, 'key_name'):
-        d['key_name'] = server.key_name
-    if hasattr(server, 'progress'):
-        d['progress'] = server.progress
-    if hasattr(server, 'metadata'):
-        d['metadata'] = server.metadata
-    d['public_v4'] = get_public_ip(server, provider)
-    d['private_v4'] = get_private_ip(server)
-    d['public_v6'] = get_public_ip(server, provider, version=6)
-    return d
-
-
 def make_image_dict(image):
     d = dict(id=str(image.id), name=image.name, status=image.status,
              metadata=image.metadata)
@@ -139,146 +105,6 @@ class NotFound(Exception):
     pass
 
 
-class CreateServerTask(Task):
-    def main(self, client):
-        server = client.nova_client.servers.create(**self.args)
-        return str(server.id)
-
-
-class GetServerTask(Task):
-    def main(self, client):
-        provider = self.args.pop('_nodepool_provider')
-        try:
-            server = client.nova_client.servers.get(self.args['server_id'])
-        except novaclient.exceptions.NotFound:
-            raise NotFound()
-        return make_server_dict(server, provider)
-
-
-class DeleteServerTask(Task):
-    def main(self, client):
-        client.nova_client.servers.delete(self.args['server_id'])
-
-
-class ListServersTask(Task):
-    def main(self, client):
-        provider = self.args.pop('_nodepool_provider')
-        servers = client.nova_client.servers.list()
-        return [make_server_dict(server, provider)
-                for server in servers]
-
-
-class AddKeypairTask(Task):
-    def main(self, client):
-        client.nova_client.keypairs.create(**self.args)
-
-
-class ListKeypairsTask(Task):
-    def main(self, client):
-        keys = client.nova_client.keypairs.list()
-        return [dict(id=str(key.id), name=key.name) for
-                key in keys]
-
-
-class DeleteKeypairTask(Task):
-    def main(self, client):
-        client.nova_client.keypairs.delete(self.args['name'])
-
-
-class CreateFloatingIPTask(Task):
-    def main(self, client):
-        ip = client.nova_client.floating_ips.create(**self.args)
-        return dict(id=str(ip.id), ip=ip.ip)
-
-
-class AddFloatingIPTask(Task):
-    def main(self, client):
-        client.nova_client.servers.add_floating_ip(**self.args)
-
-
-class GetFloatingIPTask(Task):
-    def main(self, client):
-        ip = client.nova_client.floating_ips.get(self.args['ip_id'])
-        return dict(id=str(ip.id), ip=ip.ip, instance_id=str(ip.instance_id))
-
-
-class ListFloatingIPsTask(Task):
-    def main(self, client):
-        ips = client.nova_client.floating_ips.list()
-        return [dict(id=str(ip.id), ip=ip.ip,
-                     instance_id=str(ip.instance_id)) for
-                ip in ips]
-
-
-class RemoveFloatingIPTask(Task):
-    def main(self, client):
-        client.nova_client.servers.remove_floating_ip(**self.args)
-
-
-class DeleteFloatingIPTask(Task):
-    def main(self, client):
-        client.nova_client.floating_ips.delete(self.args['ip_id'])
-
-
-class CreateImageTask(Task):
-    def main(self, client):
-        # This returns an id
-        return str(client.nova_client.servers.create_image(**self.args))
-
-
-class GetImageTask(Task):
-    def main(self, client):
-        try:
-            image = client.nova_client.images.get(**self.args)
-        except novaclient.exceptions.NotFound:
-            raise NotFound()
-        # HP returns 404, rackspace can return a 'DELETED' image.
-        if image.status == 'DELETED':
-            raise NotFound()
-        return make_image_dict(image)
-
-
-class ListExtensionsTask(Task):
-    def main(self, client):
-        try:
-            resp, body = client.nova_client.client.get('/extensions')
-            return [x['alias'] for x in body['extensions']]
-        except novaclient.exceptions.NotFound:
-            # No extensions present.
-            return []
-
-
-class ListFlavorsTask(Task):
-    def main(self, client):
-        flavors = client.nova_client.flavors.list()
-        return [dict(id=str(flavor.id), ram=flavor.ram, name=flavor.name)
-                for flavor in flavors]
-
-
-class ListImagesTask(Task):
-    def main(self, client):
-        images = client.nova_client.images.list()
-        return [make_image_dict(image) for image in images]
-
-
-class FindImageTask(Task):
-    def main(self, client):
-        image = client.nova_client.images.find(**self.args)
-        return dict(id=str(image.id))
-
-
-class DeleteImageTask(Task):
-    def main(self, client):
-        client.nova_client.images.delete(**self.args)
-
-
-class FindNetworkTask(Task):
-    def main(self, client):
-        for network in client.neutron_client.list_networks()['networks']:
-            if self.args['name'] == network['name']:
-                return dict(id=str(network['id']))
-
-
 class ProviderManager(TaskManager):
     log = logging.getLogger("nodepool.ProviderManager")
 
@@ -310,52 +136,25 @@ class ProviderManager(TaskManager):
         self.resetClient()
         self._images = {}
         self._networks = {}
-        self._cloud_metadata_read = False
         self.__flavors = {}
-        self.__extensions = {}
-        self._servers = []
-        self._servers_time = 0
-        self._servers_lock = threading.Lock()
-        self._ips = []
-        self._ips_time = 0
-        self._ips_lock = threading.Lock()
 
     @property
     def _flavors(self):
-        if not self._cloud_metadata_read:
-            self._getCloudMetadata()
-        return self.__flavors
-
-    @property
-    def _extensions(self):
-        if not self._cloud_metadata_read:
-            self._getCloudMetadata()
-        return self.__extensions
-
-    def _getCloudMetadata(self):
-        self.__flavors = self._getFlavors()
-        self.__extensions = self.listExtensions()
-        self._cloud_metadata_read = True
+        if not self.__flavors:
+            self.__flavors = self._getFlavors()
+        return self.__flavors
 
     def _getClient(self):
         return shade.OpenStackCloud(
             cloud_config=self.provider.cloud_config,
+            manager=self,
             **self.provider.cloud_config.config)
 
     def runTask(self, task):
-        try:
-            task.run(self._client)
-        except requests.exceptions.ProxyError:
-            # Try to get a new client object if we get a ProxyError
-            self.log.exception('Resetting client due to ProxyError')
-            self.resetClient()
-            try:
-                task.run(self._client)
-            except requests.exceptions.ProxyError as e:
-                # If we get a second ProxyError, then make sure it gets raised
-                # the same way all other Exceptions from the Task object do.
-                # This will move the Exception to the main thread.
-                task.exception(e, sys.exc_info()[2])
+        # Run the given task in the TaskManager passed to shade. It turns
+        # out that this provider manager is the TaskManager we pass, so
+        # this is a way of running each cloud operation in its own thread
+        task.run(self._client)
 
     def resetClient(self):
         self._client = self._getClient()
@@ -365,13 +164,6 @@ class ProviderManager(TaskManager):
         flavors.sort(lambda a, b: cmp(a['ram'], b['ram']))
         return flavors
 
-    def hasExtension(self, extension):
-        # Note: this will throw an error if the provider is offline
-        # but all the callers are in threads so the mainloop won't be affected.
-        if extension in self._extensions:
-            return True
-        return False
-
     def findFlavor(self, min_ram, name_filter=None):
         # Note: this will throw an error if the provider is offline
         # but all the callers are in threads (they call in via CreateServer) so
@@ -385,33 +177,43 @@ class ProviderManager(TaskManager):
     def findImage(self, name):
         if name in self._images:
             return self._images[name]
-        image = self.submitTask(FindImageTask(name=name))
+        with shade_inner_exceptions():
+            image = self._client.get_image(name)
         self._images[name] = image
         return image
 
     def findNetwork(self, name):
         if name in self._networks:
             return self._networks[name]
-        network = self.submitTask(FindNetworkTask(name=name))
+        with shade_inner_exceptions():
+            network = self._client.get_network(name)
         self._networks[name] = network
         return network
 
     def deleteImage(self, name):
         if name in self._images:
             del self._images[name]
-        return self.submitTask(DeleteImageTask(image=name))
+        with shade_inner_exceptions():
+            return self._client.delete_image(name)
 
     def addKeypair(self, name):
         key = paramiko.RSAKey.generate(2048)
         public_key = key.get_name() + ' ' + key.get_base64()
-        self.submitTask(AddKeypairTask(name=name, public_key=public_key))
+        with shade_inner_exceptions():
+            self._client.create_keypair(name=name, public_key=public_key)
         return key
 
     def listKeypairs(self):
-        return self.submitTask(ListKeypairsTask())
+        with shade_inner_exceptions():
+            keypairs = self._client.list_keypairs()
+        return keypairs
 
     def deleteKeypair(self, name):
-        return self.submitTask(DeleteKeypairTask(name=name))
+        with shade_inner_exceptions():
+            return self._client.delete_keypair(name=name)
 
     def createServer(self, name, min_ram, image_id=None, image_name=None,
                      az=None, key_name=None, name_filter=None,
@@ -437,6 +239,7 @@ class ProviderManager(TaskManager):
                 nics.append({'net-id': net_id})
             else:
                 raise Exception("Invalid 'networks' configuration.")
+        if nics:
             create_args['nics'] = nics
         # Put provider.name and image_name in as groups so that ansible
         # inventory can auto-create groups for us based on each of those
@@ -458,232 +261,120 @@ class ProviderManager(TaskManager):
|
|||||||
nodepool=json.dumps(nodepool_meta)
|
nodepool=json.dumps(nodepool_meta)
|
||||||
)
|
)
|
||||||
|
|
||||||
return self.submitTask(CreateServerTask(**create_args))
|
with shade_inner_exceptions():
|
||||||
|
return self._client.create_server(wait=False, **create_args)
|
||||||
|
|
||||||
def getServer(self, server_id):
|
def getServer(self, server_id):
|
||||||
return self.submitTask(GetServerTask(server_id=server_id,
|
with shade_inner_exceptions():
|
||||||
_nodepool_provider=self.provider))
|
return self._client.get_server(server_id)
|
||||||
|
|
||||||
def getFloatingIP(self, ip_id):
|
def waitForServer(self, server, timeout=3600):
|
||||||
return self.submitTask(GetFloatingIPTask(ip_id=ip_id))
|
with shade_inner_exceptions():
|
||||||
|
return self._client.wait_for_server(
|
||||||
def getServerFromList(self, server_id):
|
server=server, auto_ip=True, reuse=False,
|
||||||
for s in self.listServers():
|
timeout=timeout)
|
||||||
if s['id'] == server_id:
|
|
||||||
return s
|
|
||||||
raise NotFound()
|
|
||||||
|
|
||||||
def _waitForResource(self, resource_type, resource_id, timeout):
|
|
||||||
last_status = None
|
|
||||||
if resource_type == 'server':
|
|
||||||
exc = ServerCreateException
|
|
||||||
elif resource_type == 'image':
|
|
||||||
exc = ImageCreateException
|
|
||||||
for count in iterate_timeout(timeout, exc,
|
|
||||||
"%s creation" % resource_type):
|
|
||||||
try:
|
|
||||||
if resource_type == 'server':
|
|
||||||
resource = self.getServerFromList(resource_id)
|
|
||||||
elif resource_type == 'image':
|
|
||||||
resource = self.getImage(resource_id)
|
|
||||||
except NotFound:
|
|
||||||
continue
|
|
||||||
except ManagerStoppedException:
|
|
||||||
raise
|
|
||||||
except Exception:
|
|
||||||
self.log.exception('Unable to list %ss while waiting for '
|
|
||||||
'%s will retry' % (resource_type,
|
|
||||||
resource_id))
|
|
||||||
continue
|
|
||||||
|
|
||||||
status = resource.get('status')
|
|
||||||
if (last_status != status):
|
|
||||||
self.log.debug(
|
|
||||||
'Status of {type} in {provider} {id}: {status}'.format(
|
|
||||||
type=resource_type,
|
|
||||||
provider=self.provider.name,
|
|
||||||
id=resource_id,
|
|
||||||
status=status))
|
|
||||||
if status == 'ERROR' and 'fault' in resource:
|
|
||||||
self.log.debug(
|
|
||||||
'ERROR in {provider} on {id}: {resason}'.format(
|
|
||||||
provider=self.provider.name,
|
|
||||||
id=resource_id,
|
|
||||||
resason=resource['fault']['message']))
|
|
||||||
last_status = status
|
|
||||||
if status in ['ACTIVE', 'ERROR']:
|
|
||||||
return resource
|
|
||||||
|
|
||||||
def waitForServer(self, server_id, timeout=3600):
|
|
||||||
return self._waitForResource('server', server_id, timeout)
|
|
||||||
|
|
||||||
def waitForServerDeletion(self, server_id, timeout=600):
|
def waitForServerDeletion(self, server_id, timeout=600):
|
||||||
for count in iterate_timeout(600,
|
for count in iterate_timeout(
|
||||||
exceptions.ServerDeleteException,
|
timeout, exceptions.ServerDeleteException,
|
||||||
"server %s deletion " % server_id):
|
"server %s deletion" % server_id):
|
||||||
try:
|
if not self.getServer(server_id):
|
||||||
self.getServerFromList(server_id)
|
|
||||||
except NotFound:
|
|
||||||
return
|
return
|
||||||
|
|
||||||
def waitForImage(self, image_id, timeout=3600):
|
def waitForImage(self, image_id, timeout=3600):
|
||||||
# TODO(mordred): This should just be handled by the Fake, but we're
|
last_status = None
|
||||||
# not quite plumbed through for that yet
|
for count in iterate_timeout(
|
||||||
if image_id == 'fake-glance-id':
|
timeout, exceptions.ImageCreateException, "image creation"):
|
||||||
return True
|
|
||||||
return self._waitForResource('image', image_id, timeout)
|
|
||||||
|
|
||||||
def createFloatingIP(self, pool=None):
|
|
||||||
return self.submitTask(CreateFloatingIPTask(pool=pool))
|
|
||||||
|
|
||||||
def addFloatingIP(self, server_id, address):
|
|
||||||
self.submitTask(AddFloatingIPTask(server=server_id,
|
|
||||||
address=address))
|
|
||||||
|
|
||||||
def addPublicIP(self, server_id, pool=None):
|
|
||||||
ip = self.createFloatingIP(pool)
|
|
||||||
try:
|
try:
|
||||||
self.addFloatingIP(server_id, ip['ip'])
|
image = self.getImage(image_id)
|
||||||
except novaclient.exceptions.ClientException:
|
except NotFound:
|
||||||
# Delete the floating IP here as cleanupServer will not
|
continue
|
||||||
# have access to the ip -> server mapping preventing it
|
|
||||||
# from removing this IP.
|
|
||||||
self.deleteFloatingIP(ip['id'])
|
|
||||||
raise
|
|
||||||
for count in iterate_timeout(600,
|
|
||||||
exceptions.IPAddTimeoutException,
|
|
||||||
"ip to be added to %s" % server_id):
|
|
||||||
try:
|
|
||||||
newip = self.getFloatingIP(ip['id'])
|
|
||||||
except ManagerStoppedException:
|
except ManagerStoppedException:
|
||||||
raise
|
raise
|
||||||
except Exception:
|
except Exception:
|
||||||
self.log.exception('Unable to get IP details for server %s, '
|
self.log.exception('Unable to list images while waiting for '
|
||||||
'will retry' % (server_id))
|
'%s will retry' % (image_id))
|
||||||
continue
|
continue
|
||||||
if newip['instance_id'] == server_id:
|
|
||||||
return newip['ip']
|
|
||||||
|
|
||||||
def createImage(self, server_id, image_name, meta):
|
# shade returns None when not found
|
||||||
return self.submitTask(CreateImageTask(server=server_id,
|
if not image:
|
||||||
image_name=image_name,
|
continue
|
||||||
metadata=meta))
|
|
||||||
|
status = image['status']
|
||||||
|
if (last_status != status):
|
||||||
|
self.log.debug(
|
||||||
|
'Status of image in {provider} {id}: {status}'.format(
|
||||||
|
provider=self.provider.name,
|
||||||
|
id=image_id,
|
||||||
|
status=status))
|
||||||
|
if status == 'ERROR' and 'fault' in image:
|
||||||
|
self.log.debug(
|
||||||
|
'ERROR in {provider} on {id}: {resason}'.format(
|
||||||
|
provider=self.provider.name,
|
||||||
|
id=image_id,
|
||||||
|
resason=image['fault']['message']))
|
||||||
|
last_status = status
|
||||||
|
# Glance client returns lower case statuses - but let's be sure
|
||||||
|
if status.lower() in ['active', 'error']:
|
||||||
|
return image
|
||||||
|
|
||||||
|
def createImage(self, server, image_name, meta):
|
||||||
|
with shade_inner_exceptions():
|
||||||
|
return self._client.create_image_snapshot(
|
||||||
|
image_name, server, **meta)
|
||||||
|
|
||||||
def getImage(self, image_id):
|
def getImage(self, image_id):
|
||||||
return self.submitTask(GetImageTask(image=image_id))
|
with shade_inner_exceptions():
|
||||||
|
return self._client.get_image(image_id)
|
||||||
|
|
||||||
def uploadImage(self, image_name, filename, disk_format, container_format,
|
def uploadImage(self, image_name, filename, image_type=None, meta=None):
|
||||||
meta):
|
|
||||||
# configure glance and upload image. Note the meta flags
|
# configure glance and upload image. Note the meta flags
|
||||||
# are provided as custom glance properties
|
# are provided as custom glance properties
|
||||||
# NOTE: we have wait=True set here. This is not how we normally
|
# NOTE: we have wait=True set here. This is not how we normally
|
||||||
# do things in nodepool, preferring to poll ourselves thankyouverymuch.
|
# do things in nodepool, preferring to poll ourselves thankyouverymuch.
|
||||||
# However - two things to note:
|
# However - two things to note:
|
||||||
# - glance v1 has no aysnc mechanism, so we have to handle it anyway
|
# - PUT has no aysnc mechanism, so we have to handle it anyway
|
||||||
# - glance v2 waiting is very strange and complex - but we have to
|
# - v2 w/task waiting is very strange and complex - but we have to
|
||||||
# block for our v1 clouds anyway, so we might as well
|
# block for our v1 clouds anyway, so we might as well
|
||||||
# have the interface be the same and treat faking-out
|
# have the interface be the same and treat faking-out
|
||||||
# a shade-level fake-async interface later
|
# a shade-level fake-async interface later
|
||||||
|
if not meta:
|
||||||
|
meta = {}
|
||||||
|
if image_type:
|
||||||
|
meta['disk_format'] = image_type
|
||||||
with shade_inner_exceptions():
|
with shade_inner_exceptions():
|
||||||
image = self._client.create_image(
|
image = self._client.create_image(
|
||||||
name=image_name,
|
name=image_name,
|
||||||
filename='%s.%s' % (filename, disk_format),
|
filename=filename,
|
||||||
is_public=False,
|
is_public=False,
|
||||||
disk_format=disk_format,
|
|
||||||
container_format=container_format,
|
|
||||||
wait=True,
|
wait=True,
|
||||||
**meta)
|
**meta)
|
||||||
return image.id
|
return image.id
|
||||||
|
|
||||||
def listExtensions(self):
|
|
||||||
return self.submitTask(ListExtensionsTask())
|
|
||||||
|
|
||||||
def listImages(self):
|
def listImages(self):
|
||||||
return self.submitTask(ListImagesTask())
|
with shade_inner_exceptions():
|
||||||
|
return self._client.list_images()
|
||||||
|
|
||||||
def listFlavors(self):
|
def listFlavors(self):
|
||||||
return self.submitTask(ListFlavorsTask())
|
with shade_inner_exceptions():
|
||||||
|
return self._client.list_flavors()
|
||||||
|
|
||||||
def listFloatingIPs(self):
|
def listServers(self):
|
||||||
if time.time() - self._ips_time >= IPS_LIST_AGE:
|
# shade list_servers carries the nodepool server list caching logic
|
||||||
if self._ips_lock.acquire(False):
|
with shade_inner_exceptions():
|
||||||
try:
|
return self._client.list_servers()
|
||||||
self._ips = self.submitTask(ListFloatingIPsTask())
|
|
||||||
self._ips_time = time.time()
|
|
||||||
finally:
|
|
||||||
self._ips_lock.release()
|
|
||||||
return self._ips
|
|
||||||
|
|
||||||
def removeFloatingIP(self, server_id, address):
|
|
||||||
return self.submitTask(RemoveFloatingIPTask(server=server_id,
|
|
||||||
address=address))
|
|
||||||
|
|
||||||
def deleteFloatingIP(self, ip_id):
|
|
||||||
return self.submitTask(DeleteFloatingIPTask(ip_id=ip_id))
|
|
||||||
|
|
||||||
def listServers(self, cache=True):
|
|
||||||
if (not cache or
|
|
||||||
time.time() - self._servers_time >= SERVER_LIST_AGE):
|
|
||||||
# Since we're using cached data anyway, we don't need to
|
|
||||||
# have more than one thread actually submit the list
|
|
||||||
# servers task. Let the first one submit it while holding
|
|
||||||
# a lock, and the non-blocking acquire method will cause
|
|
||||||
# subsequent threads to just skip this and use the old
|
|
||||||
# data until it succeeds.
|
|
||||||
if self._servers_lock.acquire(False):
|
|
||||||
try:
|
|
||||||
self._servers = self.submitTask(ListServersTask(
|
|
||||||
_nodepool_provider=self.provider))
|
|
||||||
self._servers_time = time.time()
|
|
||||||
finally:
|
|
||||||
self._servers_lock.release()
|
|
||||||
return self._servers
|
|
||||||
|
|
||||||
def deleteServer(self, server_id):
|
def deleteServer(self, server_id):
|
||||||
return self.submitTask(DeleteServerTask(server_id=server_id))
|
with shade_inner_exceptions():
|
||||||
|
return self._client.delete_server(server_id, delete_ips=True)
|
||||||
|
|
||||||
    def cleanupServer(self, server_id):
-        done = False
-        while not done:
-            try:
-                server = self.getServerFromList(server_id)
-                done = True
-            except NotFound:
-                # If we have old data, that's fine, it should only
-                # indicate that a server exists when it doesn't; we'll
-                # recover from that. However, if we have no data at
-                # all, wait until the first server list task
-                # completes.
-                if self._servers_time == 0:
-                    time.sleep(SERVER_LIST_AGE + 1)
-                else:
-                    done = True
-
-        # This will either get the server or raise an exception
-        server = self.getServerFromList(server_id)
-
-        has_floating_ip = False
-        for (name, network) in server['addresses'].iteritems():
-            for interface_spec in network:
-                if interface_spec['version'] != 4:
-                    continue
-                if ('OS-EXT-IPS:type' in interface_spec
-                    and interface_spec['OS-EXT-IPS:type'] == 'floating'):
-                    has_floating_ip = True
-        if has_floating_ip:
-            for ip in self.listFloatingIPs():
-                if ip['instance_id'] == server_id:
-                    self.log.debug('Deleting floating ip for server %s' %
-                                   server_id)
-                    self.deleteFloatingIP(ip['id'])
-
-        if (self.hasExtension('os-keypairs') and
-            server['key_name'] != self.provider.keypair):
-            for kp in self.listKeypairs():
-                if kp['name'] == server['key_name']:
-                    self.log.debug('Deleting keypair for server %s' %
-                                   server_id)
-                    self.deleteKeypair(kp['name'])
+        server = self.getServer(server_id)
+        if not server:
+            raise NotFound()
+
+        with shade_inner_exceptions():
+            self._client.delete_keypair(name=server['key_name'])

        self.log.debug('Deleting server %s' % server_id)
        self.deleteServer(server_id)
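All of the new methods run under shade_inner_exceptions. A plausible sketch
of that helper, assuming the common nodepool shape (a context manager that
logs the traceback shade captured from the underlying client before
re-raising); treat this as an assumption, not a verbatim copy:

    from contextlib import contextmanager

    import shade

    @contextmanager
    def shade_inner_exceptions():
        try:
            yield
        except shade.OpenStackCloudException as e:
            # shade wraps driver errors; log the inner traceback it
            # captured so the root cause shows up in nodepool's logs.
            e.log_error()
            raise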
@@ -115,8 +115,9 @@ class TestNodepoolCMD(tests.DBTestCase):
    def test_alien_list_fail(self):
        def fail_list(self):
            raise RuntimeError('Fake list error')
-        self.useFixture(fixtures.MonkeyPatch('nodepool.fakeprovider.FakeList'
-                                             '.list', fail_list))
+        self.useFixture(fixtures.MonkeyPatch(
+            'nodepool.fakeprovider.FakeOpenStackCloud.list_servers',
+            fail_list))

        configfile = self.setup_config("node_cmd.yaml")
        self.patch_argv("-c", configfile, "alien-list")
@@ -125,8 +126,9 @@ class TestNodepoolCMD(tests.DBTestCase):
    def test_alien_image_list_fail(self):
        def fail_list(self):
            raise RuntimeError('Fake list error')
-        self.useFixture(fixtures.MonkeyPatch('nodepool.fakeprovider.FakeList'
-                                             '.list', fail_list))
+        self.useFixture(fixtures.MonkeyPatch(
+            'nodepool.fakeprovider.FakeOpenStackCloud.list_servers',
+            fail_list))

        configfile = self.setup_config("node_cmd.yaml")
        self.patch_argv("-c", configfile, "alien-image-list")
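Both tests rely on the same fixtures.MonkeyPatch pattern: the fixture swaps
a replacement in for the dotted-path target for the duration of the test,
and useFixture restores the original on cleanup. A self-contained sketch
(ExampleTest is illustrative):

    import fixtures
    import testtools

    class ExampleTest(testtools.TestCase):
        def test_patched_list(self):
            def fail_list(self):
                raise RuntimeError('Fake list error')
            # The patch is reverted automatically when the test finishes.
            self.useFixture(fixtures.MonkeyPatch(
                'nodepool.fakeprovider.FakeOpenStackCloud.list_servers',
                fail_list))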
@@ -25,9 +25,6 @@ from nodepool import nodedb
import nodepool.fakeprovider
import nodepool.nodepool

-import requests.exceptions
-from testtools import ExpectedException
-

class TestNodepool(tests.DBTestCase):
    log = logging.getLogger("nodepool.TestNodepool")
@@ -401,39 +398,6 @@ class TestNodepool(tests.DBTestCase):
        self.assertEqual(len(deleted_nodes), 1)
        self.assertEqual(node_id, deleted_nodes[0].id)

-    def test_proxy_timeout(self):
-        """Test that we re-run a task after a ProxyError"""
-        configfile = self.setup_config('node.yaml')
-        pool = self.useNodepool(configfile, watermark_sleep=1)
-        pool.start()
-        self.waitForNodes(pool)
-
-        provider = pool.config.providers['fake-provider']
-        manager = pool.getProviderManager(provider)
-
-        def get_bad_client(manager):
-            return nodepool.fakeprovider.BadOpenstackCloud(
-                manager._client.nova_client.images)
-
-        # In order to test recovering from a ProxyError from the client,
-        # we are going to manually set the client object to be a bad
-        # client that always raises a ProxyError. If our client reset
-        # works correctly then we will create a new client object, which
-        # in this case would be a new fake client in place of the bad
-        # client.
-        manager._client = get_bad_client(manager)
-
-        # The only implemented function for the fake and bad clients.
-        # If we don't raise an uncaught exception, we pass.
-        manager.listExtensions()
-
-        # Now let's do it again, but let's prevent the client object from
-        # being replaced, and then assert that we raised the exception
-        # that we expect.
-        manager._client = get_bad_client(manager)
-        manager._getClient = lambda: get_bad_client(manager)
-
-        with ExpectedException(requests.exceptions.ProxyError):
-            manager.listExtensions()
-
    def test_leaked_node(self):
        """Test that a leaked node is deleted"""
        configfile = self.setup_config('leaked_node.yaml')
@@ -447,7 +411,7 @@ class TestNodepool(tests.DBTestCase):
        # Make sure we have a node built and ready
        provider = pool.config.providers['fake-provider']
        manager = pool.getProviderManager(provider)
-        servers = manager.listServers(cache=False)
+        servers = manager.listServers()
        self.assertEqual(len(servers), 1)

        with pool.getDB().getSession() as session:
@@ -475,7 +439,7 @@ class TestNodepool(tests.DBTestCase):
        self.log.debug("...done waiting for replacement pool.")

        # Make sure we end up with only one server (the replacement)
-        servers = manager.listServers(cache=False)
+        servers = manager.listServers()
        self.assertEqual(len(servers), 1)
        with pool.getDB().getSession() as session:
            nodes = session.getNodes(provider_name='fake-provider',
@@ -10,12 +10,11 @@ statsd>=3.0
apscheduler>=3.0
sqlalchemy>=0.8.2,<1.1.0
pyzmq>=13.1.0
-python-novaclient>=2.21.0
PyMySQL
PrettyTable>=0.6,<0.8
# shade has a looser requirement on six than nodepool, so install six first
six>=1.7.0
os-client-config>=1.2.0
-shade>=0.12.0
+shade>=1.6.2
diskimage-builder
voluptuous
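With python-novaclient dropped, shade (driven by os-client-config for
credential resolution) becomes the single client entry point. A hedged
sketch of how such a client is constructed; the cloud name 'mycloud' and
its clouds.yaml entry are assumptions:

    import shade

    # os-client-config resolves 'mycloud' from clouds.yaml or environment
    # variables; shade builds the session and service clients from it.
    cloud = shade.openstack_cloud(cloud='mycloud')
    for server in cloud.list_servers():  # cached inside shade
        print(server.name)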
@@ -39,3 +39,14 @@ waitforimage ubuntu-dib
waitfornode trusty-server
# check dib image was bootable
waitfornode ubuntu-dib
+
+set -o errexit
+# Show the built nodes
+$NODEPOOL list
+
+# Try to delete the nodes that were just built
+$NODEPOOL delete --now 1
+$NODEPOOL delete --now 2
+
+# show the deleted nodes (and their replacements may be building)
+$NODEPOOL list