Update nodepool config syntax
This implements the changes described in:
http://lists.openstack.org/pipermail/openstack-infra/2017-January/005018.html

It also removes some, but not all, extraneous keys from test config files.

Change-Id: Iebc941b4505d6ad46c882799b6230eb23545e5c0
parent c5c5be30f9
commit dcc3b5e071
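For orientation before the diff itself, here is a minimal before-and-after sketch of the syntax change, with names borrowed from the test fixtures below (an illustrative sketch, not the full schema). Old style, where labels point at provider images:

    labels:
      - name: fake-label
        image: fake-image
        min-ready: 1
        providers:
          - name: fake-provider

    providers:
      - name: fake-provider
        max-servers: 96
        images:
          - name: fake-image
            min-ram: 8192

New style, where each provider declares its diskimages and one or more pools, and each pool binds labels to a diskimage:

    labels:
      - name: fake-label
        min-ready: 1

    providers:
      - name: fake-provider
        diskimages:
          - name: fake-image
        pools:
          - name: main
            max-servers: 96
            labels:
              - name: fake-label
                diskimage: fake-image
                min-ram: 8192

Scheduling keys such as max-servers, availability-zones, and networks move from the provider onto the pool, and the launcher correspondingly runs one PoolWorker thread per provider pool rather than one ProviderWorker per provider.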
@@ -172,30 +172,15 @@ cron:
 
 labels:
   - name: centos-7
-    image: centos-7
     min-ready: 1
-    providers:
-      - name: devstack
   - name: fedora-25
-    image: fedora-25
     min-ready: 1
-    providers:
-      - name: devstack
   - name: ubuntu-precise
-    image: ubuntu-precise
     min-ready: 1
-    providers:
-      - name: devstack
   - name: ubuntu-trusty
-    image: ubuntu-trusty
     min-ready: 1
-    providers:
-      - name: devstack
   - name: ubuntu-xenial
-    image: ubuntu-xenial
     min-ready: 1
-    providers:
-      - name: devstack
 
 providers:
   - name: devstack
@@ -205,29 +190,42 @@ providers:
     # Long boot timeout to deal with potentially nested virt.
     boot-timeout: 600
     launch-timeout: 900
-    max-servers: 5
     rate: 0.25
-    images:
+    diskimages:
       - name: centos-7
-        min-ram: 1024
-        name-filter: 'nodepool'
         config-drive: true
       - name: fedora-25
-        min-ram: 1024
-        name-filter: 'nodepool'
         config-drive: true
       - name: ubuntu-precise
-        min-ram: 512
-        name-filter: 'nodepool'
         config-drive: true
       - name: ubuntu-trusty
-        min-ram: 512
-        name-filter: 'nodepool'
         config-drive: true
       - name: ubuntu-xenial
+        config-drive: true
+    pools:
+      - name: main
+        max-servers: 5
+        labels:
+          - name: centos-7
+            diskimage: centos-7
+            min-ram: 1024
+            name-filter: 'nodepool'
+          - name: fedora-25
+            diskimage: fedora-25
+            min-ram: 1024
+            name-filter: 'nodepool'
+          - name: ubuntu-precise
+            diskimage: ubuntu-precise
+            min-ram: 512
+            name-filter: 'nodepool'
+          - name: ubuntu-trusty
+            diskimage: ubuntu-trusty
+            min-ram: 512
+            name-filter: 'nodepool'
+          - name: ubuntu-xenial
+            diskimage: ubuntu-xenial
             min-ram: 512
             name-filter: 'nodepool'
-        config-drive: true
 
 diskimages:
   - name: centos-7
@@ -271,7 +271,7 @@ class CleanupWorker(BaseWorker):
                     self._deleteUpload(upload)
 
     def _cleanupObsoleteProviderUploads(self, provider, image, build_id):
-        image_names_for_provider = provider.images.keys()
+        image_names_for_provider = provider.diskimages.keys()
         if image in image_names_for_provider:
             # This image is in use for this provider
             return
@@ -849,7 +849,7 @@ class UploadWorker(BaseWorker):
                       (build_id, filename, provider.name))
 
         manager = self._config.provider_managers[provider.name]
-        provider_image = provider.images.get(image_name)
+        provider_image = provider.diskimages.get(image_name)
         if provider_image is None:
             raise exceptions.BuilderInvalidCommandError(
                 "Could not find matching provider image for %s" % image_name
@@ -899,7 +899,7 @@ class UploadWorker(BaseWorker):
         to providers, do the upload if they are available on the local disk.
         '''
         for provider in self._config.providers.values():
-            for image in provider.images.values():
+            for image in provider.diskimages.values():
                 uploaded = False
 
                 # Check if we've been told to shutdown
@@ -931,7 +931,7 @@ class UploadWorker(BaseWorker):
         :returns: True if an upload was attempted, False otherwise.
         '''
         # Check if image uploads are paused.
-        if provider.images.get(image.name).pause:
+        if provider.diskimages.get(image.name).pause:
             return False
 
         # Search for the most recent 'ready' image build
@@ -29,16 +29,6 @@ class ConfigValidator:
             'cleanup': str,
         }
 
-        images = {
-            'name': str,
-            'pause': bool,
-            'min-ram': int,
-            'name-filter': str,
-            'diskimage': str,
-            'meta': dict,
-            'config-drive': bool,
-        }
-
         old_network = {
             'net-id': str,
             'net-label': str,
@@ -49,38 +39,53 @@ class ConfigValidator:
             'public': bool, # Ignored, but kept for backwards compat
         }
 
-        providers = {
+        pool_label = {
+            v.Required('name'): str,
+            v.Required('diskimage'): str,
+            'min-ram': int,
+            'name-filter': str,
+        }
+
+        pool = {
+            'name': str,
+            'networks': [v.Any(old_network, network)],
+            'max-servers': int,
+            'labels': [pool_label],
+            'availability-zones': [str],
+        }
+
+        provider_diskimage = {
+            'name': str,
+            'pause': bool,
+            'meta': dict,
+            'config-drive': bool,
+        }
+
+        provider = {
             'name': str,
             'region-name': str,
-            'availability-zones': [str],
             'cloud': str,
-            'max-servers': int,
             'max-concurrency': int,
-            'pool': str, # Ignored, but kept for backwards compat
             'image-type': str,
-            'networks': [v.Any(old_network, network)],
             'ipv6-preferred': bool,
             'boot-timeout': int,
             'api-timeout': int,
             'launch-timeout': int,
             'launch-retries': int,
             'rate': float,
-            'images': [images],
             'hostname-format': str,
             'image-name-format': str,
             'clean-floating-ips': bool,
+            'pools': [pool],
+            'diskimages': [provider_diskimage],
         }
 
-        labels = {
+        label = {
             'name': str,
-            'image': str,
             'min-ready': int,
-            'providers': [{
-                'name': str,
-            }],
         }
 
-        diskimages = {
+        diskimage = {
             'name': str,
             'pause': bool,
             'elements': [str],
@@ -99,9 +104,9 @@ class ConfigValidator:
                 'chroot': str,
             }],
             'cron': cron,
-            'providers': [providers],
-            'labels': [labels],
-            'diskimages': [diskimages],
+            'providers': [provider],
+            'labels': [label],
+            'diskimages': [diskimage],
         }
 
         log.info("validating %s" % self.config_file)
@@ -110,12 +115,3 @@ class ConfigValidator:
         # validate the overall schema
         schema = v.Schema(top_level)
         schema(config)
-
-        # labels must list valid providers
-        all_providers = [p['name'] for p in config['providers']]
-        for label in config['labels']:
-            for provider in label['providers']:
-                if not provider['name'] in all_providers:
-                    raise AssertionError('label %s requests '
-                                         'non-existent provider %s'
-                                         % (label['name'], provider['name']))
@@ -221,7 +221,7 @@ class NodePoolCmd(NodepoolApp):
 
         alien_ids = []
         uploads = []
-        for image in provider.images:
+        for image in provider.diskimages:
             # Build list of provider images as recorded in ZK
             for bnum in self.zk.getBuildNumbers(image):
                 uploads.extend(
@@ -40,31 +40,18 @@ class Config(ConfigValue):
 class Provider(ConfigValue):
     def __eq__(self, other):
         if (other.cloud_config != self.cloud_config or
-            other.max_servers != self.max_servers or
-            other.pool != self.pool or
+            other.pools != self.pools or
             other.image_type != self.image_type or
             other.rate != self.rate or
             other.api_timeout != self.api_timeout or
             other.boot_timeout != self.boot_timeout or
             other.launch_timeout != self.launch_timeout or
-            other.networks != self.networks or
             other.ipv6_preferred != self.ipv6_preferred or
             other.clean_floating_ips != self.clean_floating_ips or
             other.max_concurrency != self.max_concurrency or
-            other.azs != self.azs):
-            return False
-        new_images = other.images
-        old_images = self.images
-        # Check if images have been added or removed
-        if set(new_images.keys()) != set(old_images.keys()):
-            return False
-        # check if existing images have been updated
-        for k in new_images:
-            if (new_images[k].min_ram != old_images[k].min_ram or
-                new_images[k].name_filter != old_images[k].name_filter or
-                new_images[k].meta != old_images[k].meta or
-                new_images[k].config_drive != old_images[k].config_drive):
-                return False
+            other.diskimages != self.diskimages):
+            return False
         return True
 
     def __ne__(self, other):
@@ -74,9 +61,25 @@ class Provider(ConfigValue):
         return "<Provider %s>" % self.name
 
 
-class ProviderImage(ConfigValue):
+class ProviderPool(ConfigValue):
+    def __eq__(self, other):
+        if (other.labels != self.labels or
+            other.max_servers != self.max_servers or
+            other.azs != self.azs or
+            other.networks != self.networks):
+            return False
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
     def __repr__(self):
-        return "<ProviderImage %s>" % self.name
+        return "<ProviderPool %s>" % self.name
+
+
+class ProviderDiskImage(ConfigValue):
+    def __repr__(self):
+        return "<ProviderDiskImage %s>" % self.name
 
 
 class Label(ConfigValue):
@@ -84,9 +87,19 @@ class Label(ConfigValue):
         return "<Label %s>" % self.name
 
 
-class LabelProvider(ConfigValue):
+class ProviderLabel(ConfigValue):
+    def __eq__(self, other):
+        if (other.diskimage != self.diskimage or
+            other.min_ram != self.min_ram or
+            other.name_filter != self.name_filter):
+            return False
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
     def __repr__(self):
-        return "<LabelProvider %s>" % self.name
+        return "<ProviderLabel %s>" % self.name
 
 
 class Cron(ConfigValue):
@@ -95,6 +108,20 @@ class Cron(ConfigValue):
 
 
 class DiskImage(ConfigValue):
+    def __eq__(self, other):
+        if (other.name != self.name or
+            other.elements != self.elements or
+            other.release != self.release or
+            other.rebuild_age != self.rebuild_age or
+            other.env_vars != self.env_vars or
+            other.image_types != self.image_types or
+            other.pause != self.pause):
+            return False
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
     def __repr__(self):
         return "<DiskImage %s>" % self.name
 
@@ -154,74 +181,7 @@ def loadConfig(config_path):
         name = z.host + '_' + str(z.port)
         newconfig.zookeeper_servers[name] = z
 
-    for provider in config.get('providers', []):
-        p = Provider()
-        p.name = provider['name']
-        newconfig.providers[p.name] = p
-
-        cloud_kwargs = _cloudKwargsFromProvider(provider)
-        p.cloud_config = _get_one_cloud(cloud_config, cloud_kwargs)
-        p.region_name = provider.get('region-name')
-        p.max_servers = provider['max-servers']
-        p.max_concurrency = provider.get('max-concurrency', -1)
-        p.pool = provider.get('pool', None)
-        p.rate = provider.get('rate', 1.0)
-        p.api_timeout = provider.get('api-timeout')
-        p.boot_timeout = provider.get('boot-timeout', 60)
-        p.launch_timeout = provider.get('launch-timeout', 3600)
-        p.launch_retries = provider.get('launch-retries', 3)
-        p.networks = []
-        for network in provider.get('networks', []):
-            n = Network()
-            p.networks.append(n)
-            if 'net-id' in network:
-                n.id = network['net-id']
-                n.name = None
-            elif 'net-label' in network:
-                n.name = network['net-label']
-                n.id = None
-            else:
-                n.name = network.get('name')
-                n.id = None
-        p.ipv6_preferred = provider.get('ipv6-preferred')
-        p.clean_floating_ips = provider.get('clean-floating-ips')
-        p.azs = provider.get('availability-zones')
-        p.hostname_format = provider.get(
-            'hostname-format',
-            '{label.name}-{provider.name}-{node.id}'
-        )
-        p.image_name_format = provider.get(
-            'image-name-format',
-            '{image_name}-{timestamp}'
-        )
-        p.image_type = provider.get(
-            'image-type', p.cloud_config.config['image_format'])
-        p.images = {}
-        for image in provider['images']:
-            i = ProviderImage()
-            i.name = image['name']
-            p.images[i.name] = i
-            i.min_ram = image['min-ram']
-            i.name_filter = image.get('name-filter', None)
-            i.pause = bool(image.get('pause', False))
-            i.config_drive = image.get('config-drive', None)
-
-            # This dict is expanded and used as custom properties when
-            # the image is uploaded.
-            i.meta = image.get('meta', {})
-            # 5 elements, and no key or value can be > 255 chars
-            # per Nova API rules
-            if i.meta:
-                if len(i.meta) > 5 or \
-                   any([len(k) > 255 or len(v) > 255
-                        for k, v in i.meta.iteritems()]):
-                    # soft-fail
-                    #self.log.error("Invalid metadata for %s; ignored"
-                    #               % i.name)
-                    i.meta = {}
-
-    if 'diskimages' in config:
-        for diskimage in config['diskimages']:
+    for diskimage in config.get('diskimages', []):
         d = DiskImage()
         d.name = diskimage['name']
         newconfig.diskimages[d.name] = d
@@ -241,23 +201,98 @@ def loadConfig(config_path):
         d.env_vars = {}
         d.image_types = set(diskimage.get('formats', []))
         d.pause = bool(diskimage.get('pause', False))
-    # Do this after providers to build the image-types
-    for provider in newconfig.providers.values():
-        for image in provider.images.values():
-            diskimage = newconfig.diskimages[image.name]
-            diskimage.image_types.add(provider.image_type)
 
     for label in config.get('labels', []):
         l = Label()
         l.name = label['name']
         newconfig.labels[l.name] = l
-        l.image = label['image']
         l.min_ready = label.get('min-ready', 2)
-        l.providers = {}
-        for provider in label['providers']:
-            p = LabelProvider()
+        l.pools = []
+
+    for provider in config.get('providers', []):
+        p = Provider()
         p.name = provider['name']
-        l.providers[p.name] = p
+        newconfig.providers[p.name] = p
+
+        cloud_kwargs = _cloudKwargsFromProvider(provider)
+        p.cloud_config = _get_one_cloud(cloud_config, cloud_kwargs)
+        p.region_name = provider.get('region-name')
+        p.max_concurrency = provider.get('max-concurrency', -1)
+        p.rate = provider.get('rate', 1.0)
+        p.api_timeout = provider.get('api-timeout')
+        p.boot_timeout = provider.get('boot-timeout', 60)
+        p.launch_timeout = provider.get('launch-timeout', 3600)
+        p.launch_retries = provider.get('launch-retries', 3)
+        p.ipv6_preferred = provider.get('ipv6-preferred')
+        p.clean_floating_ips = provider.get('clean-floating-ips')
+        p.hostname_format = provider.get(
+            'hostname-format',
+            '{label.name}-{provider.name}-{node.id}'
+        )
+        p.image_name_format = provider.get(
+            'image-name-format',
+            '{image_name}-{timestamp}'
+        )
+        p.image_type = provider.get(
+            'image-type', p.cloud_config.config['image_format'])
+        p.diskimages = {}
+        for image in provider.get('diskimages', []):
+            i = ProviderDiskImage()
+            i.name = image['name']
+            p.diskimages[i.name] = i
+            diskimage = newconfig.diskimages[i.name]
+            diskimage.image_types.add(p.image_type)
+            #i.min_ram = image['min-ram']
+            #i.name_filter = image.get('name-filter', None)
+            i.pause = bool(image.get('pause', False))
+            i.config_drive = image.get('config-drive', None)
+
+            # This dict is expanded and used as custom properties when
+            # the image is uploaded.
+            i.meta = image.get('meta', {})
+            # 5 elements, and no key or value can be > 255 chars
+            # per Nova API rules
+            if i.meta:
+                if len(i.meta) > 5 or \
+                   any([len(k) > 255 or len(v) > 255
+                        for k, v in i.meta.iteritems()]):
+                    # soft-fail
+                    #self.log.error("Invalid metadata for %s; ignored"
+                    #               % i.name)
+                    i.meta = {}
+
+        p.pools = {}
+        for pool in provider.get('pools', []):
+            pp = ProviderPool()
+            pp.name = pool['name']
+            pp.provider = p
+            p.pools[pp.name] = pp
+            pp.max_servers = pool['max-servers']
+            pp.azs = pool.get('availability-zones')
+            pp.networks = []
+            for network in pool.get('networks', []):
+                n = Network()
+                pp.networks.append(n)
+                if 'net-id' in network:
+                    n.id = network['net-id']
+                    n.name = None
+                elif 'net-label' in network:
+                    n.name = network['net-label']
+                    n.id = None
+                else:
+                    n.name = network.get('name')
+                    n.id = None
+            pp.labels = {}
+            for label in pool.get('labels', []):
+                pl = ProviderLabel()
+                pl.name = label['name']
+                pl.pool = pp
+                pp.labels[pl.name] = pl
+                pl.diskimage = newconfig.diskimages[label['diskimage']]
+                pl.min_ram = label['min-ram']
+                pl.name_filter = label.get('name-filter', None)
+
+                top_label = newconfig.labels[pl.name]
+                top_label.pools.append(pp)
 
     return newconfig
 
@@ -65,6 +65,7 @@ class Dummy(object):
 
 def fake_get_one_cloud(cloud_config, cloud_kwargs):
     cloud_kwargs['validate'] = False
+    if 'image_format' not in cloud_kwargs:
         cloud_kwargs['image_format'] = 'qcow2'
     return cloud_config.get_one_cloud(**cloud_kwargs)
 
@@ -229,14 +229,13 @@ class InstanceDeleter(threading.Thread, StatsReporter):
 
 class NodeLauncher(threading.Thread, StatsReporter):
 
-    def __init__(self, zk, provider, label, provider_manager, requestor,
+    def __init__(self, zk, provider_label, provider_manager, requestor,
                  node, retries):
         '''
         Initialize the launcher.
 
         :param ZooKeeper zk: A ZooKeeper object.
-        :param Provider provider: A config Provider object.
-        :param Label label: The Label object for this node type.
+        :param ProviderLabel provider: A config ProviderLabel object.
         :param ProviderManager provider_manager: The manager object used to
             interact with the selected provider.
         :param str requestor: Identifier for the request originator.
@@ -247,26 +246,24 @@ class NodeLauncher(threading.Thread, StatsReporter):
         StatsReporter.__init__(self)
         self.log = logging.getLogger("nodepool.NodeLauncher-%s" % node.id)
         self._zk = zk
-        self._provider = provider
-        self._label = label
+        self._label = provider_label
         self._manager = provider_manager
         self._node = node
         self._retries = retries
         self._image_name = None
         self._requestor = requestor
 
+        self._pool = self._label.pool
+        self._provider = self._pool.provider
+        self._diskimage = self._provider.diskimages[self._label.diskimage.name]
+
     def _launchNode(self):
-        config_image = self._provider.images[self._label.image]
-
-        # Stored for statsd reporting
-        self._image_name = config_image.name
-
         cloud_image = self._zk.getMostRecentImageUpload(
-            config_image.name, self._provider.name)
+            self._diskimage.name, self._provider.name)
         if not cloud_image:
             raise LaunchNodepoolException(
                 "Unable to find current cloud image %s in %s" %
-                (config_image.name, self._provider.name)
+                (self._diskimage.name, self._provider.name)
             )
 
         hostname = self._provider.hostname_format.format(
@@ -275,7 +272,8 @@ class NodeLauncher(threading.Thread, StatsReporter):
 
         self.log.info("Creating server with hostname %s in %s from image %s "
                       "for node id: %s" % (hostname, self._provider.name,
-                                           config_image.name, self._node.id))
+                                           self._diskimage.name,
+                                           self._node.id))
 
         # NOTE: We store the node ID in the server metadata to use for leaked
         # instance detection. We cannot use the external server ID for this
@@ -284,13 +282,14 @@ class NodeLauncher(threading.Thread, StatsReporter):
 
         server = self._manager.createServer(
             hostname,
-            config_image.min_ram,
+            self._label.min_ram,
             cloud_image.external_id,
-            name_filter=config_image.name_filter,
+            name_filter=self._label.name_filter,
             az=self._node.az,
-            config_drive=config_image.config_drive,
+            config_drive=self._diskimage.config_drive,
             nodepool_node_id=self._node.id,
-            nodepool_image_name=config_image.name)
+            nodepool_image_name=self._diskimage.name,
+            networks=self._pool.networks)
 
         self._node.external_id = server.id
         self._node.hostname = hostname
@@ -417,14 +416,13 @@ class NodeLaunchManager(object):
     '''
     Handle launching multiple nodes in parallel.
    '''
-    def __init__(self, zk, provider, labels, provider_manager,
+    def __init__(self, zk, pool, provider_manager,
                  requestor, retries):
         '''
         Initialize the launch manager.
 
         :param ZooKeeper zk: A ZooKeeper object.
-        :param Provider provider: A config Provider object.
-        :param dict labels: A dict of config Label objects.
+        :param ProviderPool pool: A config ProviderPool object.
         :param ProviderManager provider_manager: The manager object used to
             interact with the selected provider.
         :param str requestor: Identifier for the request originator.
@@ -436,8 +434,7 @@ class NodeLaunchManager(object):
         self._ready_nodes = []
         self._threads = []
         self._zk = zk
-        self._provider = provider
-        self._labels = labels
+        self._pool = pool
         self._manager = provider_manager
         self._requestor = requestor
 
@@ -468,8 +465,8 @@ class NodeLaunchManager(object):
         :param Node node: The node object.
         '''
         self._nodes.append(node)
-        label = self._labels[node.type]
-        t = NodeLauncher(self._zk, self._provider, label, self._manager,
+        provider_label = self._pool.labels[node.type]
+        t = NodeLauncher(self._zk, provider_label, self._manager,
                          self._requestor, node, self._retries)
         t.start()
         self._threads.append(t)
@@ -508,13 +505,13 @@ class NodeRequestHandler(object):
     '''
     Class to process a single node request.
 
-    The ProviderWorker thread will instantiate a class of this type for each
+    The PoolWorker thread will instantiate a class of this type for each
     node request that it pulls from ZooKeeper.
     '''
 
     def __init__(self, pw, request):
         '''
-        :param ProviderWorker pw: The parent ProviderWorker object.
+        :param PoolWorker pw: The parent PoolWorker object.
         :param NodeRequest request: The request to handle.
         '''
         self.log = logging.getLogger("nodepool.NodeRequestHandler")
@@ -526,16 +523,16 @@ class NodeRequestHandler(object):
         self.chosen_az = None
         self.paused = False
 
-    def _setFromProviderWorker(self):
+    def _setFromPoolWorker(self):
         '''
-        Set values that we pull from the parent ProviderWorker.
+        Set values that we pull from the parent PoolWorker.
 
         We don't do this in __init__ because this class is re-entrant and we
         want the updated values.
         '''
         self.provider = self.pw.getProviderConfig()
+        self.pool = self.pw.getPoolConfig()
         self.zk = self.pw.getZK()
-        self.labels = self.pw.getLabelsConfig()
         self.manager = self.pw.getProviderManager()
         self.launcher_id = self.pw.launcher_id
 
@@ -549,11 +546,7 @@ class NodeRequestHandler(object):
         :returns: True if it is available, False otherwise.
         '''
         for label in self.request.node_types:
-            try:
-                img = self.labels[label].image
-            except KeyError:
-                self.log.error("Node type %s not a defined label", label)
-                return False
+            img = self.pool.labels[label].diskimage.name
 
             if not self.zk.getMostRecentImageUpload(img, self.provider.name):
                 return False
@@ -568,11 +561,7 @@ class NodeRequestHandler(object):
         '''
         invalid = []
         for ntype in self.request.node_types:
-            if ntype not in self.labels:
-                invalid.append(ntype)
-            else:
-                label = self.labels[ntype]
-                if self.provider.name not in label.providers.keys():
-                    invalid.append(ntype)
+            if ntype not in self.pool.labels:
                 invalid.append(ntype)
         return invalid
 
@@ -584,7 +573,8 @@ class NodeRequestHandler(object):
         '''
         count = 0
         for node in self.zk.nodeIterator():
-            if node.provider == self.provider.name:
+            if (node.provider == self.provider.name and
+                node.pool == self.pool.name):
                 count += 1
         return count
 
@@ -614,7 +604,7 @@ class NodeRequestHandler(object):
         '''
         if not self.launch_manager:
             self.launch_manager = NodeLaunchManager(
-                self.zk, self.provider, self.labels, self.manager,
+                self.zk, self.pool, self.manager,
                 self.request.requestor, retries=self.provider.launch_retries)
 
         # Since this code can be called more than once for the same request,
@@ -633,10 +623,12 @@ class NodeRequestHandler(object):
             got_a_node = False
             if self.request.reuse and ntype in ready_nodes:
                 for node in ready_nodes[ntype]:
-                    # Only interested in nodes from this provider and within
-                    # the selected AZ.
+                    # Only interested in nodes from this provider and
+                    # pool, and within the selected AZ.
                     if node.provider != self.provider.name:
                         continue
+                    if node.pool != self.pool.name:
+                        continue
                     if self.chosen_az and node.az != self.chosen_az:
                         continue
 
@@ -669,12 +661,12 @@ class NodeRequestHandler(object):
             if not got_a_node:
                 # Select grouping AZ if we didn't set AZ from a selected,
                 # pre-existing node
-                if not self.chosen_az and self.provider.azs:
-                    self.chosen_az = random.choice(self.provider.azs)
+                if not self.chosen_az and self.pool.azs:
+                    self.chosen_az = random.choice(self.pool.azs)
 
                 # If we calculate that we're at capacity, pause until nodes
                 # are released by Zuul and removed by the DeletedNodeWorker.
-                if self._countNodes() >= self.provider.max_servers:
+                if self._countNodes() >= self.pool.max_servers:
                     if not self.paused:
                         self.log.debug(
                             "Pausing request handling to satisfy request %s",
@@ -690,6 +682,7 @@ class NodeRequestHandler(object):
                 node.state = zk.INIT
                 node.type = ntype
                 node.provider = self.provider.name
+                node.pool = self.pool.name
                 node.az = self.chosen_az
                 node.launcher = self.launcher_id
                 node.allocated_to = self.request.id
@@ -714,17 +707,17 @@ class NodeRequestHandler(object):
         '''
         Main body for the NodeRequestHandler.
         '''
-        self._setFromProviderWorker()
+        self._setFromPoolWorker()
 
         declined_reasons = []
-        if not self._imagesAvailable():
-            declined_reasons.append('images are not available')
-        if len(self.request.node_types) > self.provider.max_servers:
-            declined_reasons.append('it would exceed quota')
         invalid_types = self._invalidNodeTypes()
         if invalid_types:
             declined_reasons.append('node type(s) [%s] not available' %
                                     ','.join(invalid_types))
+        elif not self._imagesAvailable():
+            declined_reasons.append('images are not available')
+        if len(self.request.node_types) > self.pool.max_servers:
+            declined_reasons.append('it would exceed quota')
 
         if declined_reasons:
             self.log.debug("Declining node request %s because %s",
@@ -753,6 +746,8 @@ class NodeRequestHandler(object):
 
     @property
     def alive_thread_count(self):
+        if not self.launch_manager:
+            return 0
         return self.launch_manager.alive_thread_count
 
     #----------------------------------------------------------------
@@ -858,23 +853,25 @@ class NodeRequestHandler(object):
             return True
 
 
-class ProviderWorker(threading.Thread):
+class PoolWorker(threading.Thread):
     '''
-    Class that manages node requests for a single provider.
+    Class that manages node requests for a single provider pool.
 
     The NodePool thread will instantiate a class of this type for each
-    provider found in the nodepool configuration file. If the provider to
-    which this thread is assigned is removed from the configuration file, then
-    that will be recognized and this thread will shut itself down.
+    provider pool found in the nodepool configuration file. If the
+    pool or provider to which this thread is assigned is removed from
+    the configuration file, then that will be recognized and this
+    thread will shut itself down.
     '''
 
-    def __init__(self, nodepool, provider_name):
+    def __init__(self, nodepool, provider_name, pool_name):
         threading.Thread.__init__(
-            self, name='ProviderWorker.%s' % provider_name
+            self, name='PoolWorker.%s-%s' % (provider_name, pool_name)
         )
         self.log = logging.getLogger("nodepool.%s" % self.name)
         self.nodepool = nodepool
         self.provider_name = provider_name
+        self.pool_name = pool_name
         self.running = False
         self.paused_handler = None
         self.request_handlers = []
@@ -888,19 +885,6 @@ class ProviderWorker(threading.Thread):
     # Private methods
     #----------------------------------------------------------------
 
-    def _activeThreads(self):
-        '''
-        Return the number of alive threads in use by this provider.
-
-        This is an approximate, top-end number for alive threads, since some
-        threads obviously may have finished by the time we finish the
-        calculation.
-        '''
-        total = 0
-        for r in self.request_handlers:
-            total += r.alive_thread_count
-        return total
-
     def _assignHandlers(self):
         '''
         For each request we can grab, create a NodeRequestHandler for it.
@@ -917,9 +901,15 @@ class ProviderWorker(threading.Thread):
         if self.paused_handler:
             return
 
+        # Get active threads for all pools for this provider
+        active_threads = sum([
+            w.activeThreads() for
+            w in self.nodepool.getPoolWorkers(self.provider_name)
+        ])
+
         # Short-circuit for limited request handling
-        if (provider.max_concurrency > 0
-                and self._activeThreads() >= provider.max_concurrency
+        if (provider.max_concurrency > 0 and
+                active_threads >= provider.max_concurrency
         ):
             return
 
@@ -968,18 +958,31 @@ class ProviderWorker(threading.Thread):
    # Public methods
    #----------------------------------------------------------------
 
+    def activeThreads(self):
+        '''
+        Return the number of alive threads in use by this provider.
+
+        This is an approximate, top-end number for alive threads, since some
+        threads obviously may have finished by the time we finish the
+        calculation.
+        '''
+        total = 0
+        for r in self.request_handlers:
+            total += r.alive_thread_count
+        return total
+
     def getZK(self):
         return self.nodepool.getZK()
 
     def getProviderConfig(self):
         return self.nodepool.config.providers[self.provider_name]
 
+    def getPoolConfig(self):
+        return self.getProviderConfig().pools[self.pool_name]
+
     def getProviderManager(self):
         return self.nodepool.getProviderManager(self.provider_name)
 
-    def getLabelsConfig(self):
-        return self.nodepool.config.labels
-
     def run(self):
         self.running = True
 
@@ -1005,7 +1008,7 @@ class ProviderWorker(threading.Thread):
 
                 self._removeCompletedHandlers()
             except Exception:
-                self.log.exception("Error in ProviderWorker:")
+                self.log.exception("Error in PoolWorker:")
             time.sleep(self.watermark_sleep)
 
         # Cleanup on exit
@@ -1014,7 +1017,7 @@ class ProviderWorker(threading.Thread):
 
     def stop(self):
         '''
-        Shutdown the ProviderWorker thread.
+        Shutdown the PoolWorker thread.
 
         Do not wait for the request handlers to finish. Any nodes
         that are in the process of launching will be cleaned up on a
@@ -1293,7 +1296,7 @@ class NodePool(threading.Thread):
         self.config = None
         self.zk = None
         self.statsd = stats.get_client()
-        self._provider_threads = {}
+        self._pool_threads = {}
         self._cleanup_thread = None
         self._delete_thread = None
         self._wake_condition = threading.Condition()
@@ -1315,10 +1318,10 @@ class NodePool(threading.Thread):
         self._delete_thread.stop()
         self._delete_thread.join()
 
-        # Don't let stop() return until all provider threads have been
+        # Don't let stop() return until all pool threads have been
         # terminated.
-        self.log.debug("Stopping provider threads")
-        for thd in self._provider_threads.values():
+        self.log.debug("Stopping pool threads")
+        for thd in self._pool_threads.values():
             if thd.isAlive():
                 thd.stop()
                 self.log.debug("Waiting for %s" % thd.name)
@@ -1361,6 +1364,10 @@ class NodePool(threading.Thread):
     def getProviderManager(self, provider_name):
         return self.config.provider_managers[provider_name]
 
+    def getPoolWorkers(self, provider_name):
+        return [t for t in self._pool_threads.values() if
+                t.provider_name == provider_name]
+
     def updateConfig(self):
         config = self.loadConfig()
         provider_manager.ProviderManager.reconfigure(self.config, config)
@@ -1416,6 +1423,13 @@ class NodePool(threading.Thread):
         :returns: True if image associated with the label is uploaded and
             ready in at least one provider. False otherwise.
         '''
+        for pool in label.pools:
+            for pool_label in pool.labels.values():
+                if self.zk.getMostRecentImageUpload(pool_label.diskimage.name,
+                                                    pool.provider.name):
+                    return True
+        return False
+
         for provider_name in label.providers.keys():
             if self.zk.getMostRecentImageUpload(label.image, provider_name):
                 return True
@@ -1500,27 +1514,34 @@ class NodePool(threading.Thread):
                     self, self.delete_interval)
                 self._delete_thread.start()
 
-            # Stop any ProviderWorker threads if the provider was removed
+            # Stop any PoolWorker threads if the pool was removed
             # from the config.
-            for provider_name in self._provider_threads.keys():
-                if provider_name not in self.config.providers.keys():
-                    self._provider_threads[provider_name].stop()
+            pool_keys = set()
+            for provider in self.config.providers.values():
+                for pool in provider.pools.values():
+                    pool_keys.add(provider.name + '-' + pool.name)
+
+            for key in self._pool_threads.keys():
+                if key not in pool_keys:
+                    self._pool_threads[key].stop()
 
             # Start (or restart) provider threads for each provider in
             # the config. Removing a provider from the config and then
             # adding it back would cause a restart.
-            for p in self.config.providers.values():
-                if p.name not in self._provider_threads.keys():
-                    t = ProviderWorker(self, p.name)
-                    self.log.info( "Starting %s" % t.name)
-                    t.start()
-                    self._provider_threads[p.name] = t
-                elif not self._provider_threads[p.name].isAlive():
-                    self._provider_threads[p.name].join()
-                    t = ProviderWorker(self, p.name)
-                    self.log.info( "Restarting %s" % t.name)
-                    t.start()
-                    self._provider_threads[p.name] = t
+            for provider in self.config.providers.values():
+                for pool in provider.pools.values():
+                    key = provider.name + '-' + pool.name
+                    if key not in self._pool_threads.keys():
+                        t = PoolWorker(self, provider.name, pool.name)
+                        self.log.info( "Starting %s" % t.name)
+                        t.start()
+                        self._pool_threads[key] = t
+                    elif not self._pool_threads[key].isAlive():
+                        self._pool_threads[key].join()
+                        t = PoolWorker(self, provider.name, pool.name)
+                        self.log.info( "Restarting %s" % t.name)
+                        t.start()
+                        self._pool_threads[key] = t
         except Exception:
             self.log.exception("Exception in main loop:")
 
@@ -44,7 +44,7 @@ class NotFound(Exception):
 
 
 def get_provider_manager(provider, use_taskmanager):
-    if (provider.cloud_config.name == 'fake'):
+    if provider.name.startswith('fake'):
        return FakeProviderManager(provider, use_taskmanager)
    else:
        return ProviderManager(provider, use_taskmanager)
@@ -168,7 +168,9 @@ class ProviderManager(object):
     def createServer(self, name, min_ram, image_id=None, image_name=None,
                      az=None, key_name=None, name_filter=None,
                      config_drive=None, nodepool_node_id=None,
-                     nodepool_image_name=None):
+                     nodepool_image_name=None, networks=None):
+        if not networks:
+            networks = []
         if image_name:
             image = self.findImage(image_name)
         else:
@@ -183,7 +185,7 @@ class ProviderManager(object):
         if az:
             create_args['availability_zone'] = az
         nics = []
-        for network in self.provider.networks:
+        for network in networks:
             if network.id:
                 nics.append({'net-id': network.id})
             elif network.name:
@@ -126,6 +126,9 @@ class BaseTestCase(testtools.TestCase):
         l = logging.getLogger('kazoo')
         l.setLevel(logging.INFO)
         l.propagate=False
+        l = logging.getLogger('stevedore')
+        l.setLevel(logging.INFO)
+        l.propagate=False
         self.useFixture(fixtures.NestedTempfile())
 
         self.subprocesses = []
@@ -187,7 +190,7 @@ class BaseTestCase(testtools.TestCase):
                 continue
             if t.name.startswith("CleanupWorker"):
                 continue
-            if t.name.startswith("ProviderWorker"):
+            if t.name.startswith("PoolWorker"):
                 continue
             if t.name not in whitelist:
                 done = False
nodepool/tests/fixtures/clouds.yaml (2 changes, vendored)
@@ -1,7 +1,7 @@
 clouds:
   fake:
     auth:
-      usernmae: 'fake'
+      username: 'fake'
       password: 'fake'
       project_id: 'fake'
       auth_url: 'fake'
@@ -12,37 +12,46 @@ zookeeper-servers:
 
 labels:
   - name: trusty
-    image: trusty
     min-ready: 1
-    providers:
-      - name: cloud1
-      - name: cloud2
   - name: trusty-2-node
-    image: trusty
     min-ready: 0
-    providers:
-      - name: cloud1
-      - name: cloud2
 
 providers:
   - name: cloud1
     region-name: 'vanilla'
     boot-timeout: 120
-    max-servers: 184
     max-concurrency: 10
     launch-retries: 3
     rate: 0.001
-    images:
+    diskimages:
       - name: trusty
+    pools:
+      - name: main
+        max-servers: 184
+        labels:
+          - name: trusty
+            diskimage: trusty
             min-ram: 8192
+          - name: trusty-2-node
+            diskimage: trusty
+            min-ram: 8192
 
   - name: cloud2
     region-name: 'chocolate'
     boot-timeout: 120
-    max-servers: 184
     rate: 0.001
-    images:
+    diskimages:
       - name: trusty
         pause: False
+    pools:
+      - name: main
+        max-servers: 184
+        labels:
+          - name: trusty
+            diskimage: trusty
+            min-ram: 8192
+          - name: trusty-2-node
+            diskimage: trusty
             min-ram: 8192
 
 diskimages:
nodepool/tests/fixtures/integration.yaml (19 changes, vendored)
@@ -9,26 +9,25 @@ zookeeper-servers:
 
 labels:
   - name: real-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: real-provider
 
 providers:
   - name: real-provider
     region-name: real-region
-    max-servers: 96
-    pool: 'real'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
-        min-ram: 8192
-        name-filter: 'Real'
         meta:
           key: value
           key2: value
+    pools:
+      - name: main
+        max-servers: 96
+        labels:
+          - name: real-label
+            diskimage: fake-image
+            min-ram: 8192
+            name-filter: 'Real'
 
 diskimages:
   - name: fake-image
nodepool/tests/fixtures/integration_occ.yaml (19 changes, vendored)
@@ -9,26 +9,25 @@ zookeeper-servers:
 
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: real-provider
 
 providers:
   - name: real-provider
     cloud: real-cloud
-    max-servers: 96
-    pool: 'real'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
-        min-ram: 8192
-        name-filter: 'Real'
         meta:
           key: value
           key2: value
+    pools:
+      - name: main
+        max-servers: 96
+        labels:
+          - name: fake-label
+            diskimage: fake-image
+            min-ram: 8192
+            name-filter: 'Real'
 
 diskimages:
   - name: fake-image
|
nodepool/tests/fixtures/leaked_node.yaml
@@ -12,26 +12,22 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
+    pools:
+      - name: main
+        max-servers: 96
+        labels:
+          - name: fake-label
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

 diskimages:
   - name: fake-image
nodepool/tests/fixtures/node.yaml
@@ -12,29 +12,30 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
     cloud: fake
     region-name: fake-region
-    availability-zones:
-      - az1
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
-        min-ram: 8192
-        name-filter: 'Fake'
         meta:
           key: value
           key2: value
+    pools:
+      - name: main
+        max-servers: 96
+        availability-zones:
+          - az1
+        networks:
+          - net-id: 'some-uuid'
+        labels:
+          - name: fake-label
+            diskimage: fake-image
+            min-ram: 8192
+            name-filter: 'Fake'

 diskimages:
   - name: fake-image
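Where a fixture pinned scheduling details, those move into the pool as well; node.yaml above ends up with availability zones and networks as pool keys. A sketch of just the pool section, reconstructed from that fixture:

# Pool section as it appears after the change in node.yaml (illustrative).
pools:
  - name: main
    max-servers: 96
    availability-zones:     # formerly a provider-level key
      - az1
    networks:               # formerly a provider-level key
      - net-id: 'some-uuid'
    labels:
      - name: fake-label
        diskimage: fake-image
        min-ram: 8192
        name-filter: 'Fake'
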
nodepool/tests/fixtures/node_az.yaml
@@ -12,28 +12,30 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
+    rate: 0.0001
+    diskimages:
+      - name: fake-image
+        meta:
+          key: value
+          key2: value
+    pools:
+      - name: main
         max-servers: 96
-    pool: 'fake'
         networks:
           - net-id: 'some-uuid'
         availability-zones:
           - az1
-    rate: 0.0001
-    images:
-      - name: fake-image
+        labels:
+          - name: fake-label
+            diskimage: fake-image
             min-ram: 8192
             name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

 diskimages:
   - name: fake-image
nodepool/tests/fixtures/node_cmd.yaml
@@ -11,43 +11,47 @@ zookeeper-servers:
 labels:
   - name: fake-label1
-    image: fake-image1
     min-ready: 1
-    providers:
-      - name: fake-provider1
   - name: fake-label2
-    image: fake-image2
     min-ready: 1
-    providers:
-      - name: fake-provider2

 providers:
   - name: fake-provider1
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
+    cloud: fake
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image1
-        min-ram: 8192
-        name-filter: 'Fake'
         meta:
           key: value
           key2: value
-  - name: fake-provider2
+    pools:
+      - name: main
         max-servers: 96
-    pool: 'fake'
         networks:
           - net-id: 'some-uuid'
-    rate: 0.0001
-    images:
-      - name: fake-image2
+        labels:
+          - name: fake-label1
+            diskimage: fake-image1
             min-ram: 8192
-        name-filter: 'Fake'
+            name-filter: 'fake'

+  - name: fake-provider2
+    rate: 0.0001
+    diskimages:
+      - name: fake-image2
         meta:
           key: value
           key2: value
+    pools:
+      - name: main
+        max-servers: 96
+        networks:
+          - net-id: 'some-uuid'
+        labels:
+          - name: fake-label2
+            diskimage: fake-image2
+            min-ram: 8192
+            name-filter: 'fake'

 diskimages:
   - name: fake-image1
nodepool/tests/fixtures/node_disabled_label.yaml
@@ -12,26 +12,28 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 0
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
-        min-ram: 8192
-        name-filter: 'Fake'
         meta:
           key: value
           key2: value
+    pools:
+      - name: main
+        max-servers: 96
+        networks:
+          - net-id: 'some-uuid'
+        labels:
+          - name: fake-label
+            diskimage: fake-image
+            min-ram: 8192
+            name-filter: 'fake'

 diskimages:
   - name: fake-image
nodepool/tests/fixtures/node_diskimage_fail.yaml
@@ -12,26 +12,28 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
-        min-ram: 8192
-        name-filter: 'Fake'
         meta:
           key: value
           key2: value
+    pools:
+      - name: main
+        max-servers: 96
+        networks:
+          - net-id: 'some-uuid'
+        labels:
+          - name: fake-label
+            diskimage: fake-image
+            min-ram: 8192
+            name-filter: 'fake'

 diskimages:
   - name: fake-image
@@ -12,32 +12,32 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider
   - name: fake-label2
-    image: fake-image2
     min-ready: 1
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
-        min-ram: 8192
-        name-filter: 'Fake'
         meta:
           key: value
           key2: value
       - name: fake-image2
+    pools:
+      - name: main
+        max-servers: 96
+        networks:
+          - net-id: 'some-uuid'
+        labels:
+          - name: fake-label
+            diskimage: fake-image
+            min-ram: 8192
+          - name: fake-label2
+            diskimage: fake-image2
             min-ram: 8192

 diskimages:
@@ -12,33 +12,31 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider
   - name: fake-label2
-    image: fake-image2
     min-ready: 1
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
         pause: True
-        min-ram: 8192
-        name-filter: 'Fake'
         meta:
           key: value
           key2: value
       - name: fake-image2
+    pools:
+      - name: main
+        max-servers: 96
+        labels:
+          - name: fake-label
+            min-ram: 8192
+            diskimage: fake-image
+          - name: fake-label2
+            diskimage: fake-image2
             min-ram: 8192

 diskimages:
nodepool/tests/fixtures/node_ipv6.yaml
@@ -12,70 +12,64 @@ zookeeper-servers:
 labels:
   - name: fake-label1
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider1

   - name: fake-label2
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider2

   - name: fake-label3
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider3

 providers:
   - name: fake-provider1
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'ipv6-uuid'
     ipv6-preferred: True
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
+    pools:
+      - name: main
+        max-servers: 96
+        networks:
+          - net-id: 'ipv6-uuid'
+        labels:
+          - name: fake-label1
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

   - name: fake-provider2
+    cloud: fake
     region-name: fake-region
+    rate: 0.0001
+    diskimages:
+      - name: fake-image
+    pools:
+      - name: main
         max-servers: 96
-    pool: 'fake'
         networks:
           - net-id: 'ipv6-uuid'
-    rate: 0.0001
-    images:
-      - name: fake-image
+        labels:
+          - name: fake-label2
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

   - name: fake-provider3
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     ipv6-preferred: True
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
+    pools:
+      - name: main
+        max-servers: 96
+        networks:
+          - net-id: 'some-uuid'
+        labels:
+          - name: fake-label3
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

 diskimages:
   - name: fake-image
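One detail worth noting in node_ipv6.yaml: `ipv6-preferred` stays on the provider while each provider's network selection moves into its pool, so a single provider-level IPv6 preference can apply to pools picking different networks. An illustrative excerpt assuming the names above:

# Provider/pool split from node_ipv6.yaml (illustrative excerpt).
providers:
  - name: fake-provider1
    cloud: fake
    ipv6-preferred: True    # remains a provider-level key
    diskimages:
      - name: fake-image
    pools:
      - name: main
        max-servers: 96
        networks:           # network choice is now per pool
          - net-id: 'ipv6-uuid'
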
nodepool/tests/fixtures/node_label_provider.yaml
@@ -12,40 +12,31 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider2

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
-        min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value
+    pools:
+      - name: main
+        max-servers: 96
   - name: fake-provider2
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
+    pools:
+      - name: main
+        max-servers: 96
+        labels:
+          - name: fake-label
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

 diskimages:
   - name: fake-image
nodepool/tests/fixtures/node_launch_retry.yaml
@@ -12,28 +12,25 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 0
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
-    cloud: 'fake'
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
     launch-retries: 2
+    rate: 0.0001
+    diskimages:
+      - name: fake-image
+    pools:
+      - name: main
         networks:
           - net-id: 'some-uuid'
-    rate: 0.0001
-    images:
-      - name: fake-image
+        max-servers: 96
+        labels:
+          - name: fake-label
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

 diskimages:
   - name: fake-image
nodepool/tests/fixtures/node_lost_requests.yaml
@@ -12,28 +12,24 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 0
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
+    rate: 0.0001
+    diskimages:
+      - name: fake-image
+    pools:
+      - name: main
         availability-zones:
           - az1
         max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
-    rate: 0.0001
-    images:
-      - name: fake-image
+        labels:
+          - name: fake-label
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

 diskimages:
   - name: fake-image
nodepool/tests/fixtures/node_net_name.yaml
@@ -12,28 +12,26 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
+    rate: 0.0001
+    diskimages:
+      - name: fake-image
+    pools:
+      - name: main
         max-servers: 96
-    pool: 'fake'
         networks:
           - name: 'fake-public-network-name'
             public: true
           - name: 'fake-private-network-name'
-    rate: 0.0001
-    images:
-      - name: fake-image
+        labels:
+          - name: fake-label
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

 diskimages:
   - name: fake-image
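Named networks keep their structure and simply move under the pool; `public: true` still marks the network that supplies the public address. A sketch of just that pool, using the names from node_net_name.yaml above:

# Pool-level named networks (illustrative excerpt from node_net_name.yaml).
pools:
  - name: main
    max-servers: 96
    networks:
      - name: 'fake-public-network-name'
        public: true        # this network provides the public address
      - name: 'fake-private-network-name'
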
nodepool/tests/fixtures/node_quota.yaml
@@ -12,28 +12,22 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 0
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
-    availability-zones:
-      - az1
-    max-servers: 2
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
+    pools:
+      - name: main
+        max-servers: 2
+        labels:
+          - name: fake-label
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

 diskimages:
   - name: fake-image
nodepool/tests/fixtures/node_two_image.yaml
@@ -12,32 +12,27 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider
   - name: fake-label2
-    image: fake-image2
     min-ready: 1
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
-        min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value
       - name: fake-image2
+    pools:
+      - name: main
+        max-servers: 96
+        labels:
+          - name: fake-label
+            diskimage: fake-image
+            min-ram: 8192
+          - name: fake-label2
+            diskimage: fake-image2
             min-ram: 8192

 diskimages:
@@ -12,26 +12,22 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
+    pools:
+      - name: main
+        max-servers: 96
+        labels:
+          - name: fake-label
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

 diskimages:
   - name: fake-image
nodepool/tests/fixtures/node_two_provider.yaml
@@ -12,41 +12,35 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider
-      - name: fake-provider2

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
+    pools:
+      - name: main
+        max-servers: 96
+        labels:
+          - name: fake-label
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value
   - name: fake-provider2
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
+    pools:
+      - name: main
+        max-servers: 96
+        labels:
+          - name: fake-label
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

 diskimages:
   - name: fake-image
@@ -12,34 +12,27 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
-        min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value
-  - name: fake-provider2
-    region-name: fake-region
+    pools:
+      - name: main
         max-servers: 96
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
+        labels:
+          - name: fake-label
+            diskimage: fake-image
+            min-ram: 8192

+  - name: fake-provider2
+    cloud: fake
+    region-name: fake-region
     rate: 0.0001
-    images: []

 diskimages:
   - name: fake-image
nodepool/tests/fixtures/node_upload_fail.yaml
@@ -12,42 +12,38 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 2
-    providers:
-      - name: fake-provider1
-      - name: fake-provider2

 providers:
   - name: fake-provider1
+    cloud: fake
     region-name: fake-region
-    max-servers: 2
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
-        min-ram: 8192
-        name-filter: 'Fake'
         meta:
-          key: value
-          key2: value
           SHOULD_FAIL: 'true'
-  - name: fake-provider2
-    region-name: fake-region
+    pools:
+      - name: main
         max-servers: 2
-    pool: 'fake'
-    networks:
-      - net-id: 'some-uuid'
-    rate: 0.0001
-    images:
-      - name: fake-image
+        labels:
+          - name: fake-label
+            diskimage: fake-image
+            min-ram: 8192

+  - name: fake-provider2
+    cloud: fake
+    region-name: fake-region
+    rate: 0.0001
+    diskimages:
+      - name: fake-image
+    pools:
+      - name: main
+        max-servers: 2
+        labels:
+          - name: fake-label
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

 diskimages:
   - name: fake-image
nodepool/tests/fixtures/node_vhd.yaml
@@ -12,27 +12,23 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 1
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
+    cloud: fake
     region-name: fake-region
-    max-servers: 96
-    pool: 'fake'
     image-type: vhd
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
+    pools:
+      - name: main
+        max-servers: 96
+        labels:
+          - name: fake-label
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

 diskimages:
   - name: fake-image
nodepool/tests/fixtures/node_vhd_and_qcow2.yaml
@@ -12,43 +12,38 @@ zookeeper-servers:
 labels:
   - name: fake-label
-    image: fake-image
     min-ready: 2
-    providers:
-      - name: fake-provider1
-      - name: fake-provider2

 providers:
   - name: fake-provider1
+    cloud: fake
     region-name: fake-region
-    max-servers: 2
-    pool: 'fake'
     image-type: vhd
-    networks:
-      - net-id: 'some-uuid'
     rate: 0.0001
-    images:
+    diskimages:
       - name: fake-image
-        min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value
-  - name: fake-provider2
-    region-name: fake-region
+    pools:
+      - name: main
         max-servers: 2
-    pool: 'fake'
-    image-type: qcow2
-    networks:
-      - net-id: 'some-uuid'
-    rate: 0.0001
-    images:
-      - name: fake-image
+        labels:
+          - name: fake-label
+            diskimage: fake-image
+            min-ram: 8192

+  - name: fake-provider2
+    cloud: fake
+    region-name: fake-region
+    image-type: qcow2
+    rate: 0.0001
+    diskimages:
+      - name: fake-image
+    pools:
+      - name: main
+        max-servers: 2
+        labels:
+          - name: fake-label
+            diskimage: fake-image
             min-ram: 8192
-        name-filter: 'Fake'
-        meta:
-          key: value
-          key2: value

 diskimages:
   - name: fake-image
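The VHD/qcow2 fixture confirms that `image-type` remains a provider key after the restructure, so two providers can upload the same diskimage in different formats while each defines its own pool. A condensed sketch using the fixture's names:

# Two providers sharing one diskimage in different formats (illustrative).
providers:
  - name: fake-provider1
    image-type: vhd         # upload format is still chosen per provider
    diskimages:
      - name: fake-image
    pools:
      - name: main
        max-servers: 2
  - name: fake-provider2
    image-type: qcow2
    diskimages:
      - name: fake-image
    pools:
      - name: main
        max-servers: 2
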
@@ -40,7 +40,7 @@ class TestNodeLaunchManager(tests.DBTestCase):
         self.waitForImage('fake-provider', 'fake-image')

         self.provider = b._config.providers['fake-provider']
-        self.labels = b._config.labels
+        self.provider_pool = self.provider.pools['main']

         # The builder config does not have a provider manager, so create one.
         self.pmanager = provider_manager.ProviderManager(self.provider, False)
@@ -53,7 +53,7 @@ class TestNodeLaunchManager(tests.DBTestCase):
         n1 = zk.Node()
         n1.state = zk.BUILDING
         n1.type = 'fake-label'
-        mgr = NodeLaunchManager(self.zk, self.provider, self.labels,
+        mgr = NodeLaunchManager(self.zk, self.provider_pool,
                                 self.pmanager, 'zuul', 1)
         mgr.launch(n1)
         while not mgr.poll():
@@ -70,7 +70,7 @@ class TestNodeLaunchManager(tests.DBTestCase):
         n1 = zk.Node()
         n1.state = zk.BUILDING
         n1.type = 'fake-label'
-        mgr = NodeLaunchManager(self.zk, self.provider, self.labels,
+        mgr = NodeLaunchManager(self.zk, self.provider_pool,
                                 self.pmanager, 'zuul', 1)
         mgr.launch(n1)
         while not mgr.poll():
@@ -90,7 +90,7 @@ class TestNodeLaunchManager(tests.DBTestCase):
         n2 = zk.Node()
         n2.state = zk.BUILDING
         n2.type = 'fake-label'
-        mgr = NodeLaunchManager(self.zk, self.provider, self.labels,
+        mgr = NodeLaunchManager(self.zk, self.provider_pool,
                                 self.pmanager, 'zuul', 1)
         mgr.launch(n1)
         mgr.launch(n2)
@@ -469,6 +469,7 @@ class TestNodepool(tests.DBTestCase):
         node.type = 'fake-label'
         node.public_ipv4 = 'fake'
         node.provider = 'fake-provider'
+        node.pool = 'main'
         node.allocated_to = req.id
         self.zk.storeNode(node)

@@ -404,6 +404,7 @@ class Node(BaseModel):
         super(Node, self).__init__(id)
         self.lock = None
         self.provider = None
+        self.pool = None
         self.type = None
         self.allocated_to = None
         self.az = None
@@ -430,6 +431,7 @@ class Node(BaseModel):
                 self.state == other.state and
                 self.state_time == other.state_time and
                 self.provider == other.provider and
+                self.pool == other.pool and
                 self.type == other.type and
                 self.allocated_to == other.allocated_to and
                 self.az == other.az and
@@ -452,6 +454,7 @@ class Node(BaseModel):
         '''
         d = super(Node, self).toDict()
         d['provider'] = self.provider
+        d['pool'] = self.pool
         d['type'] = self.type
         d['allocated_to'] = self.allocated_to
         d['az'] = self.az
@@ -480,6 +483,7 @@ class Node(BaseModel):
         o = Node(o_id)
         super(Node, o).fromDict(d)
         o.provider = d.get('provider')
+        o.pool = d.get('pool')
         o.type = d.get('type')
         o.allocated_to = d.get('allocated_to')
         o.az = d.get('az')
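With `pool` wired through `__init__`, equality, `toDict`, and `fromDict`, a node record stored in ZooKeeper now remembers which pool launched it alongside the provider. A hypothetical serialized record (field values are illustrative, not taken from the tests):

# Hypothetical znode payload for a launched node after this change.
provider: fake-provider
pool: main                  # new field added by this commit
type: fake-label
allocated_to: '200-0000000001'   # illustrative request id
az: az1
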
@@ -20,23 +20,21 @@ diskimages:
     BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2

 labels:
-  - name: single-fake
-    image: fake-nodepool
+  - name: small-fake
     min-ready: 2
-    providers:
-      - name: fake-provider
-  - name: multi-fake
-    image: fake-nodepool
+  - name: big-fake
     min-ready: 2
-    providers:
-      - name: fake-provider

 providers:
   - name: fake-provider
     region-name: 'fake-region'
-    max-servers: 96
-    images:
+    diskimages:
       - name: fake-nodepool
+    pools:
+      - name: main
+        max-servers: 96
+        labels:
+          - name: big-fake
+            diskimage: fake-nodepool
             min-ram: 8192
             name-filter: 'Fake'
-        diskimage: fake-nodepool