Remove snapshot support

We don't have plans to support building images from snapshots in the
future.  If we add it back, it will require a new builder implementation
(with a ZooKeeper interface, etc.) anyway.

In addition to removing essentially dead code, this also begins the
process of simplifying the configuration file.  It removes 'base-image'
and 'setup' from the provider image attributes since they are only used
for snapshot images.
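Concretely, a provider image entry loses its snapshot-only keys and keeps
just the flavor selection plus a diskimage reference. A minimal
before/after sketch (values are illustrative, drawn from the sample
configs touched below):

    # before: snapshot-build attributes on the provider image
    images:
      - name: trusty
        base-image: 'Trusty'
        min-ram: 8192
        setup: prepare_node.sh

    # after: only flavor selection and the diskimage reference remain
    images:
      - name: trusty
        min-ram: 8192
        diskimage: trusty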

Change-Id: I68e0d2c7017cfebe35341189b5aedb0556bcd78b
James E. Blair
2016-11-11 10:02:15 -08:00
parent ed62ff9909
commit 7ab5ea700e
26 changed files with 108 additions and 2571 deletions

View File

@@ -178,16 +178,7 @@ cron:
cleanup: '*/1 * * * *'
check: '*/15 * * * *'
-# Devstack does not make an Ubuntu image by default. You can
-# grab one from Ubuntu and upload it yourself. Note that we
-# cannot use devstack's cirros default because cirros does not
-# support sftp.
labels:
-- name: $NODEPOOL_IMAGE
-image: $NODEPOOL_IMAGE
-min-ready: 1
-providers:
-- name: devstack
- name: ubuntu-dib
image: ubuntu-dib
min-ready: 1
@@ -205,17 +196,6 @@ providers:
max-servers: 2
rate: 0.25
images:
-- name: $NODEPOOL_IMAGE
-base-image: '$NODEPOOL_IMAGE'
-min-ram: 1024
-# This script should setup the jenkins user to accept
-# the ssh key configured below. It goes in the script-dir
-# configured above and an example is below.
-setup: prepare_node_ubuntu.sh
-username: jenkins
-# Alter below to point to your local user private key
-private-key: $NODEPOOL_KEY
-config-drive: true
- name: ubuntu-dib
min-ram: 1024
diskimage: ubuntu-dib
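For reference, a sketch of the devstack test configuration as it stands
after this hunk, with indentation and the label's provider list assumed
from the removed lines above:

    labels:
      - name: ubuntu-dib
        image: ubuntu-dib
        min-ready: 1
        providers:
          - name: devstack
    providers:
      - name: devstack
        images:
          - name: ubuntu-dib
            min-ram: 1024
            diskimage: ubuntu-dib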

View File

@@ -1,12 +1,8 @@
NODEPOOL_CONFIG=/etc/nodepool/nodepool.yaml
NODEPOOL_LOGGING=/etc/nodepool/logging.conf
NODEPOOL_SECURE=/etc/nodepool/secure.conf
-NODEPOOL_IMAGE_URL=https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
-NODEPOOL_IMAGE=$(basename "$NODEPOOL_IMAGE_URL" ".img")
NODEPOOL_DIB_BASE_PATH=/opt/dib
-IMAGE_URLS+=",$NODEPOOL_IMAGE_URL"
SHADE_REPO_URL=${SHADE_REPO_URL:-https://git.openstack.org/openstack-infra/shade}
SHADE_REPO_REF=${SHADE_REPO_REF:-master}

View File

@@ -303,10 +303,8 @@ provider, the Nodepool image types are also defined (see
public: True
images:
- name: trusty
-base-image: 'Trusty'
min-ram: 8192
name-filter: 'something to match'
-setup: prepare_node.sh
username: jenkins
user-home: '/home/jenkins'
private-key: /var/lib/jenkins/.ssh/id_rsa
@@ -314,9 +312,7 @@ provider, the Nodepool image types are also defined (see
key: value
key2: value
- name: precise
-base-image: 'Precise'
min-ram: 8192
-setup: prepare_node.sh
username: jenkins
user-home: '/home/jenkins'
private-key: /var/lib/jenkins/.ssh/id_rsa
@@ -338,9 +334,7 @@ provider, the Nodepool image types are also defined (see
template-hostname: '{image.name}-{timestamp}-nodepool-template'
images:
- name: precise
-base-image: 'Fake Precise'
min-ram: 8192
-setup: prepare_node.sh
username: jenkins
user-home: '/home/jenkins'
private-key: /var/lib/jenkins/.ssh/id_rsa
@@ -471,10 +465,8 @@ Example::
images:
- name: precise
-base-image: 'Precise'
min-ram: 8192
name-filter: 'something to match'
-setup: prepare_node.sh
username: jenkins
private-key: /var/lib/jenkins/.ssh/id_rsa
meta:
@@ -487,18 +479,10 @@ Example::
Identifier to refer this image from :ref:`labels` and :ref:`provider`
sections.
-If the resulting images from different providers ``base-image`` should be
-equivalent, give them the same name; e.g. if one provider has a ``Fedora
-20`` image and another has an equivalent ``Fedora 20 (Heisenbug)`` image,
-they should use a common ``name``. Otherwise select a unique ``name``.
-``base-image``
-UUID or string-name of the image to boot as specified by the provider.
``min-ram``
-Determine the flavor of ``base-image`` to use (e.g. ``m1.medium``,
-``m1.large``, etc). The smallest flavor that meets the ``min-ram``
-requirements will be chosen. To further filter by flavor name, see optional
+Determine the flavor to use (e.g. ``m1.medium``, ``m1.large``,
+etc). The smallest flavor that meets the ``min-ram`` requirements
+will be chosen. To further filter by flavor name, see optional
``name-filter`` below.
**optional**
@@ -509,13 +493,6 @@ Example::
`name-filter` to ``Performance`` will ensure the chosen flavor also
contains this string as well as meeting `min-ram` requirements).
-``setup``
-Script to run to prepare the instance.
-Used only when not building images using diskimage-builder, in that case
-settings defined in the ``diskimages`` section will be used instead. See
-:ref:`scripts` for setup script details.
``diskimage``
See :ref:`diskimages`.
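Putting the surviving attributes together, a provider image entry under
the simplified schema reduces to the following sketch (values taken from
the example above; the diskimage name is assumed to match a
``diskimages`` entry):

    images:
      - name: precise
        min-ram: 8192
        name-filter: 'something to match'
        username: jenkins
        private-key: /var/lib/jenkins/.ssh/id_rsa
        diskimage: precise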

View File

@@ -31,12 +31,10 @@ class ConfigValidator:
images = {
'name': str,
-'base-image': str,
'min-ram': int,
'name-filter': str,
'diskimage': str,
'meta': dict,
-'setup': str,
'username': str,
'private-key': str,
'config-drive': bool,
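A config fragment exercising every key the validator still accepts might
look like this sketch (placeholder values in the style of the test
fixtures below):

    images:
      - name: fake-image
        min-ram: 8192
        name-filter: 'Fake'
        diskimage: fake-image
        meta:
          key: value
        username: jenkins
        private-key: /var/lib/jenkins/.ssh/id_rsa
        config-drive: true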

View File

@@ -59,13 +59,10 @@ class Provider(ConfigValue):
return False
# check if existing images have been updated
for k in new_images:
-if (new_images[k].base_image != old_images[k].base_image or
-new_images[k].min_ram != old_images[k].min_ram or
+if (new_images[k].min_ram != old_images[k].min_ram or
new_images[k].name_filter != old_images[k].name_filter or
-new_images[k].setup != old_images[k].setup or
new_images[k].username != old_images[k].username or
new_images[k].user_home != old_images[k].user_home or
new_images[k].diskimage != old_images[k].diskimage or
new_images[k].private_key != old_images[k].private_key or
new_images[k].meta != old_images[k].meta or
new_images[k].config_drive != old_images[k].config_drive):
@@ -225,10 +222,8 @@ def loadConfig(config_path):
i = ProviderImage()
i.name = image['name']
p.images[i.name] = i
-i.base_image = image.get('base-image', None)
i.min_ram = image['min-ram']
i.name_filter = image.get('name-filter', None)
-i.setup = image.get('setup', None)
i.diskimage = image.get('diskimage', None)
i.username = image.get('username', 'jenkins')
i.user_home = image.get('user-home', '/home/jenkins')

View File

@@ -25,7 +25,6 @@ import os.path
import paramiko
import pprint
import random
-import re
import threading
import time
from uuid import uuid4
@@ -893,236 +892,6 @@ class ImageDeleter(threading.Thread):
self.snap_image_id)
class ImageUpdater(threading.Thread):
log = logging.getLogger("nodepool.ImageUpdater")
def __init__(self, nodepool, provider, image, snap_image_id):
threading.Thread.__init__(self, name='ImageUpdater for %s' %
snap_image_id)
self.provider = provider
self.image = image
self.snap_image_id = snap_image_id
self.nodepool = nodepool
self.scriptdir = self.nodepool.config.scriptdir
self.elementsdir = self.nodepool.config.elementsdir
self.imagesdir = self.nodepool.config.imagesdir
self.statsd = stats.get_client()
def run(self):
try:
self._run()
except Exception:
self.log.exception("Exception in run method:")
def _run(self):
with self.nodepool.getDB().getSession() as session:
self.log.debug("Updating image %s in %s " % (self.image.name,
self.provider.name))
try:
self.snap_image = session.getSnapshotImage(
self.snap_image_id)
self.manager = self.nodepool.getProviderManager(self.provider)
except Exception:
self.log.exception("Exception preparing to update image %s "
"in %s:" % (self.image.name,
self.provider.name))
return
try:
self.updateImage(session)
except Exception:
self.log.exception("Exception updating image %s in %s:" %
(self.image.name, self.provider.name))
try:
if self.snap_image:
self.nodepool.deleteImage(self.snap_image.id)
except Exception:
self.log.exception("Exception deleting image id: %s:" %
self.snap_image.id)
return
class SnapshotImageUpdater(ImageUpdater):
log = logging.getLogger("nodepool.SnapshotImageUpdater")
def updateImage(self, session):
start_time = time.time()
timestamp = int(start_time)
hostname = self.provider.template_hostname.format(
provider=self.provider, image=self.image, timestamp=str(timestamp))
self.log.info("Creating image id: %s with hostname %s for %s in %s" %
(self.snap_image.id, hostname, self.image.name,
self.provider.name))
if self.provider.keypair:
key_name = self.provider.keypair
key = None
use_password = False
else:
try:
key_name = hostname.split('.')[0]
key = self.manager.addKeypair(key_name)
use_password = False
except Exception:
key_name = None
key = None
use_password = True
uuid_pattern = 'hex{8}-(hex{4}-){3}hex{12}'.replace('hex',
'[0-9a-fA-F]')
if re.match(uuid_pattern, self.image.base_image):
image_name = None
image_id = self.image.base_image
else:
image_name = self.image.base_image
image_id = None
try:
server = self.manager.createServer(
hostname, self.image.min_ram, image_name=image_name,
key_name=key_name, name_filter=self.image.name_filter,
image_id=image_id, config_drive=self.image.config_drive,
nodepool_snapshot_image_id=self.snap_image.id)
server_id = server['id']
except Exception:
if self.manager.deleteKeypair(key_name):
# Only log success - failure is logged inside of shade
self.log.debug(
'Deleted keypair for failed image build %s' %
self.snap_image.id)
raise
self.snap_image.hostname = hostname
self.snap_image.version = timestamp
self.snap_image.server_external_id = server_id
session.commit()
self.log.debug("Image id: %s waiting for server %s" %
(self.snap_image.id, server_id))
server = self.manager.waitForServer(server)
if server['status'] != 'ACTIVE':
raise Exception("Server %s for image id: %s status: %s" %
(server_id, self.snap_image.id, server['status']))
ip = server.get('public_v4')
ip_v6 = server.get('public_v6')
if self.provider.ipv6_preferred:
if ip_v6:
ip = ip_v6
else:
self.log.warning('Preferred ipv6 not available, '
'falling back to ipv4.')
if not ip:
self.log.error("Server dict {server}".format(
server=pprint.pformat(dict(server))))
raise Exception("Unable to find public IP of server")
server['public_ip'] = ip
self.bootstrapServer(server, key, use_password=use_password)
image_id = self.manager.createImage(server, hostname,
self.image.meta)['id']
self.snap_image.external_id = image_id
session.commit()
self.log.debug("Image id: %s building image %s" %
(self.snap_image.id, image_id))
# It can take a _very_ long time for Rackspace 1.0 to save an image
image = self.manager.waitForImage(image_id, IMAGE_TIMEOUT)
# Throw exception here and not in waitForImage so that we can log
# the snap_image.id as well, which waitForImage does not know
if image['status'].lower() != 'active':
raise Exception("Image %s for image id: %s status: %s" %
(image_id, self.snap_image.id, image['status']))
if self.statsd:
dt = int((time.time() - start_time) * 1000)
key = 'nodepool.image_update.%s.%s' % (self.image.name,
self.provider.name)
self.statsd.timing(key, dt)
self.statsd.incr(key)
self.snap_image.state = nodedb.READY
session.commit()
self.log.info("Image %s in %s is ready" % (hostname,
self.provider.name))
try:
# We made the snapshot, try deleting the server, but it's okay
# if we fail. The reap script will find it and try again.
self.manager.cleanupServer(server_id)
self.manager.waitForServerDeletion(server_id)
except:
self.log.exception("Exception encountered deleting server"
" %s for image id: %s" %
(server_id, self.snap_image.id))
def bootstrapServer(self, server, key, use_password=False):
log = logging.getLogger("nodepool.image.build.%s.%s" %
(self.provider.name, self.image.name))
ssh_kwargs = dict(log=log)
if not use_password:
ssh_kwargs['pkey'] = key
else:
ssh_kwargs['password'] = server['admin_pass']
host = utils.ssh_connect(server['public_ip'], 'root', ssh_kwargs,
timeout=CONNECT_TIMEOUT)
if not host:
# We have connected to the node but couldn't do anything as root
# try distro specific users, since we know ssh is up (a timeout
didn't occur), we can connect with a very short timeout.
for username in ['ubuntu', 'fedora', 'cloud-user', 'centos',
'debian']:
try:
host = utils.ssh_connect(server['public_ip'], username,
ssh_kwargs,
timeout=10)
if host:
break
except:
continue
if not host:
raise Exception("Unable to log in via SSH")
# /etc/nodepool is world writable because by the time we write
# the contents after the node is launched, we may not have
# sudo access any more.
host.ssh("make config dir", "sudo mkdir -p /etc/nodepool")
host.ssh("chmod config dir", "sudo chmod 0777 /etc/nodepool")
if self.scriptdir:
host.ssh("make scripts dir", "mkdir -p scripts")
for fname in os.listdir(self.scriptdir):
path = os.path.join(self.scriptdir, fname)
if not os.path.isfile(path):
continue
host.scp(path, 'scripts/%s' % fname)
host.ssh("move scripts to opt",
"sudo mv scripts /opt/nodepool-scripts")
host.ssh("set scripts permissions",
"sudo chmod -R a+rx /opt/nodepool-scripts")
if self.image.setup:
env_vars = ''
for k, v in os.environ.items():
if k.startswith('NODEPOOL_'):
env_vars += ' %s="%s"' % (k, v)
# non-interactive "cloud-user" type logins can have very
# restrictive paths of just /bin:/usr/bin. Because on
# some hosts we log-in as root and others as a user, we
# standardise the path here
set_path = "export PATH=" \
"/usr/local/sbin:/sbin:/usr/sbin:" \
"/usr/local/bin:/bin:/usr/bin"
host.ssh("run setup script",
"%s; cd /opt/nodepool-scripts "
"&& %s ./%s %s && sync && sleep 5" %
(set_path, env_vars, self.image.setup, server['name']))
class NodePool(threading.Thread):
log = logging.getLogger("nodepool.NodePool")
@@ -1562,19 +1331,6 @@ class NodePool(threading.Thread):
else:
self.launchNode(session, provider, label, target)
-def checkForMissingSnapshotImage(self, session, provider, image):
-found = False
-for snap_image in session.getSnapshotImages():
-if (snap_image.provider_name == provider.name and
-snap_image.image_name == image.name and
-snap_image.state in [nodedb.READY,
-nodedb.BUILDING]):
-found = True
-if not found:
-self.log.warning("Missing image %s on %s" %
-(image.name, provider.name))
-self.updateImage(session, provider.name, image.name)
def checkForMissingDiskImage(self, session, provider, image):
found = False
for dib_image in session.getDibImages():
@@ -1606,10 +1362,7 @@ class NodePool(threading.Thread):
def checkForMissingImage(self, session, provider, image):
if image.name in self.config.images_in_use:
-if not image.diskimage:
-self.checkForMissingSnapshotImage(session, provider, image)
-else:
-self.checkForMissingDiskImage(session, provider, image)
+self.checkForMissingDiskImage(session, provider, image)
def checkForMissingImages(self, session):
# If we are missing an image, run the image update function
@@ -1639,15 +1392,6 @@ class NodePool(threading.Thread):
def updateImages(self, session):
self.log.debug("Updating all images.")
-# first run the snapshot image updates
-for provider in self.config.providers.values():
-for image in provider.images.values():
-if image.name not in self.config.images_in_use:
-continue
-if image.diskimage:
-continue
-self.updateImage(session, provider.name, image.name)
needs_build = False
for diskimage in self.config.diskimages.values():
if diskimage.name not in self.config.images_in_use:
@@ -1666,37 +1410,6 @@ class NodePool(threading.Thread):
continue
self.uploadImage(session, provider.name, image.name)
-def updateImage(self, session, provider_name, image_name):
-try:
-return self._updateImage(session, provider_name, image_name)
-except Exception:
-self.log.exception(
-"Could not update image %s on %s", image_name, provider_name)
-def _updateImage(self, session, provider_name, image_name):
-provider = self.config.providers[provider_name]
-image = provider.images[image_name]
-# check type of image depending on diskimage flag
-if image.diskimage:
-raise Exception(
-"Cannot update disk image images. "
-"Please build and upload images")
-if not image.setup:
-raise Exception(
-"Invalid image config. Must specify either "
-"a setup script, or a diskimage to use.")
-snap_image = session.createSnapshotImage(
-provider_name=provider.name,
-image_name=image_name)
-t = SnapshotImageUpdater(self, provider, image, snap_image.id)
-t.start()
-# Enough time to give them different timestamps (versions)
-# Just to keep things clearer.
-time.sleep(2)
-return t
def buildImage(self, image):
# check if we already have this item in the queue
with self.getDB().getSession() as session:

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -17,7 +17,7 @@ zookeeper-servers:
labels:
- name: real-label
-image: real-image
+image: fake-image
min-ready: 1
providers:
- name: real-provider
@@ -36,14 +36,13 @@ providers:
- net-id: 'some-uuid'
rate: 0.0001
images:
-- name: real-image
-base-image: 'Real Precise'
+- name: fake-image
min-ram: 8192
+diskimage: fake-image
name-filter: 'Real'
meta:
key: value
key2: value
-setup: prepare_node_devstack.sh
targets:
- name: fake-target
@@ -51,3 +50,6 @@ targets:
url: https://jenkins.example.org/
user: fake
apikey: fake
+diskimages:
+- name: fake-image

View File

@@ -16,8 +16,8 @@ zookeeper-servers:
- host: localhost
labels:
-- name: real-label
-image: real-image
+- name: fake-label
+image: fake-image
min-ready: 1
providers:
- name: real-provider
@@ -32,14 +32,16 @@ providers:
- net-id: 'some-uuid'
rate: 0.0001
images:
-- name: real-image
-base-image: 'Real Precise'
+- name: fake-image
min-ram: 8192
+diskimage: fake-image
name-filter: 'Real'
meta:
key: value
key2: value
-setup: prepare_node_devstack.sh
targets:
- name: fake-target
+diskimages:
+- name: fake-image

View File

@@ -37,13 +37,15 @@ providers:
rate: 0.0001
images:
- name: fake-image
-base-image: 'Fake Precise'
min-ram: 8192
+diskimage: fake-image
name-filter: 'Fake'
meta:
key: value
key2: value
-setup: prepare_node_devstack.sh
targets:
- name: fake-target
+diskimages:
+- name: fake-image

View File

@@ -37,13 +37,15 @@ providers:
rate: 0.0001
images:
- name: fake-image
-base-image: 'Fake Precise'
min-ram: 8192
+diskimage: fake-image
name-filter: 'Fake'
meta:
key: value
key2: value
-setup: prepare_node_devstack.sh
targets:
- name: fake-target
+diskimages:
+- name: fake-image

View File

@@ -38,10 +38,12 @@ providers:
rate: 0.0001
images:
- name: fake-image
-base-image: 'Fake Precise'
min-ram: 8192
+diskimage: fake-image
name-filter: 'Fake'
-setup: prepare_node_devstack.sh
targets:
- name: fake-target
+diskimages:
+- name: fake-image

View File

@@ -41,13 +41,12 @@ providers:
rate: 0.0001
images:
- name: fake-image1
-base-image: 'Fake Precise'
min-ram: 8192
+diskimage: fake-image1
name-filter: 'Fake'
meta:
key: value
key2: value
-setup: prepare_node_devstack.sh
- name: fake-provider2
keypair: 'if-present-use-this-keypair'
username: 'fake'
@@ -61,13 +60,16 @@ providers:
rate: 0.0001
images:
- name: fake-image2
-base-image: 'Fake Precise'
min-ram: 8192
+diskimage: fake-image2
name-filter: 'Fake'
meta:
key: value
key2: value
-setup: prepare_node_devstack.sh
targets:
- name: fake-target
+diskimages:
+- name: fake-image1
+- name: fake-image2

View File

@@ -39,7 +39,6 @@ providers:
rate: 0.0001
images:
- name: fake-dib-image
-base-image: 'Fake Precise'
min-ram: 8192
diskimage: fake-dib-diskimage

View File

@@ -1,75 +0,0 @@
script-dir: .
elements-dir: .
images-dir: '{images_dir}'
cron:
check: '*/15 * * * *'
cleanup: '*/1 * * * *'
zmq-publishers:
- tcp://localhost:8881
gearman-servers:
- host: localhost
port: {gearman_port}
zookeeper-servers:
- host: localhost
labels:
- name: fake-label
image: fake-dib-image
min-ready: 2
providers:
- name: fake-provider1
- name: fake-provider2
providers:
- name: fake-provider1
keypair: 'if-present-use-this-keypair'
username: 'fake'
password: 'fake'
auth-url: 'fake'
project-id: 'fake'
max-servers: 1
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
- name: fake-dib-image
base-image: 'Fake Precise'
min-ram: 8192
diskimage: fake-dib-image
- name: fake-provider2
keypair: 'if-present-use-this-keypair'
username: 'fake'
password: 'fake'
auth-url: 'fake'
project-id: 'fake'
max-servers: 1
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
- name: fake-dib-image
base-image: 'Fake Precise'
min-ram: 8192
setup: prepare_node_devstack.sh
targets:
- name: fake-target
diskimages:
- name: fake-dib-image
elements:
- ubuntu
- vm
release: 21
env-vars:
TMPDIR: /opt/dib_tmp
DIB_IMAGE_CACHE: /opt/dib_cache
DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2

View File

@@ -1,76 +0,0 @@
script-dir: .
elements-dir: .
images-dir: '{images_dir}'
cron:
check: '*/15 * * * *'
cleanup: '*/1 * * * *'
zmq-publishers:
- tcp://localhost:8881
gearman-servers:
- host: localhost
port: {gearman_port}
zookeeper-servers:
- host: localhost
labels:
- name: fake-label
image: fake-dib-image
min-ready: 2
providers:
- name: fake-provider1
- name: fake-provider2
providers:
- name: fake-provider1
keypair: 'if-present-use-this-keypair'
username: 'fake'
password: 'fake'
auth-url: 'fake'
project-id: 'fake'
max-servers: 1
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
- name: fake-dib-image
base-image: 'Fake Precise'
min-ram: 8192
diskimage: fake-dib-image
- name: fake-provider2
keypair: 'if-present-use-this-keypair'
username: 'fake'
password: 'fake'
auth-url: 'fake'
project-id: 'fake'
max-servers: 2
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
- name: fake-dib-image
base-image: 'Fake Precise'
min-ram: 8192
setup: prepare_node_devstack.sh
targets:
- name: fake-target
diskimages:
- name: fake-dib-image
elements:
- ubuntu
- vm
release: 21
env-vars:
TMPDIR: /opt/dib_tmp
DIB_IMAGE_CACHE: /opt/dib_cache
DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2
# Fail the build
SHOULD_FAIL: 'true'

View File

@@ -38,7 +38,6 @@ providers:
rate: 0.0001
images:
- name: fake-dib-image
-base-image: 'Fake Precise'
min-ram: 8192
diskimage: fake-dib-image
meta:
@@ -56,7 +55,6 @@ providers:
rate: 0.0001
images:
- name: fake-dib-image
-base-image: 'Fake Precise'
min-ram: 8192
diskimage: fake-dib-image

View File

@@ -38,7 +38,6 @@ providers:
rate: 0.0001
images:
- name: fake-dib-image
-base-image: 'Fake Precise'
min-ram: 8192
diskimage: fake-dib-image

View File

@@ -39,7 +39,6 @@ providers:
rate: 0.0001
images:
- name: fake-dib-image
-base-image: 'Fake Precise'
min-ram: 8192
diskimage: fake-dib-image
- name: fake-provider2
@@ -56,7 +55,6 @@ providers:
rate: 0.0001
images:
- name: fake-dib-image
-base-image: 'Fake Precise'
min-ram: 8192
diskimage: fake-dib-image

View File

@@ -50,10 +50,8 @@ providers:
rate: 0.0001
images:
- name: fake-image
-base-image: 'Fake Precise'
min-ram: 8192
name-filter: 'Fake'
-setup: prepare_node_devstack.sh
- name: fake-provider2
keypair: 'if-present-use-this-keypair'
@@ -68,10 +66,8 @@ providers:
rate: 0.0001
images:
- name: fake-image
-base-image: 'Fake Precise'
min-ram: 8192
name-filter: 'Fake'
-setup: prepare_node_devstack.sh
- name: fake-provider3
keypair: 'if-present-use-this-keypair'
@@ -87,10 +83,12 @@ providers:
rate: 0.0001
images:
- name: fake-image
-base-image: 'Fake Precise'
+diskimage: fake-image
min-ram: 8192
name-filter: 'Fake'
-setup: prepare_node_devstack.sh
targets:
- name: fake-target
+diskimages:
+- name: fake-image

View File

@@ -39,13 +39,15 @@ providers:
rate: 0.0001
images:
- name: fake-image
-base-image: 'Fake Precise'
min-ram: 8192
+diskimage: fake-image
name-filter: 'Fake'
meta:
key: value
key2: value
-setup: prepare_node_devstack.sh
targets:
- name: fake-target
+diskimages:
+- name: fake-image

View File

@@ -43,10 +43,12 @@ providers:
rate: 0.0001
images:
- name: fake-image
-base-image: 'Fake Precise'
min-ram: 8192
+diskimage: fake-image
name-filter: 'Fake'
-setup: prepare_node_devstack.sh
targets:
- name: fake-target
+diskimages:
+- name: fake-image

View File

@@ -68,27 +68,6 @@ class TestNodepoolCMD(tests.DBTestCase):
nodepoolcmd.main()
self.assert_images_listed(configfile, 1)
@skip("Skipping until ZooKeeper is enabled")
def test_dib_snapshot_image_update(self):
configfile = self.setup_config("node_dib_and_snap.yaml")
self._useBuilder(configfile)
self.patch_argv("-c", configfile, "image-update",
"fake-provider1", "fake-dib-image")
nodepoolcmd.main()
self.patch_argv("-c", configfile, "image-update",
"fake-provider2", "fake-dib-image")
nodepoolcmd.main()
self.assert_images_listed(configfile, 2)
@skip("Skipping until ZooKeeper is enabled")
def test_dib_snapshot_image_update_all(self):
configfile = self.setup_config("node_dib_and_snap.yaml")
self._useBuilder(configfile)
self.patch_argv("-c", configfile, "image-update",
"all", "fake-dib-image")
nodepoolcmd.main()
self.assert_images_listed(configfile, 2)
@skip("Skipping until ZooKeeper is enabled")
def test_image_update_all(self):
configfile = self.setup_config("node_cmd.yaml")

View File

@@ -123,68 +123,9 @@ class TestNodepool(tests.DBTestCase):
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
def test_dib_and_snap_label(self):
"""Test that a label with dib and snapshot images build."""
configfile = self.setup_config('node_dib_and_snap.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage(pool, 'fake-provider1', 'fake-dib-image')
self.waitForImage(pool, 'fake-provider2', 'fake-dib-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider1',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
nodes = session.getNodes(provider_name='fake-provider2',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
def test_dib_and_snap_fail(self):
"""Test that snap based nodes build when dib fails."""
configfile = self.setup_config('node_dib_and_snap_fail.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
# fake-provider1 will fail to build fake-dib-image
self.waitForImage(pool, 'fake-provider2', 'fake-dib-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
# fake-provider1 uses dib.
nodes = session.getNodes(provider_name='fake-provider1',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 0)
# fake-provider2 uses snapshots.
nodes = session.getNodes(provider_name='fake-provider2',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 2)
# The fake disk image create script will return 127 with
# SHOULD_FAIL flag set to true.
self.assertEqual(self.subprocesses[0].returncode, 127)
self.assertEqual(self.subprocesses[-1].returncode, 127)
with pool.getDB().getSession() as session:
while True:
dib_images = session.getDibImages()
images = filter(lambda x: x.image_name == 'fake-dib-image',
dib_images)
if len(images) == 0:
break
time.sleep(.2)
def test_dib_upload_fail(self):
"""Test that a dib and snap image upload failure is contained."""
configfile = self.setup_config('node_dib_and_snap_upload_fail.yaml')
"""Test that a dib upload failure is contained."""
configfile = self.setup_config('node_dib_upload_fail.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
@@ -192,13 +133,11 @@ class TestNodepool(tests.DBTestCase):
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
-# fake-provider1 uses dib.
nodes = session.getNodes(provider_name='fake-provider1',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 0)
-# fake-provider2 uses snapshots.
nodes = session.getNodes(provider_name='fake-provider2',
label_name='fake-label',
target_name='fake-target',

View File

@@ -51,10 +51,8 @@ providers:
max-servers: 96
images:
- name: fake-nodepool
-base-image: 'Fake Precise'
min-ram: 8192
name-filter: 'Fake'
-setup: prepare_node_devstack.sh
diskimage: fake-nodepool
targets: