Support per-provider image build settings

With multiple cloud providers supporting different image formats and
versions, nodepool needs to express the requirements providers have on
images. Add a provider.image-type setting to providers that determines
which image formats diskimage-builder should output.
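
As a rough sketch (illustrative only; the path below is made up), a
diskimage shared by a vhd provider and a qcow2 provider now results in
one output file per requested type, named by appending the type to the
build filename:

    # Hypothetical example of the per-type output naming this change
    # introduces (filename + '.' + image_type).
    filename = '/opt/nodepool/images/devstack-trusty-1412712774'
    for image_type in ('vhd', 'qcow2'):
        print(filename + '.' + image_type)
    # /opt/nodepool/images/devstack-trusty-1412712774.vhd
    # /opt/nodepool/images/devstack-trusty-1412712774.qcow2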

Change-Id: I808571e2c9cc10fb7dfcd6e30262769872cd75cc
Clark Boylan 2014-10-07 15:52:54 -07:00 committed by Monty Taylor
parent 83ffffd94a
commit 86610b50b5
7 changed files with 266 additions and 50 deletions


@ -173,7 +173,7 @@ will be built using the provider snapshot approach::
qemu-img-options: compat=0.10
env-vars:
DIB_DISTRIBUTION_MIRROR: http://archive.ubuntu.com
DIB_EXTRA_VARIABLE: foobar
DIB_IMAGE_CACHE: /opt/dib_cache
For diskimages, the `name` is required. The `elements` section
enumerates all the elements that will be included when building the
@ -210,6 +210,7 @@ provider, the Nodepool image types are also defined (see
launch-timeout: 900
template-hostname: '{image.name}-{timestamp}.template.openstack.org'
pool: 'public'
image-type: qcow2
networks:
- net-id: 'some-uuid'
- net-label: 'some-network-name'
@ -234,6 +235,11 @@ provider, the Nodepool image types are also defined (see
username: jenkins
user-home: '/home/jenkins'
private-key: /var/lib/jenkins/.ssh/id_rsa
- name: devstack-trusty
min-ram: 30720
diskimage: devstack-trusty
username: jenkins
private-key: /home/nodepool/.ssh/id_rsa
- name: provider2
username: 'username'
password: 'password'
@ -283,6 +289,12 @@ different list of availability zones.
The 'pool' key is optional. This can be used to specify a floating ip
pool in cases where the 'public' pool is unavailable or undesirable.
The ``image-type`` specifies the image type supported by this provider.
For each diskimage, diskimage-builder will output an image in every
``image-type`` requested by the providers that use that diskimage.
The default value is ``qcow2``; ``vhd`` and ``raw`` are also expected
to be valid if you have a sufficiently new diskimage-builder.
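As a rough illustration (a sketch only; the names below are hypothetical
and the data structures simplified), the image types requested by all
providers sharing a diskimage are collected into a set and passed to
``disk-image-create`` through its ``-t`` option::

    # Simplified sketch of how per-provider image-type settings feed the
    # diskimage-builder invocation for a shared diskimage.
    providers = {
        'provider1': {'image-type': 'vhd', 'diskimage': 'devstack-trusty'},
        'provider2': {'image-type': 'qcow2', 'diskimage': 'devstack-trusty'},
    }

    image_types = set()
    for provider in providers.values():
        if provider['diskimage'] == 'devstack-trusty':
            image_types.add(provider['image-type'])

    cmd = 'disk-image-create -x -t %s --no-tmpfs -o %s %s' % (
        ','.join(sorted(image_types)), 'devstack-trusty', 'ubuntu vm')
    print(cmd)
    # disk-image-create -x -t qcow2,vhd --no-tmpfs -o devstack-trusty ubuntu vm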
.. _images:
images
@ -328,8 +340,7 @@ indicated. Nodepool expects that user to exist after running the
script indicated by `setup`. `setup` will be used only when not
building images using diskimage-builder, in which case the settings defined
in the ``diskimages`` section will be used instead. See :ref:`scripts`
for setup script details. See :ref:`scripts` for setup script
details.
for setup script details.
The `config-drive` boolean is optional and defines whether config drive
should be used for the image.


@ -56,6 +56,7 @@ class ConfigValidator:
'project-id': str,
'max-servers': int,
'pool': str,
'image-type': str,
'networks': [{
'net-id': str,
'net-label': str,


@ -768,14 +768,15 @@ class DiskImageBuilder(threading.Thread):
extra_options = ('--qemu-img-options %s' %
image.qemu_img_options)
img_elements = image.elements
img_types = ",".join(image.image_types)
if 'fake-' in filename:
dib_cmd = 'nodepool/tests/fake-image-create'
else:
dib_cmd = 'disk-image-create'
cmd = ('%s -x --no-tmpfs %s -o %s %s' %
(dib_cmd, extra_options, filename, img_elements))
cmd = ('%s -x -t %s --no-tmpfs %s -o %s %s' %
(dib_cmd, img_types, extra_options, filename, img_elements))
log = logging.getLogger("nodepool.image.build.%s" %
(image_name,))
@ -895,10 +896,11 @@ class DiskImageUpdater(ImageUpdater):
log = logging.getLogger("nodepool.DiskImageUpdater")
def __init__(self, nodepool, provider, image, snap_image_id,
filename):
filename, image_type):
super(DiskImageUpdater, self).__init__(nodepool, provider, image,
snap_image_id)
self.filename = filename
self.image_type = image_type
self.image_name = image.name
def updateImage(self, session):
@ -918,10 +920,9 @@ class DiskImageUpdater(ImageUpdater):
self.snap_image.version = timestamp
session.commit()
# strip extension from filename
stripped_filename = self.filename.replace(".qcow2", "")
image_id = self.manager.uploadImage(image_name, stripped_filename,
'qcow2', 'bare', self.image.meta)
image_id = self.manager.uploadImage(image_name, self.filename,
self.image_type, 'bare',
self.image.meta)
self.snap_image.external_id = image_id
session.commit()
self.log.debug("Image id: %s saving image %s" %
@ -1234,26 +1235,6 @@ class NodePool(threading.Thread):
g.name = g.host + '_' + str(g.port)
newconfig.gearman_servers[g.name] = g
if 'diskimages' in config:
for diskimage in config['diskimages']:
d = DiskImage()
d.name = diskimage['name']
newconfig.diskimages[d.name] = d
if 'elements' in diskimage:
d.elements = u' '.join(diskimage['elements'])
else:
d.elements = ''
# must be a string, as it's passed as env-var to
# d-i-b, but might be untyped in the yaml and
# interpreted as a number (e.g. "21" for fedora)
d.release = str(diskimage.get('release', ''))
d.qemu_img_options = diskimage.get('qemu-img-options', '')
d.env_vars = diskimage.get('env-vars', {})
if not isinstance(d.env_vars, dict):
self.log.error("%s: ignoring env-vars; "
"should be a dict" % d.name)
d.env_vars = {}
for provider in config['providers']:
p = Provider()
p.name = provider['name']
@ -1279,6 +1260,7 @@ class NodePool(threading.Thread):
'template-hostname',
'{image.name}-{timestamp}.template.openstack.org'
)
p.image_type = provider.get('image-type', 'qcow2')
p.images = {}
for image in provider['images']:
i = ProviderImage()
@ -1313,6 +1295,34 @@ class NodePool(threading.Thread):
% i.name)
i.meta = {}
if 'diskimages' in config:
for diskimage in config['diskimages']:
d = DiskImage()
d.name = diskimage['name']
newconfig.diskimages[d.name] = d
if 'elements' in diskimage:
d.elements = u' '.join(diskimage['elements'])
else:
d.elements = ''
# must be a string, as it's passed as env-var to
# d-i-b, but might be untyped in the yaml and
# interpreted as a number (e.g. "21" for fedora)
d.release = str(diskimage.get('release', ''))
d.qemu_img_options = diskimage.get('qemu-img-options', '')
d.env_vars = diskimage.get('env-vars', {})
if not isinstance(d.env_vars, dict):
self.log.error("%s: ignoring env-vars; "
"should be a dict" % d.name)
d.env_vars = {}
d.image_types = set()
# Do this after providers to build the image-types
for provider in newconfig.providers.values():
for image in provider.images.values():
if (image.diskimage and
image.diskimage in newconfig.diskimages):
diskimage = newconfig.diskimages[image.diskimage]
diskimage.image_types.add(provider.image_type)
for label in config['labels']:
l = Label()
l.name = label['name']
@ -1382,6 +1392,7 @@ class NodePool(threading.Thread):
new_pm.service_name != old_pm.provider.service_name or
new_pm.max_servers != old_pm.provider.max_servers or
new_pm.pool != old_pm.provider.pool or
new_pm.image_type != old_pm.provider.image_type or
new_pm.rate != old_pm.provider.rate or
new_pm.api_timeout != old_pm.provider.api_timeout or
new_pm.boot_timeout != old_pm.provider.boot_timeout or
@ -1404,6 +1415,7 @@ class NodePool(threading.Thread):
new_images[k].reset != old_images[k].reset or
new_images[k].username != old_images[k].username or
new_images[k].user_home != old_images[k].user_home or
new_images[k].diskimage != old_images[k].diskimage or
new_images[k].private_key != old_images[k].private_key or
new_images[k].meta != old_images[k].meta or
new_images[k].config_drive != old_images[k].config_drive):
@ -1813,19 +1825,25 @@ class NodePool(threading.Thread):
# This is either building or in an error state
# that will be handled by periodic cleanup
return
if (not os.path.exists(dib_image.filename) and
not 'fake-dib-image' in dib_image.filename):
# if image is in ready state, check if image
# file exists in directory, otherwise we need
# to rebuild and delete this buggy image
self.log.warning("Image filename %s does not "
"exist. Removing image" %
dib_image.filename)
self.deleteDibImage(dib_image)
continue
# Found a matching image that is READY and has a file
found = True
break
types_found = True
diskimage = self.config.diskimages[image.diskimage]
for image_type in diskimage.image_types:
if (not os.path.exists(
dib_image.filename + '.' + image_type) and
not 'fake-dib-image' in dib_image.filename):
# if image is in ready state, check if image
# file exists in directory, otherwise we need
# to rebuild and delete this buggy image
types_found = False
self.log.warning("Image filename %s does not "
"exist. Removing image" %
dib_image.filename)
self.deleteDibImage(dib_image)
break
if types_found:
# Found a matching image that is READY and has a file
found = True
break
if not found:
# only build the image, we'll recheck again
self.log.warning("Missing disk image %s" % image.name)
@ -1955,8 +1973,7 @@ class NodePool(threading.Thread):
self.log.debug("Queued image building task for %s" %
image.name)
dib_image = session.createDibImage(image_name=image.name,
filename=filename +
".qcow2")
filename=filename)
# add this build to queue
self._image_builder_queue.put(dib_image.id)
@ -1975,10 +1992,11 @@ class NodePool(threading.Thread):
filename = images[0].filename
provider_entity = self.config.providers[provider]
provider_image = provider_entity.images[images[0].image_name]
image_type = provider_entity.image_type
snap_image = session.createSnapshotImage(
provider_name=provider, image_name=image_name)
t = DiskImageUpdater(self, provider_entity, provider_image,
snap_image.id, filename)
snap_image.id, filename, image_type)
t.start()
# Enough time to give them different timestamps (versions)
@ -2170,9 +2188,14 @@ class NodePool(threading.Thread):
self.log.info("Deleted image id: %s" % snap_image.id)
def deleteDibImage(self, dib_image):
image_config = self.config.diskimages.get(dib_image.image_name)
if not image_config:
# The config was removed; deletion will have to be manual
return
# Delete a dib image and its associated files
if os.path.exists(dib_image.filename):
os.remove(dib_image.filename)
for image_type in image_config.image_types:
if os.path.exists(dib_image.filename + '.' + image_type):
os.remove(dib_image.filename + '.' + image_type)
dib_image.state = nodedb.DELETE
dib_image.delete()


@ -39,12 +39,14 @@ if [[ "${BASE_IMAGE_FILE}" != "Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2"
fi
outfile=
outtypes=("qcow2")
TEMP=$(getopt -o o: -- "$@")
TEMP=$(getopt -o o:t: -- "$@")
eval set -- "$TEMP"
while true ; do
case "$1" in
-o) outfile=$2; shift 2;;
-t) IFS="," read -a outtypes <<< "$2"; shift 2;;
--) shift ; break ;;
esac
done
@ -53,7 +55,9 @@ if [ -z "$outfile" ]; then
echo "No output file specified."
exit 1
else
echo "fake-data" > $outfile
for outtype in ${outtypes[@]} ; do
echo "fake-data" > $outfile.$outtype
done
fi
echo "*** fake-image-create: done"


@ -0,0 +1,61 @@
script-dir: .
elements-dir: .
images-dir: .
dburi: '{dburi}'
cron:
check: '*/15 * * * *'
cleanup: '*/1 * * * *'
image-update: '14 2 * * *'
zmq-publishers:
- tcp://localhost:8881
#gearman-servers:
# - host: localhost
labels:
- name: fake-dib-label
image: fake-dib-image
min-ready: 1
providers:
- name: fake-dib-provider
providers:
- name: fake-dib-provider
keypair: 'if-present-use-this-keypair'
username: 'fake'
password: 'fake'
auth-url: 'fake'
project-id: 'fake'
max-servers: 96
pool: 'fake'
image-type: vhd
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
- name: fake-dib-image
base-image: 'Fake Precise'
min-ram: 8192
diskimage: fake-dib-image
targets:
- name: fake-target
jenkins:
url: https://jenkins.example.org/
user: fake
apikey: fake
diskimages:
- name: fake-dib-image
elements:
- fedora
- vm
release: 21
env-vars:
TMPDIR: /opt/dib_tmp
DIB_IMAGE_CACHE: /opt/dib_cache
DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2


@ -0,0 +1,80 @@
script-dir: .
elements-dir: .
images-dir: .
dburi: '{dburi}'
cron:
check: '*/15 * * * *'
cleanup: '*/1 * * * *'
image-update: '14 2 * * *'
zmq-publishers:
- tcp://localhost:8881
#gearman-servers:
# - host: localhost
labels:
- name: fake-label
image: fake-dib-image
min-ready: 2
providers:
- name: fake-provider1
- name: fake-provider2
providers:
- name: fake-provider1
keypair: 'if-present-use-this-keypair'
username: 'fake'
password: 'fake'
auth-url: 'fake'
project-id: 'fake'
max-servers: 1
pool: 'fake'
image-type: vhd
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
- name: fake-dib-image
base-image: 'Fake Precise'
min-ram: 8192
diskimage: fake-dib-image
- name: fake-provider2
keypair: 'if-present-use-this-keypair'
username: 'fake'
password: 'fake'
auth-url: 'fake'
project-id: 'fake'
max-servers: 1
pool: 'fake'
image-type: qcow2
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
- name: fake-dib-image
base-image: 'Fake Precise'
min-ram: 8192
diskimage: fake-dib-image
targets:
- name: fake-target
jenkins:
url: https://jenkins.example.org/
user: fake
apikey: fake
diskimages:
- name: fake-dib-image
elements:
- ubuntu
- vm
release: 21
env-vars:
TMPDIR: /opt/dib_tmp
DIB_IMAGE_CACHE: /opt/dib_cache
DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2


@ -56,6 +56,42 @@ class TestNodepool(tests.DBTestCase):
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
def test_dib_node_vhd_image(self):
"""Test that a dib image and node are created vhd image"""
configfile = self.setup_config('node_dib_vhd.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
pool.start()
self.waitForImage(pool, 'fake-dib-provider', 'fake-dib-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-dib-provider',
label_name='fake-dib-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
def test_dib_node_vhd_and_qcow2(self):
"""Test label provided by vhd and qcow2 images builds"""
configfile = self.setup_config('node_dib_vhd_and_qcow2.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
pool.start()
self.waitForImage(pool, 'fake-provider1', 'fake-dib-image')
self.waitForImage(pool, 'fake-provider2', 'fake-dib-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider1',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
nodes = session.getNodes(provider_name='fake-provider2',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
def test_dib_and_snap_label(self):
"""Test that a label with dib and snapshot images build."""
configfile = self.setup_config('node_dib_and_snap.yaml')