Support userdata for instances in openstack

Use "userdata" from Nova API to pass cloud-init config to nova
instances in openstack.

Change-Id: I1c6a1cbc5377d268901210631a376ca26f4887d8
Sagi Shnaidman 2019-01-14 13:18:52 +02:00
parent 7fd6411dac
commit d5027ff6a9
7 changed files with 135 additions and 3 deletions
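
For orientation: nodepool's OpenStack provider creates servers through openstacksdk, whose create_server() call already accepts a userdata argument. A minimal sketch of that call outside of nodepool follows; the cloud name, image, and flavor are illustrative placeholders, not part of this change.

# Sketch (assumptions: a clouds.yaml entry named "devstack", plus
# image/flavor names that exist on that cloud).
import openstack

conn = openstack.connect(cloud='devstack')

userdata = """#cloud-config
write_files:
  - content: |
      testpassed
    path: /etc/testfile_nodepool_userdata
"""

# The SDK base64-encodes the string into the user_data field of the
# Nova "create server" request.
server = conn.create_server(
    name='userdata-demo',
    image='ubuntu-bionic',
    flavor='m1.small',
    userdata=userdata,
    wait=True,
)
print(server.status)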

@@ -315,6 +315,12 @@ providers:
   key-name: $NODEPOOL_KEY_NAME
   instance-properties:
     nodepool_devstack: testing
+  userdata: |
+    #cloud-config
+    write_files:
+      - content: |
+          testpassed
+        path: /etc/testfile_nodepool_userdata
 - name: debian-stretch
   diskimage: debian-stretch
   min-ram: 512
@@ -323,6 +329,12 @@ providers:
   key-name: $NODEPOOL_KEY_NAME
   instance-properties:
     nodepool_devstack: testing
+  userdata: |
+    #cloud-config
+    write_files:
+      - content: |
+          testpassed
+        path: /etc/testfile_nodepool_userdata
 - name: fedora-29
   diskimage: fedora-29
   min-ram: 1024
@@ -331,6 +343,12 @@ providers:
   key-name: $NODEPOOL_KEY_NAME
   instance-properties:
     nodepool_devstack: testing
+  userdata: |
+    #cloud-config
+    write_files:
+      - content: |
+          testpassed
+        path: /etc/testfile_nodepool_userdata
 - name: ubuntu-bionic
   diskimage: ubuntu-bionic
   min-ram: 512
@@ -339,6 +357,12 @@ providers:
   key-name: $NODEPOOL_KEY_NAME
   instance-properties:
     nodepool_devstack: testing
+  userdata: |
+    #cloud-config
+    write_files:
+      - content: |
+          testpassed
+        path: /etc/testfile_nodepool_userdata
 - name: ubuntu-trusty
   diskimage: ubuntu-trusty
   min-ram: 512
@@ -347,6 +371,12 @@ providers:
   key-name: $NODEPOOL_KEY_NAME
   instance-properties:
     nodepool_devstack: testing
+  userdata: |
+    #cloud-config
+    write_files:
+      - content: |
+          testpassed
+        path: /etc/testfile_nodepool_userdata
 - name: ubuntu-xenial
   diskimage: ubuntu-xenial
   min-ram: 512
@@ -355,6 +385,12 @@ providers:
   key-name: $NODEPOOL_KEY_NAME
   instance-properties:
     nodepool_devstack: testing
+  userdata: |
+    #cloud-config
+    write_files:
+      - content: |
+          testpassed
+        path: /etc/testfile_nodepool_userdata
 - name: opensuse-423
   diskimage: opensuse-423
   min-ram: 512
@@ -363,6 +399,12 @@ providers:
   key-name: $NODEPOOL_KEY_NAME
   instance-properties:
     nodepool_devstack: testing
+  userdata: |
+    #cloud-config
+    write_files:
+      - content: |
+          testpassed
+        path: /etc/testfile_nodepool_userdata
 - name: opensuse-150
   diskimage: opensuse-150
   min-ram: 512
@@ -371,6 +413,12 @@ providers:
   key-name: $NODEPOOL_KEY_NAME
   instance-properties:
     nodepool_devstack: testing
+  userdata: |
+    #cloud-config
+    write_files:
+      - content: |
+          testpassed
+        path: /etc/testfile_nodepool_userdata
 - name: opensuse-tumbleweed
   diskimage: opensuse-tumbleweed
   min-ram: 512
@@ -379,6 +427,12 @@ providers:
   key-name: $NODEPOOL_KEY_NAME
   instance-properties:
     nodepool_devstack: testing
+  userdata: |
+    #cloud-config
+    write_files:
+      - content: |
+          testpassed
+        path: /etc/testfile_nodepool_userdata
 - name: gentoo-17-0-systemd
   diskimage: gentoo-17-0-systemd
   min-ram: 512
@@ -387,6 +441,12 @@ providers:
   key-name: $NODEPOOL_KEY_NAME
   instance-properties:
     nodepool_devstack: testing
+  userdata: |
+    #cloud-config
+    write_files:
+      - content: |
+          testpassed
+        path: /etc/testfile_nodepool_userdata
 diskimages:
   - name: centos-7

@@ -914,6 +914,15 @@ Selecting the OpenStack driver adds the following options to the
    ``meta-data`` on the active server (e.g. within
    ``config-drive:openstack/latest/meta_data.json``)
 
+.. attr:: userdata
+   :type: str
+   :default: None
+
+   A string of userdata for a node. A typical use is to pass a
+   cloud-config document to an image that has the cloud-init
+   package installed, which applies the userdata at boot. The
+   available cloud-config options are described at
+   https://cloudinit.readthedocs.io/en/latest/topics/examples.html
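
Since the userdata is only useful if cloud-init consumes it, note that an instance can read the raw string back from the standard OpenStack metadata service; a short sketch (run from inside the node, assuming the requests package is available):

# Sketch: read userdata back inside a booted instance. The same data
# is exposed on a config drive under openstack/latest/user_data.
import requests

resp = requests.get(
    'http://169.254.169.254/openstack/latest/user_data', timeout=5)
resp.raise_for_status()
print(resp.text)  # for the config above, begins with "#cloud-config"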
 
 Static Driver
 -------------

@@ -87,6 +87,7 @@ class ProviderLabel(ConfigValue):
         self.boot_from_volume = False
         self.volume_size = None
         self.instance_properties = None
+        self.userdata = None
         # The ProviderPool object that owns this label.
         self.pool = None
@@ -103,7 +104,8 @@ class ProviderLabel(ConfigValue):
                 other.console_log == self.console_log and
                 other.boot_from_volume == self.boot_from_volume and
                 other.volume_size == self.volume_size and
-                other.instance_properties == self.instance_properties)
+                other.instance_properties == self.instance_properties and
+                other.userdata == self.userdata)
         return False
 
     def __repr__(self):
@@ -203,6 +205,7 @@ class ProviderPool(ConfigPool):
             pl.volume_size = label.get('volume-size', 50)
             pl.instance_properties = label.get('instance-properties',
                                                None)
+            pl.userdata = label.get('userdata', None)
 
             top_label = full_config.labels[pl.name]
             top_label.pools.append(self)
@@ -355,6 +358,7 @@ class OpenStackProviderConfig(ProviderConfig):
             'boot-from-volume': bool,
             'volume-size': int,
             'instance-properties': dict,
+            'userdata': str,
         }
 
         label_min_ram = v.Schema({v.Required('min-ram'): int}, extra=True)
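
The schema addition above is one line; a quick sketch of how voluptuous enforces the new 'userdata': str entry (the label dict below is a made-up example, trimmed to the relevant keys):

# Sketch: validating a label dict against the new schema entry.
import voluptuous as v

pool_label = v.Schema({
    'instance-properties': dict,
    'userdata': str,
}, extra=True)

# A string validates cleanly...
pool_label({'userdata': '#cloud-config\npackage_update: true\n'})

# ...anything else raises voluptuous.Invalid with a type error.
try:
    pool_label({'userdata': ['not', 'a', 'string']})
except v.Invalid as err:
    print(err)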

@@ -139,7 +139,8 @@ class OpenStackNodeLauncher(NodeLauncher):
                 security_groups=self.pool.security_groups,
                 boot_from_volume=self.label.boot_from_volume,
                 volume_size=self.label.volume_size,
-                instance_properties=self.label.instance_properties)
+                instance_properties=self.label.instance_properties,
+                userdata=self.label.userdata)
         except openstack.cloud.exc.OpenStackCloudCreateException as e:
             if e.resource_id:
                 self.node.external_id = e.resource_id

@@ -280,7 +280,7 @@ class OpenStackProvider(Provider):
                     nodepool_image_name=None,
                     networks=None, security_groups=None,
                     boot_from_volume=False, volume_size=50,
-                    instance_properties=None):
+                    instance_properties=None, userdata=None):
         if not networks:
             networks = []
         if not isinstance(image, dict):
@@ -303,6 +303,8 @@
             create_args['availability_zone'] = az
         if security_groups:
             create_args['security_groups'] = security_groups
+        if userdata:
+            create_args['userdata'] = userdata
         nics = []
         for network in networks:
             net_id = self.findNetwork(network)['id']
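
For context on what create_args['userdata'] becomes on the wire: Nova's create-server API carries userdata base64-encoded in a user_data field. A sketch of the equivalent request body (imageRef/flavorRef values are placeholders; the SDK normally assembles and sends this for you):

# Sketch: the shape of the Nova "create server" body, with user_data
# base64-encoded as the API requires.
import base64
import json

userdata = (
    "#cloud-config\n"
    "write_files:\n"
    "  - content: |\n"
    "      testpassed\n"
    "    path: /etc/testfile_nodepool_userdata\n"
)

body = {
    "server": {
        "name": "userdata-demo",           # placeholder
        "imageRef": "IMAGE_UUID",          # placeholder
        "flavorRef": "FLAVOR_ID",          # placeholder
        "user_data": base64.b64encode(userdata.encode()).decode(),
    }
}
print(json.dumps(body, indent=2))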

@@ -54,6 +54,12 @@ providers:
       instance-properties:
         a_key: a_value
         b_key: b_value
+      userdata: |
+        #cloud-config
+        password: password
+        chpasswd: { expire: False }
+        ssh_pwauth: True
+        hostname: test
   - name: cloud2
     driver: openstack

@@ -61,6 +61,36 @@ function sshintonode {
     fi
 }
 
+function showserver {
+    name=$1
+    state='ready'
+    node_id=`$NODEPOOL list | grep $name | grep $state | cut -d '|' -f5 | tr -d ' '`
+    EXPECTED=$(mktemp)
+    RESULT=$(mktemp)
+    source /opt/stack/devstack/openrc admin admin
+    nova show $node_id | grep -Eo "user_data[ ]+.*|[ ]*$" | awk '{print $3}' |\
+        base64 --decode > $RESULT
+    cat <<EOF >$EXPECTED
+#cloud-config
+write_files:
+  - content: |
+      testpassed
+    path: /etc/testfile_nodepool_userdata
+EOF
+    diff $EXPECTED $RESULT
+    if [[ $? -ne 0 ]]; then
+        echo "*** Failed to find userdata on server!"
+        FAILURE_REASON="Failed to find userdata on server for $name"
+        echo "Expected userdata:"
+        cat $EXPECTED
+        echo "Found userdata:"
+        cat $RESULT
+        RETURN=1
+    fi
+}
+
 function checknm {
     name=$1
     state='ready'
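
As a cross-check on the shell logic above, here is an illustrative Python equivalent of the comparison showserver performs once the base64 user_data has been fetched from Nova (the fetch itself is elided; this sketch is not part of the change):

# Sketch: expected-vs-found userdata comparison, mirroring showserver.
import base64
import difflib

EXPECTED = """#cloud-config
write_files:
  - content: |
      testpassed
    path: /etc/testfile_nodepool_userdata
"""

def check_userdata(user_data_b64):
    found = base64.b64decode(user_data_b64).decode('utf-8')
    if found == EXPECTED:
        return True
    print("*** Failed to find userdata on server!")
    print(''.join(difflib.unified_diff(
        EXPECTED.splitlines(keepends=True),
        found.splitlines(keepends=True),
        fromfile='expected', tofile='found')))
    return False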
@@ -116,6 +146,8 @@ if [ ${NODEPOOL_PAUSE_CENTOS_7_DIB,,} = 'false' ]; then
     sshintonode centos-7
     # networkmanager check
     checknm centos-7
+    # userdata check
+    showserver centos-7
 fi
 
 if [ ${NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB,,} = 'false' ]; then
@@ -125,6 +157,8 @@ if [ ${NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB,,} = 'false' ]; then
     waitfornode debian-stretch
     # check ssh for root user
     sshintonode debian-stretch
+    # userdata check
+    showserver debian-stretch
 fi
 
 if [ ${NODEPOOL_PAUSE_FEDORA_29_DIB,,} = 'false' ]; then
@@ -136,6 +170,8 @@ if [ ${NODEPOOL_PAUSE_FEDORA_29_DIB,,} = 'false' ]; then
     sshintonode fedora-29
     # networkmanager check
     checknm fedora-29
+    # userdata check
+    showserver fedora-29
 fi
 
 if [ ${NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB,,} = 'false' ]; then
@@ -145,6 +181,8 @@ if [ ${NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB,,} = 'false' ]; then
     waitfornode ubuntu-bionic
     # check ssh for root user
     sshintonode ubuntu-bionic
+    # userdata check
+    showserver ubuntu-bionic
 fi
 
 if [ ${NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB,,} = 'false' ]; then
@@ -154,6 +192,8 @@ if [ ${NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB,,} = 'false' ]; then
     waitfornode ubuntu-trusty
     # check ssh for root user
     sshintonode ubuntu-trusty
+    # userdata check
+    showserver ubuntu-trusty
 fi
 
 if [ ${NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB,,} = 'false' ]; then
@@ -163,6 +203,8 @@ if [ ${NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB,,} = 'false' ]; then
     waitfornode ubuntu-xenial
     # check ssh for root user
     sshintonode ubuntu-xenial
+    # userdata check
+    showserver ubuntu-xenial
 fi
 
 if [ ${NODEPOOL_PAUSE_OPENSUSE_423_DIB,,} = 'false' ]; then
@@ -172,6 +214,8 @@ if [ ${NODEPOOL_PAUSE_OPENSUSE_423_DIB,,} = 'false' ]; then
     waitfornode opensuse-423
     # check ssh for root user
     sshintonode opensuse-423
+    # userdata check
+    showserver opensuse-423
 fi
 
 if [ ${NODEPOOL_PAUSE_OPENSUSE_150_DIB,,} = 'false' ]; then
     # check that image built
@@ -180,6 +224,8 @@ if [ ${NODEPOOL_PAUSE_OPENSUSE_150_DIB,,} = 'false' ]; then
     waitfornode opensuse-150
     # check ssh for root user
     sshintonode opensuse-150
+    # userdata check
+    showserver opensuse-150
 fi
 
 if [ ${NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB,,} = 'false' ]; then
     # check that image built
@@ -188,6 +234,8 @@ if [ ${NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB,,} = 'false' ]; then
     waitfornode opensuse-tumbleweed
     # check ssh for root user
     sshintonode opensuse-tumbleweed
+    # userdata check
+    showserver opensuse-tumbleweed
 fi
 
 if [ ${NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB,,} = 'false' ]; then
     # check that image built
@@ -196,6 +244,8 @@ if [ ${NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB,,} = 'false' ]; then
     waitfornode gentoo-17-0-systemd
     # check ssh for root user
     sshintonode gentoo-17-0-systemd
+    # userdata check
+    showserver gentoo-17-0-systemd
 fi
 
 set -o errexit