ソースを参照

Support userdata for instances in openstack

Use "userdata" from Nova API to pass cloud-init config to nova
instances in openstack.

Change-Id: I1c6a1cbc5377d268901210631a376ca26f4887d8
tags/3.5.0
Sagi Shnaidman 10ヶ月前
コミット
d5027ff6a9
7個のファイルの変更135行の追加3行の削除
  1. +60
    -0
      devstack/plugin.sh
  2. +9
    -0
      doc/source/configuration.rst
  3. +5
    -1
      nodepool/driver/openstack/config.py
  4. +2
    -1
      nodepool/driver/openstack/handler.py
  5. +3
    -1
      nodepool/driver/openstack/provider.py
  6. +6
    -0
      nodepool/tests/fixtures/config_validate/good.yaml
  7. +50
    -0
      tools/check_devstack_plugin.sh

+ 60
- 0
devstack/plugin.sh ファイルの表示

@@ -315,6 +315,12 @@ providers:
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: debian-stretch
diskimage: debian-stretch
min-ram: 512
@@ -323,6 +329,12 @@ providers:
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: fedora-29
diskimage: fedora-29
min-ram: 1024
@@ -331,6 +343,12 @@ providers:
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: ubuntu-bionic
diskimage: ubuntu-bionic
min-ram: 512
@@ -339,6 +357,12 @@ providers:
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: ubuntu-trusty
diskimage: ubuntu-trusty
min-ram: 512
@@ -347,6 +371,12 @@ providers:
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: ubuntu-xenial
diskimage: ubuntu-xenial
min-ram: 512
@@ -355,6 +385,12 @@ providers:
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: opensuse-423
diskimage: opensuse-423
min-ram: 512
@@ -363,6 +399,12 @@ providers:
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: opensuse-150
diskimage: opensuse-150
min-ram: 512
@@ -371,6 +413,12 @@ providers:
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: opensuse-tumbleweed
diskimage: opensuse-tumbleweed
min-ram: 512
@@ -379,6 +427,12 @@ providers:
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: gentoo-17-0-systemd
diskimage: gentoo-17-0-systemd
min-ram: 512
@@ -387,6 +441,12 @@ providers:
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata

diskimages:
- name: centos-7

+ 9
- 0
doc/source/configuration.rst ファイルの表示

@@ -914,6 +914,15 @@ Selecting the OpenStack driver adds the following options to the
``meta-data`` on the active server (e.g. within
``config-drive:openstack/latest/meta_data.json``)

.. attr:: userdata
:type: str
:default: None

A string of userdata for a node. Example usage is to install
the cloud-init package on the image, which will then apply the
userdata. Additional information about cloud-config options:
https://cloudinit.readthedocs.io/en/latest/topics/examples.html


Static Driver
-------------

+ 5
- 1
nodepool/driver/openstack/config.py ファイルの表示

@@ -87,6 +87,7 @@ class ProviderLabel(ConfigValue):
self.boot_from_volume = False
self.volume_size = None
self.instance_properties = None
self.userdata = None
# The ProviderPool object that owns this label.
self.pool = None

@@ -103,7 +104,8 @@ class ProviderLabel(ConfigValue):
other.console_log == self.console_log and
other.boot_from_volume == self.boot_from_volume and
other.volume_size == self.volume_size and
other.instance_properties == self.instance_properties)
other.instance_properties == self.instance_properties and
other.userdata == self.userdata)
return False

def __repr__(self):
@@ -203,6 +205,7 @@ class ProviderPool(ConfigPool):
pl.volume_size = label.get('volume-size', 50)
pl.instance_properties = label.get('instance-properties',
None)
pl.userdata = label.get('userdata', None)

top_label = full_config.labels[pl.name]
top_label.pools.append(self)
@@ -355,6 +358,7 @@ class OpenStackProviderConfig(ProviderConfig):
'boot-from-volume': bool,
'volume-size': int,
'instance-properties': dict,
'userdata': str,
}

label_min_ram = v.Schema({v.Required('min-ram'): int}, extra=True)

+ 2
- 1
nodepool/driver/openstack/handler.py ファイルの表示

@@ -139,7 +139,8 @@ class OpenStackNodeLauncher(NodeLauncher):
security_groups=self.pool.security_groups,
boot_from_volume=self.label.boot_from_volume,
volume_size=self.label.volume_size,
instance_properties=self.label.instance_properties)
instance_properties=self.label.instance_properties,
userdata=self.label.userdata)
except openstack.cloud.exc.OpenStackCloudCreateException as e:
if e.resource_id:
self.node.external_id = e.resource_id

+ 3
- 1
nodepool/driver/openstack/provider.py ファイルの表示

@@ -280,7 +280,7 @@ class OpenStackProvider(Provider):
nodepool_image_name=None,
networks=None, security_groups=None,
boot_from_volume=False, volume_size=50,
instance_properties=None):
instance_properties=None, userdata=None):
if not networks:
networks = []
if not isinstance(image, dict):
@@ -303,6 +303,8 @@ class OpenStackProvider(Provider):
create_args['availability_zone'] = az
if security_groups:
create_args['security_groups'] = security_groups
if userdata:
create_args['userdata'] = userdata
nics = []
for network in networks:
net_id = self.findNetwork(network)['id']

+ 6
- 0
nodepool/tests/fixtures/config_validate/good.yaml ファイルの表示

@@ -54,6 +54,12 @@ providers:
instance-properties:
a_key: a_value
b_key: b_value
userdata: |
#cloud-config
password: password
chpasswd: { expire: False }
ssh_pwauth: True
hostname: test

- name: cloud2
driver: openstack

+ 50
- 0
tools/check_devstack_plugin.sh ファイルの表示

@@ -61,6 +61,36 @@ function sshintonode {
fi
}

# Verify that the userdata injected via the label's "userdata" option
# actually reached the nova instance: fetch user_data from `nova show`,
# base64-decode it, and diff it against the expected cloud-config.
# Sets RETURN=1 and FAILURE_REASON on mismatch.
function showserver {
    name=$1
    state='ready'

    node_id=`$NODEPOOL list | grep $name | grep $state | cut -d '|' -f5 | tr -d ' '`
    EXPECTED=$(mktemp)
    RESULT=$(mktemp)
    source /opt/stack/devstack/openrc admin admin

    # user_data is shown base64-encoded in the nova output table;
    # extract the value column and decode it for comparison.
    nova show $node_id | grep -Eo "user_data[ ]+.*|[ ]*$" | awk {'print $3'} |\
        base64 --decode > $RESULT
    cat <<EOF >$EXPECTED
#cloud-config
write_files:
  - content: |
      testpassed
    path: /etc/testfile_nodepool_userdata
EOF
    diff $EXPECTED $RESULT
    if [[ $? -ne 0 ]]; then
        echo "*** Failed to find userdata on server!"
        # BUGFIX: was "$node", which is never set in this script
        # (the function's parameter is stored in "name").
        FAILURE_REASON="Failed to find userdata on server for $name"
        echo "Expected userdata:"
        cat $EXPECTED
        echo "Found userdata:"
        cat $RESULT
        RETURN=1
    fi
}

function checknm {
name=$1
state='ready'
@@ -116,6 +146,8 @@ if [ ${NODEPOOL_PAUSE_CENTOS_7_DIB,,} = 'false' ]; then
sshintonode centos-7
# networkmanager check
checknm centos-7
# userdata check
showserver centos-7
fi

if [ ${NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB,,} = 'false' ]; then
@@ -125,6 +157,8 @@ if [ ${NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB,,} = 'false' ]; then
waitfornode debian-stretch
# check ssh for root user
sshintonode debian-stretch
# userdata check
showserver debian-stretch
fi

if [ ${NODEPOOL_PAUSE_FEDORA_29_DIB,,} = 'false' ]; then
@@ -136,6 +170,8 @@ if [ ${NODEPOOL_PAUSE_FEDORA_29_DIB,,} = 'false' ]; then
sshintonode fedora-29
# networkmanager check
checknm fedora-29
# userdata check
showserver fedora-29
fi

if [ ${NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB,,} = 'false' ]; then
@@ -145,6 +181,8 @@ if [ ${NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB,,} = 'false' ]; then
waitfornode ubuntu-bionic
# check ssh for root user
sshintonode ubuntu-bionic
# userdata check
showserver ubuntu-bionic
fi

if [ ${NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB,,} = 'false' ]; then
@@ -154,6 +192,8 @@ if [ ${NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB,,} = 'false' ]; then
waitfornode ubuntu-trusty
# check ssh for root user
sshintonode ubuntu-trusty
# userdata check
showserver ubuntu-trusty
fi

if [ ${NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB,,} = 'false' ]; then
@@ -163,6 +203,8 @@ if [ ${NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB,,} = 'false' ]; then
waitfornode ubuntu-xenial
# check ssh for root user
sshintonode ubuntu-xenial
# userdata check
showserver ubuntu-xenial
fi

if [ ${NODEPOOL_PAUSE_OPENSUSE_423_DIB,,} = 'false' ]; then
@@ -172,6 +214,8 @@ if [ ${NODEPOOL_PAUSE_OPENSUSE_423_DIB,,} = 'false' ]; then
waitfornode opensuse-423
# check ssh for root user
sshintonode opensuse-423
# userdata check
showserver opensuse-423
fi
if [ ${NODEPOOL_PAUSE_OPENSUSE_150_DIB,,} = 'false' ]; then
# check that image built
@@ -180,6 +224,8 @@ if [ ${NODEPOOL_PAUSE_OPENSUSE_150_DIB,,} = 'false' ]; then
waitfornode opensuse-150
# check ssh for root user
sshintonode opensuse-150
# userdata check
showserver opensuse-150
fi
if [ ${NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB,,} = 'false' ]; then
# check that image built
@@ -188,6 +234,8 @@ if [ ${NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB,,} = 'false' ]; then
waitfornode opensuse-tumbleweed
# check ssh for root user
sshintonode opensuse-tumbleweed
# userdata check
showserver opensuse-tumbleweed
fi
if [ ${NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB,,} = 'false' ]; then
# check that image built
@@ -196,6 +244,8 @@ if [ ${NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB,,} = 'false' ]; then
waitfornode gentoo-17-0-systemd
# check ssh for root user
sshintonode gentoo-17-0-systemd
# userdata check
showserver gentoo-17-0-systemd
fi

set -o errexit

読み込み中…
キャンセル
保存