Browse Source

Add support for shared ansible_host in inventory

Today it is possible to create the following ansible inventory file:

  [foo]
  foo01 ansible_host=192.168.1.1

  [bar]
  bar01 ansible_host=192.168.1.1

Which allows a user to create multiple host aliases for a single
connection. This could be done with ansible groups; however, there are
some functional differences in how ansible runs in that configuration.

We could also request 2 nodes from nodepool, however in this case, it
would be a waste of CI resources because every alias would need a new
node from nodepool.

Now, a user is able to alias multiple host names to a single node from
nodepool by doing the following:

  nodeset:
    nodes:
      - name:
          - foo
          - bar
        label: ubuntu-xenial

This would result in a single node request from nodepool, but create
an inventory file with 2 aliases sharing a single ansible_host
variable.

Change-Id: I674d6baac26852ee1503feb1ed16c279bf773688
Signed-off-by: Paul Belanger <pabelanger@redhat.com>
changes/24/521324/2
Paul Belanger 5 years ago
parent
commit
ecb0b84f11
No known key found for this signature in database
GPG Key ID: 611A80832067AF38
  1. 10
      doc/source/user/config.rst
  2. 2
      tests/base.py
  3. 10
      tests/fixtures/config/inventory/git/common-config/zuul.yaml
  4. 1
      tests/fixtures/config/inventory/git/org_project/.zuul.yaml
  5. 20
      tests/unit/test_inventory.py
  6. 20
      tests/unit/test_nodepool.py
  7. 12
      zuul/configloader.py
  8. 3
      zuul/executor/server.py
  9. 7
      zuul/model.py

10
doc/source/user/config.rst

@ -1201,7 +1201,9 @@ configuration may be simplified.
label: controller-label
- name: compute1
label: compute-label
- name: compute2
- name:
- compute2
- web
label: compute-label
groups:
- name: ceph-osd
@ -1212,6 +1214,9 @@ configuration may be simplified.
- controller
- compute1
- compute2
- name: ceph-web
nodes:
- web
.. attr:: nodeset
@ -1233,6 +1238,9 @@ configuration may be simplified.
The name of the node. This will appear in the Ansible inventory
for the job.
This can also be a list of strings. If so, then the listed hosts in
the Ansible inventory will share a common ansible_host address.
.. attr:: label
:required:

2
tests/base.py

@ -1412,7 +1412,7 @@ class RecordingAnsibleJob(zuul.executor.server.AnsibleJob):
host['host_vars']['ansible_connection'] = 'local'
hosts.append(dict(
name='localhost',
name=['localhost'],
host_vars=dict(ansible_connection='local'),
host_keys=[]))
return hosts

10
tests/fixtures/config/inventory/git/common-config/zuul.yaml vendored

@ -43,6 +43,16 @@
label: ubuntu-xenial
run: playbooks/single-inventory.yaml
- job:
name: single-inventory-list
nodeset:
nodes:
- name:
- compute
- controller
label: ubuntu-xenial
run: playbooks/single-inventory.yaml
- job:
name: group-inventory
nodeset: nodeset1

1
tests/fixtures/config/inventory/git/org_project/.zuul.yaml vendored

@ -3,4 +3,5 @@
check:
jobs:
- single-inventory
- single-inventory-list
- group-inventory

20
tests/unit/test_inventory.py

@ -57,6 +57,26 @@ class TestInventory(ZuulTestCase):
self.executor_server.release()
self.waitUntilSettled()
def test_single_inventory_list(self):
inventory = self._get_build_inventory('single-inventory-list')
all_nodes = ('compute', 'controller')
self.assertIn('all', inventory)
self.assertIn('hosts', inventory['all'])
self.assertIn('vars', inventory['all'])
for node_name in all_nodes:
self.assertIn(node_name, inventory['all']['hosts'])
self.assertIn('zuul', inventory['all']['vars'])
z_vars = inventory['all']['vars']['zuul']
self.assertIn('executor', z_vars)
self.assertIn('src_root', z_vars['executor'])
self.assertIn('job', z_vars)
self.assertEqual(z_vars['job'], 'single-inventory-list')
self.executor_server.release()
self.waitUntilSettled()
def test_group_inventory(self):
inventory = self._get_build_inventory('group-inventory')

20
tests/unit/test_nodepool.py

@ -67,8 +67,8 @@ class TestNodepool(BaseTestCase):
# Test a simple node request
nodeset = model.NodeSet()
nodeset.addNode(model.Node('controller', 'ubuntu-xenial'))
nodeset.addNode(model.Node('compute', 'ubuntu-xenial'))
nodeset.addNode(model.Node(['controller', 'foo'], 'ubuntu-xenial'))
nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob')
job.nodeset = nodeset
request = self.nodepool.requestNodes(None, job)
@ -99,8 +99,8 @@ class TestNodepool(BaseTestCase):
# Test that node requests are re-submitted after disconnect
nodeset = model.NodeSet()
nodeset.addNode(model.Node('controller', 'ubuntu-xenial'))
nodeset.addNode(model.Node('compute', 'ubuntu-xenial'))
nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob')
job.nodeset = nodeset
self.fake_nodepool.paused = True
@ -116,8 +116,8 @@ class TestNodepool(BaseTestCase):
# Test that node requests can be canceled
nodeset = model.NodeSet()
nodeset.addNode(model.Node('controller', 'ubuntu-xenial'))
nodeset.addNode(model.Node('compute', 'ubuntu-xenial'))
nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob')
job.nodeset = nodeset
self.fake_nodepool.paused = True
@ -131,8 +131,8 @@ class TestNodepool(BaseTestCase):
# Test that a resubmitted request would not lock nodes
nodeset = model.NodeSet()
nodeset.addNode(model.Node('controller', 'ubuntu-xenial'))
nodeset.addNode(model.Node('compute', 'ubuntu-xenial'))
nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob')
job.nodeset = nodeset
request = self.nodepool.requestNodes(None, job)
@ -152,8 +152,8 @@ class TestNodepool(BaseTestCase):
# Test that a lost request would not lock nodes
nodeset = model.NodeSet()
nodeset.addNode(model.Node('controller', 'ubuntu-xenial'))
nodeset.addNode(model.Node('compute', 'ubuntu-xenial'))
nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob')
job.nodeset = nodeset
request = self.nodepool.requestNodes(None, job)

12
zuul/configloader.py

@ -340,7 +340,7 @@ class PragmaParser(object):
class NodeSetParser(object):
@staticmethod
def getSchema(anonymous=False):
node = {vs.Required('name'): str,
node = {vs.Required('name'): to_list(str),
vs.Required('label'): str,
}
@ -365,11 +365,13 @@ class NodeSetParser(object):
node_names = set()
group_names = set()
for conf_node in as_list(conf['nodes']):
if conf_node['name'] in node_names:
raise DuplicateNodeError(conf['name'], conf_node['name'])
node = model.Node(conf_node['name'], conf_node['label'])
for name in as_list(conf_node['name']):
if name in node_names:
raise DuplicateNodeError(name, conf_node['name'])
node = model.Node(as_list(conf_node['name']), conf_node['label'])
ns.addNode(node)
node_names.add(conf_node['name'])
for name in as_list(conf_node['name']):
node_names.add(name)
for conf_group in as_list(conf.get('groups', [])):
for node_name in as_list(conf_group['nodes']):
if node_name not in node_names:

3
zuul/executor/server.py

@ -495,7 +495,8 @@ def make_inventory_dict(nodes, groups, all_vars):
hosts = {}
for node in nodes:
hosts[node['name']] = node['host_vars']
for name in node['name']:
hosts[name] = node['host_vars']
inventory = {
'all': {

7
zuul/model.py

@ -497,9 +497,10 @@ class NodeSet(object):
return n
def addNode(self, node):
if node.name in self.nodes:
raise Exception("Duplicate node in %s" % (self,))
self.nodes[node.name] = node
for name in node.name:
if name in self.nodes:
raise Exception("Duplicate node in %s" % (self,))
self.nodes[tuple(node.name)] = node
def getNodes(self):
return list(self.nodes.values())

Loading…
Cancel
Save