Add support for shared ansible_host in inventory

Today it is possible to create the following ansible inventory file:

  [foo]
  foo01 ansible_host=192.168.1.1

  [bar]
  bar01 ansible_host=192.168.1.1

Which allows a user to create multiple host aliases for a single
connection. This could be done with ansible groups; however, there are
some functional differences in how ansible runs in that configuration.

We could also request 2 nodes from nodepool, however in this case, it
would be a waste of CI resources because every alias would need a new
node from nodepool.

Now, a user is able to alias multiple host names to a single node from
nodepool by doing the following:

  nodeset:
    nodes:
      - name:
          - foo
          - bar
        label: ubuntu-xenial

This would result in a single node request from nodepool, but create
an inventory file with 2 aliases sharing a single ansible_host
variable.

Change-Id: I674d6baac26852ee1503feb1ed16c279bf773688
Signed-off-by: Paul Belanger <pabelanger@redhat.com>
This commit is contained in:
Paul Belanger 2017-11-18 15:23:29 -05:00
parent 11925ef217
commit ecb0b84f11
No known key found for this signature in database
GPG Key ID: 611A80832067AF38
9 changed files with 64 additions and 21 deletions

View File

@ -1201,7 +1201,9 @@ configuration may be simplified.
label: controller-label label: controller-label
- name: compute1 - name: compute1
label: compute-label label: compute-label
- name: compute2 - name:
- compute2
- web
label: compute-label label: compute-label
groups: groups:
- name: ceph-osd - name: ceph-osd
@ -1212,6 +1214,9 @@ configuration may be simplified.
- controller - controller
- compute1 - compute1
- compute2 - compute2
- name: ceph-web
nodes:
- web
.. attr:: nodeset .. attr:: nodeset
@ -1233,6 +1238,9 @@ configuration may be simplified.
The name of the node. This will appear in the Ansible inventory The name of the node. This will appear in the Ansible inventory
for the job. for the job.
This can also be given as a list of strings. If so, then the list of hosts in
the Ansible inventory will share a common ansible_host address.
.. attr:: label .. attr:: label
:required: :required:

View File

@ -1412,7 +1412,7 @@ class RecordingAnsibleJob(zuul.executor.server.AnsibleJob):
host['host_vars']['ansible_connection'] = 'local' host['host_vars']['ansible_connection'] = 'local'
hosts.append(dict( hosts.append(dict(
name='localhost', name=['localhost'],
host_vars=dict(ansible_connection='local'), host_vars=dict(ansible_connection='local'),
host_keys=[])) host_keys=[]))
return hosts return hosts

View File

@ -43,6 +43,16 @@
label: ubuntu-xenial label: ubuntu-xenial
run: playbooks/single-inventory.yaml run: playbooks/single-inventory.yaml
- job:
name: single-inventory-list
nodeset:
nodes:
- name:
- compute
- controller
label: ubuntu-xenial
run: playbooks/single-inventory.yaml
- job: - job:
name: group-inventory name: group-inventory
nodeset: nodeset1 nodeset: nodeset1

View File

@ -3,4 +3,5 @@
check: check:
jobs: jobs:
- single-inventory - single-inventory
- single-inventory-list
- group-inventory - group-inventory

View File

@ -57,6 +57,26 @@ class TestInventory(ZuulTestCase):
self.executor_server.release() self.executor_server.release()
self.waitUntilSettled() self.waitUntilSettled()
def test_single_inventory_list(self):
inventory = self._get_build_inventory('single-inventory-list')
all_nodes = ('compute', 'controller')
self.assertIn('all', inventory)
self.assertIn('hosts', inventory['all'])
self.assertIn('vars', inventory['all'])
for node_name in all_nodes:
self.assertIn(node_name, inventory['all']['hosts'])
self.assertIn('zuul', inventory['all']['vars'])
z_vars = inventory['all']['vars']['zuul']
self.assertIn('executor', z_vars)
self.assertIn('src_root', z_vars['executor'])
self.assertIn('job', z_vars)
self.assertEqual(z_vars['job'], 'single-inventory-list')
self.executor_server.release()
self.waitUntilSettled()
def test_group_inventory(self): def test_group_inventory(self):
inventory = self._get_build_inventory('group-inventory') inventory = self._get_build_inventory('group-inventory')

View File

@ -67,8 +67,8 @@ class TestNodepool(BaseTestCase):
# Test a simple node request # Test a simple node request
nodeset = model.NodeSet() nodeset = model.NodeSet()
nodeset.addNode(model.Node('controller', 'ubuntu-xenial')) nodeset.addNode(model.Node(['controller', 'foo'], 'ubuntu-xenial'))
nodeset.addNode(model.Node('compute', 'ubuntu-xenial')) nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob') job = model.Job('testjob')
job.nodeset = nodeset job.nodeset = nodeset
request = self.nodepool.requestNodes(None, job) request = self.nodepool.requestNodes(None, job)
@ -99,8 +99,8 @@ class TestNodepool(BaseTestCase):
# Test that node requests are re-submitted after disconnect # Test that node requests are re-submitted after disconnect
nodeset = model.NodeSet() nodeset = model.NodeSet()
nodeset.addNode(model.Node('controller', 'ubuntu-xenial')) nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
nodeset.addNode(model.Node('compute', 'ubuntu-xenial')) nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob') job = model.Job('testjob')
job.nodeset = nodeset job.nodeset = nodeset
self.fake_nodepool.paused = True self.fake_nodepool.paused = True
@ -116,8 +116,8 @@ class TestNodepool(BaseTestCase):
# Test that node requests can be canceled # Test that node requests can be canceled
nodeset = model.NodeSet() nodeset = model.NodeSet()
nodeset.addNode(model.Node('controller', 'ubuntu-xenial')) nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
nodeset.addNode(model.Node('compute', 'ubuntu-xenial')) nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob') job = model.Job('testjob')
job.nodeset = nodeset job.nodeset = nodeset
self.fake_nodepool.paused = True self.fake_nodepool.paused = True
@ -131,8 +131,8 @@ class TestNodepool(BaseTestCase):
# Test that a resubmitted request would not lock nodes # Test that a resubmitted request would not lock nodes
nodeset = model.NodeSet() nodeset = model.NodeSet()
nodeset.addNode(model.Node('controller', 'ubuntu-xenial')) nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
nodeset.addNode(model.Node('compute', 'ubuntu-xenial')) nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob') job = model.Job('testjob')
job.nodeset = nodeset job.nodeset = nodeset
request = self.nodepool.requestNodes(None, job) request = self.nodepool.requestNodes(None, job)
@ -152,8 +152,8 @@ class TestNodepool(BaseTestCase):
# Test that a lost request would not lock nodes # Test that a lost request would not lock nodes
nodeset = model.NodeSet() nodeset = model.NodeSet()
nodeset.addNode(model.Node('controller', 'ubuntu-xenial')) nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
nodeset.addNode(model.Node('compute', 'ubuntu-xenial')) nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob') job = model.Job('testjob')
job.nodeset = nodeset job.nodeset = nodeset
request = self.nodepool.requestNodes(None, job) request = self.nodepool.requestNodes(None, job)

View File

@ -340,7 +340,7 @@ class PragmaParser(object):
class NodeSetParser(object): class NodeSetParser(object):
@staticmethod @staticmethod
def getSchema(anonymous=False): def getSchema(anonymous=False):
node = {vs.Required('name'): str, node = {vs.Required('name'): to_list(str),
vs.Required('label'): str, vs.Required('label'): str,
} }
@ -365,11 +365,13 @@ class NodeSetParser(object):
node_names = set() node_names = set()
group_names = set() group_names = set()
for conf_node in as_list(conf['nodes']): for conf_node in as_list(conf['nodes']):
if conf_node['name'] in node_names: for name in as_list(conf_node['name']):
raise DuplicateNodeError(conf['name'], conf_node['name']) if name in node_names:
node = model.Node(conf_node['name'], conf_node['label']) raise DuplicateNodeError(name, conf_node['name'])
node = model.Node(as_list(conf_node['name']), conf_node['label'])
ns.addNode(node) ns.addNode(node)
node_names.add(conf_node['name']) for name in as_list(conf_node['name']):
node_names.add(name)
for conf_group in as_list(conf.get('groups', [])): for conf_group in as_list(conf.get('groups', [])):
for node_name in as_list(conf_group['nodes']): for node_name in as_list(conf_group['nodes']):
if node_name not in node_names: if node_name not in node_names:

View File

@ -495,7 +495,8 @@ def make_inventory_dict(nodes, groups, all_vars):
hosts = {} hosts = {}
for node in nodes: for node in nodes:
hosts[node['name']] = node['host_vars'] for name in node['name']:
hosts[name] = node['host_vars']
inventory = { inventory = {
'all': { 'all': {

View File

@ -497,9 +497,10 @@ class NodeSet(object):
return n return n
def addNode(self, node): def addNode(self, node):
if node.name in self.nodes: for name in node.name:
raise Exception("Duplicate node in %s" % (self,)) if name in self.nodes:
self.nodes[node.name] = node raise Exception("Duplicate node in %s" % (self,))
self.nodes[tuple(node.name)] = node
def getNodes(self): def getNodes(self):
return list(self.nodes.values()) return list(self.nodes.values())