
Completely switch to openstacksdk

Change-Id: I1729797fa03095d200c7334281915abc284b5732
Dmitry Tantsur, 4 months ago
commit eee74d31b8
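
This change drops metalsmith's hand-rolled `_os_api.API` wrapper around python-ironicclient and talks to the openstacksdk `Connection` (and its `baremetal` proxy) directly. A minimal sketch of the new call pattern, assuming a clouds.yaml entry named `mycloud` and a node called `node-1` (both placeholders, not part of this change):

    import openstack

    # openstack.connect() loads credentials from clouds.yaml / the environment.
    conn = openstack.connect(cloud='mycloud')

    # The baremetal proxy replaces the old ironicclient-based wrapper.
    node = conn.baremetal.get_node('node-1')
    node = conn.baremetal.update_node(node, instance_info={'root_gb': 10})
    conn.baremetal.validate_node(node)
    conn.baremetal.set_node_provision_state(node, 'active')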

lower-constraints.txt (+1 / -2)

@@ -4,9 +4,8 @@ fixtures==3.0.0
 flake8-import-order==0.13
 hacking==1.0.0
 mock==2.0
-openstacksdk==0.17.0
+openstacksdk==0.22.0
 pbr==2.0.0
-python-ironicclient==1.14.0
 Pygments==2.2.0
 requests==2.18.4
 six==1.10.0

metalsmith/_config.py (+33 / -38)

@@ -13,11 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import contextlib
 import json
-import os
-import shutil
-import tempfile
+import logging
+
+from openstack.baremetal import configdrive
+
+from metalsmith import _utils
+
+
+LOG = logging.getLogger(__name__)


 class InstanceConfig(object):
@@ -56,13 +60,12 @@ class InstanceConfig(object):
             kwargs.setdefault('ssh_authorized_keys', self.ssh_keys)
         self.users.append(kwargs)

-    @contextlib.contextmanager
-    def build_configdrive_directory(self, node, hostname):
-        """Build a configdrive from the provided information.
+    def build_configdrive(self, node, hostname):
+        """Make the config drive.

         :param node: `Node` object.
         :param hostname: instance hostname.
-        :return: a context manager yielding a directory with files
+        :return: configdrive contents as a base64-encoded string.
         """
         # NOTE(dtantsur): CirrOS does not understand lists
         if isinstance(self.ssh_keys, list):
@@ -70,33 +73,25 @@ class InstanceConfig(object):
         else:
             ssh_keys = self.ssh_keys

-        d = tempfile.mkdtemp()
-        try:
-            metadata = {'public_keys': ssh_keys,
-                        'uuid': node.uuid,
-                        'name': node.name,
-                        'hostname': hostname,
-                        'launch_index': 0,
-                        'availability_zone': '',
-                        'files': [],
-                        'meta': {}}
-            user_data = {}
-            if self.users:
-                user_data['users'] = self.users
-
-            for version in ('2012-08-10', 'latest'):
-                subdir = os.path.join(d, 'openstack', version)
-                if not os.path.exists(subdir):
-                    os.makedirs(subdir)
-
-                with open(os.path.join(subdir, 'meta_data.json'), 'w') as fp:
-                    json.dump(metadata, fp)
-
-                if user_data:
-                    with open(os.path.join(subdir, 'user_data'), 'w') as fp:
-                        fp.write("#cloud-config\n")
-                        json.dump(user_data, fp)
-
-            yield d
-        finally:
-            shutil.rmtree(d)
+        metadata = {'public_keys': ssh_keys,
+                    'uuid': node.id,
+                    'name': node.name,
+                    'hostname': hostname,
+                    'launch_index': 0,
+                    'availability_zone': '',
+                    'files': [],
+                    'meta': {}}
+        user_data = {}
+        user_data_bin = None
+
+        if self.users:
+            user_data['users'] = self.users
+
+        if user_data:
+            user_data_bin = ("#cloud-config\n" + json.dumps(user_data)).encode(
+                'utf-8')
+
+        LOG.debug('Generating configdrive tree for node %(node)s with '
+                  'metadata %(meta)s', {'node': _utils.log_res(node),
+                                        'meta': metadata})
+        return configdrive.build(metadata, user_data_bin)
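
The config drive is now assembled in memory by openstacksdk instead of being written to a temporary directory tree. A short sketch of the new helper, with hypothetical metadata values:

    from openstack.baremetal import configdrive

    # build() returns the config drive as a base64-encoded string, ready to be
    # passed to set_node_provision_state(..., config_drive=...).
    metadata = {'uuid': '1234', 'name': 'node-1', 'hostname': 'example.com',
                'public_keys': 'ssh-rsa AAAA...'}
    user_data = b'#cloud-config\n{"users": []}'
    cd = configdrive.build(metadata, user_data)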

metalsmith/_format.py (+2 / -2)

@@ -52,12 +52,12 @@ class DefaultFormat(object):
         else:
             message = "Unprovisioning started for node %(node)s"

-        _print(message, node=_utils.log_node(node))
+        _print(message, node=_utils.log_res(node))

     def show(self, instances):
         for instance in instances:
             _print("Node %(node)s, current state is %(state)s",
-                   node=_utils.log_node(instance.node), state=instance.state)
+                   node=_utils.log_res(instance.node), state=instance.state)

             if instance.is_deployed:
                 ips = instance.ip_addresses()

metalsmith/_instance.py (+11 / -11)

@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from metalsmith import _os_api
+from metalsmith import _utils


 _PROGRESS_STATES = frozenset(['deploying', 'wait call-back',
@@ -30,15 +30,15 @@ _HEALTHY_STATES = _PROGRESS_STATES | _ACTIVE_STATES
 class Instance(object):
     """Instance status in metalsmith."""

-    def __init__(self, api, node):
-        self._api = api
-        self._uuid = node.uuid
+    def __init__(self, connection, node):
+        self._connection = connection
+        self._uuid = node.id
         self._node = node

     @property
     def hostname(self):
         """Node's hostname."""
-        return self._node.instance_info.get(_os_api.HOSTNAME_FIELD)
+        return self._node.instance_info.get(_utils.GetNodeMixin.HOSTNAME_FIELD)

     def ip_addresses(self):
         """Returns IP addresses for this instance.
@@ -61,12 +61,12 @@ class Instance(object):

     @property
     def _is_deployed_by_metalsmith(self):
-        return _os_api.HOSTNAME_FIELD in self._node.instance_info
+        return _utils.GetNodeMixin.HOSTNAME_FIELD in self._node.instance_info

     @property
     def is_healthy(self):
         """Whether the node is not at fault or maintenance."""
-        return self.state in _HEALTHY_STATES and not self._node.maintenance
+        return self.state in _HEALTHY_STATES and not self._node.is_maintenance

     def nics(self):
         """List NICs for this instance.
@@ -75,10 +75,10 @@ class Instance(object):
             with full representations of their networks.
         """
         result = []
-        vifs = self._api.list_node_attached_ports(self.node)
+        vifs = self._connection.baremetal.list_node_vifs(self.node)
         for vif in vifs:
-            port = self._api.connection.network.get_port(vif.id)
-            port.network = self._api.connection.network.get_network(
+            port = self._connection.network.get_port(vif)
+            port.network = self._connection.network.get_network(
                 port.network_id)
             result.append(port)
         return result
@@ -110,7 +110,7 @@ class Instance(object):
         elif prov_state in _ERROR_STATES:
             return 'error'
         elif prov_state in _ACTIVE_STATES:
-            if self._node.maintenance:
+            if self._node.is_maintenance:
                 return 'maintenance'
             else:
                 return 'active'

metalsmith/_nics.py (+18 / -18)

@@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)
 class NICs(object):
     """Requested NICs."""

-    def __init__(self, api, node, nics):
+    def __init__(self, connection, node, nics):
         if nics is None:
             nics = []

@@ -38,7 +38,7 @@ class NICs(object):
                 raise TypeError("Each NIC must be a dict got %s" % nic)

         self._node = node
-        self._api = api
+        self._connection = connection
         self._nics = nics
         self._validated = None
         self.created_ports = []
@@ -68,25 +68,26 @@ class NICs(object):

         for nic_type, nic in self._validated:
             if nic_type == 'network':
-                port = self._api.connection.network.create_port(**nic)
+                port = self._connection.network.create_port(**nic)
                 self.created_ports.append(port.id)
                 LOG.info('Created port %(port)s for node %(node)s with '
                          '%(nic)s', {'port': _utils.log_res(port),
-                                     'node': _utils.log_node(self._node),
+                                     'node': _utils.log_res(self._node),
                                      'nic': nic})
             else:
                 port = nic

-            self._api.attach_port_to_node(self._node.uuid, port.id)
+            self._connection.baremetal.attach_vif_to_node(self._node,
+                                                          port.id)
             LOG.info('Attached port %(port)s to node %(node)s',
                      {'port': _utils.log_res(port),
-                      'node': _utils.log_node(self._node)})
+                      'node': _utils.log_res(self._node)})
             self.attached_ports.append(port.id)

     def detach_and_delete_ports(self):
         """Detach attached port and delete previously created ones."""
-        detach_and_delete_ports(self._api, self._node, self.created_ports,
-                                self.attached_ports)
+        detach_and_delete_ports(self._connection, self._node,
+                                self.created_ports, self.attached_ports)

     def _get_port(self, nic):
         """Validate and get the NIC information for a port.
@@ -100,7 +101,7 @@ class NICs(object):
                 'Unexpected fields for a port: %s' % ', '.join(unexpected))

         try:
-            port = self._api.connection.network.find_port(
+            port = self._connection.network.find_port(
                 nic['port'], ignore_missing=False)
         except Exception as exc:
             raise exceptions.InvalidNIC(
@@ -122,7 +123,7 @@ class NICs(object):
                 'Unexpected fields for a network: %s' % ', '.join(unexpected))

         try:
-            network = self._api.connection.network.find_network(
+            network = self._connection.network.find_network(
                 nic['network'], ignore_missing=False)
         except Exception as exc:
             raise exceptions.InvalidNIC(
@@ -136,33 +137,32 @@ class NICs(object):
         return port_args


-def detach_and_delete_ports(api, node, created_ports, attached_ports):
+def detach_and_delete_ports(connection, node, created_ports, attached_ports):
     """Detach attached port and delete previously created ones.

-    :param api: `Api` instance.
+    :param connection: `openstacksdk.Connection` instance.
     :param node: `Node` object to detach ports from.
     :param created_ports: List of IDs of previously created ports.
     :param attached_ports: List of IDs of previously attached_ports.
     """
     for port_id in set(attached_ports + created_ports):
         LOG.debug('Detaching port %(port)s from node %(node)s',
-                  {'port': port_id, 'node': node.uuid})
+                  {'port': port_id, 'node': _utils.log_res(node)})
         try:
-            api.detach_port_from_node(node, port_id)
+            connection.baremetal.detach_vif_from_node(node, port_id)
         except Exception as exc:
             LOG.debug('Failed to remove VIF %(vif)s from node %(node)s, '
                       'assuming already removed: %(exc)s',
-                      {'vif': port_id, 'node': _utils.log_node(node),
+                      {'vif': port_id, 'node': _utils.log_res(node),
                        'exc': exc})

     for port_id in created_ports:
         LOG.debug('Deleting port %s', port_id)
         try:
-            api.connection.network.delete_port(port_id,
-                                               ignore_missing=False)
+            connection.network.delete_port(port_id, ignore_missing=False)
         except Exception as exc:
             LOG.warning('Failed to delete neutron port %(port)s: %(exc)s',
                         {'port': port_id, 'exc': exc})
         else:
             LOG.info('Deleted port %(port)s for node %(node)s',
-                     {'port': port_id, 'node': _utils.log_node(node)})
+                     {'port': port_id, 'node': _utils.log_res(node)})
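
Port handling now goes through the SDK's network and baremetal proxies. A sketch of the create/attach/cleanup flow, assuming `conn` is an openstacksdk `Connection`, `node` is a bare metal node and the network ID is a placeholder:

    # Create a port and attach it as a VIF, mirroring create_and_attach_ports().
    port = conn.network.create_port(
        network_id='11111111-2222-3333-4444-555555555555')
    conn.baremetal.attach_vif_to_node(node, port.id)

    # Cleanup mirrors detach_and_delete_ports().
    conn.baremetal.detach_vif_from_node(node, port.id)
    conn.network.delete_port(port.id, ignore_missing=False)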

metalsmith/_os_api.py (+0 / -182)

@@ -1,182 +0,0 @@
-# Copyright 2015-2018 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import contextlib
-import logging
-
-from ironicclient import client as ir_client
-import six
-
-from metalsmith import _utils
-
-
-LOG = logging.getLogger(__name__)
-HOSTNAME_FIELD = 'metalsmith_hostname'
-
-
-class _Remove(object):
-    """Indicator that a field should be removed."""
-
-    __slots__ = ()
-
-    def __repr__(self):
-        """Allow nicer logging."""
-        return '<REMOVE>'
-
-
-REMOVE = _Remove()
-
-
-class DictWithAttrs(dict):
-    __slots__ = ()
-
-    def __getattr__(self, attr):
-        try:
-            return self[attr]
-        except KeyError:
-            super(DictWithAttrs, self).__getattr__(attr)
-
-
-class API(object):
-    """Various OpenStack API's."""
-
-    IRONIC_VERSION = '1'
-    # TODO(dtantsur): use openstacksdk and stop hardcoding this here.
-    # 1.46 (Rocky) adds conductor_group.
-    IRONIC_MICRO_VERSION = '1.46'
-
-    _node_list = None
-
-    def __init__(self, session, connection):
-        self.ironic = ir_client.get_client(
-            self.IRONIC_VERSION, session=session,
-            os_ironic_api_version=self.IRONIC_MICRO_VERSION)
-        self.connection = connection
-
-    def _nodes_for_lookup(self):
-        return self.list_nodes(maintenance=None,
-                               associated=None,
-                               provision_state=None,
-                               fields=['uuid', 'name', 'instance_info'])
-
-    def attach_port_to_node(self, node, port_id):
-        self.ironic.node.vif_attach(_node_id(node), port_id)
-
-    @contextlib.contextmanager
-    def cache_node_list_for_lookup(self):
-        if self._node_list is None:
-            self._node_list = self._nodes_for_lookup()
-        yield self._node_list
-        self._node_list = None
-
-    def detach_port_from_node(self, node, port_id):
-        self.ironic.node.vif_detach(_node_id(node), port_id)
-
-    def find_node_by_hostname(self, hostname):
-        nodes = self._node_list or self._nodes_for_lookup()
-        existing = [n for n in nodes
-                    if n.instance_info.get(HOSTNAME_FIELD) == hostname]
-        if len(existing) > 1:
-            raise RuntimeError("More than one node found with hostname "
-                               "%(host)s: %(nodes)s" %
-                               {'host': hostname,
-                                'nodes': ', '.join(_utils.log_node(n)
-                                                   for n in existing)})
-        elif not existing:
-            return None
-        else:
-            # Fetch the complete node record
-            return self.get_node(existing[0].uuid, accept_hostname=False)
-
-    def get_node(self, node, refresh=False, accept_hostname=False):
-        if isinstance(node, six.string_types):
-            if accept_hostname and _utils.is_hostname_safe(node):
-                by_hostname = self.find_node_by_hostname(node)
-                if by_hostname is not None:
-                    return by_hostname
-
-            return self.ironic.node.get(node)
-        elif hasattr(node, 'node'):
-            # Instance object
-            node = node.node
-        else:
-            node = node
-
-        if refresh:
-            return self.ironic.node.get(node.uuid)
-        else:
-            return node
-
-    def list_node_attached_ports(self, node):
-        return self.ironic.node.vif_list(_node_id(node))
-
-    def list_node_ports(self, node):
-        return self.ironic.node.list_ports(_node_id(node), limit=0)
-
-    def list_nodes(self, maintenance=False, associated=False,
-                   provision_state='available', **filters):
-        if 'fields' not in filters:
-            filters['detail'] = True
-        return self.ironic.node.list(limit=0, maintenance=maintenance,
-                                     associated=associated,
-                                     provision_state=provision_state,
-                                     **filters)
-
-    def node_action(self, node, action, **kwargs):
-        self.ironic.node.set_provision_state(_node_id(node), action, **kwargs)
-
-    def release_node(self, node):
-        return self.update_node(_node_id(node), instance_uuid=REMOVE)
-
-    def reserve_node(self, node, instance_uuid):
-        return self.update_node(_node_id(node), instance_uuid=instance_uuid)
-
-    def update_node(self, node, *args, **attrs):
-        if args:
-            attrs.update(args[0])
-        patches = _convert_patches(attrs)
-        return self.ironic.node.update(_node_id(node), patches)
-
-    def validate_node(self, node, validate_deploy=False):
-        ifaces = ['power', 'management']
-        if validate_deploy:
-            ifaces += ['deploy']
-
-        validation = self.ironic.node.validate(_node_id(node))
-        for iface in ifaces:
-            result = getattr(validation, iface)
-            if not result['result']:
-                raise RuntimeError('%s: %s' % (iface, result['reason']))
-
-
-def _node_id(node):
-    if isinstance(node, six.string_types):
-        return node
-    else:
-        return node.uuid
-
-
-def _convert_patches(attrs):
-    patches = []
-    for key, value in attrs.items():
-        if not key.startswith('/'):
-            key = '/' + key
-
-        if value is REMOVE:
-            patches.append({'op': 'remove', 'path': key})
-        else:
-            patches.append({'op': 'add', 'path': key, 'value': value})
-
-    return patches

metalsmith/_provisioner.py (+99 / -142)

@@ -16,7 +16,6 @@
 import logging
 import random
 import sys
-import time
 import warnings

 from openstack import connection
@@ -25,7 +24,6 @@ import six
 from metalsmith import _config
 from metalsmith import _instance
 from metalsmith import _nics
-from metalsmith import _os_api
 from metalsmith import _scheduler
 from metalsmith import _utils
 from metalsmith import exceptions
@@ -38,7 +36,7 @@ _CREATED_PORTS = 'metalsmith_created_ports'
 _ATTACHED_PORTS = 'metalsmith_attached_ports'


-class Provisioner(object):
+class Provisioner(_utils.GetNodeMixin):
     """API to deploy/undeploy nodes with OpenStack.

     :param session: `Session` object (from ``keystoneauth``) to use when
@@ -63,11 +61,7 @@ class Provisioner(object):
                             'but not both')
         else:
             self.connection = connection.Connection(config=cloud_region)
-            # NOTE(dtantsur): Connection.baremetal is a keystoneauth Adapter
-            # for baremetal API.
-            session = self.connection.baremetal

-        self._api = _os_api.API(session, self.connection)
         self._dry_run = dry_run

     def reserve_node(self, resource_class=None, conductor_group=None,
@@ -103,13 +97,15 @@ class Provisioner(object):
                           DeprecationWarning)

         if candidates:
-            nodes = [self._api.get_node(node) for node in candidates]
+            nodes = [self._get_node(node) for node in candidates]
             filters = [
                 _scheduler.NodeTypeFilter(resource_class, conductor_group),
             ]
         else:
-            nodes = self._api.list_nodes(resource_class=resource_class,
-                                         conductor_group=conductor_group)
+            nodes = list(self.connection.baremetal.nodes(
+                resource_class=resource_class,
+                conductor_group=conductor_group,
+                details=True))
             if not nodes:
                 raise exceptions.NodesNotFound(resource_class, conductor_group)
             # Ensure parallel executions don't try nodes in the same sequence
@@ -124,18 +120,16 @@ class Provisioner(object):
         if predicate is not None:
             filters.append(_scheduler.CustomPredicateFilter(predicate))

-        reserver = _scheduler.IronicReserver(self._api)
-        node = _scheduler.schedule_node(nodes, filters, reserver,
-                                        dry_run=self._dry_run)
-
-        update = {}
+        instance_info = {}
         if capabilities:
-            update['/instance_info/capabilities'] = capabilities
+            instance_info['capabilities'] = capabilities
         if traits:
-            update['/instance_info/traits'] = traits
-        if update:
-            node = self._api.update_node(node, update)
+            instance_info['traits'] = traits
+        reserver = _scheduler.IronicReserver(self.connection,
+                                             instance_info)

+        node = _scheduler.schedule_node(nodes, filters, reserver,
+                                        dry_run=self._dry_run)
         LOG.debug('Reserved node: %s', node)
         return node

@@ -148,28 +142,29 @@ class Provisioner(object):
         reserved by us or are in maintenance mode.
         """
         try:
-            node = self._api.get_node(node)
+            node = self._get_node(node)
         except Exception as exc:
             raise exceptions.InvalidNode('Cannot find node %(node)s: %(exc)s' %
                                          {'node': node, 'exc': exc})

-        if not node.instance_uuid:
+        if not node.instance_id:
             if not self._dry_run:
                 LOG.debug('Node %s not reserved yet, reserving',
-                          _utils.log_node(node))
-                self._api.reserve_node(node, instance_uuid=node.uuid)
-        elif node.instance_uuid != node.uuid:
+                          _utils.log_res(node))
+                self.connection.baremetal.update_node(
+                    node, instance_id=node.id)
+        elif node.instance_id != node.id:
             raise exceptions.InvalidNode('Node %(node)s already reserved '
                                          'by instance %(inst)s outside of '
                                          'metalsmith, cannot deploy on it' %
-                                         {'node': _utils.log_node(node),
-                                          'inst': node.instance_uuid})
+                                         {'node': _utils.log_res(node),
+                                          'inst': node.instance_id})

-        if node.maintenance:
+        if node.is_maintenance:
             raise exceptions.InvalidNode('Refusing to deploy on node %(node)s '
                                          'which is in maintenance mode due to '
                                          '%(reason)s' %
-                                         {'node': _utils.log_node(node),
+                                         {'node': _utils.log_res(node),
                                           'reason': node.maintenance_reason})

         return node
@@ -187,17 +182,17 @@ class Provisioner(object):
             if node.name and _utils.is_hostname_safe(node.name):
                 return node.name
             else:
-                return node.uuid
+                return node.id

         if not _utils.is_hostname_safe(hostname):
             raise ValueError("%s cannot be used as a hostname" % hostname)

-        existing = self._api.find_node_by_hostname(hostname)
-        if existing is not None and existing.uuid != node.uuid:
+        existing = self._find_node_by_hostname(hostname)
+        if existing is not None and existing.id != node.id:
             raise ValueError("The following node already uses hostname "
                              "%(host)s: %(node)s" %
                              {'host': hostname,
-                              'node': _utils.log_node(existing)})
+                              'node': _utils.log_res(existing)})

         return hostname

@@ -256,7 +251,7 @@ class Provisioner(object):
             image = sources.GlanceImage(image)

         node = self._check_node_for_deploy(node)
-        nics = _nics.NICs(self._api, node, nics)
+        nics = _nics.NICs(self.connection, node, nics)

         try:
             hostname = self._check_hostname(node, hostname)
@@ -271,62 +266,71 @@ class Provisioner(object):

             if self._dry_run:
                 LOG.warning('Dry run, not provisioning node %s',
-                            _utils.log_node(node))
+                            _utils.log_res(node))
                 return node

             nics.create_and_attach_ports()

             capabilities['boot_option'] = 'netboot' if netboot else 'local'

-            updates = {'/instance_info/root_gb': root_size_gb,
-                       '/instance_info/capabilities': capabilities,
-                       '/extra/%s' % _CREATED_PORTS: nics.created_ports,
-                       '/extra/%s' % _ATTACHED_PORTS: nics.attached_ports,
-                       '/instance_info/%s' % _os_api.HOSTNAME_FIELD: hostname}
-            updates.update(image._node_updates(self.connection))
+            instance_info = node.instance_info.copy()
+            instance_info['root_gb'] = root_size_gb
+            instance_info['capabilities'] = capabilities
+            instance_info[self.HOSTNAME_FIELD] = hostname
+            extra = node.extra.copy()
+            extra[_CREATED_PORTS] = nics.created_ports
+            extra[_ATTACHED_PORTS] = nics.attached_ports
+            instance_info.update(image._node_updates(self.connection))
             if traits is not None:
-                updates['/instance_info/traits'] = traits
+                instance_info['traits'] = traits
             if swap_size_mb is not None:
-                updates['/instance_info/swap_mb'] = swap_size_mb
+                instance_info['swap_mb'] = swap_size_mb

-            LOG.debug('Updating node %(node)s with %(updates)s',
-                      {'node': _utils.log_node(node), 'updates': updates})
-            node = self._api.update_node(node, updates)
-            self._api.validate_node(node, validate_deploy=True)
+            LOG.debug('Updating node %(node)s with instance info %(iinfo)s '
+                      'and extras %(extra)s', {'node': _utils.log_res(node),
+                                               'iinfo': instance_info,
+                                               'extra': extra})
+            node = self.connection.baremetal.update_node(
+                node, instance_info=instance_info, extra=extra)
+            self.connection.baremetal.validate_node(node)

             LOG.debug('Generating a configdrive for node %s',
-                      _utils.log_node(node))
-            with config.build_configdrive_directory(node, hostname) as cd:
-                self._api.node_action(node, 'active',
-                                      configdrive=cd)
+                      _utils.log_res(node))
+            cd = config.build_configdrive(node, hostname)
+            # TODO(dtantsur): move this to openstacksdk?
+            if not isinstance(cd, six.string_types):
+                cd = cd.decode('utf-8')
+            LOG.debug('Starting provisioning of node %s', _utils.log_res(node))
+            self.connection.baremetal.set_node_provision_state(
+                node, 'active', config_drive=cd)
         except Exception:
             exc_info = sys.exc_info()

             try:
                 LOG.error('Deploy attempt failed on node %s, cleaning up',
-                          _utils.log_node(node))
+                          _utils.log_res(node))
                 self._clean_up(node, nics=nics)
             except Exception:
                 LOG.exception('Clean up failed')

             six.reraise(*exc_info)

-        LOG.info('Provisioning started on node %s', _utils.log_node(node))
+        LOG.info('Provisioning started on node %s', _utils.log_res(node))

         if wait is not None:
             LOG.debug('Waiting for node %(node)s to reach state active '
                       'with timeout %(timeout)s',
-                      {'node': _utils.log_node(node), 'timeout': wait})
+                      {'node': _utils.log_res(node), 'timeout': wait})
             instance = self.wait_for_provisioning([node], timeout=wait)[0]
-            LOG.info('Deploy succeeded on node %s', _utils.log_node(node))
+            LOG.info('Deploy succeeded on node %s', _utils.log_res(node))
         else:
             # Update the node to return its latest state
-            node = self._api.get_node(node, refresh=True)
-            instance = _instance.Instance(self._api, node)
+            node = self._get_node(node, refresh=True)
+            instance = _instance.Instance(self.connection, node)

         return instance

-    def wait_for_provisioning(self, nodes, timeout=None, delay=15):
+    def wait_for_provisioning(self, nodes, timeout=None, delay=None):
         """Wait for nodes to be provisioned.

         Loops until all nodes finish provisioning.
@@ -336,96 +340,46 @@ class Provisioner(object):
         :param timeout: How much time (in seconds) to wait for all nodes
             to finish provisioning. If ``None`` (the default), wait forever
             (more precisely, until the operation times out on server side).
-        :param delay: Delay (in seconds) between two provision state checks.
+        :param delay: DEPRECATED, do not use.
         :return: List of updated :py:class:`metalsmith.Instance` objects if
             all succeeded.
         :raises: :py:class:`metalsmith.exceptions.DeploymentFailure`
             if the deployment failed or timed out for any nodes.
         """
-        nodes = self._wait_for_state(nodes, 'active',
-                                     timeout=timeout, delay=delay)
-        return [_instance.Instance(self._api, node) for node in nodes]
-
-    def _wait_for_state(self, nodes, state, timeout, delay=15):
-        if timeout is not None and timeout <= 0:
-            raise ValueError("The timeout argument must be a positive int")
-        if delay < 0:
-            raise ValueError("The delay argument must be a non-negative int")
-
-        failed_nodes = []
-        finished_nodes = []
-
-        deadline = time.time() + timeout if timeout is not None else None
-        while timeout is None or time.time() < deadline:
-            remaining_nodes = []
-            for node in nodes:
-                node = self._api.get_node(node, refresh=True,
-                                          accept_hostname=True)
-                if node.provision_state == state:
-                    LOG.debug('Node %(node)s reached state %(state)s',
-                              {'node': _utils.log_node(node), 'state': state})
-                    finished_nodes.append(node)
-                elif (node.provision_state == 'error' or
-                      node.provision_state.endswith(' failed')):
-                    LOG.error('Node %(node)s failed deployment: %(error)s',
-                              {'node': _utils.log_node(node),
-                               'error': node.last_error})
-                    failed_nodes.append(node)
-                else:
-                    remaining_nodes.append(node)
-
-            if remaining_nodes:
-                nodes = remaining_nodes
-            else:
-                nodes = []
-                break
-
-            LOG.debug('Still waiting for the following nodes to reach state '
-                      '%(state)s: %(nodes)s',
-                      {'state': state,
-                       'nodes': ', '.join(_utils.log_node(n) for n in nodes)})
-            time.sleep(delay)
-
-        messages = []
-        if failed_nodes:
-            messages.append('the following nodes failed deployment: %s' %
-                            ', '.join('%s (%s)' % (_utils.log_node(node),
-                                                   node.last_error)
-                                      for node in failed_nodes))
-        if nodes:
-            messages.append('deployment timed out for nodes %s' %
-                            ', '.join(_utils.log_node(node) for node in nodes))
-
-        if messages:
-            raise exceptions.DeploymentFailure(
-                'Deployment failed: %s' % '; '.join(messages),
-                failed_nodes + nodes)
-        else:
-            LOG.debug('All nodes reached state %s', state)
-            return finished_nodes
+        if delay is not None:
+            warnings.warn("The delay argument to wait_for_provisioning is "
+                          "deprecated and has no effect", DeprecationWarning)
+        nodes = [self._get_node(n, accept_hostname=True) for n in nodes]
+        nodes = self.connection.baremetal.wait_for_nodes_provision_state(
+            nodes, 'active', timeout=timeout)
+        return [_instance.Instance(self.connection, node) for node in nodes]

     def _clean_up(self, node, nics=None):
         if nics is None:
             created_ports = node.extra.get(_CREATED_PORTS, [])
             attached_ports = node.extra.get(_ATTACHED_PORTS, [])
-            _nics.detach_and_delete_ports(self._api, node, created_ports,
-                                          attached_ports)
+            _nics.detach_and_delete_ports(self.connection, node,
+                                          created_ports, attached_ports)
         else:
             nics.detach_and_delete_ports()

-        update = {'/extra/%s' % item: _os_api.REMOVE
-                  for item in (_CREATED_PORTS, _ATTACHED_PORTS)}
-        update['/instance_info/%s' % _os_api.HOSTNAME_FIELD] = _os_api.REMOVE
-        LOG.debug('Updating node %(node)s with %(updates)s',
-                  {'node': _utils.log_node(node), 'updates': update})
+        extra = node.extra.copy()
+        for item in (_CREATED_PORTS, _ATTACHED_PORTS):
+            extra.pop(item, None)
+        instance_info = node.instance_info.copy()
+        instance_info.pop(self.HOSTNAME_FIELD, None)
+        LOG.debug('Updating node %(node)s with instance info %(iinfo)s '
+                  'and extras %(extra)s and releasing the lock',
+                  {'node': _utils.log_res(node),
+                   'iinfo': instance_info,
+                   'extra': extra})
         try:
-            self._api.update_node(node, update)
+            self.connection.baremetal.update_node(
+                node, instance_info=instance_info, extra=extra,
+                instance_id=None)
         except Exception as exc:
             LOG.debug('Failed to clear node %(node)s extra: %(exc)s',
-                      {'node': _utils.log_node(node), 'exc': exc})
-
-        LOG.debug('Releasing lock on node %s', _utils.log_node(node))
-        self._api.release_node(node)
+                      {'node': _utils.log_res(node), 'exc': exc})

     def unprovision_node(self, node, wait=None):
         """Unprovision a previously provisioned node.
@@ -436,21 +390,23 @@ class Provisioner(object):
             None to return immediately.
         :return: the latest `Node` object.
         """
-        node = self._api.get_node(node, accept_hostname=True)
+        node = self._get_node(node, accept_hostname=True)
         if self._dry_run:
             LOG.warning("Dry run, not unprovisioning")
             return

         self._clean_up(node)
-        self._api.node_action(node, 'deleted')
+        node = self.connection.baremetal.set_node_provision_state(
+            node, 'deleted', wait=False)

-        LOG.info('Deleting started for node %s', _utils.log_node(node))
+        LOG.info('Deleting started for node %s', _utils.log_res(node))

         if wait is not None:
-            self._wait_for_state([node], 'available', timeout=wait)
-            LOG.info('Node %s undeployed successfully', _utils.log_node(node))
+            node = self.connection.baremetal.wait_for_nodes_provision_state(
+                [node], 'available', timeout=wait)[0]
+            LOG.info('Node %s undeployed successfully', _utils.log_res(node))

-        return self._api.get_node(node, refresh=True)
+        return node

     def show_instance(self, instance_id):
         """Show information about instance.
@@ -470,11 +426,11 @@ class Provisioner(object):
         :return: list of :py:class:`metalsmith.Instance` objects in the same
             order as ``instances``.
         """
-        with self._api.cache_node_list_for_lookup():
+        with self._cache_node_list_for_lookup():
             return [
                 _instance.Instance(
-                    self._api,
-                    self._api.get_node(inst, accept_hostname=True))
+                    self.connection,
+                    self._get_node(inst, accept_hostname=True))
                 for inst in instances
             ]

@@ -483,8 +439,9 @@ class Provisioner(object):

         :return: list of :py:class:`metalsmith.Instance` objects.
         """
-        nodes = self._api.list_nodes(provision_state=None, associated=True)
+        nodes = self.connection.baremetal.nodes(associated=True, details=True)
         instances = [i for i in
-                     (_instance.Instance(self._api, node) for node in nodes)
+                     (_instance.Instance(self.connection, node)
+                      for node in nodes)
                      if i._is_deployed_by_metalsmith]
         return instances
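
Waiting for provision states is now delegated to openstacksdk rather than the removed `_wait_for_state` polling loop. A usage sketch, with `conn` and `nodes` as placeholders:

    # Blocks until every node reaches 'active', raising on error or timeout.
    nodes = conn.baremetal.wait_for_nodes_provision_state(
        nodes, 'active', timeout=3600)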

metalsmith/_scheduler.py (+21 / -16)

@@ -17,6 +17,7 @@ import abc
 import collections
 import logging

+from openstack import exceptions as sdk_exc
 import six

 from metalsmith import _utils
@@ -100,13 +101,13 @@ def schedule_node(nodes, filters, reserver, dry_run=False):
     for node in nodes:
         try:
             result = reserver(node)
-        except Exception as exc:
+        except sdk_exc.SDKException as exc:
             LOG.debug('Node %(node)s was not reserved (%(exc)s), moving on '
                       'to the next one',
-                      {'node': _utils.log_node(node), 'exc': exc})
+                      {'node': _utils.log_res(node), 'exc': exc})
         else:
             LOG.info('Node %s reserved for deployment',
-                     _utils.log_node(result))
+                     _utils.log_res(result))
             return result

     LOG.debug('No nodes could be reserved')
@@ -149,25 +150,25 @@ class CapabilitiesFilter(Filter):
             caps = _utils.get_capabilities(node)
         except Exception:
             LOG.exception('Malformed capabilities on node %(node)s: %(caps)s',
-                          {'node': _utils.log_node(node),
+                          {'node': _utils.log_res(node),
                            'caps': node.properties.get('capabilities')})
             return False

         LOG.debug('Capabilities for node %(node)s: %(caps)s',
-                  {'node': _utils.log_node(node), 'caps': caps})
+                  {'node': _utils.log_res(node), 'caps': caps})
         for key, value in self._capabilities.items():
             try:
                 node_value = caps[key]
             except KeyError:
                 LOG.debug('Node %(node)s does not have capability %(cap)s',
-                          {'node': _utils.log_node(node), 'cap': key})
+                          {'node': _utils.log_res(node), 'cap': key})
                 return False
             else:
                 self._counter["%s=%s" % (key, node_value)] += 1
                 if value != node_value:
                     LOG.debug('Node %(node)s has capability %(cap)s of '
                               'value "%(node_val)s" instead of "%(expected)s"',
-                              {'node': _utils.log_node(node), 'cap': key,
+                              {'node': _utils.log_res(node), 'cap': key,
                                'node_val': node_value, 'expected': value})
                     return False

@@ -197,14 +198,14 @@ class TraitsFilter(Filter):

         traits = node.traits or []
         LOG.debug('Traits for node %(node)s: %(traits)s',
-                  {'node': _utils.log_node(node), 'traits': traits})
+                  {'node': _utils.log_res(node), 'traits': traits})
         for trait in traits:
             self._counter[trait] += 1

         missing = set(self._traits) - set(traits)
         if missing:
             LOG.debug('Node %(node)s does not have traits %(missing)s',
-                      {'node': _utils.log_node(node), 'missing': missing})
+                      {'node': _utils.log_res(node), 'missing': missing})
             return False

         return True
@@ -239,24 +240,28 @@ class CustomPredicateFilter(Filter):

 class IronicReserver(Reserver):

-    def __init__(self, api):
-        self._api = api
+    def __init__(self, connection, instance_info=None):
+        self._connection = connection
         self._failed_nodes = []
+        self._iinfo = instance_info or {}

     def validate(self, node):
         try:
-            self._api.validate_node(node)
-        except RuntimeError as exc:
+            self._connection.baremetal.validate_node(
+                node, required=('power', 'management'))
+        except sdk_exc.SDKException as exc:
             message = ('Node %(node)s failed validation: %(err)s' %
-                       {'node': _utils.log_node(node), 'err': exc})
+                       {'node': _utils.log_res(node), 'err': exc})
             LOG.warning(message)
             raise exceptions.ValidationFailed(message)

     def __call__(self, node):
         try:
             self.validate(node)
-            return self._api.reserve_node(node, instance_uuid=node.uuid)
-        except Exception:
+            iinfo = dict(node.instance_info or {}, **self._iinfo)
+            return self._connection.baremetal.update_node(
+                node, instance_id=node.id, instance_info=iinfo)
+        except sdk_exc.SDKException:
             self._failed_nodes.append(node)
             raise

metalsmith/_utils.py (+65 / -9)

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import contextlib
 import re

 import six
@@ -20,13 +21,6 @@ import six
 from metalsmith import exceptions


-def log_node(node):
-    if node.name:
-        return '%s (UUID %s)' % (node.name, node.uuid)
-    else:
-        return node.uuid
-
-
 def log_res(res):
     if getattr(res, 'name', None):
         return '%s (UUID %s)' % (res.name, res.id)
@@ -56,12 +50,12 @@ def get_root_disk(root_size_gb, node):
         except KeyError:
             raise exceptions.UnknownRootDiskSize(
                 'No local_gb for node %s and no root partition size '
-                'specified' % log_node(node))
+                'specified' % log_res(node))
         except (TypeError, ValueError, AssertionError):
             raise exceptions.UnknownRootDiskSize(
                 'The local_gb for node %(node)s is invalid: '
                 'expected positive integer, got %(value)s' %
-                {'node': log_node(node),
+                {'node': log_res(node),
                  'value': node.properties['local_gb']})

         # allow for partitioning and config drive
@@ -104,3 +98,65 @@ def parse_checksums(checksums):
         result[fname.strip().lstrip('*')] = checksum.strip()

     return result
+
+
+class GetNodeMixin(object):
+    """A helper mixin for getting nodes with hostnames."""
+
+    HOSTNAME_FIELD = 'metalsmith_hostname'
+
+    _node_list = None
+
+    def _available_nodes(self):
+        return self.connection.baremetal.nodes(details=True,
+                                               associated=False,
+                                               provision_state='available',
+                                               is_maintenance=False)
+
+    def _nodes_for_lookup(self):
+        return self.connection.baremetal.nodes(
+            fields=['uuid', 'name', 'instance_info'])
+
+    def _find_node_by_hostname(self, hostname):
+        """A helper to find a node by metalsmith hostname."""
+        nodes = self._node_list or self._nodes_for_lookup()
+        existing = [n for n in nodes
+                    if n.instance_info.get(self.HOSTNAME_FIELD) == hostname]
+        if len(existing) > 1:
+            raise RuntimeError("More than one node found with hostname "
+                               "%(host)s: %(nodes)s" %
+                               {'host': hostname,
+                                'nodes': ', '.join(log_res(n)
+                                                   for n in existing)})
+        elif not existing:
+            return None
+        else:
+            # Fetch the complete node information before returning
+            return self.connection.baremetal.get_node(existing[0].id)
+
+    def _get_node(self, node, refresh=False, accept_hostname=False):
+        """A helper to find and return a node."""
+        if isinstance(node, six.string_types):
+            if accept_hostname and is_hostname_safe(node):
+                by_hostname = self._find_node_by_hostname(node)
+                if by_hostname is not None:
+                    return by_hostname
+
+            return self.connection.baremetal.get_node(node)
+        elif hasattr(node, 'node'):
+            # Instance object
+            node = node.node
+        else:
+            node = node
+
+        if refresh:
+            return self.connection.baremetal.get_node(node)
+        else:
+            return node
+
+    @contextlib.contextmanager
+    def _cache_node_list_for_lookup(self):
+        if self._node_list is None:
+            self._node_list = list(self._nodes_for_lookup())
+        yield self._node_list
+        self._node_list = None

metalsmith/sources.py (+10 / -10)

@@ -69,12 +69,12 @@ class GlanceImage(_Source):
         LOG.debug('Image: %s', self._image_obj)

         updates = {
-            '/instance_info/image_source': self._image_obj.id
+            'image_source': self._image_obj.id
         }
         for prop in ('kernel', 'ramdisk'):
             value = getattr(self._image_obj, '%s_id' % prop, None)
             if value:
-                updates['/instance_info/%s' % prop] = value
+                updates[prop] = value

         return updates

@@ -144,8 +144,8 @@ class HttpWholeDiskImage(_Source):
         LOG.debug('Image: %(image)s, checksum %(checksum)s',
                   {'image': self.url, 'checksum': self.checksum})
         return {
-            '/instance_info/image_source': self.url,
-            '/instance_info/image_checksum': self.checksum,
+            'image_source': self.url,
+            'image_checksum': self.checksum,
         }


@@ -172,8 +172,8 @@ class HttpPartitionImage(HttpWholeDiskImage):

     def _node_updates(self, connection):
         updates = super(HttpPartitionImage, self)._node_updates(connection)
-        updates['/instance_info/kernel'] = self.kernel_url
-        updates['/instance_info/ramdisk'] = self.ramdisk_url
+        updates['kernel'] = self.kernel_url
+        updates['ramdisk'] = self.ramdisk_url
         return updates


@@ -203,8 +203,8 @@ class FileWholeDiskImage(_Source):
         LOG.debug('Image: %(image)s, checksum %(checksum)s',
                   {'image': self.location, 'checksum': self.checksum})
         return {
-            '/instance_info/image_source': self.location,
-            '/instance_info/image_checksum': self.checksum,
+            'image_source': self.location,
+            'image_checksum': self.checksum,
        }


@@ -239,6 +239,6 @@ class FilePartitionImage(FileWholeDiskImage):

     def _node_updates(self, connection):
         updates = super(FilePartitionImage, self)._node_updates(connection)
-        updates['/instance_info/kernel'] = self.kernel_location
-        updates['/instance_info/ramdisk'] = self.ramdisk_location
+        updates['kernel'] = self.kernel_location
+        updates['ramdisk'] = self.ramdisk_location
         return updates

metalsmith/test/test_cmd.py (+7 / -7)

@@ -75,7 +75,7 @@ class TestDeploy(testtools.TestCase):
         instance = mock_pr.return_value.provision_node.return_value
         instance.create_autospec(_instance.Instance)
         instance.node.name = None
-        instance.node.uuid = '123'
+        instance.node.id = '123'
         instance.state = 'active'
         instance.is_deployed = True
         instance.ip_addresses.return_value = {'private': ['1.2.3.4']}
@@ -127,7 +127,7 @@ class TestDeploy(testtools.TestCase):
         instance.is_deployed = True
         instance.ip_addresses.return_value = {}
         instance.node.name = None
-        instance.node.uuid = '123'
+        instance.node.id = '123'
         instance.state = 'active'

         args = ['deploy', '--network', 'mynet', '--image', 'myimg',
@@ -142,7 +142,7 @@ class TestDeploy(testtools.TestCase):
         instance.create_autospec(_instance.Instance)
         instance.is_deployed = False
         instance.node.name = None
-        instance.node.uuid = '123'
+        instance.node.id = '123'
         instance.state = 'deploying'

         args = ['deploy', '--network', 'mynet', '--image', 'myimg',
@@ -487,7 +487,7 @@ class TestUndeploy(testtools.TestCase):

     def test_ok(self, mock_os_conf, mock_pr):
         node = mock_pr.return_value.unprovision_node.return_value
-        node.uuid = '123'
+        node.id = '123'
         node.name = None
         node.provision_state = 'cleaning'

@@ -506,7 +506,7 @@ class TestUndeploy(testtools.TestCase):

     def test_custom_wait(self, mock_os_conf, mock_pr):
         node = mock_pr.return_value.unprovision_node.return_value
-        node.uuid = '123'
+        node.id = '123'
         node.name = None
         node.provision_state = 'available'

@@ -580,9 +580,9 @@ class TestShowWait(testtools.TestCase):
             for hostname in ['hostname1', 'hostname2']
         ]
         for inst in self.instances:
-            inst.node.uuid = inst.uuid
+            inst.node.id = inst.uuid
             inst.node.name = 'name-%s' % inst.uuid
-            inst.to_dict.return_value = {inst.node.uuid: inst.node.name}
+            inst.to_dict.return_value = {inst.node.id: inst.node.name}

     def test_show(self, mock_os_conf, mock_pr):
         mock_pr.return_value.show_instances.return_value = self.instances

metalsmith/test/test_config.py (+15 / -20)

@@ -14,9 +14,9 @@
14 14
 # limitations under the License.
15 15
 
16 16
 import json
17
-import os
18 17
 
19 18
 import mock
19
+from openstack.baremetal import configdrive
20 20
 import testtools
21 21
 
22 22
 from metalsmith import _config
@@ -25,7 +25,7 @@ from metalsmith import _config
25 25
 class TestInstanceConfig(testtools.TestCase):
26 26
     def setUp(self):
27 27
         super(TestInstanceConfig, self).setUp()
28
-        self.node = mock.Mock(uuid='1234')
28
+        self.node = mock.Mock(id='1234')
29 29
         self.node.name = 'node name'
30 30
 
31 31
     def _check(self, config, expected_metadata, expected_userdata=None):
@@ -39,24 +39,19 @@ class TestInstanceConfig(testtools.TestCase):
39 39
                       'meta': {}}
40 40
         expected_m.update(expected_metadata)
41 41
 
42
-        with config.build_configdrive_directory(self.node, 'example.com') as d:
43
-            for version in ('2012-08-10', 'latest'):
44
-                with open(os.path.join(d, 'openstack', version,
45
-                                       'meta_data.json')) as fp:
46
-                    metadata = json.load(fp)
47
-
48
-                self.assertEqual(expected_m, metadata)
49
-                user_data = os.path.join(d, 'openstack', version, 'user_data')
50
-                if expected_userdata is None:
51
-                    self.assertFalse(os.path.exists(user_data))
52
-                else:
53
-                    with open(user_data) as fp:
54
-                        lines = list(fp)
55
-                    self.assertEqual('#cloud-config\n', lines[0])
56
-                    user_data = json.loads(''.join(lines[1:]))
57
-                    self.assertEqual(expected_userdata, user_data)
58
-
59
-        self.assertFalse(os.path.exists(d))
42
+        with mock.patch.object(configdrive, 'build', autospec=True) as mb:
43
+            result = config.build_configdrive(self.node, "example.com")
44
+            mb.assert_called_once_with(expected_m, mock.ANY)
45
+            self.assertIs(result, mb.return_value)
46
+            user_data = mb.call_args[0][1]
47
+
48
+        if expected_userdata:
49
+            self.assertIsNotNone(user_data)
50
+            user_data = user_data.decode('utf-8')
51
+            header, user_data = user_data.split('\n', 1)
52
+            self.assertEqual('#cloud-config', header)
53
+            user_data = json.loads(user_data)
54
+        self.assertEqual(expected_userdata, user_data)
60 55
 
61 56
     def test_default(self):
62 57
         config = _config.InstanceConfig()
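For context, the openstack.baremetal.configdrive.build helper mocked here takes the metadata dict and the raw user data and returns the config drive contents as a base64-encoded string. A minimal sketch, assuming openstacksdk >= 0.22; the SSH key and user entry are made-up examples:

    # Sketch only: building a config drive with the openstacksdk helper.
    # Note: producing the ISO requires a genisoimage/mkisofs-style tool on PATH.
    import json

    from openstack.baremetal import configdrive

    metadata = {
        'uuid': '1234',                  # node.id on the SDK node object
        'name': 'node name',
        'hostname': 'example.com',
        'public_keys': 'ssh-rsa AAAA... example',   # hypothetical key
        'launch_index': 0,
        'availability_zone': '',
        'files': [],
        'meta': {},
    }
    user_data = ('#cloud-config\n'
                 + json.dumps({'users': [{'name': 'example-user'}]})).encode('utf-8')

    # Returns the config drive image as a base64-encoded string, ready to be
    # passed on to the deployment call.
    drive = configdrive.build(metadata, user_data)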

+ 6
- 8
metalsmith/test/test_instance.py View File

@@ -23,21 +23,19 @@ class TestInstanceIPAddresses(test_provisioner.Base):
23 23
     def setUp(self):
24 24
         super(TestInstanceIPAddresses, self).setUp()
25 25
         self.instance = _instance.Instance(self.api, self.node)
26
-        self.api.list_node_attached_ports.return_value = [
27
-            mock.Mock(spec=['id'], id=i) for i in ('111', '222')
28
-        ]
26
+        self.api.baremetal.list_node_vifs.return_value = ['111', '222']
29 27
         self.ports = [
30 28
             mock.Mock(spec=['network_id', 'fixed_ips', 'network'],
31 29
                       network_id=n, fixed_ips=[{'ip_address': ip}])
32 30
             for n, ip in [('0', '192.168.0.1'), ('1', '10.0.0.2')]
33 31
         ]
34
-        self.conn.network.get_port.side_effect = self.ports
32
+        self.api.network.get_port.side_effect = self.ports
35 33
         self.nets = [
36 34
             mock.Mock(spec=['id', 'name'], id=str(i)) for i in range(2)
37 35
         ]
38 36
         for n in self.nets:
39 37
             n.name = 'name-%s' % n.id
40
-        self.conn.network.get_network.side_effect = self.nets
38
+        self.api.network.get_network.side_effect = self.nets
41 39
 
42 40
     def test_ip_addresses(self):
43 41
         ips = self.instance.ip_addresses()
@@ -70,7 +68,7 @@ class TestInstanceStates(test_provisioner.Base):
70 68
         self.assertTrue(self.instance.is_healthy)
71 69
 
72 70
     def test_state_deploying_maintenance(self):
73
-        self.node.maintenance = True
71
+        self.node.is_maintenance = True
74 72
         self.node.provision_state = 'wait call-back'
75 73
         self.assertEqual('deploying', self.instance.state)
76 74
         self.assertFalse(self.instance.is_deployed)
@@ -83,7 +81,7 @@ class TestInstanceStates(test_provisioner.Base):
83 81
         self.assertTrue(self.instance.is_healthy)
84 82
 
85 83
     def test_state_maintenance(self):
86
-        self.node.maintenance = True
84
+        self.node.is_maintenance = True
87 85
         self.node.provision_state = 'active'
88 86
         self.assertEqual('maintenance', self.instance.state)
89 87
         self.assertTrue(self.instance.is_deployed)
@@ -112,5 +110,5 @@ class TestInstanceStates(test_provisioner.Base):
112 110
                           'ip_addresses': {'private': ['1.2.3.4']},
113 111
                           'node': {'node': 'dict'},
114 112
                           'state': 'deploying',
115
-                          'uuid': self.node.uuid},
113
+                          'uuid': self.node.id},
116 114
                          self.instance.to_dict())
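The mocks above mirror the new call chain: VIF IDs come from the baremetal proxy, port and network details from the network proxy. A rough sketch of the equivalent direct SDK usage, with placeholder cloud and node names:

    # Sketch only: resolving a node's IP addresses via openstacksdk proxies.
    import openstack

    conn = openstack.connect(cloud='mycloud')
    node = conn.baremetal.get_node('node-0')    # placeholder node

    addresses = {}
    for vif_id in conn.baremetal.list_node_vifs(node):
        port = conn.network.get_port(vif_id)
        network = conn.network.get_network(port.network_id)
        ips = [ip['ip_address'] for ip in port.fixed_ips]
        addresses.setdefault(network.name, []).extend(ips)

    # e.g. {'private': ['10.0.0.2'], 'public': ['192.168.0.1']}
    print(addresses)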

+ 0
- 129
metalsmith/test/test_os_api.py View File

@@ -1,129 +0,0 @@
1
-# Copyright 2018 Red Hat, Inc.
2
-#
3
-# Licensed under the Apache License, Version 2.0 (the "License");
4
-# you may not use this file except in compliance with the License.
5
-# You may obtain a copy of the License at
6
-#
7
-#    http://www.apache.org/licenses/LICENSE-2.0
8
-#
9
-# Unless required by applicable law or agreed to in writing, software
10
-# distributed under the License is distributed on an "AS IS" BASIS,
11
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12
-# implied.
13
-# See the License for the specific language governing permissions and
14
-# limitations under the License.
15
-
16
-import fixtures
17
-import mock
18
-import testtools
19
-
20
-from metalsmith import _instance
21
-from metalsmith import _os_api
22
-
23
-
24
-class TestNodes(testtools.TestCase):
25
-    def setUp(self):
26
-        super(TestNodes, self).setUp()
27
-        self.session = mock.Mock()
28
-        self.ironic_fixture = self.useFixture(
29
-            fixtures.MockPatchObject(_os_api.ir_client, 'get_client',
30
-                                     autospec=True))
31
-        self.cli = self.ironic_fixture.mock.return_value
32
-        self.api = _os_api.API(session=self.session, connection=mock.Mock())
33
-
34
-    def test_get_node_by_uuid(self):
35
-        res = self.api.get_node('uuid1')
36
-        self.cli.node.get.assert_called_once_with('uuid1')
37
-        self.assertIs(res, self.cli.node.get.return_value)
38
-
39
-    def test_get_node_by_hostname(self):
40
-        self.cli.node.list.return_value = [
41
-            mock.Mock(uuid='uuid0', instance_info={}),
42
-            mock.Mock(uuid='uuid1',
43
-                      instance_info={'metalsmith_hostname': 'host1'}),
44
-        ]
45
-        res = self.api.get_node('host1', accept_hostname=True)
46
-        # Loading details
47
-        self.cli.node.get.assert_called_once_with('uuid1')
48
-        self.assertIs(res, self.cli.node.get.return_value)
49
-
50
-    def test_get_node_by_hostname_not_found(self):
51
-        self.cli.node.list.return_value = [
52
-            mock.Mock(uuid='uuid0', instance_info={}),
53
-            mock.Mock(uuid='uuid1',
54
-                      instance_info={'metalsmith_hostname': 'host0'}),
55
-        ]
56
-        res = self.api.get_node('host1', accept_hostname=True)
57
-        # Loading details
58
-        self.cli.node.get.assert_called_once_with('host1')
59
-        self.assertIs(res, self.cli.node.get.return_value)
60
-
61
-    def test_get_node_by_node(self):
62
-        res = self.api.get_node(mock.sentinel.node)
63
-        self.assertIs(res, mock.sentinel.node)
64
-        self.assertFalse(self.cli.node.get.called)
65
-
66
-    def test_get_node_by_node_with_refresh(self):
67
-        res = self.api.get_node(mock.Mock(spec=['uuid'], uuid='uuid1'),
68
-                                refresh=True)
69
-        self.cli.node.get.assert_called_once_with('uuid1')
70
-        self.assertIs(res, self.cli.node.get.return_value)
71
-
72
-    def test_get_node_by_instance(self):
73
-        inst = _instance.Instance(mock.Mock(), mock.Mock())
74
-        res = self.api.get_node(inst)
75
-        self.assertIs(res, inst.node)
76
-        self.assertFalse(self.cli.node.get.called)
77
-
78
-    def test_get_node_by_instance_with_refresh(self):
79
-        inst = _instance.Instance(mock.Mock(),
80
-                                  mock.Mock(spec=['uuid'], uuid='uuid1'))
81
-        res = self.api.get_node(inst, refresh=True)
82
-        self.cli.node.get.assert_called_once_with('uuid1')
83
-        self.assertIs(res, self.cli.node.get.return_value)
84
-
85
-    def test_find_node_by_hostname(self):
86
-        self.cli.node.list.return_value = [
87
-            mock.Mock(uuid='uuid0', instance_info={}),
88
-            mock.Mock(uuid='uuid1',
89
-                      instance_info={'metalsmith_hostname': 'host1'}),
90
-        ]
91
-        res = self.api.find_node_by_hostname('host1')
92
-        # Loading details
93
-        self.cli.node.get.assert_called_once_with('uuid1')
94
-        self.assertIs(res, self.cli.node.get.return_value)
95
-
96
-    def test_find_node_by_hostname_cached(self):
97
-        self.cli.node.list.return_value = [
98
-            mock.Mock(uuid='uuid0', instance_info={}),
99
-            mock.Mock(uuid='uuid1',
100
-                      instance_info={'metalsmith_hostname': 'host1'}),
101
-        ]
102
-        with self.api.cache_node_list_for_lookup():
103
-            res = self.api.find_node_by_hostname('host1')
104
-            self.assertIs(res, self.cli.node.get.return_value)
105
-            self.assertIsNone(self.api.find_node_by_hostname('host2'))
106
-        self.assertEqual(1, self.cli.node.list.call_count)
107
-        # This call is no longer cached
108
-        self.assertIsNone(self.api.find_node_by_hostname('host2'))
109
-        self.assertEqual(2, self.cli.node.list.call_count)
110
-
111
-    def test_find_node_by_hostname_not_found(self):
112
-        self.cli.node.list.return_value = [
113
-            mock.Mock(uuid='uuid0', instance_info={}),
114
-            mock.Mock(uuid='uuid1',
115
-                      instance_info={'metalsmith_hostname': 'host1'}),
116
-        ]
117
-        self.assertIsNone(self.api.find_node_by_hostname('host0'))
118
-        self.assertFalse(self.cli.node.get.called)
119
-
120
-    def test_find_node_by_hostname_duplicate(self):
121
-        self.cli.node.list.return_value = [
122
-            mock.Mock(uuid='uuid0',
123
-                      instance_info={'metalsmith_hostname': 'host1'}),
124
-            mock.Mock(uuid='uuid1',
125
-                      instance_info={'metalsmith_hostname': 'host1'}),
126
-        ]
127
-        self.assertRaisesRegex(RuntimeError, 'More than one node',
128
-                               self.api.find_node_by_hostname, 'host1')
129
-        self.assertFalse(self.cli.node.get.called)
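With the _os_api wrapper and its tests removed, straightforward lookups go through the SDK directly and the metalsmith_hostname handling moves elsewhere. A purely hypothetical stand-in for the removed helper, only to illustrate the same semantics on top of openstacksdk:

    # Sketch only: a hypothetical replacement for the removed hostname lookup.
    import openstack

    def find_node_by_hostname(conn, hostname):
        # Scan detailed node records for the metalsmith hostname hint.
        matches = [
            node for node in conn.baremetal.nodes(details=True)
            if (node.instance_info or {}).get('metalsmith_hostname') == hostname
        ]
        if len(matches) > 1:
            raise RuntimeError('More than one node found for hostname %s'
                               % hostname)
        return matches[0] if matches else None

    conn = openstack.connect(cloud='mycloud')   # placeholder cloud name
    print(find_node_by_hostname(conn, 'host1'))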

+ 666
- 819
metalsmith/test/test_provisioner.py View File

File diff suppressed because it is too large


+ 48
- 30
metalsmith/test/test_scheduler.py View File

@@ -14,6 +14,7 @@
14 14
 # limitations under the License.
15 15
 
16 16
 import mock
17
+from openstack import exceptions as sdk_exc
17 18
 import testtools
18 19
 
19 20
 from metalsmith import _scheduler
@@ -24,14 +25,14 @@ class TestScheduleNode(testtools.TestCase):
24 25
 
25 26
     def setUp(self):
26 27
         super(TestScheduleNode, self).setUp()
27
-        self.nodes = [mock.Mock(spec=['uuid', 'name']) for _ in range(2)]
28
+        self.nodes = [mock.Mock(spec=['id', 'name']) for _ in range(2)]
28 29
         self.reserver = self._reserver(lambda x: x)
29 30
 
30 31
     def _reserver(self, side_effect):
31 32
         reserver = mock.Mock(spec=_scheduler.Reserver)
32 33
         reserver.side_effect = side_effect
33 34
         if isinstance(side_effect, Exception):
34
-            reserver.fail.side_effect = RuntimeError('failed')
35
+            reserver.fail.side_effect = exceptions.ReservationFailed('fail')
35 36
         else:
36 37
             reserver.fail.side_effect = AssertionError('called fail')
37 38
         return reserver
@@ -56,15 +57,16 @@ class TestScheduleNode(testtools.TestCase):
56 57
         self.assertFalse(self.reserver.fail.called)
57 58
 
58 59
     def test_reservation_one_failed(self):
59
-        reserver = self._reserver([Exception("boom"), self.nodes[1]])
60
+        reserver = self._reserver([sdk_exc.SDKException("boom"),
61
+                                   self.nodes[1]])
60 62
         result = _scheduler.schedule_node(self.nodes, [], reserver)
61 63
         self.assertIs(result, self.nodes[1])
62 64
         self.assertEqual([mock.call(n) for n in self.nodes],
63 65
                          reserver.call_args_list)
64 66
 
65 67
     def test_reservation_all_failed(self):
66
-        reserver = self._reserver(Exception("boom"))
67
-        self.assertRaisesRegex(RuntimeError, 'failed',
68
+        reserver = self._reserver(sdk_exc.SDKException("boom"))
69
+        self.assertRaisesRegex(exceptions.ReservationFailed, 'fail',
68 70
                                _scheduler.schedule_node,
69 71
                                self.nodes, [], reserver)
70 72
         self.assertEqual([mock.call(n) for n in self.nodes],
@@ -121,7 +123,7 @@ class TestCapabilitiesFilter(testtools.TestCase):
121 123
 
122 124
     def test_nothing_requested_nothing_found(self):
123 125
         fltr = _scheduler.CapabilitiesFilter({})
124
-        node = mock.Mock(properties={}, spec=['properties', 'name', 'uuid'])
126
+        node = mock.Mock(properties={}, spec=['properties', 'name', 'id'])
125 127
         self.assertTrue(fltr(node))
126 128
 
127 129
     def test_matching_node(self):
@@ -129,7 +131,7 @@ class TestCapabilitiesFilter(testtools.TestCase):
129 131
                                               'foo': 'bar'})
130 132
         node = mock.Mock(
131 133
             properties={'capabilities': 'foo:bar,profile:compute,answer:42'},
132
-            spec=['properties', 'name', 'uuid'])
134
+            spec=['properties', 'name', 'id'])
133 135
         self.assertTrue(fltr(node))
134 136
 
135 137
     def test_not_matching_node(self):
@@ -137,14 +139,14 @@ class TestCapabilitiesFilter(testtools.TestCase):
137 139
                                               'foo': 'bar'})
138 140
         node = mock.Mock(
139 141
             properties={'capabilities': 'foo:bar,answer:42'},
140
-            spec=['properties', 'name', 'uuid'])
142
+            spec=['properties', 'name', 'id'])
141 143
         self.assertFalse(fltr(node))
142 144
 
143 145
     def test_fail_message(self):
144 146
         fltr = _scheduler.CapabilitiesFilter({'profile': 'compute'})
145 147
         node = mock.Mock(
146 148
             properties={'capabilities': 'profile:control'},
147
-            spec=['properties', 'name', 'uuid'])
149
+            spec=['properties', 'name', 'id'])
148 150
         self.assertFalse(fltr(node))
149 151
         self.assertRaisesRegex(exceptions.CapabilitiesNotFound,
150 152
                                'No available nodes found with capabilities '
@@ -156,7 +158,7 @@ class TestCapabilitiesFilter(testtools.TestCase):
156 158
         fltr = _scheduler.CapabilitiesFilter({'profile': 'compute'})
157 159
         for cap in ['foo,profile:control', 42, 'a:b:c']:
158 160
             node = mock.Mock(properties={'capabilities': cap},
159
-                             spec=['properties', 'name', 'uuid'])
161
+                             spec=['properties', 'name', 'id'])
160 162
             self.assertFalse(fltr(node))
161 163
         self.assertRaisesRegex(exceptions.CapabilitiesNotFound,
162 164
                                'No available nodes found with capabilities '
@@ -175,24 +177,24 @@ class TestTraitsFilter(testtools.TestCase):
175 177
 
176 178
     def test_no_traits(self):
177 179
         fltr = _scheduler.TraitsFilter([])
178
-        node = mock.Mock(spec=['name', 'uuid'])
180
+        node = mock.Mock(spec=['name', 'id'])
179 181
         self.assertTrue(fltr(node))
180 182
 
181 183
     def test_ok(self):
182 184
         fltr = _scheduler.TraitsFilter(['tr1', 'tr2'])
183
-        node = mock.Mock(spec=['name', 'uuid', 'traits'],
185
+        node = mock.Mock(spec=['name', 'id', 'traits'],
184 186
                          traits=['tr3', 'tr2', 'tr1'])
185 187
         self.assertTrue(fltr(node))
186 188
 
187 189
     def test_missing_one(self):
188 190
         fltr = _scheduler.TraitsFilter(['tr1', 'tr2'])
189
-        node = mock.Mock(spec=['name', 'uuid', 'traits'],
191
+        node = mock.Mock(spec=['name', 'id', 'traits'],
190 192
                          traits=['tr3', 'tr1'])
191 193
         self.assertFalse(fltr(node))
192 194
 
193 195
     def test_missing_all(self):
194 196
         fltr = _scheduler.TraitsFilter(['tr1', 'tr2'])
195
-        node = mock.Mock(spec=['name', 'uuid', 'traits'], traits=None)
197
+        node = mock.Mock(spec=['name', 'id', 'traits'], traits=None)
196 198
         self.assertFalse(fltr(node))
197 199
 
198 200
 
@@ -200,10 +202,12 @@ class TestIronicReserver(testtools.TestCase):
200 202
 
201 203
     def setUp(self):
202 204
         super(TestIronicReserver, self).setUp()
203
-        self.node = mock.Mock(spec=['uuid', 'name'])
204
-        self.api = mock.Mock(spec=['reserve_node', 'release_node',
205
-                                   'validate_node'])
206
-        self.api.reserve_node.side_effect = lambda node, instance_uuid: node
205
+        self.node = mock.Mock(spec=['id', 'name', 'instance_info'],
206
+                              instance_info={})
207
+        self.api = mock.Mock(spec=['baremetal'])
208
+        self.api.baremetal = mock.Mock(spec=['update_node', 'validate_node'])
209
+        self.api.baremetal.update_node.side_effect = (
210
+            lambda node, **kw: node)
207 211
         self.reserver = _scheduler.IronicReserver(self.api)
208 212
 
209 213
     def test_fail(self):
@@ -213,22 +217,36 @@ class TestIronicReserver(testtools.TestCase):
213 217
 
214 218
     def test_ok(self):
215 219
         self.assertEqual(self.node, self.reserver(self.node))
216
-        self.api.validate_node.assert_called_with(self.node)
217
-        self.api.reserve_node.assert_called_once_with(
218
-            self.node, instance_uuid=self.node.uuid)
220
+        self.api.baremetal.validate_node.assert_called_with(
221
+            self.node, required=('power', 'management'))
222
+        self.api.baremetal.update_node.assert_called_once_with(
223
+            self.node, instance_id=self.node.id, instance_info={})
224
+
225
+    def test_with_instance_info(self):
226
+        self.reserver = _scheduler.IronicReserver(self.api,
227
+                                                  {'cat': 'meow'})
228
+        self.assertEqual(self.node, self.reserver(self.node))
229
+        self.api.baremetal.validate_node.assert_called_with(
230
+            self.node, required=('power', 'management'))
231
+        self.api.baremetal.update_node.assert_called_once_with(
232
+            self.node, instance_id=self.node.id,
233
+            instance_info={'cat': 'meow'})
219 234
 
220 235
     def test_reservation_failed(self):
221
-        self.api.reserve_node.side_effect = RuntimeError('conflict')
222
-        self.assertRaisesRegex(RuntimeError, 'conflict',
236
+        self.api.baremetal.update_node.side_effect = (
237
+            sdk_exc.SDKException('conflict'))
238
+        self.assertRaisesRegex(sdk_exc.SDKException, 'conflict',
223 239
                                self.reserver, self.node)
224
-        self.api.validate_node.assert_called_with(self.node)
225
-        self.api.reserve_node.assert_called_once_with(
226
-            self.node, instance_uuid=self.node.uuid)
240
+        self.api.baremetal.validate_node.assert_called_with(
241
+            self.node, required=('power', 'management'))
242
+        self.api.baremetal.update_node.assert_called_once_with(
243
+            self.node, instance_id=self.node.id, instance_info={})
227 244
 
228 245
     def test_validation_failed(self):
229
-        self.api.validate_node.side_effect = RuntimeError('fail')
246
+        self.api.baremetal.validate_node.side_effect = (
247
+            sdk_exc.SDKException('fail'))
230 248
         self.assertRaisesRegex(exceptions.ValidationFailed, 'fail',
231 249
                                self.reserver, self.node)
232
-        self.api.validate_node.assert_called_once_with(self.node)
233
-        self.assertFalse(self.api.reserve_node.called)
234
-        self.assertFalse(self.api.release_node.called)
250
+        self.api.baremetal.validate_node.assert_called_with(
251
+            self.node, required=('power', 'management'))
252
+        self.assertFalse(self.api.baremetal.update_node.called)
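As the assertions above show, reserving a node now boils down to two SDK calls: validate_node with the power and management interfaces required, then update_node setting instance_id plus any instance_info. A minimal sketch under those assumptions, with placeholder cloud and node names:

    # Sketch only: reserving a node roughly the way the new reserver does.
    import openstack
    from openstack import exceptions as sdk_exc

    conn = openstack.connect(cloud='mycloud')
    node = conn.baremetal.get_node('node-0')    # placeholder node

    try:
        # Fail early if the power/management interfaces are misconfigured.
        conn.baremetal.validate_node(node, required=('power', 'management'))
        # Setting instance_id marks the node as reserved for this deployment.
        node = conn.baremetal.update_node(node, instance_id=node.id,
                                          instance_info={})
    except sdk_exc.SDKException as exc:
        print('Reservation failed: %s' % exc)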

+ 4
- 4
playbooks/integration/exercise.yaml View File

@@ -19,7 +19,7 @@
19 19
   include_role:
20 20
     name: metalsmith_deployment
21 21
   vars:
22
-    metalsmith_extra_args: -vv
22
+    metalsmith_extra_args: --debug
23 23
     metalsmith_resource_class: baremetal
24 24
     metalsmith_instances:
25 25
       - hostname: test
@@ -48,7 +48,7 @@
48 48
   failed_when: instance_via_list.state != 'active' or instance_via_list.node.provision_state != 'active'
49 49
 
50 50
 - name: Show active node information
51
-  command: openstack baremetal node show {{ instance.node.uuid }}
51
+  command: openstack baremetal node show {{ instance.node.id }}
52 52
 
53 53
 - name: Get IP address
54 54
   set_fact:
@@ -69,7 +69,7 @@
69 69
   command: metalsmith --debug undeploy --wait 900 test
70 70
 
71 71
 - name: Get the current status of the deployed node
72
-  command: openstack baremetal node show {{ instance.node.uuid }} -f json
72
+  command: openstack baremetal node show {{ instance.node.id }} -f json
73 73
   register: undeployed_node_result
74 74
 
75 75
 - name: Parse node state
@@ -87,7 +87,7 @@
87 87
   when: undeployed_node.extra != {}
88 88
 
89 89
 - name: Get attached VIFs for the node
90
-  command: openstack baremetal node vif list {{ instance.node.uuid }} -f value -c ID
90
+  command: openstack baremetal node vif list {{ instance.node.id }} -f value -c ID
91 91
   register: vif_list_output
92 92
 
93 93
 - name: Check that no VIFs are still attached

+ 1
- 2
requirements.txt View File

@@ -2,7 +2,6 @@
2 2
 # of appearance. Changing the order has an impact on the overall integration
3 3
 # process, which may cause wedges in the gate later.
4 4
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
5
-openstacksdk>=0.17.0 # Apache-2.0
6
-python-ironicclient>=1.14.0 # Apache-2.0
5
+openstacksdk>=0.22.0 # Apache-2.0
7 6
 requests>=2.18.4 # Apache-2.0
8 7
 six>=1.10.0 # MIT
