
Added driver and updated bits for Fuel 8.0

Funs Kessen, 3 years ago
parent commit 17410d5ca6

deployment_scripts/puppet/modules/cinder_datera_driver/files/8.0/datera.py (+530, -0)

@@ -0,0 +1,530 @@
+# Copyright 2015 Datera
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_log import versionutils
+from oslo_utils import excutils
+from oslo_utils import units
+import requests
+import six
+
+from cinder import context
+from cinder import exception
+from cinder.i18n import _, _LE, _LW, _LI
+from cinder import utils
+from cinder.volume.drivers.san import san
+from cinder.volume import qos_specs
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+
+d_opts = [
+    cfg.StrOpt('datera_api_token',
+               default=None,
+               help='DEPRECATED: This will be removed in the Liberty release. '
+                    'Use san_login and san_password instead. This directly '
+                    'sets the Datera API token.'),
+    cfg.StrOpt('datera_api_port',
+               default='7717',
+               help='Datera API port.'),
+    cfg.StrOpt('datera_api_version',
+               default='2',
+               help='Datera API version.'),
+    cfg.StrOpt('datera_num_replicas',
+               default='3',
+               help='Number of replicas to create of an inode.')
+]
+
+
+CONF = cfg.CONF
+CONF.import_opt('driver_client_cert_key', 'cinder.volume.driver')
+CONF.import_opt('driver_client_cert', 'cinder.volume.driver')
+CONF.import_opt('driver_use_ssl', 'cinder.volume.driver')
+CONF.register_opts(d_opts)
+
+DEFAULT_STORAGE_NAME = 'storage-1'
+DEFAULT_VOLUME_NAME = 'volume-1'
+
+
+def _authenticated(func):
+    """Ensure the driver is authenticated to make a request.
+
+    In do_setup() we fetch an auth token and store it. If that expires when
+    we make an API request, we'll fetch a new one.
+    """
+
+    def func_wrapper(self, *args, **kwargs):
+        try:
+            return func(self, *args, **kwargs)
+        except exception.NotAuthorized:
+            # Prevent recursion loop. After the self arg is the
+            # resource_type arg from _issue_api_request(). If attempt to
+            # login failed, we should just give up.
+            if args[0] == 'login':
+                raise
+
+            # Token might've expired, get a new one, try again.
+            self._login()
+            return func(self, *args, **kwargs)
+    return func_wrapper
+
+
+class DateraDriver(san.SanISCSIDriver):
+
+    """The OpenStack Datera Driver
+
+    Version history:
+        1.0 - Initial driver
+        1.1 - Look for lun-0 instead of lun-1.
+        2.0 - Update For Datera API v2
+    """
+    VERSION = '2.0'
+
+    def __init__(self, *args, **kwargs):
+        super(DateraDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(d_opts)
+        self.num_replicas = self.configuration.datera_num_replicas
+        self.username = self.configuration.san_login
+        self.password = self.configuration.san_password
+        self.auth_token = None
+        self.cluster_stats = {}
+
+    def _login(self):
+        """Use the san_login and san_password to set self.auth_token."""
+        body = {
+            'name': self.username,
+            'password': self.password
+        }
+
+        # Unset token now, otherwise potential expired token will be sent
+        # along to be used for authorization when trying to login.
+        self.auth_token = None
+
+        try:
+            LOG.debug('Getting Datera auth token.')
+            results = self._issue_api_request('login', 'put', body=body,
+                                              sensitive=True)
+            self.auth_token = results['key']
+            self.configuration.datera_api_token = results['key']
+        except exception.NotAuthorized:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE('Logging into the Datera cluster failed. Please '
+                              'check your username and password set in the '
+                              'cinder.conf and start the cinder-volume '
+                              'service again.'))
+
+    def _get_lunid(self):
+        return 0
+
+    def do_setup(self, context):
+        # If any of the deprecated options are set, we'll warn the operator to
+        # use the new authentication method.
+        DEPRECATED_OPTS = [
+            self.configuration.driver_client_cert_key,
+            self.configuration.driver_client_cert,
+            self.configuration.datera_api_token
+        ]
+
+        if any(DEPRECATED_OPTS):
+            msg = _LW("Client cert verification and datera_api_token are "
+                      "deprecated in the Datera driver, and will be removed "
+                      "in the Liberty release. Please set the san_login and "
+                      "san_password in your cinder.conf instead.")
+            versionutils.report_deprecated_feature(LOG, msg)
+            return
+
+        # If we can't authenticate through the old and new method, just fail
+        # now.
+        if not all([self.username, self.password]):
+            msg = _LE("san_login and/or san_password is not set for Datera "
+                      "driver in the cinder.conf. Set this information and "
+                      "start the cinder-volume service again.")
+            LOG.error(msg)
+            raise exception.InvalidInput(msg)
+
+        self._login()
+
+    @utils.retry(exception.VolumeDriverException, retries=3)
+    def _wait_for_resource(self, id, resource_type):
+        result = self._issue_api_request(resource_type, 'get', id)
+        if result['storage_instances'][DEFAULT_STORAGE_NAME]['volumes'][
+                DEFAULT_VOLUME_NAME]['op_state'] == 'available':
+            return
+        else:
+            raise exception.VolumeDriverException(
+                message=_('Resource not ready.'))
+
+    def _create_resource(self, resource, resource_type, body):
+        type_id = resource.get('volume_type_id', None)
+
+        result = None
+        try:
+            result = self._issue_api_request(resource_type, 'post', body=body)
+        except exception.Invalid:
+            if resource_type == 'volumes' and type_id:
+                LOG.error(_LE("Creation request failed. Please verify the "
+                              "extra-specs set for your volume types are "
+                              "entered correctly."))
+            raise
+        else:
+            # Handle updating QOS Policies
+            if resource_type == 'app_instances':
+                url = 'app_instances/{}/storage_instances/{}/volumes/{' + \
+                      '}/performance_policy'
+                url = url.format(
+                    resource['id'],
+                    DEFAULT_STORAGE_NAME,
+                    DEFAULT_VOLUME_NAME)
+                if type_id is not None:
+                    policies = self._get_policies_by_volume_type(type_id)
+                    if policies:
+                        self._issue_api_request(url, 'post', body=policies)
+            if result['storage_instances'][DEFAULT_STORAGE_NAME]['volumes'][
+                    DEFAULT_VOLUME_NAME]['op_state'] == 'available':
+                return
+            self._wait_for_resource(resource['id'], resource_type)
+
+    def create_volume(self, volume):
+        """Create a logical volume."""
+        # Generate App Instance, Storage Instance and Volume
+        # Volume ID will be used as the App Instance Name
+        # Storage Instance and Volumes will have standard names
+        app_params = \
+            {
+                'create_mode': "openstack",
+                'uuid': str(volume['id']),
+                'name': str(volume['id']),
+                'access_control_mode': 'allow_all',
+                'storage_instances': {
+                    DEFAULT_STORAGE_NAME: {
+                        'name': DEFAULT_STORAGE_NAME,
+                        'volumes': {
+                            DEFAULT_VOLUME_NAME: {
+                                'name': DEFAULT_VOLUME_NAME,
+                                'size': volume['size'],
+                                'replica_count': int(self.num_replicas),
+                                'snapshot_policies': {
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        self._create_resource(volume, 'app_instances', body=app_params)
+
+    def extend_volume(self, volume, new_size):
+        # Offline App Instance, if necessary
+        reonline = False
+        app_inst = self._issue_api_request(
+            "app_instances/{}".format(volume['id']))
+        if app_inst['admin_state'] == 'online':
+            reonline = True
+            self.detach_volume(None, volume)
+        # Change Volume Size
+        app_inst = volume['id']
+        storage_inst = DEFAULT_STORAGE_NAME
+        data = {
+            'size': new_size
+        }
+        self._issue_api_request(
+            'app_instances/{}/storage_instances/{}/volumes/{}'.format(
+                app_inst, storage_inst, DEFAULT_VOLUME_NAME),
+            method='put', body=data)
+        # Online Volume, if it was online before
+        if reonline:
+            self.create_export(None, volume)
+
+    def create_cloned_volume(self, volume, src_vref):
+        clone_src_template = "/app_instances/{}/storage_instances/{" + \
+                             "}/volumes/{}"
+        src = clone_src_template.format(src_vref['id'], DEFAULT_STORAGE_NAME,
+                                        DEFAULT_VOLUME_NAME)
+        data = {
+            'create_mode': 'openstack',
+            'name': str(volume['id']),
+            'uuid': str(volume['id']),
+            'clone_src': src,
+            'access_control_mode': 'allow_all'
+        }
+        self._issue_api_request('app_instances', 'post', body=data)
+
+    def delete_volume(self, volume):
+        self.detach_volume(None, volume)
+        app_inst = volume['id']
+        try:
+            self._issue_api_request('app_instances/{}'.format(app_inst),
+                                    method='delete')
+        except exception.NotFound:
+            msg = _LI("Tried to delete volume %s, but it was not found in the "
+                      "Datera cluster. Continuing with delete.")
+            LOG.info(msg, volume['id'])
+
+    def ensure_export(self, context, volume, connector):
+        """Gets the associated account, retrieves CHAP info and updates."""
+        return self.create_export(context, volume, connector)
+
+    def create_export(self, context, volume, connector):
+        url = "app_instances/{}".format(volume['id'])
+        data = {
+            'admin_state': 'online'
+        }
+        app_inst = self._issue_api_request(url, method='put', body=data)
+        storage_instance = app_inst['storage_instances'][
+            DEFAULT_STORAGE_NAME]
+
+        portal = storage_instance['access']['ips'][0] + ':3260'
+        iqn = storage_instance['access']['iqn']
+
+        # Portal, IQN, LUNID
+        provider_location = '%s %s %s' % (portal, iqn, self._get_lunid())
+        return {'provider_location': provider_location}
+
+    def detach_volume(self, context, volume, attachment=None):
+        url = "app_instances/{}".format(volume['id'])
+        data = {
+            'admin_state': 'offline',
+            'force': True
+        }
+        try:
+            self._issue_api_request(url, method='put', body=data)
+        except exception.NotFound:
+            msg = _("Tried to detach volume %s, but it was not found in the "
+                    "Datera cluster. Continuing with detach.")
+            LOG.info(msg, volume['id'])
+
+    def create_snapshot(self, snapshot):
+        url_template = 'app_instances/{}/storage_instances/{}/volumes/{' \
+                       '}/snapshots'
+        url = url_template.format(snapshot['volume_id'],
+                                  DEFAULT_STORAGE_NAME,
+                                  DEFAULT_VOLUME_NAME)
+
+        snap_params = {
+            'uuid': snapshot['id'],
+        }
+        self._issue_api_request(url, method='post', body=snap_params)
+
+    def delete_snapshot(self, snapshot):
+        snap_temp = 'app_instances/{}/storage_instances/{}/volumes/{' \
+                    '}/snapshots'
+        snapu = snap_temp.format(snapshot['volume_id'],
+                                 DEFAULT_STORAGE_NAME,
+                                 DEFAULT_VOLUME_NAME)
+
+        snapshots = self._issue_api_request(snapu, method='get')
+
+        try:
+            for ts, snap in snapshots.viewitems():
+                if snap['uuid'] == snapshot['id']:
+                    url_template = snapu + '/{}'
+                    url = url_template.format(ts)
+                    self._issue_api_request(url, method='delete')
+                    break
+            else:
+                raise exception.NotFound
+        except exception.NotFound:
+            msg = _LI("Tried to delete snapshot %s, but was not found in "
342
+                      "Datera cluster. Continuing with delete.")
+            LOG.info(msg, snapshot['id'])
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        snap_temp = 'app_instances/{}/storage_instances/{}/volumes/{' \
+                    '}/snapshots'
+        snapu = snap_temp.format(snapshot['volume_id'],
+                                 DEFAULT_STORAGE_NAME,
+                                 DEFAULT_VOLUME_NAME)
+
+        snapshots = self._issue_api_request(snapu, method='get')
+        for ts, snap in snapshots.viewitems():
+            if snap['uuid'] == snapshot['id']:
+                found_ts = ts
+                break
+        else:
+            raise exception.NotFound
+
+        src = '/app_instances/{}/storage_instances/{}/volumes/{' \
+            '}/snapshots/{}'.format(
+                snapshot['volume_id'],
+                DEFAULT_STORAGE_NAME,
+                DEFAULT_VOLUME_NAME,
+                found_ts)
+        app_params = \
+            {
+                'create_mode': 'openstack',
+                'uuid': str(volume['id']),
+                'name': str(volume['id']),
+                'clone_src': src,
+                'access_control_mode': 'allow_all'
+            }
+        self._issue_api_request(
+            'app_instances',
+            method='post',
+            body=app_params)
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats.
+
+        If 'refresh' is True, run update first.
+        The name is a bit misleading as
+        the majority of the data here is cluster
+        data.
+        """
+        if refresh or not self.cluster_stats:
+            try:
+                self._update_cluster_stats()
+            except exception.DateraAPIException:
+                LOG.error(_LE('Failed to get updated stats from Datera '
+                              'cluster.'))
+        return self.cluster_stats
+
+    def _update_cluster_stats(self):
+        LOG.debug(_LI("Updating cluster stats info."))
+
+        results = self._issue_api_request('system')
+
+        if 'uuid' not in results:
+            LOG.error(_LE('Failed to get updated stats from Datera Cluster.'))
+
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        stats = {
+            'volume_backend_name': backend_name or 'Datera',
+            'vendor_name': 'Datera',
+            'driver_version': self.VERSION,
+            'storage_protocol': 'iSCSI',
+            'total_capacity_gb': int(results['total_capacity']) / units.Gi,
+            'free_capacity_gb': int(results['available_capacity']) / units.Gi,
+            'reserved_percentage': 0,
+        }
+
+        self.cluster_stats = stats
+
+    def _get_policies_by_volume_type(self, type_id):
+        """Get extra_specs and qos_specs of a volume_type.
+
+        This fetches the scoped keys from the volume type. Anything set from
+         qos_specs will override key/values set from extra_specs.
+        """
+        ctxt = context.get_admin_context()
+        volume_type = volume_types.get_volume_type(ctxt, type_id)
+        specs = volume_type.get('extra_specs')
+
+        policies = {}
+        for key, value in specs.items():
+            if ':' in key:
+                fields = key.split(':')
+                key = fields[1]
+                policies[key] = value
+
+        qos_specs_id = volume_type.get('qos_specs_id')
+        if qos_specs_id is not None:
+            qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
+            if qos_kvs:
+                policies.update(qos_kvs)
+        return policies
+
+    @_authenticated
+    def _issue_api_request(self, resource_type, method='get', resource=None,
+                           body=None, action=None, sensitive=False):
+        """All API requests to Datera cluster go through this method.
+
+        :param resource_type: the type of the resource
+        :param method: the request verb
+        :param resource: the identifier of the resource
+        :param body: a dict with options for the action_type
+        :param action: the action to perform
+        :returns: a dict of the response from the Datera cluster
+        """
+        host = self.configuration.san_ip
+        port = self.configuration.datera_api_port
+        api_token = self.configuration.datera_api_token
+        api_version = self.configuration.datera_api_version
+
+        payload = json.dumps(body, ensure_ascii=False)
+        payload.encode('utf-8')
+
+        if not sensitive:
+            LOG.debug(_LI("Payload for Datera API call: {}".format(payload)))
+
+        header = {'Content-Type': 'application/json; charset=utf-8'}
+
+        protocol = 'http'
+        if self.configuration.driver_use_ssl:
+            protocol = 'https'
+
+        # TODO(thingee): Auth method through Auth-Token is deprecated. Remove
+        # this and client cert verification stuff in the Liberty release.
+        if api_token:
+            header['Auth-Token'] = api_token
+
+        client_cert = self.configuration.driver_client_cert
+        client_cert_key = self.configuration.driver_client_cert_key
+        cert_data = None
+
+        if client_cert:
+            protocol = 'https'
+            cert_data = (client_cert, client_cert_key)
+
+        connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port,
+                                                   api_version, resource_type)
+
+        if resource is not None:
+            connection_string += '/%s' % resource
+        if action is not None:
+            connection_string += '/%s' % action
+
+        LOG.debug(_LI("Endpoint for Datera API call: {"
+                      "}".format(connection_string)))
+        try:
+            response = getattr(requests, method)(connection_string,
+                                                 data=payload, headers=header,
+                                                 verify=False, cert=cert_data)
+        except requests.exceptions.RequestException as ex:
+            msg = _LE(
+                'Failed to make a request to Datera cluster endpoint due '
+                'to the following reason: %s') % six.text_type(
+                ex.message)
+            LOG.error(msg)
+            raise exception.DateraAPIException(msg)
+
+        data = response.json()
+        if not sensitive:
+            LOG.debug(_LI("Results of Datera API call: {}".format(data)))
+
+        if not response.ok:
+            LOG.debug(_(response.url))
+            LOG.debug(_(payload))
+            LOG.debug(_(vars(response)))
+            if response.status_code == 404:
+                raise exception.NotFound(data['message'])
+            elif response.status_code in [403, 401]:
+                raise exception.NotAuthorized()
+            elif response.status_code == 400 and 'invalidArgs' in data:
+                msg = _('Bad request sent to Datera cluster: '
+                        'Invalid args: %(args)s | %(message)s') % {
+                            'args': data['invalidArgs']['invalidAttrs'],
+                            'message': data['message']}
+                raise exception.Invalid(msg)
+            else:
+                msg = _LE('Request to Datera cluster returned bad status:'
+                          ' %(status)s | %(reason)s') % {
+                              'status': response.status_code,
+                              'reason': response.reason}
+                LOG.error(msg)
+                raise exception.DateraAPIException(msg)
+
+        return data
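
A minimal standalone sketch of the scoped-key handling used by _get_policies_by_volume_type and _create_resource above, i.e. how volume-type settings become the performance_policy body. The example keys such as 'DF:read_iops_max' are hypothetical; only the colon-prefix stripping and the "qos_specs overrides extra_specs" behaviour mirror the driver code.

# Sketch only: sample keys and values below are hypothetical, the merging
# behaviour is taken from _get_policies_by_volume_type in the driver above.
def policies_from_specs(extra_specs, qos_kvs=None):
    policies = {}
    for key, value in extra_specs.items():
        if ':' in key:
            # scoped key, e.g. 'DF:read_iops_max' -> 'read_iops_max'
            policies[key.split(':')[1]] = value
    if qos_kvs:
        # anything set via qos_specs wins over the extra_specs value
        policies.update(qos_kvs)
    return policies

# Example: unscoped keys are ignored, qos_specs wins on conflicts.
print(policies_from_specs(
    {'DF:read_iops_max': '5000', 'volume_backend_name': 'datera'},
    {'read_iops_max': '2000'}))
# -> {'read_iops_max': '2000'}
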

environment_config.yaml (+2, -0)

@@ -1,4 +1,6 @@
 attributes:
+  metadata:
+    group: 'storage'
   multibackend:
     value: false
     label: 'Multibackend enabled'

metadata.yaml (+7, -2)

@@ -1,8 +1,8 @@
 name: fuel-plugin-datera-cinder
 title: Fuel Datera driver for Cinder
-version: '0.1.43'
+version: '0.1.51'
 description: Installs and enables the Datera driver in Cinder
-fuel_version: ['7.0']
+fuel_version: ['7.0', '8.0']
 licenses: ['Apache License Version 2.0']
 authors: [ 'Funs Kessen <funs@barred.org>' ]
 homepage: 'https://github.com/stackforge/fuel-plugin-datera-cinder'
@@ -19,6 +19,11 @@ releases:
     mode: ['ha', 'multinode']
     deployment_scripts_path: deployment_scripts/
     repository_path: repositories/centos
+  - os: ubuntu
+    version: liberty-8.0
+    mode: ['ha']
+    deployment_scripts_path: deployment_scripts/
+    repository_path: repositories/ubuntu
 
 # Version of plugin package
 package_version: '3.0.0'
