Browse Source

Update fuel plugin repo for Kaminario

Update fuel plugin repo for Kaminario with latest
Kaminario driver files and configuration files

Change-Id: I060393bc66f20a9dbc497524423088cbf9ccf8f8
Co-Authored-By: Chaithanya Kopparthi<chaithanyak@biarca.com>
Pradip Rawat 2 years ago
parent
commit
be18f78d2d

+ 51
- 3
README.md View File

@@ -1,4 +1,52 @@
1
-fuel-plugin-cinder-kaminario
2
-============
1
+Cinder Kaminario plugin for Fuel
2
+=============================
3 3
 
4
-Plugin description
4
+Overview
5
+--------
6
+
7
+The plugin configures Kaminario backend for Cinder using multibackend feature.
8
+
9
+
10
+The plugin support Kaminario K2 All-Flash arrays.
11
+
12
+This repo contains all necessary files to build Cinder Kaminario Fuel plugin.
13
+
14
+
15
+Requirements
16
+------------
17
+
18
+| Requirement                                                                              | Version/Comment                                                        |
19
+|------------------------------------------------------------------------------------------|------------------------------------------------------------------------|
20
+| Mirantis Openstack compatibility                                                         | 9.0                                                                    |
21
+| Kaminario K2 All-Flash array is reachable via one of the Mirantis OpenStack networks     |                                                                        |
22
+
23
+How to build plugin:
24
+
25
+- Install fuel plugin builder (fpb)
26
+- Clone plugin repo 
27
+  
28
+    ```markdown
29
+    # git clone https://github.com/openstack/fuel-plugin-cinder-kaminario.git
30
+    ```
31
+- Build fuel plugin
32
+
33
+    ```markdown
34
+    # cd fuel-plugin-cinder-kaminario/
35
+    # fpb --build .
36
+    ```
37
+
38
+- Install the plugin
39
+
40
+    ```markdown
41
+    # fuel plugins --install cinder_kaminario-1.0-1.0.0-1.noarch.rpm
42
+    ```
43
+    
44
+- Verify that the plugin is installed correctly
45
+    
46
+    ```markdown
47
+    # fuel plugins
48
+    ```
49
+
50
+    | id | name                                           | version | package_version |
51
+    | ---|------------------------------------------------|---------|---------------- |
52
+    | 1  | cinder_kaminario-1.0-1.0.0-1.noarch.rpm        | 1.0.0   | 4.0.0           |

+ 1
- 0
deployment_scripts/puppet/manifests/cinder_controller_config.pp View File

@@ -0,0 +1 @@
1
+include kaminario::controller_config

+ 0
- 8
deployment_scripts/puppet/manifests/cinder_parser.pp View File

@@ -1,8 +0,0 @@
1
-ini_setting { 'parser':
2
-    ensure  => present,
3
-    path    => '/etc/puppet/puppet.conf',
4
-    section => 'main',
5
-    setting => 'parser',
6
-    value   => 'future',
7
-  }
8
-

+ 10
- 174
deployment_scripts/puppet/modules/kaminario/files/exception.py View File

@@ -149,10 +149,6 @@ class GlanceConnectionFailed(CinderException):
149 149
     message = _("Connection to glance failed: %(reason)s")
150 150
 
151 151
 
152
-class ProgrammingError(CinderException):
153
-    message = _('Programming error in Cinder: %(reason)s')
154
-
155
-
156 152
 class NotAuthorized(CinderException):
157 153
     message = _("Not authorized.")
158 154
     code = 403
@@ -204,10 +200,6 @@ class InvalidVolumeType(Invalid):
204 200
     message = _("Invalid volume type: %(reason)s")
205 201
 
206 202
 
207
-class InvalidGroupType(Invalid):
208
-    message = _("Invalid group type: %(reason)s")
209
-
210
-
211 203
 class InvalidVolume(Invalid):
212 204
     message = _("Invalid volume: %(reason)s")
213 205
 
@@ -247,10 +239,6 @@ class DeviceUnavailable(Invalid):
247 239
     message = _("The device in the path %(path)s is unavailable: %(reason)s")
248 240
 
249 241
 
250
-class SnapshotUnavailable(VolumeBackendAPIException):
251
-    message = _("The snapshot is unavailable: %(data)s")
252
-
253
-
254 242
 class InvalidUUID(Invalid):
255 243
     message = _("Expected a uuid but received %(uuid)s.")
256 244
 
@@ -269,10 +257,6 @@ class InvalidGlobalAPIVersion(Invalid):
269 257
                 "is %(min_ver)s and maximum is %(max_ver)s.")
270 258
 
271 259
 
272
-class MissingRequired(Invalid):
273
-    message = _("Missing required element '%(element)s' in request body.")
274
-
275
-
276 260
 class APIException(CinderException):
277 261
     message = _("Error while requesting %(service)s API.")
278 262
 
@@ -292,10 +276,6 @@ class RPCTimeout(CinderException):
292 276
     code = 502
293 277
 
294 278
 
295
-class Duplicate(CinderException):
296
-    pass
297
-
298
-
299 279
 class NotFound(CinderException):
300 280
     message = _("Resource could not be found.")
301 281
     code = 404
@@ -306,10 +286,6 @@ class VolumeNotFound(NotFound):
306 286
     message = _("Volume %(volume_id)s could not be found.")
307 287
 
308 288
 
309
-class MessageNotFound(NotFound):
310
-    message = _("Message %(message_id)s could not be found.")
311
-
312
-
313 289
 class VolumeAttachmentNotFound(NotFound):
314 290
     message = _("Volume attachment could not be found with "
315 291
                 "filter: %(filter)s .")
@@ -362,30 +338,6 @@ class VolumeTypeInUse(CinderException):
362 338
                 "volumes present with the type.")
363 339
 
364 340
 
365
-class GroupTypeNotFound(NotFound):
366
-    message = _("Group type %(group_type_id)s could not be found.")
367
-
368
-
369
-class GroupTypeNotFoundByName(GroupTypeNotFound):
370
-    message = _("Group type with name %(group_type_name)s "
371
-                "could not be found.")
372
-
373
-
374
-class GroupTypeAccessNotFound(NotFound):
375
-    message = _("Group type access not found for %(group_type_id)s / "
376
-                "%(project_id)s combination.")
377
-
378
-
379
-class GroupTypeSpecsNotFound(NotFound):
380
-    message = _("Group Type %(group_type_id)s has no specs with "
381
-                "key %(group_specs_key)s.")
382
-
383
-
384
-class GroupTypeInUse(CinderException):
385
-    message = _("Group Type %(group_type_id)s deletion is not allowed with "
386
-                "groups present with the type.")
387
-
388
-
389 341
 class SnapshotNotFound(NotFound):
390 342
     message = _("Snapshot %(snapshot_id)s could not be found.")
391 343
 
@@ -430,32 +382,6 @@ class ServiceTooOld(Invalid):
430 382
     message = _("Service is too old to fulfil this request.")
431 383
 
432 384
 
433
-class WorkerNotFound(NotFound):
434
-    message = _("Worker with %s could not be found.")
435
-
436
-    def __init__(self, message=None, **kwargs):
437
-        keys_list = ('{0}=%({0})s'.format(key) for key in kwargs)
438
-        placeholder = ', '.join(keys_list)
439
-        self.message = self.message % placeholder
440
-        super(WorkerNotFound, self).__init__(message, **kwargs)
441
-
442
-
443
-class WorkerExists(Duplicate):
444
-    message = _("Worker for %(type)s %(id)s already exists.")
445
-
446
-
447
-class ClusterNotFound(NotFound):
448
-    message = _('Cluster %(id)s could not be found.')
449
-
450
-
451
-class ClusterHasHosts(Invalid):
452
-    message = _("Cluster %(id)s still has hosts.")
453
-
454
-
455
-class ClusterExists(Duplicate):
456
-    message = _("Cluster %(name)s already exists.")
457
-
458
-
459 385
 class HostNotFound(NotFound):
460 386
     message = _("Host %(host)s could not be found.")
461 387
 
@@ -514,6 +440,10 @@ class FileNotFound(NotFound):
514 440
     message = _("File %(file_path)s could not be found.")
515 441
 
516 442
 
443
+class Duplicate(CinderException):
444
+    pass
445
+
446
+
517 447
 class VolumeTypeExists(Duplicate):
518 448
     message = _("Volume Type %(id)s already exists.")
519 449
 
@@ -531,23 +461,6 @@ class VolumeTypeEncryptionNotFound(NotFound):
531 461
     message = _("Volume type encryption for type %(type_id)s does not exist.")
532 462
 
533 463
 
534
-class GroupTypeExists(Duplicate):
535
-    message = _("Group Type %(id)s already exists.")
536
-
537
-
538
-class GroupTypeAccessExists(Duplicate):
539
-    message = _("Group type access for %(group_type_id)s / "
540
-                "%(project_id)s combination already exists.")
541
-
542
-
543
-class GroupTypeEncryptionExists(Invalid):
544
-    message = _("Group type encryption for type %(type_id)s already exists.")
545
-
546
-
547
-class GroupTypeEncryptionNotFound(NotFound):
548
-    message = _("Group type encryption for type %(type_id)s does not exist.")
549
-
550
-
551 464
 class MalformedRequestBody(CinderException):
552 465
     message = _("Malformed message body: %(reason)s")
553 466
 
@@ -615,18 +528,10 @@ class SnapshotLimitExceeded(QuotaError):
615 528
     message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded")
616 529
 
617 530
 
618
-class UnexpectedOverQuota(QuotaError):
619
-    message = _("Unexpected over quota on %(name)s.")
620
-
621
-
622 531
 class BackupLimitExceeded(QuotaError):
623 532
     message = _("Maximum number of backups allowed (%(allowed)d) exceeded")
624 533
 
625 534
 
626
-class ImageLimitExceeded(QuotaError):
627
-    message = _("Image quota exceeded")
628
-
629
-
630 535
 class DuplicateSfVolumeNames(Duplicate):
631 536
     message = _("Detected more than one volume with name %(vol_name)s")
632 537
 
@@ -640,15 +545,6 @@ class VolumeTypeUpdateFailed(CinderException):
640 545
     message = _("Cannot update volume_type %(id)s")
641 546
 
642 547
 
643
-class GroupTypeCreateFailed(CinderException):
644
-    message = _("Cannot create group_type with "
645
-                "name %(name)s and specs %(group_specs)s")
646
-
647
-
648
-class GroupTypeUpdateFailed(CinderException):
649
-    message = _("Cannot update group_type %(id)s")
650
-
651
-
652 548
 class UnknownCmd(VolumeDriverException):
653 549
     message = _("Unknown or unsupported command %(cmd)s")
654 550
 
@@ -875,29 +771,16 @@ class VolumeGroupCreationFailed(CinderException):
875 771
     message = _('Failed to create Volume Group: %(vg_name)s')
876 772
 
877 773
 
878
-class VolumeNotDeactivated(CinderException):
879
-    message = _('Volume %(name)s was not deactivated in time.')
880
-
881
-
882 774
 class VolumeDeviceNotFound(CinderException):
883 775
     message = _('Volume device not found at %(device)s.')
884 776
 
885 777
 
886 778
 # Driver specific exceptions
887
-# Dell
888
-class DellDriverRetryableException(VolumeBackendAPIException):
889
-    message = _("Retryable Dell Exception encountered")
890
-
891
-
892 779
 # Pure Storage
893 780
 class PureDriverException(VolumeDriverException):
894 781
     message = _("Pure Storage Cinder driver failure: %(reason)s")
895 782
 
896 783
 
897
-class PureRetryableException(VolumeBackendAPIException):
898
-    message = _("Retryable Pure Storage Exception encountered")
899
-
900
-
901 784
 # SolidFire
902 785
 class SolidFireAPIException(VolumeBackendAPIException):
903 786
     message = _("Bad response from SolidFire API")
@@ -1111,59 +994,26 @@ class XIODriverException(VolumeDriverException):
1111 994
 
1112 995
 
1113 996
 # Violin Memory drivers
1114
-class ViolinInvalidBackendConfig(VolumeDriverException):
997
+class ViolinInvalidBackendConfig(CinderException):
1115 998
     message = _("Volume backend config is invalid: %(reason)s")
1116 999
 
1117 1000
 
1118
-class ViolinRequestRetryTimeout(VolumeDriverException):
1001
+class ViolinRequestRetryTimeout(CinderException):
1119 1002
     message = _("Backend service retry timeout hit: %(timeout)s sec")
1120 1003
 
1121 1004
 
1122
-class ViolinBackendErr(VolumeBackendAPIException):
1005
+class ViolinBackendErr(CinderException):
1123 1006
     message = _("Backend reports: %(message)s")
1124 1007
 
1125 1008
 
1126
-class ViolinBackendErrExists(VolumeBackendAPIException):
1009
+class ViolinBackendErrExists(CinderException):
1127 1010
     message = _("Backend reports: item already exists")
1128 1011
 
1129 1012
 
1130
-class ViolinBackendErrNotFound(NotFound):
1013
+class ViolinBackendErrNotFound(CinderException):
1131 1014
     message = _("Backend reports: item not found")
1132 1015
 
1133 1016
 
1134
-class ViolinResourceNotFound(NotFound):
1135
-    message = _("Backend reports: %(message)s")
1136
-
1137
-
1138
-class BadHTTPResponseStatus(VolumeDriverException):
1139
-    message = _("Bad HTTP response status %(status)s")
1140
-
1141
-
1142
-# ZADARA STORAGE VPSA driver exception
1143
-class ZadaraServerCreateFailure(VolumeDriverException):
1144
-    message = _("Unable to create server object for initiator %(name)s")
1145
-
1146
-
1147
-class ZadaraServerNotFound(NotFound):
1148
-    message = _("Unable to find server object for initiator %(name)s")
1149
-
1150
-
1151
-class ZadaraVPSANoActiveController(VolumeDriverException):
1152
-    message = _("Unable to find any active VPSA controller")
1153
-
1154
-
1155
-class ZadaraAttachmentsNotFound(NotFound):
1156
-    message = _("Failed to retrieve attachments for volume %(name)s")
1157
-
1158
-
1159
-class ZadaraInvalidAttachmentInfo(Invalid):
1160
-    message = _("Invalid attachment info for volume %(name)s: %(reason)s")
1161
-
1162
-
1163
-class ZadaraVolumeNotFound(VolumeDriverException):
1164
-    message = _("%(reason)s")
1165
-
1166
-
1167 1017
 # ZFSSA NFS driver exception.
1168 1018
 class WebDAVClientError(CinderException):
1169 1019
     message = _("The WebDAV request failed. Reason: %(msg)s, "
@@ -1217,8 +1067,7 @@ class DotHillNotTargetPortal(CinderException):
1217 1067
 
1218 1068
 # Sheepdog
1219 1069
 class SheepdogError(VolumeBackendAPIException):
1220
-    message = _("An error has occurred in SheepdogDriver. "
1221
-                "(Reason: %(reason)s)")
1070
+    message = _("An error has occured in SheepdogDriver. (Reason: %(reason)s)")
1222 1071
 
1223 1072
 
1224 1073
 class SheepdogCmdError(SheepdogError):
@@ -1277,16 +1126,3 @@ class KaminarioCinderDriverException(VolumeDriverException):
1277 1126
 
1278 1127
 class KaminarioRetryableException(VolumeDriverException):
1279 1128
     message = _("Kaminario retryable exception: %(reason)s")
1280
-
1281
-
1282
-# Synology driver
1283
-class SynoAPIHTTPError(CinderException):
1284
-    message = _("HTTP exit code: [%(code)s]")
1285
-
1286
-
1287
-class SynoAuthError(CinderException):
1288
-    message = _("Synology driver authentication failed: %(reason)s.")
1289
-
1290
-
1291
-class SynoLUNNotExist(CinderException):
1292
-    message = _("LUN not found by UUID: %(uuid)s.")

+ 300
- 38
deployment_scripts/puppet/modules/kaminario/files/kaminario_common.py View File

@@ -17,6 +17,7 @@
17 17
 import math
18 18
 import re
19 19
 import threading
20
+import time
20 21
 
21 22
 import eventlet
22 23
 from oslo_config import cfg
@@ -30,6 +31,7 @@ import six
30 31
 import cinder
31 32
 from cinder import exception
32 33
 from cinder.i18n import _, _LE, _LW, _LI
34
+from cinder import objects
33 35
 from cinder.objects import fields
34 36
 from cinder import utils
35 37
 from cinder.volume.drivers.san import san
@@ -38,8 +40,9 @@ from cinder.volume import utils as vol_utils
38 40
 krest = importutils.try_import("krest")
39 41
 
40 42
 K2_MIN_VERSION = '2.2.0'
41
-K2_LOCK_PREFIX = 'Kaminario'
43
+K2_LOCK_NAME = 'Kaminario'
42 44
 MAX_K2_RETRY = 5
45
+K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER
43 46
 LOG = logging.getLogger(__name__)
44 47
 
45 48
 kaminario1_opts = [
@@ -139,8 +142,6 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
139 142
         self.configuration.append_config_values(kaminario2_opts)
140 143
         self.replica = None
141 144
         self._protocol = None
142
-        k2_lock_sfx = self.configuration.safe_get('volume_backend_name') or ''
143
-        self.k2_lock_name = "%s-%s" % (K2_LOCK_PREFIX, k2_lock_sfx)
144 145
 
145 146
     def check_for_setup_error(self):
146 147
         if krest is None:
@@ -290,6 +291,62 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
290 291
             raise exception.KaminarioCinderDriverException(
291 292
                 reason=six.text_type(ex.message))
292 293
 
294
+    @kaminario_logger
295
+    def _create_failover_volume_replica(self, volume, vg_name, vol_name):
296
+        """Volume replica creation in K2 needs session and remote volume.
297
+
298
+        - create a session
299
+        - create a volume in the volume group
300
+
301
+        """
302
+        session_name = self.get_session_name(volume.id)
303
+        rsession_name = self.get_rep_name(session_name)
304
+
305
+        rvg_name = self.get_rep_name(vg_name)
306
+        rvol_name = self.get_rep_name(vol_name)
307
+        rvg = self.target.search("volume_groups", name=rvg_name).hits[0]
308
+        rvol = self.target.search("volumes", name=rvol_name).hits[0]
309
+        k2peer_rs = self.target.search("replication/peer_k2arrays",
310
+                                       mgmt_host=self.configuration.san_ip)
311
+        if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0:
312
+            k2peer = k2peer_rs.hits[0]
313
+        else:
314
+            msg = _("Unable to find K2peer in source K2:")
315
+            LOG.error(msg)
316
+            raise exception.KaminarioCinderDriverException(reason=msg)
317
+        try:
318
+            LOG.debug("Creating source session with name: %(sname)s and "
319
+                      " target session name: %(tname)s",
320
+                      {'sname': rsession_name, 'tname': session_name})
321
+            tgt_ssn = self.target.new("replication/sessions")
322
+            tgt_ssn.replication_peer_k2array = k2peer
323
+            tgt_ssn.auto_configure_peer_volumes = "False"
324
+            tgt_ssn.local_volume_group = rvg
325
+            tgt_ssn.replication_peer_volume_group_name = vg_name
326
+            tgt_ssn.remote_replication_session_name = session_name
327
+            tgt_ssn.name = rsession_name
328
+            tgt_ssn.rpo = self.replica.rpo
329
+            tgt_ssn.save()
330
+            LOG.debug("Creating remote volume with name: %s",
331
+                      rvol_name)
332
+            self.target.new("replication/peer_volumes",
333
+                            local_volume=rvol,
334
+                            name=vol_name,
335
+                            replication_session=tgt_ssn).save()
336
+            tgt_ssn.state = "in_sync"
337
+            tgt_ssn.save()
338
+        except Exception as ex:
339
+            LOG.exception(_LE("Replication for the volume %s has "
340
+                              "failed."), rvol_name)
341
+            self._delete_by_ref(self.target, "replication/sessions",
342
+                                rsession_name, 'session')
343
+            self._delete_by_ref(self.client, "replication/sessions",
344
+                                session_name, 'remote session')
345
+            self._delete_by_ref(self.client, "volumes", vol_name, "volume")
346
+            self._delete_by_ref(self.client, "volume_groups", vg_name, "vg")
347
+            raise exception.KaminarioCinderDriverException(
348
+                reason=six.text_type(ex.message))
349
+
293 350
     def _delete_by_ref(self, device, url, name, msg):
294 351
         rs = device.search(url, name=name)
295 352
         for result in rs.hits:
@@ -313,27 +370,183 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
313 370
     def failover_host(self, context, volumes, secondary_id=None):
314 371
         """Failover to replication target."""
315 372
         volume_updates = []
373
+        back_end_ip = None
374
+        svc_host = vol_utils.extract_host(self.host, 'backend')
375
+        service = objects.Service.get_by_args(context, svc_host,
376
+                                              'cinder-volume')
377
+
316 378
         if secondary_id and secondary_id != self.replica.backend_id:
317 379
             LOG.error(_LE("Kaminario driver received failover_host "
318 380
                           "request, But backend is non replicated device"))
319 381
             raise exception.UnableToFailOver(reason=_("Failover requested "
320 382
                                                       "on non replicated "
321 383
                                                       "backend."))
322
-        for v in volumes:
323
-            vol_name = self.get_volume_name(v['id'])
324
-            rv = self.get_rep_name(vol_name)
325
-            if self.target.search("volumes", name=rv).total:
326
-                self._failover_volume(v)
327
-                volume_updates.append(
328
-                    {'volume_id': v['id'],
329
-                     'updates':
330
-                     {'replication_status':
331
-                      fields.ReplicationStatus.FAILED_OVER}})
332
-            else:
333
-                volume_updates.append({'volume_id': v['id'],
334
-                                       'updates': {'status': 'error', }})
335 384
 
336
-        return self.replica.backend_id, volume_updates
385
+        if (service.active_backend_id and
386
+                service.active_backend_id != self.configuration.san_ip):
387
+            self.snap_updates = []
388
+            rep_volumes = []
389
+            # update status for non-replicated primary volumes
390
+            for v in volumes:
391
+                vol_name = self.get_volume_name(v['id'])
392
+                vol = self.client.search("volumes", name=vol_name)
393
+                if v.replication_status != K2_REP_FAILED_OVER and vol.total:
394
+                    status = 'available'
395
+                    if v.volume_attachment:
396
+                        map_rs = self.client.search("mappings",
397
+                                                    volume=vol.hits[0])
398
+                        status = 'in-use'
399
+                        if map_rs.total:
400
+                            map_rs.hits[0].delete()
401
+                    volume_updates.append({'volume_id': v['id'],
402
+                                           'updates':
403
+                                           {'status': status}})
404
+                else:
405
+                    rep_volumes.append(v)
406
+
407
+            # In-sync from secondaray array to primary array
408
+            for v in rep_volumes:
409
+                vol_name = self.get_volume_name(v['id'])
410
+                vol = self.client.search("volumes", name=vol_name)
411
+                rvol_name = self.get_rep_name(vol_name)
412
+                rvol = self.target.search("volumes", name=rvol_name)
413
+                session_name = self.get_session_name(v['id'])
414
+                rsession_name = self.get_rep_name(session_name)
415
+                ssn = self.target.search("replication/sessions",
416
+                                         name=rsession_name)
417
+                if ssn.total:
418
+                    tgt_ssn = ssn.hits[0]
419
+                ssn = self.client.search("replication/sessions",
420
+                                         name=session_name)
421
+                if ssn.total:
422
+                    src_ssn = ssn.hits[0]
423
+
424
+                if (tgt_ssn.state == 'failed_over' and
425
+                   tgt_ssn.current_role == 'target' and vol.total and src_ssn):
426
+                    map_rs = self.client.search("mappings", volume=vol.hits[0])
427
+                    if map_rs.total:
428
+                        map_rs.hits[0].delete()
429
+                    tgt_ssn.state = 'in_sync'
430
+                    tgt_ssn.save()
431
+                    self._check_for_status(src_ssn, 'in_sync')
432
+                if (rvol.total and src_ssn.state == 'in_sync' and
433
+                   src_ssn.current_role == 'target'):
434
+                    gen_no = self._create_volume_replica_user_snap(self.target,
435
+                                                                   tgt_ssn)
436
+                    self.snap_updates.append({'tgt_ssn': tgt_ssn,
437
+                                              'gno': gen_no,
438
+                                              'stime': time.time()})
439
+                LOG.debug("The target session: %s state is "
440
+                          "changed to in sync", rsession_name)
441
+
442
+            self._is_user_snap_sync_finished()
443
+
444
+            # Delete secondary volume mappings and create snapshot
445
+            for v in rep_volumes:
446
+                vol_name = self.get_volume_name(v['id'])
447
+                vol = self.client.search("volumes", name=vol_name)
448
+                rvol_name = self.get_rep_name(vol_name)
449
+                rvol = self.target.search("volumes", name=rvol_name)
450
+                session_name = self.get_session_name(v['id'])
451
+                rsession_name = self.get_rep_name(session_name)
452
+                ssn = self.target.search("replication/sessions",
453
+                                         name=rsession_name)
454
+                if ssn.total:
455
+                    tgt_ssn = ssn.hits[0]
456
+                ssn = self.client.search("replication/sessions",
457
+                                         name=session_name)
458
+                if ssn.total:
459
+                    src_ssn = ssn.hits[0]
460
+                if (rvol.total and src_ssn.state == 'in_sync' and
461
+                   src_ssn.current_role == 'target'):
462
+                    map_rs = self.target.search("mappings",
463
+                                                volume=rvol.hits[0])
464
+                    if map_rs.total:
465
+                        map_rs.hits[0].delete()
466
+                    gen_no = self._create_volume_replica_user_snap(self.target,
467
+                                                                   tgt_ssn)
468
+                    self.snap_updates.append({'tgt_ssn': tgt_ssn,
469
+                                              'gno': gen_no,
470
+                                              'stime': time.time()})
471
+            self._is_user_snap_sync_finished()
472
+            # changing source sessions to failed-over
473
+            for v in rep_volumes:
474
+                vol_name = self.get_volume_name(v['id'])
475
+                vol = self.client.search("volumes", name=vol_name)
476
+                rvol_name = self.get_rep_name(vol_name)
477
+                rvol = self.target.search("volumes", name=rvol_name)
478
+                session_name = self.get_session_name(v['id'])
479
+                rsession_name = self.get_rep_name(session_name)
480
+                ssn = self.target.search("replication/sessions",
481
+                                         name=rsession_name)
482
+                if ssn.total:
483
+                    tgt_ssn = ssn.hits[0]
484
+                ssn = self.client.search("replication/sessions",
485
+                                         name=session_name)
486
+                if ssn.total:
487
+                    src_ssn = ssn.hits[0]
488
+                if (rvol.total and src_ssn.state == 'in_sync' and
489
+                   src_ssn.current_role == 'target'):
490
+                    src_ssn.state = 'failed_over'
491
+                    src_ssn.save()
492
+                    self._check_for_status(tgt_ssn, 'suspended')
493
+                    LOG.debug("The target session: %s state is "
494
+                              "changed to failed over", session_name)
495
+
496
+                    src_ssn.state = 'in_sync'
497
+                    src_ssn.save()
498
+                    LOG.debug("The target session: %s state is "
499
+                              "changed to in sync", session_name)
500
+                    rep_status = fields.ReplicationStatus.DISABLED
501
+                    volume_updates.append({'volume_id': v['id'],
502
+                                           'updates':
503
+                                          {'replication_status': rep_status}})
504
+
505
+            back_end_ip = self.configuration.san_ip
506
+        else:
507
+            """Failover to replication target."""
508
+            for v in volumes:
509
+                vol_name = self.get_volume_name(v['id'])
510
+                rv = self.get_rep_name(vol_name)
511
+                if self.target.search("volumes", name=rv).total:
512
+                    self._failover_volume(v)
513
+                    volume_updates.append(
514
+                        {'volume_id': v['id'],
515
+                         'updates':
516
+                         {'replication_status': K2_REP_FAILED_OVER}})
517
+                else:
518
+                    volume_updates.append({'volume_id': v['id'],
519
+                                           'updates': {'status': 'error', }})
520
+            back_end_ip = self.replica.backend_id
521
+        return back_end_ip, volume_updates
522
+
523
+    def _create_volume_replica_user_snap(self, k2, sess):
524
+        snap = k2.new("snapshots")
525
+        snap.is_application_consistent = "False"
526
+        snap.replication_session = sess
527
+        snap.save()
528
+        return snap.generation_number
529
+
530
+    def _is_user_snap_sync_finished(self):
531
+        # waiting for user snapshot to be synced
532
+        while len(self.snap_updates) > 0:
533
+            for l in self.snap_updates:
534
+                sess = l.get('tgt_ssn')
535
+                gno = l.get('gno')
536
+                stime = l.get('stime')
537
+                sess.refresh()
538
+                if (sess.generation_number == gno and
539
+                   sess.current_snapshot_progress == 100
540
+                   and sess.current_snapshot_id is None):
541
+                    if time.time() - stime > 300:
542
+                        gen_no = self._create_volume_replica_user_snap(
543
+                            self.target,
544
+                            sess)
545
+                        self.snap_updates.append({'tgt_ssn': sess,
546
+                                                  'gno': gen_no,
547
+                                                  'stime': time.time()})
548
+                    self.snap_updates.remove(l)
549
+                eventlet.sleep(1)
337 550
 
338 551
     @kaminario_logger
339 552
     def create_volume_from_snapshot(self, volume, snapshot):
@@ -385,9 +598,14 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
385 598
                                   snapshot.volume.size * units.Ki,
386 599
                                   self.configuration.volume_dd_blocksize,
387 600
                                   sparse=True)
601
+            self._kaminario_disconnect_volume(src_attach_info,
602
+                                              dest_attach_info)
388 603
             self.terminate_connection(volume, properties)
389 604
             self.terminate_connection(cview, properties)
605
+            cview.delete()
390 606
         except Exception as ex:
607
+            self._kaminario_disconnect_volume(src_attach_info,
608
+                                              dest_attach_info)
391 609
             self.terminate_connection(cview, properties)
392 610
             self.terminate_connection(volume, properties)
393 611
             cview.delete()
@@ -410,6 +628,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
410 628
         src_name = self.get_volume_name(src_vref.id)
411 629
         src_vol = self.client.search("volumes", name=src_name)
412 630
         src_map = self.client.search("mappings", volume=src_vol)
631
+        src_attach_info = dest_attach_info = None
413 632
         if src_map.total != 0:
414 633
             msg = _("K2 driver does not support clone of a attached volume. "
415 634
                     "To get this done, create a snapshot from the attached "
@@ -428,10 +647,13 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
428 647
                                   src_vref.size * units.Ki,
429 648
                                   self.configuration.volume_dd_blocksize,
430 649
                                   sparse=True)
431
-
650
+            self._kaminario_disconnect_volume(src_attach_info,
651
+                                              dest_attach_info)
432 652
             self.terminate_connection(volume, properties)
433 653
             self.terminate_connection(src_vref, properties)
434 654
         except Exception as ex:
655
+            self._kaminario_disconnect_volume(src_attach_info,
656
+                                              dest_attach_info)
435 657
             self.terminate_connection(src_vref, properties)
436 658
             self.terminate_connection(volume, properties)
437 659
             self.delete_volume(volume)
@@ -499,6 +721,26 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
499 721
         self._delete_by_ref(self.target, "volume_groups",
500 722
                             rvg_name, "remote vg")
501 723
 
724
+    @kaminario_logger
725
+    def _delete_failover_volume_replica(self, volume, vg_name, vol_name):
726
+        rvg_name = self.get_rep_name(vg_name)
727
+        rvol_name = self.get_rep_name(vol_name)
728
+        session_name = self.get_session_name(volume.id)
729
+        rsession_name = self.get_rep_name(session_name)
730
+        tgt_ssn = self.target.search('replication/sessions',
731
+                                     name=rsession_name).hits[0]
732
+        tgt_ssn.state = 'idle'
733
+        tgt_ssn.save()
734
+        tgt_ssn.delete()
735
+
736
+        LOG.debug("Searching and deleting snapshots for target volume group "
737
+                  "and target volume: %(vol)s, %(vg)s in K2.",
738
+                  {'vol': rvol_name, 'vg': rvg_name})
739
+        rvg = self.target.search('volume_groups', name=rvg_name).hits
740
+        rsnaps = self.target.search('snapshots', volume_group=rvg).hits
741
+        for s in rsnaps:
742
+            s.delete()
743
+
502 744
     @kaminario_logger
503 745
     def _check_for_status(self, obj, status):
504 746
         while obj.state != status:
@@ -664,9 +906,8 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
664 906
     @kaminario_logger
665 907
     def _get_volume_object(self, volume):
666 908
         vol_name = self.get_volume_name(volume.id)
667
-        if volume.replication_status == 'failed-over':
909
+        if volume.replication_status == K2_REP_FAILED_OVER:
668 910
             vol_name = self.get_rep_name(vol_name)
669
-            self.client = self.target
670 911
         LOG.debug("Searching volume : %s in K2.", vol_name)
671 912
         vol_rs = self.client.search("volumes", name=vol_name)
672 913
         if not hasattr(vol_rs, 'hits') or vol_rs.total == 0:
@@ -696,9 +937,8 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
696 937
         # Get volume object
697 938
         if type(volume).__name__ != 'RestObject':
698 939
             vol_name = self.get_volume_name(volume.id)
699
-            if volume.replication_status == 'failed-over':
940
+            if volume.replication_status == K2_REP_FAILED_OVER:
700 941
                 vol_name = self.get_rep_name(vol_name)
701
-                self.client = self.target
702 942
             LOG.debug("Searching volume: %s in K2.", vol_name)
703 943
             volume_rs = self.client.search("volumes", name=vol_name)
704 944
             if hasattr(volume_rs, "hits") and volume_rs.total != 0:
@@ -779,12 +1019,13 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
779 1019
         return replica
780 1020
 
781 1021
     def _get_replica_status(self, vg_name):
782
-        vg = self.client.search("volume_groups", name=vg_name).hits[0]
783
-        if self.client.search("replication/sessions",
784
-                              local_volume_group=vg).total != 0:
785
-            return True
786
-        else:
787
-            return False
1022
+        vg_rs = self.client.search("volume_groups", name=vg_name)
1023
+        if vg_rs.total:
1024
+            vg = vg_rs.hits[0]
1025
+            if self.client.search("replication/sessions",
1026
+                                  local_volume_group=vg).total:
1027
+                return True
1028
+        return False
788 1029
 
789 1030
     def manage_existing(self, volume, existing_ref):
790 1031
         vol_name = existing_ref['source-name']
@@ -853,6 +1094,11 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
853 1094
     def retype(self, ctxt, volume, new_type, diff, host):
854 1095
         old_type = volume.get('volume_type')
855 1096
         vg_name = self.get_volume_group_name(volume.id)
1097
+        vol_name = self.get_volume_name(volume.id)
1098
+        vol_rs = self.client.search("volumes", name=vol_name)
1099
+        if vol_rs.total:
1100
+            vol = vol_rs.hits[0]
1101
+            vmap = self.client.search("mappings", volume=vol).total
856 1102
         old_rep_type = self._get_replica_status(vg_name)
857 1103
         new_rep_type = self._get_is_replica(new_type)
858 1104
         new_prov_type = self._get_is_dedup(new_type)
@@ -867,8 +1113,11 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
867 1113
                 self._delete_replication(volume)
868 1114
                 return True
869 1115
         elif not new_rep_type and not old_rep_type:
870
-            LOG.debug("Use '--migration-policy on-demand' to change 'dedup "
871
-                      "without replication'<->'nodedup without replication'.")
1116
+            msg = ("Use '--migration-policy on-demand' to change 'dedup "
1117
+                   "without replication'<->'nodedup without replication'.")
1118
+            if vol_rs.total and vmap:
1119
+                msg = "Unattach volume and {0}".format(msg)
1120
+            LOG.debug(msg)
872 1121
             return False
873 1122
         else:
874 1123
             LOG.error(_LE('Change from type1: %(type1)s to type2: %(type2)s '
@@ -879,15 +1128,28 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
879 1128
     def _add_replication(self, volume):
880 1129
         vg_name = self.get_volume_group_name(volume.id)
881 1130
         vol_name = self.get_volume_name(volume.id)
882
-        LOG.debug("Searching volume group with name: %(name)s",
883
-                  {'name': vg_name})
884
-        vg = self.client.search("volume_groups", name=vg_name).hits[0]
885
-        LOG.debug("Searching volume with name: %(name)s",
886
-                  {'name': vol_name})
887
-        vol = self.client.search("volumes", name=vol_name).hits[0]
888
-        self._create_volume_replica(volume, vg, vol, self.replica.rpo)
1131
+        if volume.replication_status == K2_REP_FAILED_OVER:
1132
+            self._create_failover_volume_replica(volume, vg_name, vol_name)
1133
+        else:
1134
+            LOG.debug("Searching volume group with name: %(name)s",
1135
+                      {'name': vg_name})
1136
+            vg = self.client.search("volume_groups", name=vg_name).hits[0]
1137
+            LOG.debug("Searching volume with name: %(name)s",
1138
+                      {'name': vol_name})
1139
+            vol = self.client.search("volumes", name=vol_name).hits[0]
1140
+            self._create_volume_replica(volume, vg, vol, self.replica.rpo)
889 1141
 
890 1142
     def _delete_replication(self, volume):
891 1143
         vg_name = self.get_volume_group_name(volume.id)
892 1144
         vol_name = self.get_volume_name(volume.id)
893
-        self._delete_volume_replica(volume, vg_name, vol_name)
1145
+        if volume.replication_status == K2_REP_FAILED_OVER:
1146
+            self._delete_failover_volume_replica(volume, vg_name, vol_name)
1147
+        else:
1148
+            self._delete_volume_replica(volume, vg_name, vol_name)
1149
+
1150
+    def _kaminario_disconnect_volume(self, *attach_info):
1151
+        for info in attach_info:
1152
+            if (info and info.get('connector') and
1153
+                    info.get('conn', {}).get('data') and info.get('device')):
1154
+                info['connector'].disconnect_volume(info['conn']['data'],
1155
+                                                    info['device'])

+ 24
- 12
deployment_scripts/puppet/modules/kaminario/files/kaminario_fc.py View File

@@ -17,13 +17,14 @@ import six
17 17
 
18 18
 from oslo_log import log as logging
19 19
 
20
-from cinder import coordination
21 20
 from cinder import exception
21
+from cinder import utils
22 22
 from cinder.i18n import _, _LE
23 23
 from cinder.objects import fields
24 24
 from cinder.volume.drivers.kaminario import kaminario_common as common
25 25
 from cinder.zonemanager import utils as fczm_utils
26 26
 
27
+K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER
27 28
 LOG = logging.getLogger(__name__)
28 29
 kaminario_logger = common.kaminario_logger
29 30
 
@@ -32,13 +33,10 @@ class KaminarioFCDriver(common.KaminarioCinderDriver):
32 33
     """Kaminario K2 FC Volume Driver.
33 34
 
34 35
     Version history:
35
-        1.0 - Initial driver
36
-        1.1 - Added manage/unmanage and extra-specs support for nodedup
37
-        1.2 - Added replication support
38
-        1.3 - Added retype support
36
+        1.0.2.0 - Initial driver
39 37
     """
40 38
 
41
-    VERSION = '1.3'
39
+    VERSION = '1.0.2.0'
42 40
 
43 41
     # ThirdPartySystems wiki page name
44 42
     CI_WIKI_NAME = "Kaminario_K2_CI"
@@ -51,7 +49,7 @@ class KaminarioFCDriver(common.KaminarioCinderDriver):
51 49
 
52 50
     @fczm_utils.AddFCZone
53 51
     @kaminario_logger
54
-    @coordination.synchronized('{self.k2_lock_name}')
52
+    @utils.synchronized(common.K2_LOCK_NAME, external=True)
55 53
     def initialize_connection(self, volume, connector):
56 54
         """Attach K2 volume to host."""
57 55
         # Check wwpns in host connector.
@@ -59,6 +57,12 @@ class KaminarioFCDriver(common.KaminarioCinderDriver):
59 57
             msg = _("No wwpns found in host connector.")
60 58
             LOG.error(msg)
61 59
             raise exception.KaminarioCinderDriverException(reason=msg)
60
+        # To support replication failback
61
+        temp_client = None
62
+        if (hasattr(volume, 'replication_status') and
63
+                volume.replication_status == K2_REP_FAILED_OVER):
64
+            temp_client = self.client
65
+            self.client = self.target
62 66
         # Get target wwpns.
63 67
         target_wwpns = self.get_target_info(volume)
64 68
         # Map volume.
@@ -66,6 +70,9 @@ class KaminarioFCDriver(common.KaminarioCinderDriver):
66 70
         # Create initiator-target mapping.
67 71
         target_wwpns, init_target_map = self._build_initiator_target_map(
68 72
             connector, target_wwpns)
73
+        # To support replication failback
74
+        if temp_client:
75
+            self.client = temp_client
69 76
         # Return target volume information.
70 77
         return {'driver_volume_type': 'fibre_channel',
71 78
                 'data': {"target_discovered": True,
@@ -75,8 +82,14 @@ class KaminarioFCDriver(common.KaminarioCinderDriver):
75 82
 
76 83
     @fczm_utils.RemoveFCZone
77 84
     @kaminario_logger
78
-    @coordination.synchronized('{self.k2_lock_name}')
85
+    @utils.synchronized(common.K2_LOCK_NAME, external=True)
79 86
     def terminate_connection(self, volume, connector, **kwargs):
87
+        # To support replication failback
88
+        temp_client = None
89
+        if (hasattr(volume, 'replication_status') and
90
+                volume.replication_status == K2_REP_FAILED_OVER):
91
+            temp_client = self.client
92
+            self.client = self.target
80 93
         super(KaminarioFCDriver, self).terminate_connection(volume, connector)
81 94
         properties = {"driver_volume_type": "fibre_channel", "data": {}}
82 95
         host_name = self.get_initiator_host_name(connector)
@@ -90,14 +103,13 @@ class KaminarioFCDriver(common.KaminarioCinderDriver):
90 103
                 connector, target_wwpns)
91 104
             properties["data"] = {"target_wwn": target_wwpns,
92 105
                                   "initiator_target_map": init_target_map}
106
+        # To support replication failback
107
+        if temp_client:
108
+            self.client = temp_client
93 109
         return properties
94 110
 
95 111
     @kaminario_logger
96 112
     def get_target_info(self, volume):
97
-        rep_status = fields.ReplicationStatus.FAILED_OVER
98
-        if (hasattr(volume, 'replication_status') and
99
-                volume.replication_status == rep_status):
100
-            self.client = self.target
101 113
         LOG.debug("Searching target wwpns in K2.")
102 114
         fc_ports_rs = self.client.search("system/fc_ports")
103 115
         target_wwpns = []

+ 24
- 14
deployment_scripts/puppet/modules/kaminario/files/kaminario_iscsi.py View File

@@ -17,30 +17,26 @@ import six
17 17
 
18 18
 from oslo_log import log as logging
19 19
 
20
-from cinder import coordination
21 20
 from cinder import exception
21
+from cinder import utils
22 22
 from cinder.i18n import _, _LE
23
-from cinder import interface
24 23
 from cinder.objects import fields
25 24
 from cinder.volume.drivers.kaminario import kaminario_common as common
26 25
 
27 26
 ISCSI_TCP_PORT = "3260"
27
+K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER
28 28
 LOG = logging.getLogger(__name__)
29 29
 kaminario_logger = common.kaminario_logger
30 30
 
31 31
 
32
-@interface.volumedriver
33 32
 class KaminarioISCSIDriver(common.KaminarioCinderDriver):
34 33
     """Kaminario K2 iSCSI Volume Driver.
35 34
 
36 35
     Version history:
37
-        1.0 - Initial driver
38
-        1.1 - Added manage/unmanage and extra-specs support for nodedup
39
-        1.2 - Added replication support
40
-        1.3 - Added retype support
36
+        1.0.2.0 - Initial driver
41 37
     """
42 38
 
43
-    VERSION = '1.3'
39
+    VERSION = '1.0.2.0'
44 40
 
45 41
     # ThirdPartySystems wiki page name
46 42
     CI_WIKI_NAME = "Kaminario_K2_CI"
@@ -51,13 +47,22 @@ class KaminarioISCSIDriver(common.KaminarioCinderDriver):
51 47
         self._protocol = 'iSCSI'
52 48
 
53 49
     @kaminario_logger
54
-    @coordination.synchronized('{self.k2_lock_name}')
50
+    @utils.synchronized(common.K2_LOCK_NAME, external=True)
55 51
     def initialize_connection(self, volume, connector):
56 52
         """Attach K2 volume to host."""
53
+        # To support replication failback
54
+        temp_client = None
55
+        if (hasattr(volume, 'replication_status') and
56
+                volume.replication_status == K2_REP_FAILED_OVER):
57
+            temp_client = self.client
58
+            self.client = self.target
57 59
         # Get target_portal and target iqn.
58 60
         iscsi_portal, target_iqn = self.get_target_info(volume)
59 61
         # Map volume.
60 62
         lun = self.k2_initialize_connection(volume, connector)
63
+        # To support replication failback
64
+        if temp_client:
65
+            self.client = temp_client
61 66
         # Return target volume information.
62 67
         return {"driver_volume_type": "iscsi",
63 68
                 "data": {"target_iqn": target_iqn,
@@ -66,17 +71,22 @@ class KaminarioISCSIDriver(common.KaminarioCinderDriver):
66 71
                          "target_discovered": True}}
67 72
 
68 73
     @kaminario_logger
69
-    @coordination.synchronized('{self.k2_lock_name}')
74
+    @utils.synchronized(common.K2_LOCK_NAME, external=True)
70 75
     def terminate_connection(self, volume, connector, **kwargs):
76
+        # To support replication failback
77
+        temp_client = None
78
+        if (hasattr(volume, 'replication_status') and
79
+                volume.replication_status == K2_REP_FAILED_OVER):
80
+            temp_client = self.client
81
+            self.client = self.target
71 82
         super(KaminarioISCSIDriver, self).terminate_connection(volume,
72 83
                                                                connector)
84
+        # To support replication failback
85
+        if temp_client:
86
+            self.client = temp_client
73 87
 
74 88
     @kaminario_logger
75 89
     def get_target_info(self, volume):
76
-        rep_status = fields.ReplicationStatus.FAILED_OVER
77
-        if (hasattr(volume, 'replication_status') and
78
-                volume.replication_status == rep_status):
79
-            self.client = self.target
80 90
         LOG.debug("Searching first iscsi port ip without wan in K2.")
81 91
         iscsi_ip_rs = self.client.search("system/net_ips", wan_port="")
82 92
         iscsi_ip = target_iqn = None

+ 12
- 12
deployment_scripts/puppet/modules/kaminario/lib/facter/default_volume_type.rb View File

@@ -1,18 +1,18 @@
1 1
 require 'hiera'
2 2
 require 'facter'
3
-hiera = Hiera.new(:config => '/etc/hiera.yaml')
4
-cinder_kaminario=hiera.lookup('cinder_kaminario', {}, {},'nil')
5
-5.times do |i|
6
-  add_backend = "add_backend_" + i.to_s
7
-  type_name  = "type_name_" + i.to_s
8
-    if cinder_kaminario[add_backend] == true
9
-      default_type = cinder_kaminario[type_name]
10
-  end
11
-end
12
-
13 3
 Facter.add("default_volume_type") do
14
-  setcode do
15
-    default_type
4
+  hiera = Hiera.new(:config => '/etc/hiera.yaml')
5
+  cinder_kaminario=hiera.lookup('cinder_kaminario', {}, {},'nil')
6
+  5.times do |i|
7
+    default_type = "default_type_" + i.to_s
8
+    type_name  = "type_name_" + i.to_s
9
+      if cinder_kaminario[default_type] == true
10
+        default_type = cinder_kaminario[type_name]
11
+        setcode do
12
+          default_type
13
+        end
14
+        break
15
+      end
16 16
   end
17 17
 end
18 18
 

+ 38
- 0
deployment_scripts/puppet/modules/kaminario/manifests/controller_config.pp View File

@@ -0,0 +1,38 @@
1
+class kaminario::controller_config{
2
+
3
+$config_file='/etc/cinder/cinder.conf'
4
+$plugin_settings = hiera('cinder_kaminario')
5
+
6
+  if $plugin_settings['scheduler_default_filters'] != ''
7
+  {
8
+  ini_subsetting {"scheduler_default_filters":
9
+    ensure               => present,
10
+    section              => 'DEFAULT',
11
+    key_val_separator    => '=',
12
+    path                 => $config_file,
13
+    setting              => 'scheduler_default_filters',
14
+    subsetting           => $plugin_settings['scheduler_default_filters'],
15
+    subsetting_separator => ',',
16
+  }
17
+  }
18
+  if $plugin_settings['scheduler_default_weighers'] != ''
19
+  {
20
+  cinder_config {
21
+    "DEFAULT/scheduler_default_weighers"       : value => $plugin_settings['scheduler_default_weighers'];
22
+  }
23
+  }
24
+  if $plugin_settings['rpc_response_timeout'] != ''
25
+  {
26
+  cinder_config {
27
+    "DEFAULT/rpc_response_timeout"             : value => $plugin_settings['rpc_response_timeout'];
28
+  }
29
+  }
30
+
31
+  cinder_config {
32
+    "DEFAULT/default_volume_type"             : value => $default_volume_type
33
+  }~> Exec[cinder_api]
34
+
35
+exec {'cinder_api':
36
+  command => '/usr/sbin/service cinder-api restart',}
37
+
38
+}

+ 57
- 66
deployment_scripts/puppet/modules/kaminario/manifests/init.pp View File

@@ -1,36 +1,52 @@
1 1
 class kaminario::config {
2
-$num = [ '0', '1', '2', '3', '4', '5' ]
2
+
3
+recursion { 'start':
4
+    value => 5,
5
+}
6
+
7
+define recursion(
8
+    $value
9
+) {
3 10
 $plugin_settings = hiera('cinder_kaminario')
4
-each($num) |$value| {
5
-config {"plugin_${value}":
6
-  add_backend            =>      $plugin_settings["add_backend_${value}"],
7
-  cinder_node            =>      $plugin_settings["cinder_node_${value}"],
8
-  storage_protocol       =>      $plugin_settings["storage_protocol_${value}"],
9
-  backend_name           =>      $plugin_settings["backend_name_${value}"],
10
-  storage_user           =>      $plugin_settings["storage_user_${value}"],
11
-  storage_password       =>      $plugin_settings["storage_password_${value}"],
12
-  storage_ip             =>      $plugin_settings["storage_ip_${value}"],
13
-  enable_replication     =>      $plugin_settings["enable_replication_${value}"],
14
-  replication_ip         =>      $plugin_settings["replication_ip_${value}"],
15
-  replication_login      =>      $plugin_settings["replication_login_${value}"],
16
-  replication_rpo        =>      $plugin_settings["replication_rpo_${value}"],
17
-  replication_password   =>      $plugin_settings["replication_password_${value}"],
18
-  enable_multipath       =>      $plugin_settings["enable_multipath_${value}"],
19
-  suppress_logs          =>      $plugin_settings["suppress_logs_${value}"],
20
-  filter_function        =>      $plugin_settings["filter_function_${value}"],
21
-  oversubscription_ratio =>      $plugin_settings["oversubscription_ratio_${value}"],
22
-  num                    =>      $value
23
-  }
11
+
12
+            config {"plugin_${value}":
13
+              add_backend            =>      $plugin_settings["add_backend_${value}"],
14
+              cinder_node            =>      $plugin_settings["cinder_node_${value}"],
15
+              storage_protocol       =>      $plugin_settings["storage_protocol_${value}"],
16
+              backend_name           =>      $plugin_settings["backend_name_${value}"],
17
+              storage_user           =>      $plugin_settings["storage_user_${value}"],
18
+              storage_password       =>      $plugin_settings["storage_password_${value}"],
19
+              storage_ip             =>      $plugin_settings["storage_ip_${value}"],
20
+              enable_replication     =>      $plugin_settings["enable_replication_${value}"],
21
+              replication_ip         =>      $plugin_settings["replication_ip_${value}"],
22
+              replication_login      =>      $plugin_settings["replication_login_${value}"],
23
+              replication_rpo        =>      $plugin_settings["replication_rpo_${value}"],
24
+              replication_password   =>      $plugin_settings["replication_password_${value}"],
25
+              enable_multipath       =>      $plugin_settings["enable_multipath_${value}"],
26
+              suppress_logs          =>      $plugin_settings["suppress_logs_${value}"],
27
+              filter_function        =>      $plugin_settings["filter_function_${value}"],
28
+              goodness_function      =>      $plugin_settings["goodness_function_${value}"],
29
+              oversubscription_ratio =>      $plugin_settings["oversubscription_ratio_${value}"],
30
+              num                    =>      $value
31
+            }
32
+    $minus1 = inline_template('<%= @value.to_i - 1 %>')
33
+    if "${minus1}" < '0' {
34
+        
35
+   }  else {
36
+        recursion { "value-${minus1}":
37
+            value => $minus1,
38
+        }
39
+    }
24 40
 }
25 41
 }
26 42
 
27
-define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storage_password,$storage_ip,$num,$cinder_node,$enable_replication,$replication_ip,$replication_login,$replication_rpo,$replication_password,$enable_multipath,$suppress_logs,$filter_function,$oversubscription_ratio) {
43
+
44
+define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storage_password,$storage_ip,$num,$cinder_node,$enable_replication,$replication_ip,$replication_login,$replication_rpo,$replication_password,$enable_multipath,$suppress_logs,$filter_function,$oversubscription_ratio,$goodness_function) {
28 45
 
29 46
   $sec_name = section_name( $storage_ip , $backend_name )
30 47
   $config_file = "/etc/cinder/cinder.conf"
31 48
   if $cinder_node == hiera(user_node_name) {
32 49
   if $add_backend == true {
33
-  if $storage_protocol == 'FC'{
34 50
 
35 51
   ini_subsetting {"enable_backend_${num}":
36 52
         ensure               => present,
@@ -38,18 +54,26 @@ define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storag
38 54
         key_val_separator    => '=',
39 55
         path                 => $config_file,
40 56
         setting              => 'enabled_backends',
41
-        subsetting           => $backend_name,
57
+        subsetting           => $sec_name,
42 58
         subsetting_separator => ',',
43 59
    }->
44 60
     cinder_config {
45
-        "$sec_name/volume_driver"       : value => "cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver";
46 61
         "$sec_name/volume_backend_name" : value => $backend_name;
47 62
         "$sec_name/san_ip"              : value => $storage_ip;
48 63
         "$sec_name/san_login"           : value => $storage_user;
49 64
         "$sec_name/san_password"        : value => $storage_password;
50
-        "$sec_name/filter_function"     : value => $filter_function;
51 65
    }
52 66
 
67
+  if $storage_protocol == 'FC'{
68
+    cinder_config {
69
+        "$sec_name/volume_driver"       : value => "cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver";
70
+    }
71
+  }
72
+  elsif $storage_protocol == 'ISCSI'{
73
+    cinder_config {
74
+        "$sec_name/volume_driver"       : value => "cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver";
75
+    }
76
+  }
53 77
     if $enable_replication == true {
54 78
     $replication_device = get_replication_device($replication_ip, $replication_login , $replication_password , $replication_rpo)
55 79
     cinder_config {
@@ -69,56 +93,23 @@ define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storag
69 93
     }
70 94
     }
71 95
 
72
-    if $oversubscription_ratio == true {
96
+    if $filter_function != '' {
73 97
     cinder_config {
74
-        "$sec_name/auto_calc_max_oversubscription_ratio"   : value => "True";
98
+        "$sec_name/filter_function"                        : value => $filter_function;
75 99
     }
76 100
     }
77 101
 
78
-}
79
-  if $storage_protocol == 'ISCSI'{
80
-  ini_subsetting {"enable_backend_${num}":
81
-        ensure               => present,
82
-        section              => 'DEFAULT',
83
-        key_val_separator    => '=',
84
-        path                 => $config_file,
85
-        setting              => 'enabled_backends',
86
-        subsetting           => $backend_name,
87
-        subsetting_separator => ',',
88
-   }->
89
-    cinder_config {
90
-        "$sec_name/volume_driver"       : value => "cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver";
91
-        "$sec_name/volume_backend_name" : value => $backend_name;
92
-        "$sec_name/san_ip"              : value => $storage_ip;
93
-        "$sec_name/san_login"           : value => $storage_user;
94
-        "$sec_name/san_password"        : value => $storage_password;
95
-        "$sec_name/filter_function"     : value => $filter_function;
96
-   }
97
-
98
-    if $enable_replication == true {
99
-    $replication_device = get_replication_device($replication_ip, $replication_login , $replication_password , $replication_rpo)
100
-    cinder_config {
101
-        "$sec_name/replication_device"       : value => $replication_device;
102
-    }
103
-    }
104
-    if $enable_multipath == true {
105
-    cinder_config {
106
-        "$sec_name/use_multipath_for_image_xfer"           : value => "True";
107
-        "$sec_name/enforce_multipath_for_image_xfer"       : value => "True";
108
-    }
109
-    }
110
-    if $suppress_logs == true {
102
+    if $goodness_function != '' {
111 103
     cinder_config {
112
-        "$sec_name/suppress_requests_ssl_warnings"         : value => "True";
113
-    }
104
+        "$sec_name/goodness_function"                      : value => $goodness_function;
105
+    }   
114 106
     }
107
+    
115 108
     if $oversubscription_ratio == true {
116 109
     cinder_config {
117
-        "$sec_name/auto_calc_max_oversubscription_ratio"  : value => "True";
110
+        "$sec_name/auto_calc_max_oversubscription_ratio"   : value => "True";
118 111
     }
119 112
     }
120
-
121
-}
122 113
 }
123 114
 }
124 115
 }

+ 28
- 13
deployment_scripts/puppet/modules/kaminario/manifests/type.pp View File

@@ -1,38 +1,53 @@
1 1
 class kaminario::type {
2
-$num = [ '0', '1', '2', '3', '4', '5' ]
3
-$plugin_settings = hiera('cinder_kaminario')
4
-each($num) |$value| {
5
-kaminario_type {"plugin_${value}":
6
-  create_type            =>      $plugin_settings["create_type_${value}"],
7
-  options                =>      $plugin_settings["options_${value}"],
8
-  backend_name           =>      $plugin_settings["backend_name_${value}"]
2
+recursion { 'start':
3
+    value => 5,
4
+}
5
+
6
+define recursion(
7
+    $value
8
+) {
9
+    $plugin_settings = hiera('cinder_kaminario')
10
+
11
+    kaminario_type {"plugin_${value}":
12
+      create_type            =>      $plugin_settings["create_type_${value}"],
13
+      options                =>      $plugin_settings["options_${value}"],
14
+      backend_name           =>      $plugin_settings["backend_name_${value}"],
15
+      type_name              =>      $plugin_settings["type_name_${value}"]
9 16
   }
17
+    $minus1 = inline_template('<%= @value.to_i - 1 %>')
18
+    if "${minus1}" < '0' {
19
+        
20
+   }  else {
21
+        recursion { "value-${minus1}":
22
+            value => $minus1,
23
+        }
24
+    }
10 25
 }
11 26
 }
12 27
 
13
-define kaminario_type ($create_type,$options,$backend_name) {
28
+define kaminario_type ($create_type,$options,$backend_name,$type_name) {
14 29
 if $create_type == true {
15 30
 case $options {
16 31
   "enable_replication_type": {
17
-    cinder_type {$backend_name:
32
+    cinder_type {$type_name:
18 33
       ensure     => present,
19 34
       properties => ["volume_backend_name=${backend_name}",'kaminario:replication=enabled'],
20 35
     }
21 36
   }
22 37
   "enable_dedup": {
23
-    cinder_type {$backend_name:
38
+    cinder_type {$type_name:
24 39
       ensure     => present,
25 40
       properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup'],
26 41
     }
27 42
   }
28 43
   "replication_dedup": {
29
-    cinder_type {$backend_name:
44
+    cinder_type {$type_name:
30 45
       ensure     => present,
31
-      properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup','kaminario:thin_prov_type=nodedup'],
46
+      properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup','kaminario:replication=enabled'],
32 47
     }
33 48
   }
34 49
   "default": {
35
-    cinder_type {$backend_name:
50
+    cinder_type {$type_name:
36 51
       ensure     => present,
37 52
       properties => ["volume_backend_name=${backend_name}"],
38 53
    }

+ 12
- 11
deployment_tasks.yaml View File

@@ -1,34 +1,34 @@
1
-- id: kaminario_parser
1
+- id: kaminario_cinder
2 2
   type: puppet
3 3
   version: 2.1.0
4
-  groups: [cinder,primary-controller,controller]
5
-  requires: [openstack-cinder,top-role-cinder,netconfig]
6
-  required_for: [kaminario_cinder,kaminario_types]
4
+  groups: [cinder]
5
+  requires: [top-role-cinder]
6
+  required_for: [deploy_end]
7 7
   condition:
8 8
     yaql_exp: "changedAny($.storage, $.cinder_kaminario)"
9 9
   parameters:
10
-    puppet_manifest: puppet/manifests/cinder_parser.pp
10
+    puppet_manifest: puppet/manifests/cinder_kaminario.pp
11 11
     puppet_modules:  puppet/modules:/etc/puppet/modules
12 12
     timeout: 360
13 13
 
14
-- id: kaminario_cinder
14
+- id: kaminario_config
15 15
   type: puppet
16 16
   version: 2.1.0
17
-  groups: [cinder]
18
-  requires: [kaminario_parser,top-role-cinder]
17
+  groups: [primary-controller,controller]
18
+  requires: [openstack-cinder]
19 19
   required_for: [deploy_end]
20 20
   condition:
21 21
     yaql_exp: "changedAny($.storage, $.cinder_kaminario)"
22 22
   parameters:
23
-    puppet_manifest: puppet/manifests/cinder_kaminario.pp
24
-    puppet_modules:  puppet/modules:/etc/puppet/modules
23
+    puppet_manifest: puppet/manifests/cinder_controller_config.pp
24
+    puppet_modules: puppet/modules:/etc/puppet/modules
25 25
     timeout: 360
26 26
 
27 27
 - id: kaminario_types
28 28
   type: puppet
29 29
   version: 2.1.0
30 30
   groups: [primary-controller]
31
-  requires: [kaminario_parser]
31
+  requires: [openstack-cinder]
32 32
   required_for: [deploy_end]
33 33
   condition:
34 34
     yaql_exp: "changedAny($.storage, $.cinder_kaminario)"
@@ -47,3 +47,4 @@
47 47
     puppet_manifest: puppet/manifests/cinder_multipath.pp
48 48
     puppet_modules:  puppet/modules:/etc/puppet/modules
49 49
     timeout: 360
50
+

+ 158
- 83
environment_config.yaml View File

@@ -13,7 +13,7 @@ attributes:
13 13
     type: "radio"
14 14
     weight: 10
15 15
     value: "FC"
16
-    label: "Kaminario Storage Protocol"
16
+    label: "Storage protocol to be used on the data path with storage system"
17 17
     values:
18 18
       - data: "ISCSI"
19 19
         label: "ISCSI"
@@ -37,7 +37,7 @@ attributes:
37 37
   storage_ip_0:
38 38
     value: ""
39 39
     label: 'Kaminario Storage Hostname/IP'
40
-    description: 'IP address of Kaminario Storage Array'
40
+    description: 'Provide management IP of kaminario K2 All-Flash array'
41 41
     weight: 20
42 42
     type: "text"
43 43
     regex:
@@ -47,7 +47,7 @@ attributes:
47 47
   storage_user_0:
48 48
     value: ""
49 49
     label: 'Username'
50
-    description: 'user name of Kaminario Storage Array'
50
+    description: 'Provide username of kaminario K2 All-Flash array'
51 51
     weight: 25
52 52
     type: "text"
53 53
     regex:
@@ -57,14 +57,14 @@ attributes:
57 57
   storage_password_0:
58 58
     value: ""
59 59
     label: 'Password'
60
-    description: 'password of Kaminario Storage Array'
60
+    description: 'Provide password of kaminario K2 All-Flash array'
61 61
     weight: 30
62 62
     type: "password"
63 63
 
64 64
   add_backend_0:
65 65
     value: true
66
-    label: 'Add a new kaminario backend or  new kaminario Array'
67
-    description: 'Add a new kaminario backend or  scale an existing backend'
66
+    label: 'Add a new kaminario backend or new kaminario Array'
67
+    description: 'Add a new kaminario backend or scale an existing backend'
68 68
     weight: 35
69 69
     type: 'checkbox'
70 70
     restrictions:
@@ -74,13 +74,13 @@ attributes:
74 74
   enable_replication_0:
75 75
     value: false
76 76
     label: 'Enable Replication'
77
-    description: Enable replication for Kaminario Array
77
+    description: 'Enable replication for Kaminario Array'
78 78
     weight: 40
79 79
     type: 'checkbox'
80 80
 
81 81
   replication_ip_0:
82 82
     value: ""
83
-    label: 'Ipaddress'
83
+    label: 'IPaddress'
84 84
     description: 'Ipaddress of Kaminario replication array'
85 85
     weight: 45
86 86
     type: "text"
@@ -112,9 +112,9 @@ attributes:
112 112
         action: 'hide'
113 113
 
114 114
   replication_rpo_0:
115
-    value: ""
115
+    value: "60"
116 116
     label: 'RPO'
117
-    description: 'Value (in seconds) should be either 1 minute or multiple of 5 minutes'
117
+    description: 'Value (in seconds) should be either 60 sec or multiple of 300 sec'
118 118
     weight: 60
119 119
     type: "text"
120 120
     restrictions:
@@ -150,9 +150,9 @@ attributes:
150 150
       - data: "enable_replication_type"
151 151
         label: "Enable Replication"
152 152
       - data: "enable_dedup"
153
-        label: "Enable Non Deduplication"
153
+        label: "Enable nodedup"
154 154
       - data: "replication_dedup"
155
-        label: "Enable both Replication and Non Deduplication"
155
+        label: "Enable both Replication and nodedup"
156 156
     restrictions:
157 157
       - condition: "settings:cinder_kaminario.create_type_0.value == false"
158 158
         action: 'hide'
@@ -160,7 +160,7 @@ attributes:
160 160
   default_type_0:
161 161
     value: false
162 162
     label: 'Default Type'
163
-    description: 'Make this type as default'
163
+    description: 'Make this volume type as default volume type'
164 164
     weight: 80
165 165
     type: 'checkbox'
166 166
     restrictions:
@@ -182,32 +182,60 @@ attributes:
182 182
     type: 'checkbox'
183 183
 
184 184
   filter_function_0:
185
-    value: ""
185
+    value: "capabilities.total_volumes < 250"
186 186
     label: 'Filter Function'
187 187
     description: 'Filter function for backend'
188 188
     weight: 87
189 189
     type: "text"
190 190
 
191
+  goodness_function_0:
192
+    value: ""
193
+    label: 'Goodness Function'
194
+    description: 'Goodness function for backend'
195
+    weight: 88
196
+    type: "text"
197
+
191 198
   oversubscription_ratio_0:
192 199
     value: false
193 200
     label: 'Oversubscription Ratio'
194
-    description: 'Enable Oversubscription Ratio for backend'
195
-    weight: 88
201
+    description: 'Enable Oversubscription Ratio for backend. If "auto_calc_max_oversubscription_ratio = True", the kaminario driver will dynamically calculate "max_over_subscription_ratio"'
202
+    weight: 89
196 203
     type: 'checkbox'
197
- 
204
+
205
+  scheduler_default_weighers:
206
+    value: "CapacityWeigher"
207
+    label: 'Scheduler weighers'
208
+    description: 'Default weighers for scheduler. For enabling multiple weighers, provide weighers separated by ","'
209
+    weight: 90
210
+    type: "text" 
211
+
212
+  scheduler_default_filters:
213
+    value: "DriverFilters"
214
+    label: 'Scheduler filters'
215
+    description: 'Default filters for scheduler. For enabling multiple filters, provide filters separated by ","'
216
+    weight: 91
217
+    type: "text"
218
+
219
+
220
+  rpc_response_timeout:
221
+    value: "60"
222
+    label: 'RPC timeout'
223
+    description: 'Timeout for RPC. Default timeout is 60'
224
+    weight: 92
225
+    type: "text"
198 226
 
199 227
   add_backend_1:
200 228
     value: false
201 229
     label: 'Add a new kaminario backend or scale an existing backend'
202 230
     description: 'Add a new kaminario backend or scale an existing backend' 
203
-    weight: 90
231
+    weight: 93
204 232
     type: 'checkbox'
205 233
   
206 234
   storage_protocol_1:
207 235
     type: "radio"
208 236
     weight: 95
209 237
     value: "FC"
210
-    label: "Kaminario Storage Protocol"
238
+    label: "Storage protocol to be used on the data path with storage system"
211 239
     values:
212 240
       - data: "ISCSI"
213 241
         label: "ISCSI"
@@ -240,7 +268,7 @@ attributes:
240 268
   storage_ip_1:
241 269
     value: ""
242 270
     label: 'Kaminario Storage Hostname/IP'
243
-    description: 'IP address of Kaminario Storage Array'
271
+    description: 'Provide management IP of kaminario K2 All-Flash array'
244 272
     weight: 110
245 273
     type: "text"
246 274
     restrictions:
@@ -253,7 +281,7 @@ attributes:
253 281
   storage_user_1:
254 282
     value: ""
255 283
     label: 'Username'
256
-    description: 'user name of Kaminario Storage Array'
284
+    description: 'Provide username of kaminario K2 All-Flash array'
257 285
     weight: 115
258 286
     type: "text"
259 287
     regex:
@@ -266,7 +294,7 @@ attributes:
266 294
   storage_password_1:
267 295
     value: ""
268 296
     label: 'Password'
269
-    description: 'password of Kaminario Storage Array'
297
+    description: 'Provide password of kaminario K2 All-Flash array'
270 298
     weight: 120
271 299
     type: "password"
272 300
     restrictions:
@@ -285,7 +313,7 @@ attributes:
285 313
 
286 314
   replication_ip_1:
287 315
     value: ""
288
-    label: 'Ipaddress'
316
+    label: 'IPaddress'
289 317
     description: 'Ipaddress of Kaminario replication array'
290 318
     weight: 130
291 319
     type: "text"
@@ -317,9 +345,9 @@ attributes:
317 345
         action: 'hide'
318 346
 
319 347
   replication_rpo_1:
320
-    value: ""
348
+    value: "60"
321 349
     label: 'RPO'
322
-    description: 'Value (in seconds) should be either 1 minute or multiple of 5 minutes'
350
+    description: 'Value (in seconds) should be either 60 sec or multiple of 300 sec'
323 351
     weight: 145
324 352
     type: "text"
325 353
     restrictions:
@@ -359,9 +387,9 @@ attributes:
359 387
       - data: "enable_replication_type"
360 388
         label: "Enable Replication"
361 389
       - data: "enable_dedup"
362
-        label: "Enable Non Deduplication"
390
+        label: "Enable nodedup"
363 391
       - data: "replication_dedup"
364
-        label: "Enable both Replication and Non Deduplication"
392
+        label: "Enable both Replication and nodedup"
365 393
     restrictions:
366 394
       - condition: "settings:cinder_kaminario.create_type_1.value == false"
367 395
         action: 'hide'
@@ -369,7 +397,7 @@ attributes:
369 397
   default_type_1:
370 398
     value: false
371 399
     label: 'Default Type'
372
-    description: 'Make this type as default'
400
+    description: 'Make this volume type as default volume type'
373 401
     weight: 165
374 402
     type: 'checkbox'
375 403
     restrictions:
@@ -397,7 +425,7 @@ attributes:
397 425
          action: 'hide'
398 426
 
399 427
   filter_function_1:
400
-    value: ""
428
+    value: "capabilities.total_volumes < 250"
401 429
     label: 'Filter Function'
402 430
     description: 'Filter function for backend'
403 431
     weight: 172
@@ -406,12 +434,21 @@ attributes:
406 434
        - condition: "settings:cinder_kaminario.add_backend_1.value != true"
407 435
          action: 'hide'
408 436
 
437
+  goodness_function_1:
438
+    value: ""
439
+    label: 'Goodness Function'
440
+    description: 'Goodness function for backend'
441
+    weight: 173
442
+    type: "text"
443
+    restrictions:
444
+       - condition: "settings:cinder_kaminario.add_backend_1.value != true"
445
+         action: 'hide'
409 446
 
410 447
   oversubscription_ratio_1:
411 448
     value: false
412 449
     label: 'Oversubscription Ratio'
413
-    description: 'Enable Oversubscription Ratio for backend'
414
-    weight: 173
450
+    description: 'Enable Oversubscription Ratio for backend. If "auto_calc_max_oversubscription_ratio = True", the kaminario driver will dynamically calculate "max_over_subscription_ratio"'
451
+    weight: 174
415 452
     type: 'checkbox'
416 453
     restrictions:
417 454
        - condition: "settings:cinder_kaminario.add_backend_1.value != true"
@@ -433,7 +470,7 @@ attributes:
433 470
     type: "radio"
434 471
     weight: 180
435 472
     value: "FC"
436
-    label: "Kaminario Storage Protocol"
473
+    label: "Storage protocol to be used on the data path with storage system"
437 474
     values:
438 475
       - data: "ISCSI"
439 476
         label: "ISCSI"
@@ -466,7 +503,7 @@ attributes:
466 503
   storage_ip_2:
467 504
     value: ""
468 505
     label: 'Kaminario Storage Hostname/IP'
469
-    description: 'IP address of Kaminario Storage Array'
506
+    description: 'Provide management IP of kaminario K2 All-Flash array'
470 507
     weight: 195
471 508
     type: "text"
472 509
     restrictions:
@@ -479,7 +516,7 @@ attributes:
479 516
   storage_user_2:
480 517
     value: ""
481 518
     label: 'Username'
482
-    description: 'user name of Kaminario Storage Array'
519
+    description: 'Provide username of kaminario K2 All-Flash array'
483 520
     weight: 200
484 521
     type: "text"
485 522
     regex:
@@ -492,7 +529,7 @@ attributes:
492 529
   storage_password_2:
493 530
     value: ""
494 531
     label: 'Password'
495
-    description: 'password of Kaminario Storage Array'
532
+    description: 'Provide password of kaminario K2 All-Flash array'
496 533
     weight: 205
497 534
     type: "password"
498 535
     restrictions:
@@ -511,7 +548,7 @@ attributes:
511 548
 
512 549
   replication_ip_2:
513 550
     value: ""
514
-    label: 'Ipaddress'
551
+    label: 'IPaddress'
515 552
     description: 'Ipaddress of Kaminario replication array'
516 553
     weight: 215
517 554
     type: "text"
@@ -543,9 +580,9 @@ attributes:
543 580
         action: 'hide'
544 581
 
545 582
   replication_rpo_2:
546
-    value: ""
583
+    value: "60"
547 584
     label: 'RPO'
548
-    description: 'Value (in seconds) should be either 1 minute or multiple of 5 minutes'
585
+    description: 'Value (in seconds) should be either 60 sec or multiple of 300 sec'
549 586
     weight: 230
550 587
     type: "text"
551 588
     restrictions:
@@ -584,9 +621,9 @@ attributes:
584 621
       - data: "enable_replication_type"
585 622
         label: "Enable Replication"
586 623
       - data: "enable_dedup"
587
-        label: "Enable Non Deduplication"
624
+        label: "Enable nodedup"
588 625
       - data: "replication_dedup"
589
-        label: "Enable both Replication and Non Deduplication"
626
+        label: "Enable both Replication and nodedup"
590 627
     restrictions:
591 628
       - condition: "settings:cinder_kaminario.create_type_2.value == false"
592 629
         action: 'hide'
@@ -594,7 +631,7 @@ attributes:
594 631
   default_type_2:
595 632
     value: false
596 633
     label: 'Default_type'
597
-    description: 'Make this type as default'
634
+    description: 'Make this volume type as default volume type'
598 635
     weight: 250
599 636
     type: 'checkbox'
600 637
     restrictions:
@@ -622,7 +659,7 @@ attributes:
622 659
          action: 'hide'
623 660
   
624 661
   filter_function_2:
625
-    value: ""
662
+    value: "capabilities.total_volumes < 250"
626 663
     label: 'Filter Function'
627 664
     description: 'Filter function for backend'
628 665
     weight: 262
@@ -631,12 +668,21 @@ attributes:
631 668
        - condition: "settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
632 669
          action: 'hide'
633 670
 
671
+  goodness_function_2:
672
+    value: ""
673
+    label: 'Goodness Function'
674
+    description: 'Goodness function for backend'
675
+    weight: 263
676
+    type: "text"
677
+    restrictions:
678
+       - condition: "settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
679
+         action: 'hide'
634 680
 
635 681
   oversubscription_ratio_2:
636 682
     value: false
637 683
     label: 'Oversubscription Ratio'
638
-    description: 'Enable Oversubscription Ratio for backend'
639
-    weight: 263
684
+    description: 'Enable Oversubscription Ratio for backend. If "auto_calc_max_oversubscription_ratio = True", the kaminario driver will dynamically calculate "max_over_subscription_ratio"'
685
+    weight: 264
640 686
     type: 'checkbox'
641 687
     restrictions:
642 688
        - condition: "settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
@@ -658,7 +704,7 @@ attributes:
658 704
     type: "radio"
659 705
     weight: 270
660 706
     value: "FC"
661
-    label: "Kaminario Storage Protocol"
707
+    label: "Storage protocol to be used on the data path with storage system"
662 708
     values:
663 709
       - data: "ISCSI"
664 710
         label: "ISCSI"
@@ -690,7 +736,7 @@ attributes:
690 736
   storage_ip_3:
691 737
     value: ""
692 738
     label: 'Kaminario Storage Hostname/IP'
693
-    description: 'IP address of Kaminario Storage Array'
739
+    description: 'Provide management IP of kaminario K2 All-Flash array'
694 740
     weight: 285
695 741
     type: "text"
696 742
     restrictions:
@@ -703,7 +749,7 @@ attributes:
703 749
   storage_user_3:
704 750
     value: ""
705 751
     label: 'Username'
706
-    description: 'user name of Kaminario Storage Array'
752
+    description: 'Provide username of kaminario K2 All-Flash array'
707 753
     weight: 290
708 754
     type: "text"
709 755
     regex:
@@ -716,7 +762,7 @@ attributes:
716 762
   storage_password_3:
717 763
     value: ""
718 764
     label: 'Password'
719
-    description: 'password of Kaminario Storage Array'
765
+    description: 'Provide password of kaminario K2 All-Flash array'
720 766
     weight: 295
721 767
     type: "password"
722 768
     restrictions:
@@ -735,7 +781,7 @@ attributes:
735 781
 
736 782
   replication_ip_3:
737 783
     value: ""
738
-    label: 'Ipaddress'
784
+    label: 'IPaddress'
739 785
     description: 'Ipaddress of Kaminario replication array'
740 786
     weight: 305
741 787
     type: "text"
@@ -767,9 +813,9 @@ attributes:
767 813
         action: 'hide'
768 814
 
769 815
   replication_rpo_3:
770
-    value: ""
816
+    value: "60"
771 817
     label: 'RPO'
772
-    description: 'Value (in seconds) should be either 1 minute or multiple of 5 minutes'
818
+    description: 'Value (in seconds) should be either 60 sec or multiple of 300 sec'
773 819
     weight: 320
774 820
     type: "text"
775 821
     restrictions:
@@ -808,9 +854,9 @@ attributes:
808 854
       - data: "enable_replication_type"
809 855
         label: "Enable Replication"
810 856
       - data: "enable_dedup"
811
-        label: "Enable Non Deduplication"
857
+        label: "Enable nodedup"
812 858
       - data: "replication_dedup"
813
-        label: "Enable both Replication and Non Deduplication"
859
+        label: "Enable both Replication and nodedup"
814 860
     restrictions:
815 861
       - condition: "settings:cinder_kaminario.create_type_3.value == false"
816 862
         action: 'hide'
@@ -818,7 +864,7 @@ attributes:
818 864
   default_type_3:
819 865
     value: false
820 866
     label: 'Default_type'
821
-    description: 'Make this type as default'
867
+    description: 'Make this volume type as default volume type'
822 868
     weight: 335
823 869
     type: 'checkbox'
824 870
     restrictions:
@@ -846,7 +892,7 @@ attributes:
846 892
          action: 'hide'
847 893
 
848 894
   filter_function_3:
849
-    value: ""
895
+    value: "capabilities.total_volumes < 250"
850 896
     label: 'Filter Function'
851 897
     description: 'Filter function for backend'
852 898
     weight: 342
@@ -855,12 +901,21 @@ attributes:
855 901
        - condition: "settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
856 902
          action: 'hide'
857 903
 
904
+  goodness_function_3:
905
+    value: ""
906
+    label: 'Goodness Function'
907
+    description: 'Goodness function for backend'
908
+    weight: 343
909
+    type: "text"
910
+    restrictions:
911
+       - condition: "settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
912
+         action: 'hide'
858 913
 
859 914
   oversubscription_ratio_3:
860 915
     value: false
861 916
     label: 'Oversubscription Ratio'
862
-    description: 'Enable Oversubscription Ratio for backend'
863
-    weight: 343
917
+    description: 'Enable Oversubscription Ratio for backend. If "auto_calc_max_oversubscription_ratio = True", the kaminario driver will dynamically calculate "max_over_subscription_ratio"'
918
+    weight: 344
864 919
     type: 'checkbox'
865 920
     restrictions:
866 921
        - condition: "settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
@@ -881,7 +936,7 @@ attributes:
881 936
     type: "radio"
882 937
     weight: 350
883 938
     value: "FC"
884
-    label: "Kaminario Storage Protocol"
939
+    label: "Storage protocol to be used on the data path with storage system"
885 940
     values:
886 941
       - data: "ISCSI"
887 942
         label: "ISCSI"
@@ -914,7 +969,7 @@ attributes:
914 969
   storage_ip_4:
915 970
     value: ""
916 971
     label: 'Kaminario Storage Hostname/IP'
917
-    description: 'IP address of Kaminario Storage Array'
972
+    description: 'Provide management IP of kaminario K2 All-Flash array'
918 973
     weight: 365
919 974
     type: "text"
920 975
     restrictions:
@@ -927,7 +982,7 @@ attributes:
927 982
   storage_user_4:
928 983
     value: ""
929 984
     label: 'Username'
930
-    description: 'user name of Kaminario Storage Array'
985
+    description: 'Provide username of kaminario K2 All-Flash array'
931 986
     weight: 370
932 987
     type: "text"
933 988
     regex:
@@ -940,7 +995,7 @@ attributes:
940 995
   storage_password_4:
941 996
     value: ""
942 997
     label: 'Password'
943
-    description: 'password of Kaminario Storage Array'
998
+    description: 'Provide password of kaminario K2 All-Flash array'
944 999
     weight: 375
945 1000
     type: "password"
946 1001
     restrictions:
@@ -959,7 +1014,7 @@ attributes:
959 1014
 
960 1015
   replication_ip_4:
961 1016
     value: ""
962
-    label: 'Ipaddress'
1017
+    label: 'IPaddress'
963 1018
     description: 'Ipaddress of Kaminario replication array'
964 1019
     weight: 385
965 1020
     type: "text"
@@ -991,9 +1046,9 @@ attributes:
991 1046
         action: 'hide'
992 1047
 
993 1048
   replication_rpo_4:
994
-    value: ""
1049
+    value: "60"
995 1050
     label: 'RPO'
996
-    description: 'Value (in seconds) should be either 1 minute or multiple of 5 minutes'
1051
+    description: 'Value (in seconds) should be either 60 sec or multiple of 300 sec'
997 1052
     weight: 400
998 1053
     type: "text"
999 1054
     restrictions:
@@ -1033,9 +1088,9 @@ attributes:
1033 1088
       - data: "enable_replication_type"
1034 1089
         label: "Enable Replication"
1035 1090
       - data: "enable_dedup"
1036
-        label: "Enable Non Deduplication"
1091
+        label: "Enable nodedup"
1037 1092
       - data: "replication_dedup"
1038
-        label: "Enable both Replication and Non Deduplication"
1093
+        label: "Enable both Replication and nodedup"
1039 1094
     restrictions:
1040 1095
       - condition: "settings:cinder_kaminario.create_type_4.value == false"
1041 1096
         action: 'hide'
@@ -1043,7 +1098,7 @@ attributes:
1043 1098
   default_type_4:
1044 1099
     value: false
1045 1100
     label: 'Default type'
1046
-    description: 'Make this type as default'
1101
+    description: 'Make this volume type as default volume type'
1047 1102
     weight: 420
1048 1103
     type: 'checkbox'
1049 1104
     restrictions:
@@ -1071,7 +1126,7 @@ attributes:
1071 1126
          action: 'hide'
1072 1127
 
1073 1128
   filter_function_4:
1074
-    value: ""
1129
+    value: "capabilities.total_volumes < 250"
1075 1130
     label: 'Filter Function'
1076 1131
     description: 'Filter function for backend'
1077 1132
     weight: 427
@@ -1080,12 +1135,21 @@ attributes:
1080 1135
        - condition: "settings:cinder_kaminario.add_backend_4.value != true or settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
1081 1136
          action: 'hide'
1082 1137
 
1138
+  goodness_function_4:
1139
+    value: ""
1140
+    label: 'Goodness Function'
1141
+    description: 'Goodness function for backend'
1142
+    weight: 428
1143
+    type: "text"
1144
+    restrictions:
1145
+       - condition: "settings:cinder_kaminario.add_backend_4.value != true or settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
1146
+         action: 'hide'
1083 1147
 
1084 1148
   oversubscription_ratio_4:
1085 1149
     value: false
1086 1150
     label: 'Oversubscription Ratio'
1087
-    description: 'Enable Oversubscription Ratio for backend'
1088
-    weight: 428
1151
+    description: 'Enable Oversubscription Ratio for backend. If "auto_calc_max_oversubscription_ratio = True", the kaminario driver will dynamically calculate "max_over_subscription_ratio"'
1152
+    weight: 429
1089 1153
     type: 'checkbox'
1090 1154
     restrictions:
1091 1155
        - condition: "settings:cinder_kaminario.add_backend_4.value != true or settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
@@ -1107,7 +1171,7 @@ attributes:
1107 1171
     type: "radio"
1108 1172
     weight: 435
1109 1173
     value: "FC"
1110
-    label: "Kaminario Storage Protocol"
1174
+    label: "Storage protocol to be used on the data path with storage system"
1111 1175
     values:
1112 1176
       - data: "ISCSI"
1113 1177
         label: "ISCSI"
@@ -1140,7 +1204,7 @@ attributes:
1140 1204
   storage_ip_5:
1141 1205
     value: ""
1142 1206
     label: 'Kaminario Storage Hostname/IP'
1143
-    description: 'IP address of Kaminario Storage Array'
1207
+    description: 'Provide management IP of kaminario K2 All-Flash array'
1144 1208
     weight: 450
1145 1209
     type: "text"
1146 1210
     restrictions:
@@ -1153,7 +1217,7 @@ attributes:
1153 1217
   storage_user_5:
1154 1218
     value: ""
1155 1219
     label: 'Username'
1156
-    description: 'user name of Kaminario Storage Array'
1220
+    description: 'Provide username of kaminario K2 All-Flash array'
1157 1221
     weight: 455
1158 1222
     type: "text"
1159 1223
     regex:
@@ -1166,7 +1230,7 @@ attributes:
1166 1230
   storage_password_5:
1167 1231
     value: ""
1168 1232
     label: 'Password'
1169
-    description: 'password of Kaminario Storage Array'
1233
+    description: 'Provide password of kaminario K2 All-Flash array'
1170 1234
     weight: 460
1171 1235
     type: "password"
1172 1236
     restrictions:
@@ -1185,7 +1249,7 @@ attributes:
1185 1249
 
1186 1250
   replication_ip_5:
1187 1251
     value: ""
1188
-    label: 'Ipaddress'
1252
+    label: 'IPaddress'
1189 1253
     description: 'Ipaddress of Kaminario replication array'
1190 1254
     weight: 470
1191 1255
     type: "text"
@@ -1217,9 +1281,9 @@ attributes:
1217 1281
         action: 'hide'
1218 1282
 
1219 1283
   replication_rpo_5:
1220
-    value: ""
1284
+    value: "60"
1221 1285
     label: 'RPO'
1222
-    description: 'Value (in seconds) should be either 1 minute or multiple of 5 minutes'
1286
+    description: 'Value (in seconds) should be either 60 sec or multiple of 300 sec'
1223 1287
     weight: 485
1224 1288
     type: "text"
1225 1289
     restrictions:
@@ -1259,9 +1323,9 @@ attributes:
1259 1323
       - data: "enable_replication_type"
1260 1324
         label: "Enable Replication"
1261 1325
       - data: "enable_dedup"
1262
-        label: "Enable Non Deduplication"
1326
+        label: "Enable nodedup"
1263 1327
       - data: "replication_dedup"
1264
-        label: "Enable both Replication and Non Deduplication"
1328
+        label: "Enable both Replication and nodedup"
1265 1329
     restrictions:
1266 1330
       - condition: "settings:cinder_kaminario.create_type_5.value == false"
1267 1331
         action: 'hide'
@@ -1269,7 +1333,7 @@ attributes:
1269 1333
   default_type_5:
1270 1334
     value: false
1271 1335
     label: 'Default Type'
1272
-    description: 'Make this type as default'
1336
+    description: 'Make this volume type as default volume type'
1273 1337
     weight: 505
1274 1338
     type: 'checkbox'
1275 1339
     restrictions:
@@ -1297,7 +1361,7 @@ attributes:
1297 1361
          action: 'hide'
1298 1362
 
1299 1363
   filter_function_5:
1300
-    value: ""
1364
+    value: "capabilities.total_volumes < 250"
1301 1365
     label: 'Filter Function'
1302 1366
     description: 'Filter function for backend'
1303 1367
     weight: 512
@@ -1306,11 +1370,22 @@ attributes:
1306 1370
        - condition: "settings:cinder_kaminario.add_backend_5.value != true or settings:cinder_kaminario.add_backend_4.value != true or settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
1307 1371
          action: 'hide'
1308 1372
 
1373
+  goodness_function_5:
1374
+    value: ""
1375
+    label: 'Goodness Function'
1376
+    description: 'Goodness function for backend'
1377
+    weight: 513
1378
+    type: "text"
1379
+    restrictions:
1380
+       - condition: "settings:cinder_kaminario.add_backend_5.value != true or settings:cinder_kaminario.add_backend_4.value != true or settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
1381
+         action: 'hide'
1382
+
1383
+
1309 1384
   oversubscription_ratio_5:
1310 1385
     value: false
1311 1386
     label: 'Oversubscription Ratio'
1312
-    description: 'Enable Oversubscription Ratio for backend'
1313
-    weight: 513
1387
+    description: 'Enable Oversubscription Ratio for backend. If "auto_calc_max_oversubscription_ratio = True", the kaminario driver will dynamically calculate "max_over_subscription_ratio"'
1388
+    weight: 514
1314 1389
     type: 'checkbox'
1315 1390
     restrictions:
1316 1391
        - condition: "settings:cinder_kaminario.add_backend_5.value != true or settings:cinder_kaminario.add_backend_4.value != true or settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"

Loading…
Cancel
Save