
Update fuel plugin repo for Kaminario with latest

Kaminario driver files from GitHub

Change-Id: Ib16114525eed6066c19dfc4b2b28f5e2128eb56f
Co-Authored-By: Chaithanya Kopparthi <chaithanyak@biarca.com>
changes/04/387004/1
Pradip Rawat 2 years ago
parent commit 9550a4d0e6

.gitreview (+4, -0)

@@ -0,0 +1,4 @@
+[gerrit]
+host=review.openstack.org
+port=29418
+project=openstack/fuel-plugin-cinder-kaminario.git

deployment_scripts/puppet/manifests/cinder_kaminario.pp (+1, -1)

@@ -1,8 +1,8 @@
 notice('MODULAR: cinder_kaminario')
 
 
-class { 'kaminario::driver': }->
 class { 'kaminario::krest': }->
+class { 'kaminario::driver': }->
 class { 'kaminario::config': }~> Exec[cinder_volume]
 
 exec {'cinder_volume':

deployment_scripts/puppet/modules/kaminario/files/__init__.py (+0, -0)


deployment_scripts/puppet/modules/kaminario/files/exception.py (+0, -1128)
File diff suppressed because it is too large


deployment_scripts/puppet/modules/kaminario/files/exception.sh (+2, -0)

@@ -0,0 +1,2 @@
+grep -q -F 'Kaminario' /usr/lib/python2.7/dist-packages/cinder/exception.py || sudo sed -i '$a \ \ \n\nclass KaminarioCinderDriverException(VolumeDriverException):\n\    \message = _("KaminarioCinderDriver failure: %(reason)s")\n\n\nclass KaminarioRetryableException(VolumeDriverException):\n\    \message = _("Kaminario retryable exception: %(reason)s")' /usr/lib/python2.7/dist-packages/cinder/exception.py
+
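
The escaped one-liner above is hard to read; unpacked, it appends two exception classes to cinder/exception.py only when the string 'Kaminario' is not already present in that file. A sketch of the Python it adds, taken from the sed payload (VolumeDriverException and the _() translation helper already exist in that module):

    # What exception.sh appends to /usr/lib/python2.7/dist-packages/cinder/exception.py.
    # VolumeDriverException and _() are already defined in that module.
    class KaminarioCinderDriverException(VolumeDriverException):
        message = _("KaminarioCinderDriver failure: %(reason)s")


    class KaminarioRetryableException(VolumeDriverException):
        message = _("Kaminario retryable exception: %(reason)s")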

deployment_scripts/puppet/modules/kaminario/files/kaminario_common.py (+0, -1155)
File diff suppressed because it is too large


deployment_scripts/puppet/modules/kaminario/files/kaminario_fc.py (+0, -196)

@@ -1,196 +0,0 @@
-# Copyright (c) 2016 by Kaminario Technologies, Ltd.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-"""Volume driver for Kaminario K2 all-flash arrays."""
-import six
-
-from oslo_log import log as logging
-
-from cinder import exception
-from cinder import utils
-from cinder.i18n import _, _LE
-from cinder.objects import fields
-from cinder.volume.drivers.kaminario import kaminario_common as common
-from cinder.zonemanager import utils as fczm_utils
-
-K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER
-LOG = logging.getLogger(__name__)
-kaminario_logger = common.kaminario_logger
-
-
-class KaminarioFCDriver(common.KaminarioCinderDriver):
-    """Kaminario K2 FC Volume Driver.
-
-    Version history:
-        1.0.2.0 - Initial driver
-    """
-
-    VERSION = '1.0.2.0'
-
-    # ThirdPartySystems wiki page name
-    CI_WIKI_NAME = "Kaminario_K2_CI"
-
-    @kaminario_logger
-    def __init__(self, *args, **kwargs):
-        super(KaminarioFCDriver, self).__init__(*args, **kwargs)
-        self._protocol = 'FC'
-        self.lookup_service = fczm_utils.create_lookup_service()
-
-    @fczm_utils.AddFCZone
-    @kaminario_logger
-    @utils.synchronized(common.K2_LOCK_NAME, external=True)
-    def initialize_connection(self, volume, connector):
-        """Attach K2 volume to host."""
-        # Check wwpns in host connector.
-        if not connector.get('wwpns'):
-            msg = _("No wwpns found in host connector.")
-            LOG.error(msg)
-            raise exception.KaminarioCinderDriverException(reason=msg)
-        # To support replication failback
-        temp_client = None
-        if (hasattr(volume, 'replication_status') and
-                volume.replication_status == K2_REP_FAILED_OVER):
-            temp_client = self.client
-            self.client = self.target
-        # Get target wwpns.
-        target_wwpns = self.get_target_info(volume)
-        # Map volume.
-        lun = self.k2_initialize_connection(volume, connector)
-        # Create initiator-target mapping.
-        target_wwpns, init_target_map = self._build_initiator_target_map(
-            connector, target_wwpns)
-        # To support replication failback
-        if temp_client:
-            self.client = temp_client
-        # Return target volume information.
-        return {'driver_volume_type': 'fibre_channel',
-                'data': {"target_discovered": True,
-                         "target_lun": lun,
-                         "target_wwn": target_wwpns,
-                         "initiator_target_map": init_target_map}}
-
-    @fczm_utils.RemoveFCZone
-    @kaminario_logger
-    @utils.synchronized(common.K2_LOCK_NAME, external=True)
-    def terminate_connection(self, volume, connector, **kwargs):
-        # To support replication failback
-        temp_client = None
-        if (hasattr(volume, 'replication_status') and
-                volume.replication_status == K2_REP_FAILED_OVER):
-            temp_client = self.client
-            self.client = self.target
-        super(KaminarioFCDriver, self).terminate_connection(volume, connector)
-        properties = {"driver_volume_type": "fibre_channel", "data": {}}
-        host_name = self.get_initiator_host_name(connector)
-        host_rs = self.client.search("hosts", name=host_name)
-        # In terminate_connection, host_entry is deleted if host
-        # is not attached to any volume
-        if host_rs.total == 0:
-            # Get target wwpns.
-            target_wwpns = self.get_target_info(volume)
-            target_wwpns, init_target_map = self._build_initiator_target_map(
-                connector, target_wwpns)
-            properties["data"] = {"target_wwn": target_wwpns,
-                                  "initiator_target_map": init_target_map}
-        # To support replication failback
-        if temp_client:
-            self.client = temp_client
-        return properties
-
-    @kaminario_logger
-    def get_target_info(self, volume):
-        LOG.debug("Searching target wwpns in K2.")
-        fc_ports_rs = self.client.search("system/fc_ports")
-        target_wwpns = []
-        if hasattr(fc_ports_rs, 'hits') and fc_ports_rs.total != 0:
-            for port in fc_ports_rs.hits:
-                if port.pwwn:
-                    target_wwpns.append((port.pwwn).replace(':', ''))
-        if not target_wwpns:
-            msg = _("Unable to get FC target wwpns from K2.")
-            LOG.error(msg)
-            raise exception.KaminarioCinderDriverException(reason=msg)
-        return target_wwpns
-
-    @kaminario_logger
-    def _get_host_object(self, connector):
-        host_name = self.get_initiator_host_name(connector)
-        LOG.debug("Searching initiator hostname: %s in K2.", host_name)
-        host_rs = self.client.search("hosts", name=host_name)
-        host_wwpns = connector['wwpns']
-        if host_rs.total == 0:
-            try:
-                LOG.debug("Creating initiator hostname: %s in K2.", host_name)
-                host = self.client.new("hosts", name=host_name,
-                                       type="Linux").save()
-            except Exception as ex:
-                LOG.exception(_LE("Unable to create host : %s in K2."),
-                              host_name)
-                raise exception.KaminarioCinderDriverException(
-                    reason=six.text_type(ex.message))
-        else:
-            # Use existing host.
-            LOG.debug("Use existing initiator hostname: %s in K2.", host_name)
-            host = host_rs.hits[0]
-        # Adding host wwpn.
-        for wwpn in host_wwpns:
-            wwpn = ":".join([wwpn[i:i + 2] for i in range(0, len(wwpn), 2)])
-            if self.client.search("host_fc_ports", pwwn=wwpn,
-                                  host=host).total == 0:
-                LOG.debug("Adding wwpn: %(wwpn)s to host: "
-                          "%(host)s in K2.", {'wwpn': wwpn,
-                                              'host': host_name})
-                try:
-                    self.client.new("host_fc_ports", pwwn=wwpn,
-                                    host=host).save()
-                except Exception as ex:
-                    if host_rs.total == 0:
-                        self._delete_host_by_name(host_name)
-                    LOG.exception(_LE("Unable to add wwpn : %(wwpn)s to "
-                                      "host: %(host)s in K2."),
-                                  {'wwpn': wwpn, 'host': host_name})
-                    raise exception.KaminarioCinderDriverException(
-                        reason=six.text_type(ex.message))
-        return host, host_rs, host_name
-
-    @kaminario_logger
-    def _build_initiator_target_map(self, connector, all_target_wwns):
-        """Build the target_wwns and the initiator target map."""
-        target_wwns = []
-        init_targ_map = {}
-
-        if self.lookup_service is not None:
-            # use FC san lookup.
-            dev_map = self.lookup_service.get_device_mapping_from_network(
-                connector.get('wwpns'),
-                all_target_wwns)
-
-            for fabric_name in dev_map:
-                fabric = dev_map[fabric_name]
-                target_wwns += fabric['target_port_wwn_list']
-                for initiator in fabric['initiator_port_wwn_list']:
-                    if initiator not in init_targ_map:
-                        init_targ_map[initiator] = []
-                    init_targ_map[initiator] += fabric['target_port_wwn_list']
-                    init_targ_map[initiator] = list(set(
-                        init_targ_map[initiator]))
-            target_wwns = list(set(target_wwns))
-        else:
-            initiator_wwns = connector.get('wwpns', [])
-            target_wwns = all_target_wwns
-
-            for initiator in initiator_wwns:
-                init_targ_map[initiator] = target_wwns
-
-        return target_wwns, init_targ_map

deployment_scripts/puppet/modules/kaminario/files/kaminario_iscsi.py (+0, -137)

@@ -1,137 +0,0 @@
-# Copyright (c) 2016 by Kaminario Technologies, Ltd.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-"""Volume driver for Kaminario K2 all-flash arrays."""
-import six
-
-from oslo_log import log as logging
-
-from cinder import exception
-from cinder import utils
-from cinder.i18n import _, _LE
-from cinder.objects import fields
-from cinder.volume.drivers.kaminario import kaminario_common as common
-
-ISCSI_TCP_PORT = "3260"
-K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER
-LOG = logging.getLogger(__name__)
-kaminario_logger = common.kaminario_logger
-
-
-class KaminarioISCSIDriver(common.KaminarioCinderDriver):
-    """Kaminario K2 iSCSI Volume Driver.
-
-    Version history:
-        1.0.2.0 - Initial driver
-    """
-
-    VERSION = '1.0.2.0'
-
-    # ThirdPartySystems wiki page name
-    CI_WIKI_NAME = "Kaminario_K2_CI"
-
-    @kaminario_logger
-    def __init__(self, *args, **kwargs):
-        super(KaminarioISCSIDriver, self).__init__(*args, **kwargs)
-        self._protocol = 'iSCSI'
-
-    @kaminario_logger
-    @utils.synchronized(common.K2_LOCK_NAME, external=True)
-    def initialize_connection(self, volume, connector):
-        """Attach K2 volume to host."""
-        # To support replication failback
-        temp_client = None
-        if (hasattr(volume, 'replication_status') and
-                volume.replication_status == K2_REP_FAILED_OVER):
-            temp_client = self.client
-            self.client = self.target
-        # Get target_portal and target iqn.
-        iscsi_portal, target_iqn = self.get_target_info(volume)
-        # Map volume.
-        lun = self.k2_initialize_connection(volume, connector)
-        # To support replication failback
-        if temp_client:
-            self.client = temp_client
-        # Return target volume information.
-        return {"driver_volume_type": "iscsi",
-                "data": {"target_iqn": target_iqn,
-                         "target_portal": iscsi_portal,
-                         "target_lun": lun,
-                         "target_discovered": True}}
-
-    @kaminario_logger
-    @utils.synchronized(common.K2_LOCK_NAME, external=True)
-    def terminate_connection(self, volume, connector, **kwargs):
-        # To support replication failback
-        temp_client = None
-        if (hasattr(volume, 'replication_status') and
-                volume.replication_status == K2_REP_FAILED_OVER):
-            temp_client = self.client
-            self.client = self.target
-        super(KaminarioISCSIDriver, self).terminate_connection(volume,
-                                                               connector)
-        # To support replication failback
-        if temp_client:
-            self.client = temp_client
-
-    @kaminario_logger
-    def get_target_info(self, volume):
-        LOG.debug("Searching first iscsi port ip without wan in K2.")
-        iscsi_ip_rs = self.client.search("system/net_ips", wan_port="")
-        iscsi_ip = target_iqn = None
-        if hasattr(iscsi_ip_rs, 'hits') and iscsi_ip_rs.total != 0:
-            iscsi_ip = iscsi_ip_rs.hits[0].ip_address
-        if not iscsi_ip:
-            msg = _("Unable to get ISCSI IP address from K2.")
-            LOG.error(msg)
-            raise exception.KaminarioCinderDriverException(reason=msg)
-        iscsi_portal = "{0}:{1}".format(iscsi_ip, ISCSI_TCP_PORT)
-        LOG.debug("Searching system state for target iqn in K2.")
-        sys_state_rs = self.client.search("system/state")
-
-        if hasattr(sys_state_rs, 'hits') and sys_state_rs.total != 0:
-            target_iqn = sys_state_rs.hits[0].iscsi_qualified_target_name
-
-        if not target_iqn:
-            msg = _("Unable to get target iqn from K2.")
-            LOG.error(msg)
-            raise exception.KaminarioCinderDriverException(reason=msg)
-        return iscsi_portal, target_iqn
-
-    @kaminario_logger
-    def _get_host_object(self, connector):
-        host_name = self.get_initiator_host_name(connector)
-        LOG.debug("Searching initiator hostname: %s in K2.", host_name)
-        host_rs = self.client.search("hosts", name=host_name)
-        """Create a host if not exists."""
-        if host_rs.total == 0:
-            try:
-                LOG.debug("Creating initiator hostname: %s in K2.", host_name)
-                host = self.client.new("hosts", name=host_name,
-                                       type="Linux").save()
-                LOG.debug("Adding iqn: %(iqn)s to host: %(host)s in K2.",
-                          {'iqn': connector['initiator'], 'host': host_name})
-                iqn = self.client.new("host_iqns", iqn=connector['initiator'],
-                                      host=host)
-                iqn.save()
-            except Exception as ex:
-                self._delete_host_by_name(host_name)
-                LOG.exception(_LE("Unable to create host: %s in K2."),
-                              host_name)
-                raise exception.KaminarioCinderDriverException(
-                    reason=six.text_type(ex.message))
-        else:
-            LOG.debug("Use existing initiator hostname: %s in K2.", host_name)
-            host = host_rs.hits[0]
-        return host, host_rs, host_name

deployment_scripts/puppet/modules/kaminario/manifests/controller_config.pp (+4, -4)

@@ -5,7 +5,7 @@ $plugin_settings = hiera('cinder_kaminario')
 
   if $plugin_settings['scheduler_default_filters'] != ''
   {
-  ini_subsetting {"scheduler_default_filters":
+  ini_subsetting {'scheduler_default_filters':
     ensure               => present,
     section              => 'DEFAULT',
     key_val_separator    => '=',
@@ -18,18 +18,18 @@ $plugin_settings = hiera('cinder_kaminario')
   if $plugin_settings['scheduler_default_weighers'] != ''
   {
   cinder_config {
-    "DEFAULT/scheduler_default_weighers"       : value => $plugin_settings['scheduler_default_weighers'];
+    'DEFAULT/scheduler_default_weighers'       : value => $plugin_settings['scheduler_default_weighers'];
   }
   }
   if $plugin_settings['rpc_response_timeout'] != ''
   {
   cinder_config {
-    "DEFAULT/rpc_response_timeout"             : value => $plugin_settings['rpc_response_timeout'];
+    'DEFAULT/rpc_response_timeout'             : value => $plugin_settings['rpc_response_timeout'];
   }
   }
 
   cinder_config {
-    "DEFAULT/default_volume_type"             : value => $default_volume_type
+    'DEFAULT/default_volume_type'             : value => $default_volume_type
   }~> Exec[cinder_api]
 
 exec {'cinder_api':

deployment_scripts/puppet/modules/kaminario/manifests/driver.pp (+29, -33)

@@ -1,39 +1,35 @@
 class kaminario::driver{
 
-file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario':
-        ensure => 'directory',
-        owner  => 'root',
-        group  => 'root',
-        mode   => '0755',}
+$source_directory = '/tmp/openstack-cinder-driver/source/kaminario'
+$target_directory = '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario'
+vcsrepo { '/tmp/openstack-cinder-driver':
+  ensure   => present,
+  provider => git,
+  source   => 'https://github.com/Kaminario/openstack-cinder-driver.git',
+  user     => 'root',
+  revision => 'Mitaka',
+  }
+file {$target_directory:
+  ensure => 'directory',
+  recurse => true,
+  source => "file:///${source_directory}",
+  }
 
-file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/__init__.py':
-        mode   => '0644',
-        owner  => root,
-        group  => root,
-        source => 'puppet:///modules/kaminario/__init__.py'}
+file {'/usr/lib/python2.7/dist-packages/cinder/tests/unit/volume/drivers/':
+  ensure => 'file',
+  recurse => true,
+  source => 'file:///tmp/openstack-cinder-driver/test',
+  }
 
-file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/kaminario_common.py':
-        mode   => '0644',
-        owner  => root,
-        group  => root,
-        source => 'puppet:///modules/kaminario/kaminario_common.py'}
-
-file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/kaminario_fc.py':
-        mode   => '0644',
-        owner  => root,
-        group  => root,
-        source => 'puppet:///modules/kaminario/kaminario_fc.py'}
-
-file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/kaminario_iscsi.py':
-        mode   => '0644',
-        owner  => root,
-        group  => root,
-        source => 'puppet:///modules/kaminario/kaminario_iscsi.py'}
-
-file { '/usr/lib/python2.7/dist-packages/cinder/exception.py':
-        mode   => '0644',
-        owner  => root,
-        group  => root,
-        source => 'puppet:///modules/kaminario/exception.py'}
+file { '/tmp/exception.sh':
+  source => 'puppet:///modules/kaminario/exception.sh',
+  recurse => true,
+  mode  => '0744',
+  notify => Exec['modify_exception'],
+  }
+exec { 'modify_exception':
+  command => '/tmp/exception.sh',
+  refreshonly => true,
+  }
 
 }

deployment_scripts/puppet/modules/kaminario/manifests/init.pp (+23, -23)

@@ -30,9 +30,9 @@ $plugin_settings = hiera('cinder_kaminario')
              num                    =>      $value
            }
    $minus1 = inline_template('<%= @value.to_i - 1 %>')
-    if "${minus1}" < '0' {
-
-   }  else {
+    if $minus1 < '0' {
+
+  }  else {
        recursion { "value-${minus1}":
            value => $minus1,
        }
@@ -44,7 +44,7 @@ $plugin_settings = hiera('cinder_kaminario')
 
 define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storage_password,$storage_ip,$num,$cinder_node,$enable_replication,$replication_ip,$replication_login,$replication_rpo,$replication_password,$enable_multipath,$suppress_logs,$filter_function,$oversubscription_ratio,$goodness_function) {
 
  $sec_name = section_name( $storage_ip , $backend_name )
-  $config_file = "/etc/cinder/cinder.conf"
+  $config_file = '/etc/cinder/cinder.conf'
  if $cinder_node == hiera(user_node_name) {
  if $add_backend == true {
 
@@ -56,58 +56,58 @@ define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storag
        setting              => 'enabled_backends',
        subsetting           => $sec_name,
        subsetting_separator => ',',
-   }->
+  }->
    cinder_config {
-        "$sec_name/volume_backend_name" : value => $backend_name;
-        "$sec_name/san_ip"              : value => $storage_ip;
-        "$sec_name/san_login"           : value => $storage_user;
-        "$sec_name/san_password"        : value => $storage_password;
-   }
+        "${sec_name}/volume_backend_name" : value => $backend_name;
+        "${sec_name}/san_ip"              : value => $storage_ip;
+        "${sec_name}/san_login"           : value => $storage_user;
+        "${sec_name}/san_password"        : value => $storage_password;
+  }
 
  if $storage_protocol == 'FC'{
    cinder_config {
-        "$sec_name/volume_driver"       : value => "cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver";
+        "${sec_name}/volume_driver"       : value => 'cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver';
    }
  }
  elsif $storage_protocol == 'ISCSI'{
    cinder_config {
-        "$sec_name/volume_driver"       : value => "cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver";
+        "${sec_name}/volume_driver"       : value => 'cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver';
    }
  }
    if $enable_replication == true {
    $replication_device = get_replication_device($replication_ip, $replication_login , $replication_password , $replication_rpo)
    cinder_config {
-        "$sec_name/replication_device"       : value => $replication_device;
+        "${sec_name}/replication_device"       : value => $replication_device;
    }
    }
- 
+
    if $enable_multipath == true {
    cinder_config {
-        "$sec_name/use_multipath_for_image_xfer"           : value => "True";
-        "$sec_name/enforce_multipath_for_image_xfer"       : value => "True";
-    }   
+        "${sec_name}/use_multipath_for_image_xfer"           : value => 'True';
+        "${sec_name}/enforce_multipath_for_image_xfer"       : value => 'True';
+    }
    }
    if $suppress_logs == true {
    cinder_config {
-        "$sec_name/suppress_requests_ssl_warnings"         : value => "True";
+        "${sec_name}/suppress_requests_ssl_warnings"         : value => 'True';
    }
    }
 
    if $filter_function != '' {
    cinder_config {
-        "$sec_name/filter_function"                        : value => $filter_function;
+        "${sec_name}/filter_function"                        : value => $filter_function;
    }
    }
 
    if $goodness_function != '' {
    cinder_config {
-        "$sec_name/goodness_function"                      : value => $goodness_function;
-    }   
+        "${sec_name}/goodness_function"                      : value => $goodness_function;
+    }
    }
-    
+
    if $oversubscription_ratio == true {
    cinder_config {
-        "$sec_name/auto_calc_max_oversubscription_ratio"   : value => "True";
+        "${sec_name}/auto_calc_max_oversubscription_ratio"   : value => 'True';
    }
    }
 }
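
For context, each invocation of the config define above writes one backend section into /etc/cinder/cinder.conf through cinder_config, keyed by the name returned from the plugin's section_name() function, and appends that name to enabled_backends via ini_subsetting. A minimal sketch of the kind of section that results, with a hypothetical backend name and placeholder credentials (written to a sample file rather than the live config):

    # Illustrative only: the kind of section the cinder_config resources above
    # produce for a single iSCSI backend. All values here are placeholders.
    import configparser

    section = 'K2_iscsi_backend'  # real name comes from section_name($storage_ip, $backend_name)
    cfg = configparser.ConfigParser()
    cfg[section] = {
        'volume_backend_name': 'K2_iscsi_backend',
        'san_ip': '10.0.0.5',
        'san_login': 'admin',
        'san_password': 'secret',
        'volume_driver':
            'cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver',
        # written only when the matching flags are enabled in the plugin settings:
        'use_multipath_for_image_xfer': 'True',
        'enforce_multipath_for_image_xfer': 'True',
    }
    with open('cinder.conf.sample', 'w') as sample:
        cfg.write(sample)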

deployment_scripts/puppet/modules/kaminario/manifests/krest.pp (+2, -0)

@@ -5,4 +5,6 @@ package { 'krest':
  ensure => installed,
  provider => pip,
  require => Package['python-pip'],}
+  package { 'git':
+  ensure => installed,}
 }
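
krest is the Kaminario REST SDK that the driver code above uses as its client (for example the self.client.search(...) calls in kaminario_fc.py and kaminario_iscsi.py), and the git package added here is what lets the vcsrepo resource in driver.pp clone the driver repository. A hypothetical post-install smoke test, with placeholder array address and credentials (the exact EndPoint signature should be checked against the installed krest version):

    # Hypothetical check that the pip-installed krest SDK is importable and can
    # reach a K2 array. Address and credentials are placeholders.
    import krest

    ep = krest.EndPoint('k2-array.example.com', 'admin', 'secret',
                        ssl_validate=False)
    print(ep.search('system/state'))  # same resource the iSCSI driver queries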

deployment_scripts/puppet/modules/kaminario/manifests/type.pp (+9, -9)

@@ -15,42 +15,42 @@ define recursion(
      type_name              =>      $plugin_settings["type_name_${value}"]
  }
    $minus1 = inline_template('<%= @value.to_i - 1 %>')
-    if "${minus1}" < '0' {
-
-   }  else {
+    if $minus1 < '0' {
+
+  }  else {
        recursion { "value-${minus1}":
            value => $minus1,
        }
-    }
+      }
 }
 }
 
 define kaminario_type ($create_type,$options,$backend_name,$type_name) {
 if $create_type == true {
 case $options {
-  "enable_replication_type": {
+  'enable_replication_type': {
    cinder_type {$type_name:
      ensure     => present,
      properties => ["volume_backend_name=${backend_name}",'kaminario:replication=enabled'],
    }
  }
-  "enable_dedup": {
+  'enable_dedup': {
    cinder_type {$type_name:
      ensure     => present,
      properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup'],
    }
  }
-  "replication_dedup": {
+  'replication_dedup': {
    cinder_type {$type_name:
      ensure     => present,
      properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup','kaminario:replication=enabled'],
    }
  }
-  "default": {
+  'default': {
    cinder_type {$type_name:
      ensure     => present,
      properties => ["volume_backend_name=${backend_name}"],
-   }
+  }
  }
 
 }

deployment_scripts/puppet/modules/multipath/manifests/init.pp (+13, -4)

@@ -1,12 +1,21 @@
 class multipath {
+
+include ::nova::params
+
 $multipath_packages = [ 'sg3-utils', 'multipath-tools' ]
 package { $multipath_packages: ensure => 'installed' }
 
 nova_config {
-'libvirt/iscsi_use_multipath' :   value => True,
-}~> Exec[cinder_volume]
+  'libvirt/iscsi_use_multipath' :   value => True,
+}
 
-exec {'cinder_volume':
-  command => '/usr/sbin/service nova-compute restart',}
+service { 'nova_compute':
+  ensure     => running,
+  name       => $::nova::params::compute_service_name,
+  enable     => true,
+  hasstatus  => true,
+  hasrestart => true,
+}
 
+Nova_config<||> ~> Service['nova-compute']
 }

deployment_tasks.yaml (+1, -1)

@@ -41,7 +41,7 @@
  type: puppet
  version: 2.1.0
  groups: [compute]
-  requires: [top-role-compute]
+  requires: [top-role-compute,enable_nova_compute_service]
  required_for: [deploy_end]
  parameters:
    puppet_manifest: puppet/manifests/cinder_multipath.pp
