Browse Source

Add Nailgun Converted serializers base code

This commit introduces the Nailgun converted serializers extension, which
allows usage of Fuel Mitaka LCM features in pre-Mitaka releases.
It essentially runs the old serializers and patches the result properly, so
that the LCM deployment engine can work with this serialized data.
tags/10.0.0b1
Vladimir Kuklin 2 years ago
parent
commit
caebcc64b7

+ 6
- 0
.coveragerc View File

@@ -0,0 +1,6 @@
1
+[run]
2
+branch = True
3
+source = converted_serializers
4
+
5
+[report]
6
+ignore_errors = True

+ 37
- 68
.gitignore View File

@@ -1,89 +1,58 @@
1
-# Byte-compiled / optimized / DLL files
2
-__pycache__/
3 1
 *.py[cod]
4
-*$py.class
5 2
 
6 3
 # C extensions
7 4
 *.so
8 5
 
9
-# Distribution / packaging
10
-.Python
11
-env/
12
-build/
13
-develop-eggs/
14
-dist/
15
-downloads/
16
-eggs/
17
-.eggs/
18
-lib/
19
-lib64/
20
-parts/
21
-sdist/
22
-var/
23
-*.egg-info/
6
+# Packages
7
+*.egg*
8
+*.egg-info
9
+dist
10
+build
11
+eggs
12
+parts
13
+bin
14
+var
15
+sdist
16
+develop-eggs
24 17
 .installed.cfg
25
-*.egg
26
-
27
-# PyInstaller
28
-#  Usually these files are written by a python script from a template
29
-#  before PyInstaller builds the exe, so as to inject date/other infos into it.
30
-*.manifest
31
-*.spec
18
+lib
19
+lib64
32 20
 
33 21
 # Installer logs
34 22
 pip-log.txt
35
-pip-delete-this-directory.txt
36 23
 
37 24
 # Unit test / coverage reports
38
-htmlcov/
39
-.tox/
40
-.coverage
41
-.coverage.*
42
-.cache
25
+cover/
26
+.coverage*
27
+!.coveragerc
28
+.tox
43 29
 nosetests.xml
44
-coverage.xml
45
-*,cover
46
-.hypothesis/
30
+.testrepository
31
+.venv
47 32
 
48 33
 # Translations
49 34
 *.mo
50
-*.pot
51
-
52
-# Django stuff:
53
-*.log
54
-local_settings.py
55
-
56
-# Flask stuff:
57
-instance/
58
-.webassets-cache
59
-
60
-# Scrapy stuff:
61
-.scrapy
62
-
63
-# Sphinx documentation
64
-docs/_build/
65
-
66
-# PyBuilder
67
-target/
68
-
69
-# IPython Notebook
70
-.ipynb_checkpoints
71 35
 
72
-# pyenv
73
-.python-version
36
+# Mr Developer
37
+.mr.developer.cfg
38
+.project
39
+.pydevproject
74 40
 
75
-# celery beat schedule file
76
-celerybeat-schedule
41
+# Complexity
42
+output/*.html
43
+output/*/index.html
77 44
 
78
-# dotenv
79
-.env
45
+# Sphinx
46
+doc/build
80 47
 
81
-# virtualenv
82
-venv/
83
-ENV/
48
+# pbr generates these
49
+AUTHORS
50
+ChangeLog
84 51
 
85
-# Spyder project settings
86
-.spyderproject
52
+# Editors
53
+*~
54
+.*.swp
55
+.*sw?
87 56
 
88
-# Rope project settings
89
-.ropeproject
57
+# Files created by releasenotes build
58
+extension.xml

+ 6
- 0
MANIFEST.in View File

@@ -0,0 +1,6 @@
1
+include AUTHORS
2
+include ChangeLog
3
+exclude .gitignore
4
+exclude .gitreview
5
+
6
+global-exclude *.pyc

+ 10
- 0
README.rst View File

@@ -0,0 +1,10 @@
1
+Fuel nailgun extenstion for converted serializers
2
+=================================================
3
+
4
+This extension for Nailgun provides conversion layer which triggers pre-Mitaka
5
+serializers to generate deployment data, so that pre-9.x clusters can leverage
6
+Fuel Mitaka LCM features
7
+
8
+Installation
9
+-----------
10
+Just install the package `fuel-nailgun-extension-converted-serializers`

+ 6
- 0
bindep.txt View File

@@ -0,0 +1,6 @@
1
+libpq-dev
2
+postgresql
3
+postgresql-client
4
+# We don't use these, but mysql-prep step is in template job
5
+mysql-client
6
+mysql-server

+ 24
- 0
conftest.py View File

@@ -0,0 +1,24 @@
1
+# coding: utf-8
2
+
3
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
4
+#    not use this file except in compliance with the License. You may obtain
5
+#    a copy of the License at
6
+#
7
+#         http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+#    Unless required by applicable law or agreed to in writing, software
10
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12
+#    License for the specific language governing permissions and limitations
13
+#    under the License.
14
+
15
+
16
def pytest_configure(config):
    """Recreate the nailgun database schema before the test session starts."""
    # Imported lazily so pytest can collect this conftest even when nailgun
    # is not importable at collection time.
    from nailgun import db

    db.dropdb()
    db.syncdb()
20
+
21
+
22
def pytest_unconfigure(config):
    """Drop the nailgun database schema once the test session is over."""
    from nailgun import db

    db.dropdb()

+ 0
- 0
converted_serializers/__init__.py View File


+ 72
- 0
converted_serializers/extension.py View File

@@ -0,0 +1,72 @@
1
+# -*- coding: utf-8 -*-
2
+
3
+#    Copyright 2015 Mirantis, Inc.
4
+#
5
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
6
+#    not use this file except in compliance with the License. You may obtain
7
+#    a copy of the License at
8
+#
9
+#         http://www.apache.org/licenses/LICENSE-2.0
10
+#
11
+#    Unless required by applicable law or agreed to in writing, software
12
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14
+#    License for the specific language governing permissions and limitations
15
+#    under the License.
16
+import logging
17
+
18
+from nailgun import extensions
19
+from nailgun import objects
20
+from nailgun.orchestrator.deployment_serializers import \
21
+    get_serializer_for_cluster
22
+
23
+
24
# Module-level logger named after this module's import path.
logger = logging.getLogger(__name__)
25
+
26
+
27
class ConvertPreLCMtoLCM(extensions.BasePipeline):
    """Pipeline converting pre-LCM serializer output for the LCM engine.

    For cluster releases without LCM support, the deployment data is
    regenerated with the release's original (pre-Mitaka) serializer and
    then patched so that the LCM deployment engine can consume it.
    """

    @classmethod
    def pre_process_data(cls, data, cluster, nodes, **kwargs):
        # Pass-through hook; subclasses may adjust data before serialization.
        return data

    @classmethod
    def post_process_data(cls, data, cluster, nodes, **kwargs):
        # Pass-through hook; subclasses may adjust data after serialization.
        return data

    @classmethod
    def serialize(cls, data, cluster, nodes, **kwargs):
        """Return `data` unchanged for LCM releases, otherwise re-serialize
        with the legacy serializer selected for this cluster."""
        if objects.Release.is_lcm_supported(cluster.release):
            return data
        legacy_serializer = get_serializer_for_cluster(cluster)()
        return legacy_serializer.serialize(cluster, nodes, **kwargs)

    @classmethod
    def process_deployment(cls, data, cluster, nodes, **kwargs):
        """Run the pre-process -> serialize -> post-process chain.

        Cluster-specific values from the incoming LCM-serialized `data`
        are copied into every node record, because task parameter
        interpolation (e.g. CLUSTER_ID) relies on them.
        """
        prepared = cls.pre_process_data(data, cluster, nodes, **kwargs)
        serialized = cls.serialize(prepared, cluster, nodes, **kwargs)
        processed = cls.post_process_data(serialized, cluster, nodes, **kwargs)
        # Copy the cluster section produced by the LCM serializer so task
        # parameter interpolation keeps working on the converted data.
        cluster_data = data[0]['cluster']
        for node_data in processed:
            node_data['cluster'] = cluster_data
        return processed

    @classmethod
    def process_provisioning(cls, data, cluster, nodes, **kwargs):
        # Provisioning data needs no conversion.
        return data
62
+
63
+
64
class ConvertedSerializersExtension(extensions.BaseExtension):
    """Nailgun extension registering the serializer-conversion pipeline."""

    name = 'converted_serializers'
    version = '0.0.1'
    description = "Serializers Conversion extension"
    # NOTE(review): weight presumably controls extension ordering — confirm
    # against BaseExtension before relying on it.
    weight = 100

    data_pipelines = [
        ConvertPreLCMtoLCM,
    ]

+ 0
- 0
converted_serializers/tests/__init__.py View File


+ 647
- 0
converted_serializers/tests/test_pipelines.py View File

@@ -0,0 +1,647 @@
1
+# -*- coding: utf-8 -*-
2
+
3
+#    Copyright 2015 Mirantis, Inc.
4
+#
5
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
6
+#    not use this file except in compliance with the License. You may obtain
7
+#    a copy of the License at
8
+#
9
+#         http://www.apache.org/licenses/LICENSE-2.0
10
+#
11
+#    Unless required by applicable law or agreed to in writing, software
12
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14
+#    License for the specific language governing permissions and limitations
15
+#    under the License.
16
+
17
+
18
+from copy import deepcopy
19
+import mock
20
+import six
21
+
22
+import nailgun
23
+
24
+from nailgun import consts
25
+from nailgun.db.sqlalchemy import models
26
+from nailgun import objects
27
+from nailgun import rpc
28
+
29
+from nailgun.orchestrator import deployment_serializers
30
+from nailgun.orchestrator.deployment_serializers import \
31
+    get_serializer_for_cluster
32
+from nailgun.orchestrator.neutron_serializers import \
33
+    NeutronNetworkDeploymentSerializer80
34
+from nailgun.orchestrator.neutron_serializers import \
35
+    NeutronNetworkTemplateSerializer80
36
+from nailgun.test.integration.test_orchestrator_serializer import \
37
+    BaseDeploymentSerializer
38
+from nailgun.test.integration.test_orchestrator_serializer import \
39
+    TestSerializeInterfaceDriversData
40
+from nailgun.test.integration.test_orchestrator_serializer_70 import \
41
+    TestDeploymentHASerializer70
42
+from nailgun.test.integration.test_orchestrator_serializer_80 import \
43
+    TestSerializer80Mixin
44
+
45
+
46
class TestSerializerWrapper(deployment_serializers.DeploymentLCMSerializer):
    """LCM serializer variant used by the tests below.

    Delegates whole-cluster serialization to ``serialize_for_lcm`` and
    network-provider selection to the 8.0 HA serializer.
    """

    def serialize(self, cluster, nodes, ignore_customized=False):
        return deployment_serializers.serialize_for_lcm(
            cluster, nodes, ignore_customized=ignore_customized)

    # NOTE(review): the first parameter is named `cls` but there is no
    # @classmethod decorator, so it is actually bound to the instance.
    def get_net_provider_serializer(cls, cluster):
        ha_serializer = deployment_serializers.DeploymentHASerializer80
        return ha_serializer.get_net_provider_serializer(cluster)
55
+
56
+
57
class TestSerializerConverter80To90MixIn(TestSerializer80Mixin):
    """Common settings for running 8.0 serializer tests through conversion."""

    env_version = "liberty-8.0"
    task_deploy = True
    is_propagate_task_deploy = True
    enforce_lcm = True

    @classmethod
    def create_serializer(cls, cluster):
        """Build the wrapper serializer; its single constructor argument is
        unused by these tests, hence the explicit None."""
        return TestSerializerWrapper(None)
67
+
68
+
69
class TestNetworkTemplateSerializer80MixIn(
    TestSerializerConverter80To90MixIn,
    BaseDeploymentSerializer
):
    """Network-template serializer tests run through the conversion layer."""

    legacy_serializer = NeutronNetworkDeploymentSerializer80
    template_serializer = NeutronNetworkTemplateSerializer80

    def setUp(self, *args):
        super(TestNetworkTemplateSerializer80MixIn, self).setUp()
        self.env.create(
            release_kwargs={'version': self.env_version},
            cluster_kwargs={
                'mode': consts.CLUSTER_MODES.ha_compact,
                'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
                'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan})
        self.net_template = self.env.read_fixtures(['network_template_80'])[0]
        self.cluster = self.env.clusters[-1]
        self.cluster.extensions = ['volume_manager', 'converted_serializers']
        self.serializer = self.create_serializer(self.cluster)

    def test_get_net_provider_serializer(self):
        """Legacy serializer is chosen without a template, template
        serializer once a template is configured."""
        net_config = self.cluster.network_config

        net_config.configuration_template = None
        chosen = self.serializer.get_net_provider_serializer(self.cluster)
        self.assertIs(chosen, self.legacy_serializer)

        net_config.configuration_template = self.net_template
        chosen = self.serializer.get_net_provider_serializer(self.cluster)
        self.assertIs(chosen, self.template_serializer)

    def test_baremetal_neutron_attrs(self):
        """Baremetal additions to the template reach the neutron attrs."""
        baremetal_template = deepcopy(
            self.net_template['adv_net_template']['default'])
        baremetal_template['network_assignments']['baremetal'] = {
            'ep': 'br-baremetal'}
        baremetal_template['templates_for_node_role']['controller'].append(
            'baremetal')
        baremetal_template['nic_mapping']['default']['if8'] = 'eth7'
        baremetal_template['network_scheme']['baremetal'] = {
            'endpoints': ['br-baremetal'],
            'transformations': [],
            'roles': {'baremetal': 'br-baremetal'}}
        self.cluster.network_config.configuration_template = {
            'adv_net_template': {'default': baremetal_template}, 'pk': 1}
        self._check_baremetal_neutron_attrs(self.cluster)

    def test_network_schemes_priorities(self):
        """Generated transformations follow the template-defined order."""
        expected = [
            {
                "action": "add-br",
                "name": "br-prv",
                "provider": "ovs"
            },
            {
                "action": "add-br",
                "name": "br-aux"
            },
            {
                "action": "add-patch",
                "bridges": [
                    "br-prv",
                    "br-aux"
                ],
                "provider": "ovs",
                "mtu": 65000
            },
            {
                "action": "add-port",
                "bridge": "br-aux",
                "name": "eth3.101"
            },
            {
                "action": "add-br",
                "name": "br-fw-admin"
            },
            {
                "action": "add-port",
                "bridge": "br-fw-admin",
                "name": "eth0"
            },
            {
                "action": "add-br",
                "name": "br-mgmt"
            },
            {
                "action": "add-port",
                "bridge": "br-mgmt",
                "name": "eth1.104"
            },
            {
                "action": "add-br",
                "name": "br-storage"
            },
            {
                "action": "add-port",
                "bridge": "br-storage",
                "name": "eth2"
            }
        ]

        objects.Cluster.set_network_template(self.cluster, self.net_template)

        node = self.env.create_nodes_w_interfaces_count(
            1, 8, roles=['compute', 'cinder'],
            cluster_id=self.cluster.id)[0]

        self.serializer = get_serializer_for_cluster(self.cluster)
        net_serializer = self.serializer.get_net_provider_serializer(
            self.cluster)

        nm = objects.Cluster.get_network_manager(self.cluster)
        network_scheme = net_serializer.generate_network_scheme(
            node, nm.get_node_networks(node))
        self.assertEqual(expected, network_scheme['transformations'])
190
+
191
+
192
class TestDeploymentTasksSerialization80MixIn(
    TestSerializerConverter80To90MixIn,
    BaseDeploymentSerializer
):
    """Checks which deployment tasks are (re)run when nodes are added."""

    # Tasks expected to rerun on already-deployed nodes.
    tasks_for_rerun = {"globals", "netconfig"}

    def setUp(self):
        super(TestDeploymentTasksSerialization80MixIn, self).setUp()
        self.env.create(
            release_kwargs={'version': self.env_version},
            cluster_kwargs={
                'mode': consts.CLUSTER_MODES.ha_compact,
                'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
                'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan,
                'status': consts.CLUSTER_STATUSES.operational},
            nodes_kwargs=[
                {'roles': ['controller'],
                 'status': consts.NODE_STATUSES.ready}]
        )

        self.cluster = self.env.clusters[-1]
        self.cluster.extensions = ['volume_manager', 'converted_serializers']
        if not self.task_deploy:
            self.env.disable_task_deploy(self.cluster)

    def add_node(self, role):
        """Add a pending node with `role` to the cluster."""
        return self.env.create_node(
            cluster_id=self.cluster.id,
            pending_roles=[role],
            pending_addition=True
        )

    def get_rpc_args(self):
        """Launch a deployment and return the args of the mocked RPC cast."""
        self.env.launch_deployment()
        args, kwargs = nailgun.task.manager.rpc.cast.call_args
        return args[1][1]['args']

    def check_add_node_for_task_deploy(self, rpc_message):
        """Task deploy: every real node gets the rerun tasks."""
        tasks_graph = rpc_message['tasks_graph']
        for node_id, tasks in six.iteritems(tasks_graph):
            if node_id is None or node_id == consts.MASTER_NODE_UID:
                # skip virtual node
                continue

            task_ids = {
                t['id'] for t in tasks
                if t['type'] != consts.ORCHESTRATOR_TASK_TYPES.skipped
            }
            # all tasks are run on all nodes
            self.assertTrue(self.tasks_for_rerun.issubset(task_ids))

    def check_add_compute_for_granular_deploy(self, new_node_uid, rpc_message):
        """Granular deploy: new node runs everything, others only reruns."""
        for node in rpc_message['deployment_info']:
            task_ids = {t['id'] for t in node['tasks']}
            if node['tasks'][0]['uids'] == [new_node_uid]:
                # all tasks are run on a new node
                self.assertTrue(
                    self.tasks_for_rerun.issubset(task_ids))
            else:
                # only selected tasks are run on a deployed node
                self.assertItemsEqual(self.tasks_for_rerun, task_ids)

    def check_add_controller_for_granular_deploy(self, rpc_message):
        """Granular deploy: adding a controller redeploys every node."""
        for node in rpc_message['deployment_info']:
            task_ids = {t['id'] for t in node['tasks']}
            # controller is redeployed when other one is added,
            # so all tasks are run on all nodes
            self.assertTrue(
                self.tasks_for_rerun.issubset(task_ids))

    @mock.patch('nailgun.rpc.cast')
    def test_add_compute(self, _):
        new_node = self.add_node('compute')
        rpc_deploy_message = self.get_rpc_args()
        if self.task_deploy:
            self.check_add_node_for_task_deploy(rpc_deploy_message)
        else:
            self.check_add_compute_for_granular_deploy(
                new_node.uid, rpc_deploy_message
            )

    @mock.patch('nailgun.rpc.cast')
    def test_add_controller(self, _):
        self.add_node('controller')
        rpc_deploy_message = self.get_rpc_args()

        if self.task_deploy:
            self.check_add_node_for_task_deploy(rpc_deploy_message)
        else:
            self.check_add_controller_for_granular_deploy(rpc_deploy_message)
282
+
283
+
284
class TestDeploymentAttributesSerialization80MixIn(
    TestSerializerConverter80To90MixIn,
    BaseDeploymentSerializer
):
    """Checks node/cluster attributes produced by the converted serializer."""

    def setUp(self):
        super(TestDeploymentAttributesSerialization80MixIn, self).setUp()
        self.cluster = self.env.create(
            release_kwargs={
                'version': self.env_version,
                'operating_system': consts.RELEASE_OS.ubuntu},
            cluster_kwargs={
                'mode': consts.CLUSTER_MODES.ha_compact,
                'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
                'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan})
        self.cluster_db = self.db.query(models.Cluster).get(self.cluster['id'])
        self.cluster.extensions = ['volume_manager', 'converted_serializers']
        self.serializer = self.create_serializer(self.cluster_db)

    def test_neutron_attrs(self):
        """Physnet/L2 settings appear in every serialized node."""
        self.env.create_node(
            cluster_id=self.cluster_db.id,
            roles=['controller'], primary_roles=['controller']
        )
        objects.Cluster.prepare_for_deployment(self.cluster_db)
        serialized_for_astute = self.serializer.serialize(
            self.cluster_db, self.cluster_db.nodes)
        for node in serialized_for_astute:
            self.assertEqual(
                {
                    "bridge": consts.DEFAULT_BRIDGES_NAMES.br_floating,
                    "vlan_range": None
                },
                node['quantum_settings']['L2']['phys_nets']['physnet1']
            )
            l2 = (node["quantum_settings"]["predefined_networks"]
                  [self.cluster_db.network_config.floating_name]["L2"])

            self.assertEqual("physnet1", l2["physnet"])
            self.assertEqual("flat", l2["network_type"])

    def test_baremetal_transformations(self):
        """Ironic enables the baremetal bridge/port/patch transformations."""
        self.env._set_additional_component(self.cluster_db, 'ironic', True)
        self.env.create_node(cluster_id=self.cluster_db.id,
                             roles=['primary-controller'])
        objects.Cluster.prepare_for_deployment(self.cluster_db)
        serialized_for_astute = self.serializer.serialize(
            self.cluster_db, self.cluster_db.nodes)
        for node in serialized_for_astute:
            if node['uid'] == 'master':
                continue
            transformations = node['network_scheme']['transformations']
            # BUGFIX: the original used filter(); on Python 3 that returns
            # an iterator, so len() / indexing below raised TypeError.
            # List comprehensions behave the same on both Python versions.
            baremetal_brs = [
                t for t in transformations
                if t.get('name') == consts.DEFAULT_BRIDGES_NAMES.br_baremetal]
            baremetal_ports = [
                t for t in transformations if t.get('name') == "eth0.104"]
            expected_patch = {
                'action': 'add-patch',
                'bridges': [consts.DEFAULT_BRIDGES_NAMES.br_ironic,
                            consts.DEFAULT_BRIDGES_NAMES.br_baremetal],
                'provider': 'ovs'}
            self.assertEqual(len(baremetal_brs), 1)
            self.assertEqual(len(baremetal_ports), 1)
            self.assertEqual(baremetal_ports[0]['bridge'],
                             consts.DEFAULT_BRIDGES_NAMES.br_baremetal)
            self.assertIn(expected_patch, transformations)

    def test_disks_attrs(self):
        """Volume layout computed for a node's disks reaches the facts."""
        disks = [
            {
                "model": "TOSHIBA MK1002TS",
                "name": "sda",
                "disk": "sda",
                "size": 1004886016
            },
        ]
        expected_node_volumes_hash = [
            {
                u'name': u'sda',
                u'bootable': True,
                u'extra': [],
                u'free_space': 330,
                u'volumes': [
                    {
                        u'type': u'boot',
                        u'size': 300
                    },
                    {
                        u'mount': u'/boot',
                        u'type': u'partition',
                        u'file_system': u'ext2',
                        u'name': u'Boot',
                        u'size': 200
                    },
                    {
                        u'type': u'lvm_meta_pool',
                        u'size': 64
                    },
                    {
                        u'vg': u'os',
                        u'type': u'pv',
                        u'lvm_meta_size': 64,
                        u'size': 394
                    },
                    {
                        u'vg': u'vm',
                        u'type': u'pv',
                        u'lvm_meta_size': 0,
                        u'size': 0
                    }
                ],
                u'type': u'disk',
                u'id': u'sda',
                u'size': 958
            },
            {
                u'_allocate_size': u'min',
                u'label': u'Base System',
                u'min_size': 19456,
                u'volumes': [
                    {
                        u'mount': u'/',
                        u'size': -3766,
                        u'type': u'lv',
                        u'name': u'root',
                        u'file_system': u'ext4'
                    },
                    {
                        u'mount': u'swap',
                        u'size': 4096,
                        u'type': u'lv',
                        u'name': u'swap',
                        u'file_system': u'swap'
                    }
                ],
                u'type': u'vg',
                u'id': u'os'
            },
            {
                u'_allocate_size': u'all',
                u'label': u'Virtual Storage',
                u'min_size': 5120,
                u'volumes': [
                    {
                        u'mount': u'/var/lib/nova',
                        u'size': 0,
                        u'type': u'lv',
                        u'name': u'nova',
                        u'file_system': u'xfs'
                    }
                ],
                u'type': u'vg',
                u'id': u'vm'
            }
        ]
        self.env.create_node(
            cluster_id=self.cluster_db.id,
            roles=['compute'],
            meta={"disks": disks},
        )
        objects.Cluster.prepare_for_deployment(self.cluster_db)
        serialized_for_astute = self.serializer.serialize(
            self.cluster_db, self.cluster_db.nodes)
        for node in serialized_for_astute:
            if node['uid'] == 'master':
                continue
            self.assertIn("node_volumes", node)
            self.assertItemsEqual(
                expected_node_volumes_hash, node["node_volumes"])

    def test_attributes_contains_plugins(self):
        """Only enabled plugins are listed in each node's facts."""
        self.env.create_plugin(
            cluster=self.cluster_db,
            name='plugin_1',
            attributes_metadata={'attributes': {'name': 'plugin_1'}},
            package_version='4.0.0',
            fuel_version=['8.0'])
        self.env.create_plugin(
            cluster=self.cluster_db,
            name='plugin_2',
            attributes_metadata={'attributes': {'name': 'plugin_2'}},
            package_version='4.0.0',
            fuel_version=['8.0'])
        self.env.create_plugin(
            cluster=self.cluster_db,
            enabled=False,
            name='plugin_3',
            attributes_metadata={'attributes': {'name': 'plugin_3'}},
            package_version='4.0.0',
            fuel_version=['8.0'])

        expected_plugins_list = ['plugin_1', 'plugin_2']
        self.env.create_node(
            cluster_id=self.cluster_db.id,
            roles=['compute']
        )
        objects.Cluster.prepare_for_deployment(self.cluster_db)
        serialized_for_astute = self.serializer.serialize(
            self.cluster_db, self.cluster_db.nodes)
        for node in serialized_for_astute:
            if node['uid'] == 'master':
                continue
            self.assertIn('plugins', node)
            self.assertItemsEqual(
                expected_plugins_list, node['plugins'])
            self.assertTrue(all(name in node for name
                                in expected_plugins_list))

    def test_common_attributes_contains_plugin_metadata(self):
        """Plugin attribute values and plugin_id surface in common attrs."""
        expected_value = 'check_value'
        plugin = self.env.create_plugin(
            cluster=self.cluster_db,
            name='test_plugin',
            package_version='4.0.0',
            fuel_version=['8.0'],
            attributes_metadata={
                'attributes': {
                    'config': {
                        'description': "Description",
                        'weight': 52,
                        'value': expected_value
                    }
                }
            }
        )
        attrs = self.serializer.get_common_attrs(self.cluster_db)
        self.assertIn('test_plugin', attrs)
        self.assertIn('metadata', attrs['test_plugin'])
        self.assertEqual(
            plugin.id, attrs['test_plugin']['metadata']['plugin_id']
        )
        self.assertEqual(expected_value, attrs['test_plugin']['config'])
516
+
517
+
518
class TestMultiNodeGroupsSerialization80MixIn(
    TestSerializerConverter80To90MixIn,
    BaseDeploymentSerializer
):
    """Route generation across multiple node groups (racks)."""

    def setUp(self):
        super(TestMultiNodeGroupsSerialization80MixIn, self).setUp()
        cluster = self.env.create(
            release_kwargs={'version': self.env_version},
            cluster_kwargs={
                'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
                'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan}
        )
        self.env.create_nodes_w_interfaces_count(
            nodes_count=3,
            if_count=2,
            roles=['controller', 'cinder'],
            pending_addition=True,
            cluster_id=cluster['id'])
        self.cluster_db = self.db.query(models.Cluster).get(cluster['id'])
        cluster.extensions = ['volume_manager', 'converted_serializers']
        self.serializer = self.create_serializer(cluster)

    def _add_node_group_with_node(self, cidr_start, node_address):
        """Create a node group with its networks and one compute node."""
        node_group = self.env.create_node_group(
            api=False, cluster_id=self.cluster_db.id,
            name='ng_' + cidr_start + '_' + str(node_address))

        with mock.patch.object(rpc, 'cast'):
            resp = self.env.setup_networks_for_nodegroup(
                cluster_id=self.cluster_db.id, node_group=node_group,
                cidr_start=cidr_start)
        self.assertEqual(resp.status_code, 200)

        # Drop the pending update_dnsmasq task so deployment is not blocked.
        self.db.query(models.Task).filter_by(
            name=consts.TASK_NAMES.update_dnsmasq
        ).delete(synchronize_session=False)

        self.env.create_nodes_w_interfaces_count(
            nodes_count=1,
            if_count=2,
            roles=['compute'],
            pending_addition=True,
            cluster_id=self.cluster_db.id,
            group_id=node_group.id,
            ip='{0}.9.{1}'.format(cidr_start, node_address))

    def _check_routes_count(self, count):
        """Assert every routed endpoint of every node has `count` routes."""
        objects.Cluster.prepare_for_deployment(self.cluster_db)
        facts = self.serializer.serialize(
            self.cluster_db, self.cluster_db.nodes)

        for node in facts:
            if node['uid'] == 'master':
                continue
            endpoints = node['network_scheme']['endpoints']
            for ep_name, ep_descr in six.iteritems(endpoints):
                if ep_descr['IP'] == 'none':
                    self.assertNotIn('routes', ep_descr)
                else:
                    self.assertEqual(len(ep_descr['routes']), count)

    def test_routes_with_no_shared_networks_2_nodegroups(self):
        self._add_node_group_with_node('199.99', 3)
        # all networks have different CIDRs
        self._check_routes_count(1)

    def test_routes_with_no_shared_networks_3_nodegroups(self):
        self._add_node_group_with_node('199.99', 3)
        self._add_node_group_with_node('199.77', 3)
        # all networks have different CIDRs
        self._check_routes_count(2)

    def test_routes_with_shared_networks_3_nodegroups(self):
        self._add_node_group_with_node('199.99', 3)
        self._add_node_group_with_node('199.99', 4)
        # networks in two racks have equal CIDRs
        self._check_routes_count(1)
595
+
596
+
597
class TestBlockDeviceDevicesSerialization80MixIn(
    TestSerializerConverter80To90MixIn,
    BaseDeploymentSerializer
):
    """cinder-block-device role gets an empty volumes layout."""

    def setUp(self):
        super(TestBlockDeviceDevicesSerialization80MixIn, self).setUp()
        self.cluster = self.env.create(
            release_kwargs={'version': self.env_version},
            cluster_kwargs={
                'mode': consts.CLUSTER_MODES.ha_compact,
                'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
                'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan})
        self.cluster_db = self.db.query(models.Cluster).get(self.cluster['id'])
        self.cluster.extensions = ['volume_manager', 'converted_serializers']
        self.serializer = self.create_serializer(self.cluster_db)

    def test_block_device_disks(self):
        self.env.create_node(
            cluster_id=self.cluster_db.id,
            roles=['cinder-block-device']
        )
        self.env.create_node(
            cluster_id=self.cluster_db.id,
            roles=['controller']
        )
        objects.Cluster.prepare_for_deployment(self.cluster_db)
        serialized_for_astute = self.serializer.serialize(
            self.cluster_db, self.cluster_db.nodes)
        for node in serialized_for_astute:
            if node['uid'] == 'master':
                continue
            self.assertIn("node_volumes", node)
            for node_volume in node["node_volumes"]:
                if node_volume["id"] == "cinder-block-device":
                    self.assertEqual(node_volume["volumes"], [])
                else:
                    self.assertNotEqual(node_volume["volumes"], [])
634
+
635
+
636
class TestSerializeInterfaceDriversData80MixIn(
    TestSerializerConverter80To90MixIn,
    TestSerializeInterfaceDriversData
):
    """Re-runs the interface-drivers tests through the conversion mixin."""
    pass
641
+
642
+
643
class TestDeploymentHASerializer80MixIn(
    TestSerializerConverter80To90MixIn,
    TestDeploymentHASerializer70
):
    """Re-runs the 7.0 HA serializer tests through the conversion mixin."""
    pass

+ 15
- 0
nailgun-test-settings.yaml View File

@@ -0,0 +1,15 @@
1
+DEVELOPMENT: 1
2
+DATABASE:
3
+  name: "openstack_citest"
4
+  engine: "postgresql"
5
+  host: "localhost"
6
+  port: "5432"
7
+  user: "openstack_citest"
8
+  passwd: "openstack_citest"
9
+API_LOG: "logs/api.log"
10
+APP_LOG: "logs/app.log"
11
+APP_LOGLEVEL: "ERROR"
12
+RPC_CONSUMER_LOG_PATH: "logs/receiverd.log"
13
+ASSASSIN_LOG_PATH: "logs/assassind.log"
14
+STATS_LOGS_PATH: "logs/"
15
+LCM_SERIALIZERS_CONCURRENCY_FACTOR: 1

+ 5
- 0
requirements.txt View File

@@ -0,0 +1,5 @@
1
+# The order of packages is significant, because pip processes them in the order
2
+# of appearance. Changing the order has an impact on the overall integration
3
+# process, which may cause wedges in the gate later.
4
+
5
+pbr>=1.6

+ 28
- 0
setup.cfg View File

@@ -0,0 +1,28 @@
1
+[metadata]
2
+name = fuel-nailgun-extension-converted-serializers
3
+summary = Converted serializers extension for Fuel
4
+description-file = README.rst
5
+author = Mirantis Inc.
6
+author-email = product@mirantis.com
7
+home-page = http://mirantis.com
8
+classifier =
9
+    Environment :: OpenStack
10
+    Intended Audience :: Information Technology
11
+    Intended Audience :: System Administrators
12
+    License :: OSI Approved :: Apache Software License
13
+    Operating System :: POSIX :: Linux
14
+    Programming Language :: Python
15
+    Programming Language :: Python :: 2
16
+    Programming Language :: Python :: 2.7
17
+    Programming Language :: Python :: 3
18
+    Programming Language :: Python :: 3.3
19
+    Programming Language :: Python :: 3.4
20
+
21
+[files]
22
+packages =
23
+    converted_serializers
24
+
25
+[entry_points]
26
+nailgun.extensions =
27
+    converted_serializers = converted_serializers.extension:ConvertedSerializersExtension
28
+

+ 29
- 0
setup.py View File

@@ -0,0 +1,29 @@
1
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

# All packaging metadata is delegated to pbr, which reads setup.cfg.
setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)

+ 38
- 0
specs/fuel-nailgun-extension-converted-serializers.spec View File

@@ -0,0 +1,38 @@
1
+Name:           fuel-nailgun-extension-converted-serializers
2
+Version:        10.0~b1
3
+Release:        1%{?dist}
4
+Summary:        Converted serializers extension for Fuel
5
+License:        Apache-2.0
6
+Url:            https://git.openstack.org/cgit/openstack/fuel-nailgun-extension-converted-serializers/
7
+Source0:        %{name}-%{version}.tar.gz
8
+BuildArch:      noarch
9
+
10
+BuildRequires:  python-devel
11
+BuildRequires:  python-pbr
12
+BuildRequires:  python-setuptools
13
+
14
+Requires:       fuel-nailgun
15
+Requires:       python-pbr
16
+
17
+%description
18
+Converted serializers extension for Fuel
19
+
20
+%prep
21
+%setup -q -c -n %{name}-%{version}
22
+
23
+%build
24
+export OSLO_PACKAGE_VERSION=%{version}
25
+%py2_build
26
+
27
+%install
28
+export OSLO_PACKAGE_VERSION=%{version}
29
+%py2_install
30
+
31
+%files
32
+%license LICENSE
33
+%{python2_sitelib}/converted_serializers
34
+%{python2_sitelib}/*.egg-info
35
+
36
+%changelog
37
+* Thu Sep 8 2016 Vladimir Kuklin <vkuklin@mirantis.com> - 10.0~b1-1
38
+- Initial package.

+ 6
- 0
test-requirements.txt View File

@@ -0,0 +1,6 @@
1
+# The order of packages is significant, because pip processes them in the order
2
+# of appearance. Changing the order has an impact on the overall integration
3
+# process, which may cause wedges in the gate later.
4
+
5
+hacking
6
+pytest

+ 38
- 0
tox.ini View File

@@ -0,0 +1,38 @@
1
+[tox]
2
+minversion = 2.0
3
+envlist = pep8,py27
4
+skipsdist = True
5
+
6
+[base]
7
+NAILGUN_REPO = git+https://github.com/openstack/fuel-web.git
8
+NAILGUN_CONFIG = {toxinidir}/nailgun-test-settings.yaml
9
+NAILGUN_BRANCH = {env:ZUUL_BRANCH:master}
10
+
11
+[testenv]
12
+deps = -r{toxinidir}/test-requirements.txt
13
+setenv = VIRTUAL_ENV={envdir}
14
+
15
+[testenv:py27]
16
+usedevelop = True
17
+deps = {[testenv]deps}
18
+       -r{toxinidir}/requirements.txt
19
+       -e{[base]NAILGUN_REPO}@{[base]NAILGUN_BRANCH}#egg=nailgun[test]&subdirectory=nailgun
20
+setenv = {[testenv]setenv}
21
+         NAILGUN_CONFIG={[base]NAILGUN_CONFIG}
22
+
23
+commands = py.test -v --junit-xml {toxinidir}/extension.xml {posargs}
24
+
25
+[testenv:pep8]
26
+commands = flake8 {posargs}
27
+
28
+[testenv:venv]
29
+commands = {posargs}
30
+
31
+[flake8]
32
+# E123, E125 skipped as they are invalid PEP-8.
33
+# H101 - Don't force author's name on TODOs
34
+# H304 is "No relative imports" error, required for extensions
35
+show-source = True
36
+ignore = E123,E125,H101,H304
37
+builtins = _
38
+exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build

Loading…
Cancel
Save