Do not allow deploying multinode in tests

Previously, tests created Releases with the 'multinode' mode allowed
for clusters by default. Now the allowed modes are taken from the
release fixture, and specific modes for releases are set explicitly
only in the places that require them.

Change-Id: If0c0d2bf8fc8a6c572d6284304e0615fd951e005
Related-Bug: #1428054
This commit is contained in:
Sebastian Kalinowski 2015-08-11 18:04:08 +02:00
parent 58c080206c
commit 25ee7cbea8
12 changed files with 215 additions and 118 deletions

View File

@ -36,6 +36,10 @@ class ClusterValidator(BasicValidator):
single_schema = cluster_schema.single_schema
collection_schema = cluster_schema.collection_schema
_blocked_for_update = (
'net_provider',
)
@classmethod
def _can_update_release(cls, curr_release, pend_release):
return any([
@ -121,7 +125,7 @@ class ClusterValidator(BasicValidator):
log_message=True
)
for k in ("net_provider",):
for k in cls._blocked_for_update:
if k in d and getattr(instance, k) != d[k]:
raise errors.InvalidData(
u"Changing '{0}' for environment is prohibited".format(k),
@ -140,10 +144,11 @@ class ClusterValidator(BasicValidator):
def _validate_mode(cls, data, release):
mode = data.get("mode")
if mode and mode not in release.modes:
modes_list = ', '.join(release.modes)
raise errors.InvalidData(
"Cannot deploy in {0} mode in current release."
" Need to be one of {1}".format(
mode, release.modes),
" Need to be one of: {1}".format(
mode, modes_list),
log_message=True
)

View File

@ -692,7 +692,15 @@ def upgrade_cluster_bond_settings():
}
for release_id, networks_db_meta in releases:
# NOTE(prmtl): Release.networks_metadata field is nullable, so it is
# possible that it will be empty
if not networks_db_meta:
continue
networks_meta = jsonutils.loads(networks_db_meta)
if 'bonding' not in networks_meta:
continue
db_bond_meta = networks_meta['bonding']['properties']
for bond_mode in new_bond_meta:
if bond_mode in db_bond_meta:

View File

@ -6,8 +6,6 @@
modes_metadata:
ha_compact:
description: "This configuration Deploys OpenStack ready for high availability (HA). Controller services are prepared for HA by setting up a base MySQL/Galera, RabbitMQ and HAProxy so that additional controllers can be deployed NOW, or scaled out LATER. 3 or more controllers are required for a true HA environment."
multinode:
description: "In this configuration the OpenStack controller is deployed separately from the compute and cinder nodes. This mode assumes the presence of 1 controller node and 1 or more compute/cinder nodes. You can add more nodes to scale your cloud later."
roles_metadata:
controller:
name: "Controller"

View File

@ -152,7 +152,6 @@ class EnvironmentManager(object):
'name': u"release_name_" + version,
'version': version,
'description': u"release_desc" + version,
'modes': ['ha_compact', 'multinode'],
})
if kwargs.get('deployment_tasks') is None:

View File

@ -61,7 +61,7 @@ class TestHandlers(BaseIntegrationTest):
cluster_db = self.env.clusters[0]
common_attrs = {
'deployment_mode': 'ha_compact',
'deployment_mode': consts.CLUSTER_MODES.ha_compact,
'management_vip': '192.168.0.1',
'management_vrouter_vip': '192.168.0.2',
@ -247,8 +247,9 @@ class TestHandlers(BaseIntegrationTest):
admin_net = self.env.network_manager.get_admin_network_group()
for n in sorted(self.env.nodes, key=lambda n: n.id):
udev_interfaces_mapping = ','.join([
'{0}_{1}'.format(i.mac, i.name) for i in n.interfaces])
udev_interfaces_mapping = ','.join(
['{0}_{1}'.format(iface.mac, iface.name)
for iface in n.interfaces])
pnd = {
'uid': n.uid,
'slave_name': objects.Node.get_slave_name(n),
@ -429,7 +430,7 @@ class TestHandlers(BaseIntegrationTest):
self.assertEqual(200, resp.status_code)
common_attrs = {
'deployment_mode': 'ha_compact',
'deployment_mode': consts.CLUSTER_MODES.ha_compact,
'management_vip': '192.168.0.1',
'management_vrouter_vip': '192.168.0.2',
@ -699,8 +700,9 @@ class TestHandlers(BaseIntegrationTest):
admin_net = self.env.network_manager.get_admin_network_group()
for n in sorted(self.env.nodes, key=lambda n: n.id):
udev_interfaces_mapping = ','.join([
'{0}_{1}'.format(i.mac, i.name) for i in n.interfaces])
udev_interfaces_mapping = ','.join(
['{0}_{1}'.format(iface.mac, iface.name)
for iface in n.interfaces])
pnd = {
'uid': n.uid,
@ -879,7 +881,7 @@ class TestHandlers(BaseIntegrationTest):
self.assertEqual(200, resp.status_code)
common_attrs = {
'deployment_mode': 'ha_compact',
'deployment_mode': consts.CLUSTER_MODES.ha_compact,
'management_vip': '192.168.0.1',
'management_vrouter_vip': '192.168.0.2',
@ -1167,8 +1169,9 @@ class TestHandlers(BaseIntegrationTest):
admin_net = self.env.network_manager.get_admin_network_group()
for n in sorted(self.env.nodes, key=lambda n: n.id):
udev_interfaces_mapping = ','.join([
'{0}_{1}'.format(i.mac, i.name) for i in n.interfaces])
udev_interfaces_mapping = ','.join(
['{0}_{1}'.format(iface.mac, iface.name)
for iface in n.interfaces])
pnd = {
'uid': n.uid,
@ -1407,7 +1410,7 @@ class TestHandlers(BaseIntegrationTest):
@patch('nailgun.rpc.cast')
def test_deploy_ha_neutron_gre_w_custom_public_ranges(self, mocked_rpc):
self.env.create(
cluster_kwargs={'mode': 'ha_compact',
cluster_kwargs={'mode': consts.CLUSTER_MODES.ha_compact,
'net_provider': 'neutron',
'net_segment_type': 'gre'},
nodes_kwargs=[{"pending_addition": True},
@ -1580,8 +1583,11 @@ class TestHandlers(BaseIntegrationTest):
# TODO(awoodward): Purge multinode
def test_occurs_error_not_enough_controllers_for_multinode(self):
self.env.create(
release_kwargs={
'modes': [consts.CLUSTER_MODES.multinode, ]
},
cluster_kwargs={
'mode': 'multinode'
'mode': consts.CLUSTER_MODES.multinode
},
nodes_kwargs=[
{'roles': ['compute'], 'pending_addition': True}])

View File

@ -16,6 +16,7 @@
from oslo_serialization import jsonutils
from nailgun import consts
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import Node
@ -237,3 +238,65 @@ class TestHandlers(BaseIntegrationTest):
self.assertEqual(resp.status_code, 200)
self.db.refresh(cluster)
self.assertEqual(long_name, cluster.name)
class TestClusterModes(BaseIntegrationTest):
    """Check that a cluster's 'mode' is validated against release.modes.

    The release fixtures below only allow 'ha_compact', so any attempt
    to create or update a cluster into 'multinode' mode must be rejected.
    """

    def test_fail_to_create_cluster_with_multinode_mode(self):
        # Release permits only ha_compact; POSTing a multinode cluster
        # must come back as a 400 validation error.
        release = self.env.create_release(
            version='2015-7.0',
            modes=[consts.CLUSTER_MODES.ha_compact],
        )
        cluster_data = {
            'name': 'CrazyFrog',
            'release_id': release.id,
            'mode': consts.CLUSTER_MODES.multinode,
        }
        resp = self.app.post(
            reverse('ClusterCollectionHandler'),
            jsonutils.dumps(cluster_data),
            headers=self.default_headers,
            expect_errors=True
        )
        self.check_wrong_response(resp)

    def check_wrong_response(self, resp):
        # Shared assertion helper: expects HTTP 400 carrying the
        # validator's "Cannot deploy ..." message.
        self.assertEqual(resp.status_code, 400)
        self.assertIn(
            'Cannot deploy in multinode mode in current release. '
            'Need to be one of',
            resp.json_body['message']
        )

    def test_update_cluster_to_wrong_mode(self):
        # Switching an existing cluster to a mode the release does not
        # allow must be rejected.
        update_resp = self._try_cluster_update(
            name='SadCrazyFrog',
            mode=consts.CLUSTER_MODES.multinode,
        )
        self.check_wrong_response(update_resp)

    def test_update_cluster_but_not_mode(self):
        # Updating unrelated attributes while leaving 'mode' untouched
        # must succeed.
        update_resp = self._try_cluster_update(
            name='HappyCrazyFrog',
        )
        self.assertEqual(update_resp.status_code, 200)

    def _try_cluster_update(self, **attrs_to_update):
        # Create a valid ha_compact cluster, then PUT the given
        # attributes and return the raw response for the caller to
        # assert on.
        release = self.env.create_release(
            version='2015-7.0',
            modes=[consts.CLUSTER_MODES.ha_compact],
        )
        create_resp = self.env.create_cluster(
            release_id=release.id,
            mode=consts.CLUSTER_MODES.ha_compact,
            api=True,
        )
        cluster_id = create_resp['id']
        return self.app.put(
            reverse('ClusterHandler', kwargs={'obj_id': cluster_id}),
            jsonutils.dumps(attrs_to_update),
            headers=self.default_headers,
            expect_errors=True
        )

View File

@ -14,8 +14,10 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
from random import randint
from nailgun import consts
from nailgun import objects
from nailgun.db.sqlalchemy.models import NetworkGroup
@ -38,7 +40,11 @@ class TestMellanox(OrchestratorSerializerTestBase):
def create_env(self, mode, mellanox=False, iser=False, iser_vlan=None):
# Create env
cluster = self.env.create(
release_kwargs={'version': self.env_version},
release_kwargs={
'version': self.env_version,
'modes': [consts.CLUSTER_MODES.ha_compact,
consts.CLUSTER_MODES.multinode],
},
cluster_kwargs={
'mode': mode,
'net_provider': 'neutron',
@ -52,7 +58,7 @@ class TestMellanox(OrchestratorSerializerTestBase):
)
self.cluster_id = cluster['id']
cluster_db = objects.Cluster.get_by_uid(self.cluster_id)
editable_attrs = self._make_data_copy(cluster_db.attributes.editable)
editable_attrs = copy.deepcopy(cluster_db.attributes.editable)
# Set Mellanox params
if mellanox:
@ -79,7 +85,8 @@ class TestMellanox(OrchestratorSerializerTestBase):
def test_serialize_mellanox_plugin_enabled(self):
# Serialize cluster
self.cluster = self.create_env('ha_compact', mellanox=True)
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact,
mellanox=True)
serialized_data = self.serializer.serialize(self.cluster,
self.cluster.nodes)
for data in serialized_data:
@ -112,9 +119,9 @@ class TestMellanox(OrchestratorSerializerTestBase):
def test_serialize_mellanox_iser_enabled_untagged(self):
# Serialize cluster
self.cluster = \
self.create_env('ha_compact', mellanox=True, iser=True,
iser_vlan=None)
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact,
mellanox=True, iser=True,
iser_vlan=None)
serialized_data = self.serializer.serialize(self.cluster,
self.cluster.nodes)
@ -142,9 +149,9 @@ class TestMellanox(OrchestratorSerializerTestBase):
vlan_name = 'vlan{0}'.format(vlan)
# Serialize cluster
self.cluster = \
self.create_env('ha_compact', mellanox=True, iser=True,
iser_vlan=vlan)
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact,
mellanox=True, iser=True,
iser_vlan=vlan)
serialized_data = self.serializer.serialize(self.cluster,
self.cluster.nodes)
@ -190,21 +197,21 @@ class TestMellanox(OrchestratorSerializerTestBase):
def test_serialize_kernel_params_using_mellanox_sriov_plugin(self):
self.check_mellanox_kernel_params(
mode='multinode',
mode=consts.CLUSTER_MODES.multinode,
mellanox=True,
iser=False,
)
def test_serialize_kernel_params_using_mellanox_iser(self):
self.check_mellanox_kernel_params(
mode='ha_compact',
mode=consts.CLUSTER_MODES.ha_compact,
mellanox=True,
iser=True,
)
def test_serialize_kernel_params_not_using_mellanox(self):
self.check_mellanox_kernel_params(
mode='ha_compact',
mode=consts.CLUSTER_MODES.ha_compact,
mellanox=False,
iser=False,
)

View File

@ -108,14 +108,6 @@ class OrchestratorSerializerTestBase(base.BaseIntegrationTest):
self.prepare_for_deployment(cluster.nodes)
return self.serializer.serialize(cluster, cluster.nodes)
def _make_data_copy(self, data_to_copy):
'''Sqalchemy doesn't track change on composite attribute
so we need to create fresh copy of it which will take all
needed modifications and will be assigned as new value
for that attribute
'''
return copy.deepcopy(data_to_copy)
def move_network(self, node_id, net_name, from_if, to_if):
resp = self.app.get(
reverse("NodeNICsHandler",
@ -183,7 +175,7 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNovaOrchestratorSerializer, self).setUp()
self.cluster = self.create_env('ha_compact')
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact)
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
def create_env(self, mode, network_manager='FlatDHCPManager'):
@ -458,7 +450,7 @@ class TestNovaNetworkOrchestratorSerializer61(OrchestratorSerializerTestBase):
def create_env(self, manager, nodes_count=3, ctrl_count=1, nic_count=2):
cluster = self.env.create(
release_kwargs={'version': self.env_version},
cluster_kwargs={'mode': 'ha_compact'}
cluster_kwargs={'mode': consts.CLUSTER_MODES.ha_compact}
)
data = {'networking_parameters': {'net_manager': manager}}
@ -723,7 +715,7 @@ class TestNeutronOrchestratorSerializer61(OrchestratorSerializerTestBase):
cluster = self.env.create(
release_kwargs={'version': self.env_version},
cluster_kwargs={
'mode': 'ha_compact',
'mode': consts.CLUSTER_MODES.ha_compact,
'net_provider': 'neutron',
'net_segment_type': segment_type}
)
@ -1226,7 +1218,7 @@ class TestNovaOrchestratorHASerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNovaOrchestratorHASerializer, self).setUp()
self.cluster = self.create_env('ha_compact')
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact)
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
def create_env(self, mode):
@ -1532,7 +1524,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNeutronOrchestratorSerializer, self).setUp()
self.cluster = self.create_env('ha_compact')
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact)
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
def create_env(self, mode, segment_type='vlan'):
@ -1563,7 +1555,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
def serialize_env_w_version(self, version):
self.new_env_release_version = version
cluster = self.create_env(mode='ha_compact')
cluster = self.create_env(mode=consts.CLUSTER_MODES.ha_compact)
serializer = get_serializer_for_cluster(cluster)
return serializer(AstuteGraph(cluster)).serialize(
cluster, cluster.nodes)
@ -1828,7 +1820,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
self.assertEqual(len(need_public_nodes_count), 4 if assign else 1)
def test_neutron_l3_gateway(self):
cluster = self.create_env('ha_compact', 'gre')
cluster = self.create_env(consts.CLUSTER_MODES.ha_compact, 'gre')
test_gateway = "192.168.111.255"
public_ng = self.db.query(NetworkGroup).filter(
NetworkGroup.name == 'public'
@ -1849,7 +1841,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
)
def test_gre_segmentation(self):
cluster = self.create_env('ha_compact', 'gre')
cluster = self.create_env(consts.CLUSTER_MODES.ha_compact, 'gre')
facts = self.serializer.serialize(cluster, cluster.nodes)
for fact in facts:
@ -1864,7 +1856,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
self.new_env_release_version = '2015.1.0-7.0'
self.prepare_for_deployment = \
objects.NodeCollection.prepare_for_deployment
cluster = self.create_env('ha_compact', 'tun')
cluster = self.create_env(consts.CLUSTER_MODES.ha_compact, 'tun')
facts = self.serializer.serialize(cluster, cluster.nodes)
for fact in facts:
@ -1977,7 +1969,7 @@ class TestVlanSplinters(OrchestratorSerializerTestBase):
def test_vlan_splinters_disabled(self):
cluster = self._create_cluster_for_vlan_splinters()
cluster_id = cluster.id
editable_attrs = self._make_data_copy(cluster.attributes.editable)
editable_attrs = copy.deepcopy(cluster.attributes.editable)
# Remove 'vlan_splinters' attribute and check results.
@ -1999,7 +1991,7 @@ class TestVlanSplinters(OrchestratorSerializerTestBase):
# Set 'vlan_splinters' to 'some_text' and check results.
editable_attrs = self._make_data_copy(cluster.attributes.editable)
editable_attrs = copy.deepcopy(cluster.attributes.editable)
editable_attrs['vlan_splinters'] = {'vswitch': {'value': 'some_text'}}
editable_attrs['vlan_splinters']['metadata'] = {'enabled': True}
cluster.attributes.editable = editable_attrs
@ -2043,7 +2035,7 @@ class TestVlanSplinters(OrchestratorSerializerTestBase):
def test_kernel_lt_vlan_splinters(self):
cluster = self._create_cluster_for_vlan_splinters()
cluster_id = cluster.id
editable_attrs = self._make_data_copy(cluster.attributes.editable)
editable_attrs = copy.deepcopy(cluster.attributes.editable)
# value of kernel-ml should end up with vlan_splinters = off
editable_attrs['vlan_splinters']['metadata']['enabled'] = True
@ -2067,7 +2059,7 @@ class TestVlanSplinters(OrchestratorSerializerTestBase):
def test_hard_vlan_splinters_in_gre(self):
cluster = self._create_cluster_for_vlan_splinters('gre')
editable_attrs = self._make_data_copy(cluster.attributes.editable)
editable_attrs = copy.deepcopy(cluster.attributes.editable)
editable_attrs['vlan_splinters']['metadata']['enabled'] = True
editable_attrs['vlan_splinters']['vswitch']['value'] = 'hard'
@ -2094,7 +2086,7 @@ class TestVlanSplinters(OrchestratorSerializerTestBase):
def test_hard_vlan_splinters_in_vlan(self):
cluster = self._create_cluster_for_vlan_splinters('vlan')
editable_attrs = self._make_data_copy(cluster.attributes.editable)
editable_attrs = copy.deepcopy(cluster.attributes.editable)
editable_attrs['vlan_splinters']['metadata']['enabled'] = True
editable_attrs['vlan_splinters']['vswitch']['value'] = 'hard'
@ -2125,7 +2117,7 @@ class TestVlanSplinters(OrchestratorSerializerTestBase):
def test_soft_vlan_splinters_in_vlan(self):
cluster = self._create_cluster_for_vlan_splinters('vlan')
editable_attrs = self._make_data_copy(cluster.attributes.editable)
editable_attrs = copy.deepcopy(cluster.attributes.editable)
editable_attrs['vlan_splinters']['metadata']['enabled'] = True
editable_attrs['vlan_splinters']['vswitch']['value'] = 'soft'
@ -2149,7 +2141,7 @@ class TestNeutronOrchestratorHASerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNeutronOrchestratorHASerializer, self).setUp()
self.cluster = self.create_env('ha_compact')
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact)
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
def create_env(self, mode):
@ -2287,9 +2279,12 @@ class TestCephOsdImageOrchestratorSerialize(OrchestratorSerializerTestBase):
def setUp(self):
super(TestCephOsdImageOrchestratorSerialize, self).setUp()
cluster = self.env.create(
release_kwargs={'version': self.env_version},
release_kwargs={
'version': self.env_version,
'modes': [consts.CLUSTER_MODES.ha_compact,
consts.CLUSTER_MODES.multinode]},
cluster_kwargs={
'mode': 'multinode'},
'mode': consts.CLUSTER_MODES.multinode},
nodes_kwargs=[
{'roles': ['controller', 'ceph-osd']}])
self.app.patch(
@ -2316,9 +2311,12 @@ class TestCephPgNumOrchestratorSerialize(OrchestratorSerializerTestBase):
def create_env(self, nodes, osd_pool_size='2'):
cluster = self.env.create(
release_kwargs={'version': self.env_version},
release_kwargs={
'version': self.env_version,
'modes': [consts.CLUSTER_MODES.ha_compact,
consts.CLUSTER_MODES.multinode]},
cluster_kwargs={
'mode': 'multinode'},
'mode': consts.CLUSTER_MODES.multinode},
nodes_kwargs=nodes)
self.app.patch(
reverse(
@ -2367,7 +2365,7 @@ class TestMongoNodesSerialization(OrchestratorSerializerTestBase):
cluster = self.env.create(
release_kwargs={'version': self.env_version},
cluster_kwargs={
'mode': 'ha_compact',
'mode': consts.CLUSTER_MODES.ha_compact,
'network_manager': 'FlatDHCPManager'
},
nodes_kwargs=[
@ -2403,7 +2401,7 @@ class TestNSXOrchestratorSerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNSXOrchestratorSerializer, self).setUp()
self.cluster = self.create_env('ha_compact')
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact)
def create_env(self, mode, segment_type='gre'):
cluster = self.env.create(
@ -2420,7 +2418,7 @@ class TestNSXOrchestratorSerializer(OrchestratorSerializerTestBase):
)
cluster_db = self.db.query(Cluster).get(cluster['id'])
editable_attrs = self._make_data_copy(cluster_db.attributes.editable)
editable_attrs = copy.deepcopy(cluster_db.attributes.editable)
nsx_attrs = editable_attrs.setdefault('nsx_plugin', {})
nsx_attrs.setdefault('metadata', {})['enabled'] = True
cluster_db.attributes.editable = editable_attrs
@ -2457,8 +2455,17 @@ class BaseDeploymentSerializer(base.BaseIntegrationTest):
prepare_for_deployment = objects.NodeCollection.prepare_for_6_1_deployment
def create_env(self, mode):
if mode == consts.CLUSTER_MODES.multinode:
available_modes = [consts.CLUSTER_MODES.ha_compact,
consts.CLUSTER_MODES.multinode]
else:
available_modes = [consts.CLUSTER_MODES.ha_compact, ]
return self.env.create(
release_kwargs={'version': self.env_version},
release_kwargs={
'version': self.env_version,
'modes': available_modes,
},
cluster_kwargs={
'mode': mode,
'net_provider': 'neutron',
@ -2601,7 +2608,7 @@ class TestDeploymentMultinodeSerializer61(BaseDeploymentSerializer):
def setUp(self):
super(TestDeploymentMultinodeSerializer61, self).setUp()
self.cluster = self.create_env('multinode')
self.cluster = self.create_env(consts.CLUSTER_MODES.multinode)
self.prepare_for_deployment(self.env.nodes)
self.serializer = DeploymentMultinodeSerializer61(self.cluster)
self.vm_data = self.env.read_fixtures(['vmware_attributes'])
@ -2623,7 +2630,7 @@ class TestDeploymentAttributesSerialization61(BaseDeploymentSerializer):
def setUp(self):
super(TestDeploymentAttributesSerialization61, self).setUp()
self.cluster = self.create_env('ha_compact')
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact)
self.prepare_for_deployment(self.env.nodes, 'gre')
self.serializer = DeploymentHASerializer61(self.cluster)
@ -2670,7 +2677,7 @@ class TestDeploymentHASerializer61(BaseDeploymentSerializer):
def setUp(self):
super(TestDeploymentHASerializer61, self).setUp()
self.cluster = self.create_env('ha_compact')
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact)
self.prepare_for_deployment(self.env.nodes, 'gre')
self.serializer = DeploymentHASerializer61(self.cluster)
self.vm_data = self.env.read_fixtures(['vmware_attributes'])
@ -2842,7 +2849,7 @@ class TestDeploymentHASerializer50(BaseDeploymentSerializer):
def setUp(self):
super(TestDeploymentHASerializer50, self).setUp()
self.cluster = self.create_env('ha_compact')
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact)
objects.NodeCollection.prepare_for_lt_6_1_deployment(self.env.nodes)
self.serializer = DeploymentHASerializer50(self.cluster)
@ -2856,7 +2863,7 @@ class TestDeploymentMultinodeSerializer50(BaseDeploymentSerializer):
def setUp(self):
super(TestDeploymentMultinodeSerializer50, self).setUp()
self.cluster = self.create_env('multinode')
self.cluster = self.create_env(consts.CLUSTER_MODES.multinode)
objects.NodeCollection.prepare_for_lt_6_1_deployment(self.env.nodes)
self.serializer = DeploymentMultinodeSerializer50(self.cluster)

View File

@ -17,6 +17,7 @@
from mock import patch
from oslo_serialization import jsonutils
from nailgun import consts
from nailgun.extensions.volume_manager.extension import VolumeManagerExtension
from nailgun.extensions.volume_manager import manager
from nailgun.test import base
@ -67,7 +68,10 @@ class TestVolumeManagerGlancePartition(base.BaseIntegrationTest):
"""
cluster = self.env.create(
cluster_kwargs={
'mode': 'multinode'},
'mode': consts.CLUSTER_MODES.multinode},
release_kwargs={
'modes': [consts.CLUSTER_MODES.ha_compact,
consts.CLUSTER_MODES.multinode]},
nodes_kwargs=[
{'roles': ['controller', 'ceph-osd']}])
self.app.patch(
@ -85,7 +89,10 @@ class TestVolumeManagerGlancePartition(base.BaseIntegrationTest):
def test_glance_partition_without_ceph_osd(self):
self.env.create(
cluster_kwargs={
'mode': 'multinode'},
'mode': consts.CLUSTER_MODES.multinode},
release_kwargs={
'modes': [consts.CLUSTER_MODES.ha_compact,
consts.CLUSTER_MODES.multinode]},
nodes_kwargs=[
{'roles': ['controller']}])
volumes = self.env.nodes[0].volume_manager.gen_volumes_info()

View File

@ -19,6 +19,7 @@ import mock
import six
import yaml
from nailgun import consts
from nailgun.db import db
from nailgun.objects import Plugin
from nailgun.plugins import adapters
@ -40,10 +41,12 @@ class TestPluginBase(base.BaseTestCase):
package_version=self.package_version)
self.plugin = Plugin.create(self.plugin_metadata)
self.env.create(
cluster_kwargs={'mode': 'multinode'},
cluster_kwargs={'mode': consts.CLUSTER_MODES.multinode},
release_kwargs={
'version': '2014.2-6.0',
'operating_system': 'Ubuntu'})
'operating_system': 'Ubuntu',
'modes': [consts.CLUSTER_MODES.multinode,
consts.CLUSTER_MODES.ha_compact]})
self.cluster = self.env.clusters[0]
self.plugin_adapter = adapters.wrap_plugin(self.plugin)
self.env_config = self.env.get_default_plugin_env_config()
@ -269,41 +272,41 @@ class TestClusterCompatiblityValidation(base.BaseTestCase):
def test_validation_ubuntu_ha(self):
self.assertTrue(self.validate_with_cluster(
os='Ubuntu',
mode='ha_compact',
mode=consts.CLUSTER_MODES.ha_compact,
version='2014.2-6.0'))
def test_plugin_provided_ha_compact(self):
self.assertTrue(self.validate_with_cluster(
os='Ubuntu',
mode='ha_compact',
mode=consts.CLUSTER_MODES.ha_compact,
version='2014.2-6.0'))
def test_not_existent_os(self):
self.assertFalse(self.validate_with_cluster(
os='Centos',
mode='multinode',
mode=consts.CLUSTER_MODES.multinode,
version='2014.2-6.0'))
def test_version_fuel_mismatch(self):
self.assertFalse(self.validate_with_cluster(
os='Ubuntu',
mode='ha_compact',
mode=consts.CLUSTER_MODES.ha_compact,
version='2014.2-6.1'))
def test_version_os_mismatch(self):
self.assertFalse(self.validate_with_cluster(
os='Ubuntu',
mode='ha_compact',
mode=consts.CLUSTER_MODES.ha_compact,
version='2014.3-6.1'))
def test_validation_centos_multinode(self):
self.assertFalse(self.validate_with_cluster(
os='Ubuntu',
mode='multinode',
mode=consts.CLUSTER_MODES.multinode,
version='2014.2-6.0'))
def test_validation_centos_different_minor_version(self):
self.assertTrue(self.validate_with_cluster(
os='Ubuntu',
mode='ha_compact',
mode=consts.CLUSTER_MODES.ha_compact,
version='2014.2.99-6.0.99'))

View File

@ -17,7 +17,7 @@ from contextlib import contextmanager
import six
from nailgun.consts import NODE_STATUSES
from nailgun import consts
from nailgun import objects
from nailgun.test import base
@ -39,15 +39,15 @@ class BasePrimaryRolesAssignmentTestCase(base.BaseTestCase):
def test_primary_controllers_assigned_for_pendings_roles(self):
self.env.create(
cluster_kwargs={'mode': 'ha_compact'},
cluster_kwargs={'mode': consts.CLUSTER_MODES.ha_compact},
release_kwargs={'version': '2014.2-6.0',
'operating_system': 'Ubuntu'},
nodes_kwargs=[
{'pending_roles': [self.role_name],
'status': NODE_STATUSES.discover,
'status': consts.NODE_STATUSES.discover,
'pending_addition': True},
{'pending_roles': [self.role_name],
'status': NODE_STATUSES.discover,
'status': consts.NODE_STATUSES.discover,
'pending_addition': True}])
cluster = self.env.clusters[0]
objects.Cluster.set_primary_roles(cluster, cluster.nodes)
@ -60,22 +60,22 @@ class BasePrimaryRolesAssignmentTestCase(base.BaseTestCase):
def test_primary_controller_assigned_for_ready_node(self):
self.env.create(
cluster_kwargs={'mode': 'ha_compact'},
cluster_kwargs={'mode': consts.CLUSTER_MODES.ha_compact},
release_kwargs={'version': '2014.2-6.0',
'operating_system': 'Ubuntu'},
nodes_kwargs=[
{'pending_roles': [self.role_name],
'status': NODE_STATUSES.discover,
'status': consts.NODE_STATUSES.discover,
'pending_addition': True},
{'roles': [self.role_name],
'status': NODE_STATUSES.ready,
'status': consts.NODE_STATUSES.ready,
'pending_addition': True}])
cluster = self.env.clusters[0]
objects.Cluster.set_primary_roles(cluster, cluster.nodes)
# primary assigned to node with ready status
nodes = sorted(cluster.nodes, key=lambda node: node.id)
ready_node = next(n for n in cluster.nodes
if n.status == NODE_STATUSES.ready)
if n.status == consts.NODE_STATUSES.ready)
self.assertEqual(nodes[1], ready_node)
self.assertEqual(
objects.Node.all_roles(nodes[1]), [self.primary_role_name])
@ -85,15 +85,17 @@ class BasePrimaryRolesAssignmentTestCase(base.BaseTestCase):
def test_primary_assignment_multinode(self):
"""Primary should not be assigned in multinode env."""
self.env.create(
cluster_kwargs={'mode': 'multinode'},
cluster_kwargs={'mode': consts.CLUSTER_MODES.multinode},
release_kwargs={'version': '2014.2-6.0',
'operating_system': 'Ubuntu'},
'operating_system': 'Ubuntu',
'modes': [consts.CLUSTER_MODES.ha_compact,
consts.CLUSTER_MODES.multinode]},
nodes_kwargs=[
{'pending_roles': [self.role_name],
'status': NODE_STATUSES.discover,
'status': consts.NODE_STATUSES.discover,
'pending_addition': True},
{'roles': [self.role_name],
'status': NODE_STATUSES.ready,
'status': consts.NODE_STATUSES.ready,
'pending_addition': True}])
cluster = self.env.clusters[0]
objects.Cluster.set_primary_roles(cluster, cluster.nodes)
@ -104,12 +106,12 @@ class BasePrimaryRolesAssignmentTestCase(base.BaseTestCase):
def test_primary_not_assigned_to_pending_deletion(self):
self.env.create(
cluster_kwargs={'mode': 'ha_compact'},
cluster_kwargs={'mode': consts.CLUSTER_MODES.ha_compact},
release_kwargs={'version': '2014.2-6.0',
'operating_system': 'Ubuntu'},
nodes_kwargs=[
{'roles': [self.role_name],
'status': NODE_STATUSES.ready,
'status': consts.NODE_STATUSES.ready,
'pending_deletion': True}])
cluster = self.env.clusters[0]
objects.Cluster.set_primary_roles(cluster, cluster.nodes)
@ -119,15 +121,15 @@ class BasePrimaryRolesAssignmentTestCase(base.BaseTestCase):
@contextmanager
def assert_node_reassigned(self):
self.env.create(
cluster_kwargs={'mode': 'ha_compact'},
cluster_kwargs={'mode': consts.CLUSTER_MODES.ha_compact},
release_kwargs={'version': '2014.2-6.0',
'operating_system': 'Ubuntu'},
nodes_kwargs=[
{'pending_roles': [self.role_name],
'status': NODE_STATUSES.discover,
'status': consts.NODE_STATUSES.discover,
'pending_addition': True},
{'roles': [self.role_name],
'status': NODE_STATUSES.ready,
'status': consts.NODE_STATUSES.ready,
'pending_addition': True}])
cluster = self.env.clusters[0]
objects.Cluster.set_primary_roles(cluster, cluster.nodes)

View File

@ -15,17 +15,24 @@
# under the License.
from nailgun import consts
from nailgun.task import helpers
from nailgun.test import base
class TestClusterRedeploymentScenario(base.BaseTestCase):
def test_cluster_deployed_with_computes(self):
self.env.create(
cluster_kwargs={'mode': 'multinode'},
def create_env(self, nodes_kwargs):
return self.env.create(
cluster_kwargs={'mode': consts.CLUSTER_MODES.multinode},
release_kwargs={'version': '2014.2-6.0',
'operating_system': 'Ubuntu'},
'operating_system': 'Ubuntu',
'modes': [consts.CLUSTER_MODES.ha_compact,
consts.CLUSTER_MODES.multinode]},
nodes_kwargs=nodes_kwargs)
def test_cluster_deployed_with_computes(self):
self.create_env(
nodes_kwargs=[
{'pending_roles': ['controller'],
'status': 'discover',
@ -37,10 +44,7 @@ class TestClusterRedeploymentScenario(base.BaseTestCase):
self.assertEqual(cluster.nodes, nodes)
def test_cluster_deployed_with_cinder(self):
self.env.create(
cluster_kwargs={'mode': 'multinode'},
release_kwargs={'version': '2014.2-6.0',
'operating_system': 'Ubuntu'},
self.create_env(
nodes_kwargs=[
{'pending_roles': ['controller'],
'status': 'discover',
@ -52,10 +56,7 @@ class TestClusterRedeploymentScenario(base.BaseTestCase):
self.assertEqual(cluster.nodes, nodes)
def test_ceph_osd_is_not_affected(self):
self.env.create(
cluster_kwargs={'mode': 'multinode'},
release_kwargs={'version': '2014.2-6.0',
'operating_system': 'Ubuntu'},
self.create_env(
nodes_kwargs=[
{'pending_roles': ['controller'],
'status': 'discover',
@ -69,10 +70,7 @@ class TestClusterRedeploymentScenario(base.BaseTestCase):
self.assertEqual(nodes[0].pending_roles, ['controller'])
def test_cinder_is_not_affected_when_add_compute(self):
self.env.create(
cluster_kwargs={'mode': 'multinode'},
release_kwargs={'version': '2014.2-6.0',
'operating_system': 'Ubuntu'},
self.create_env(
nodes_kwargs=[
{'roles': ['controller'],
'status': 'ready'},
@ -88,10 +86,7 @@ class TestClusterRedeploymentScenario(base.BaseTestCase):
self.assertEqual(nodes[0].pending_roles, ['compute'])
def test_controllers_redeployed_if_ceph_added(self):
self.env.create(
cluster_kwargs={'mode': 'multinode'},
release_kwargs={'version': '2014.2-6.0',
'operating_system': 'Ubuntu'},
self.create_env(
nodes_kwargs=[
{'roles': ['controller'],
'status': 'ready'},
@ -104,10 +99,7 @@ class TestClusterRedeploymentScenario(base.BaseTestCase):
self.assertEqual(sorted(cluster.nodes), sorted(nodes))
def test_controllers_not_redeployed_if_ceph_previously_in_cluster(self):
self.env.create(
cluster_kwargs={'mode': 'multinode'},
release_kwargs={'version': '2014.2-6.0',
'operating_system': 'Ubuntu'},
self.create_env(
nodes_kwargs=[
{'roles': ['controller'],
'status': 'ready'},