ng-4: Adapt cluster object
This commit removes the fields node_addresses, master_addresses, node_count
and master_count from the cluster object, since this information will now be
stored in the nodegroups. At the same time, it provides a way to adapt
existing clusters to the new schema.

story: 2005266
Change-Id: Iaf2cef3cc50b956c9b6d7bae13dbb716ae54eaf7
This commit is contained in:
parent
fb82777983
commit
3f80cbab06
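
The shape of the change: instead of persisting node/master counts and addresses on the cluster row itself, the cluster now derives them from its nodegroups. A minimal, illustrative sketch of the new access pattern (plain Python, not Magnum code; the class names here are made up):

    # Illustrative sketch only -- mimics how the reworked Cluster object
    # derives its counts from nodegroups instead of storing them.
    class FakeNodeGroup:
        def __init__(self, role, node_count, node_addresses):
            self.role = role
            self.node_count = node_count
            self.node_addresses = node_addresses

    class FakeCluster:
        def __init__(self, nodegroups):
            self.nodegroups = nodegroups

        @property
        def node_count(self):
            # Workers: every nodegroup whose role is not 'master'.
            return sum(ng.node_count for ng in self.nodegroups
                       if ng.role != 'master')

        @property
        def master_count(self):
            return sum(ng.node_count for ng in self.nodegroups
                       if ng.role == 'master')

    cluster = FakeCluster([FakeNodeGroup('master', 1, ['10.0.0.5']),
                           FakeNodeGroup('worker', 3, ['10.0.0.6'])])
    assert cluster.node_count == 3 and cluster.master_count == 1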
@@ -183,6 +183,12 @@ class Bay(base.APIBase):
         else:
             setattr(self, 'bay_faults', kwargs.get('faults', wtypes.Unset))
 
+        nodegroup_fields = ['node_count', 'master_count',
+                            'node_addresses', 'master_addresses']
+        for field in nodegroup_fields:
+            self.fields.append(field)
+            setattr(self, field, kwargs.get(field, wtypes.Unset))
+
     @staticmethod
     def _convert_with_links(bay, url, expand=True):
         if not expand:
@@ -182,6 +182,11 @@ class Cluster(base.APIBase):
                 continue
             self.fields.append(field)
             setattr(self, field, kwargs.get(field, wtypes.Unset))
+        nodegroup_fields = ['node_count', 'master_count',
+                            'node_addresses', 'master_addresses']
+        for field in nodegroup_fields:
+            self.fields.append(field)
+            setattr(self, field, kwargs.get(field, wtypes.Unset))
 
     @staticmethod
     def _convert_with_links(cluster, url, expand=True):
@@ -51,8 +51,6 @@ class Handler(object):
 
         cluster.status = fields.ClusterStatus.CREATE_IN_PROGRESS
         cluster.status_reason = None
-        cluster.node_count = node_count
-        cluster.master_count = master_count
         cluster.create()
 
         # Master nodegroup
@@ -137,8 +135,6 @@ class Handler(object):
             context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING)
         worker_ng.node_count = node_count
         worker_ng.save()
-        # For now update also the cluster.node_count
-        cluster.node_count = node_count
         cluster_driver.update_cluster(context, cluster, manager, rollback)
         cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
         cluster.status_reason = None
@@ -0,0 +1,132 @@
+# Copyright (c) 2018 European Organization for Nuclear Research.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""change cluster to support nodegroups
+
+Revision ID: 461d798132c7
+Revises: ac92cbae311c
+Create Date: 2019-02-06 14:32:40.316528
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '461d798132c7'
+down_revision = 'ac92cbae311c'
+
+from alembic import op
+
+import sqlalchemy as sa
+
+from oslo_db.sqlalchemy.types import String
+from oslo_serialization import jsonutils
+from oslo_utils import uuidutils
+
+from magnum.db.sqlalchemy import models
+
+
+def _handle_json_columns(value, default=None):
+    if value is not None:
+        return jsonutils.loads(value)
+    return default
+
+
+def upgrade():
+
+    nodegroup = sa.sql.table(
+        'nodegroup',
+        sa.Column('created_at', sa.DateTime(), default=sa.func.now()),
+        sa.Column('uuid', String(length=36), nullable=False),
+        sa.Column('name', String(length=255), nullable=False),
+        sa.Column('cluster_id', String(length=255), nullable=False),
+        sa.Column('project_id', String(length=255), nullable=False),
+        sa.Column('docker_volume_size', sa.Integer(), nullable=True),
+        sa.Column('labels', models.JSONEncodedDict, nullable=True),
+        sa.Column('flavor_id', String(length=255), nullable=True),
+        sa.Column('image_id', String(length=255), nullable=True),
+        sa.Column('node_addresses', models.JSONEncodedList(), nullable=True),
+        sa.Column('node_count', sa.Integer, nullable=True),
+        sa.Column('max_node_count', sa.Integer, nullable=True),
+        sa.Column('min_node_count', sa.Integer, nullable=True),
+        sa.Column('role', String(length=255), nullable=True),
+        sa.Column('is_default', sa.Boolean())
+    )
+
+    connection = op.get_bind()
+    # Fetching all required info from existing cluster
+    res = connection.execute(
+        "SELECT "
+        "cluster.uuid, "
+        "cluster.name, "
+        "cluster.project_id, "
+        "cluster.docker_volume_size, "
+        "cluster.labels, "
+        "cluster.master_flavor_id, "
+        "cluster.flavor_id, "
+        "cluster.node_count, "
+        "cluster.master_count, "
+        "cluster.node_addresses, "
+        "cluster.master_addresses, "
+        "cluster_template.master_flavor_id, "
+        "cluster_template.flavor_id, "
+        "cluster_template.image_id "
+        "FROM cluster INNER JOIN cluster_template "
+        "ON cluster.cluster_template_id=cluster_template.uuid"
+    )
+
+    results = res.fetchall()
+
+    # Create a list containing populated master nodegroups
+    master_ngs = [{
+        'uuid': uuidutils.generate_uuid(),
+        'name': '%s-master' % rs[1],
+        'cluster_id': rs[0],
+        'project_id': rs[2],
+        'docker_volume_size': rs[3],
+        'labels': _handle_json_columns(rs[4]),
+        'flavor_id': rs[5] or rs[11],
+        'image_id': rs[13],
+        'node_addresses': _handle_json_columns(rs[10]),
+        'node_count': rs[8],
+        'role': 'master',
+        'min_node_count': 1,
+        'is_default': True
+    } for rs in results]
+
+    # Create a list containing populated worker nodegroups
+    worker_ngs = [{
+        'uuid': uuidutils.generate_uuid(),
+        'name': '%s-worker' % rs[1],
+        'cluster_id': rs[0],
+        'project_id': rs[2],
+        'docker_volume_size': rs[3],
+        'labels': _handle_json_columns(rs[4]),
+        'flavor_id': rs[6] or rs[12],
+        'image_id': rs[13],
+        'node_addresses': _handle_json_columns(rs[9]),
+        'node_count': rs[7],
+        'role': "worker",
+        'min_node_count': 1,
+        'is_default': True
+    } for rs in results]
+
+    # Insert the populated nodegroups
+    op.bulk_insert(nodegroup, master_ngs)
+    op.bulk_insert(nodegroup, worker_ngs)
+
+    # Drop the columns from cluster table
+    op.drop_column('cluster', 'node_count')
+    op.drop_column('cluster', 'node_addresses')
+    op.drop_column('cluster', 'master_count')
+    op.drop_column('cluster', 'master_addresses')
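
The positional indices rs[0] through rs[13] in the two list comprehensions follow the column order of the SELECT above. A sketch of that mapping, for readability only (the names are annotations, not part of the migration):

    # Illustrative mapping of the SELECT's positional results (rs) to names.
    COLUMNS = (
        'cluster.uuid',               # rs[0]
        'cluster.name',               # rs[1]
        'cluster.project_id',         # rs[2]
        'cluster.docker_volume_size', # rs[3]
        'cluster.labels',             # rs[4]
        'cluster.master_flavor_id',   # rs[5]  -> master ng flavor (falls back to rs[11])
        'cluster.flavor_id',          # rs[6]  -> worker ng flavor (falls back to rs[12])
        'cluster.node_count',         # rs[7]  -> worker ng node_count
        'cluster.master_count',       # rs[8]  -> master ng node_count
        'cluster.node_addresses',     # rs[9]  -> worker ng node_addresses
        'cluster.master_addresses',   # rs[10] -> master ng node_addresses
        'cluster_template.master_flavor_id',  # rs[11]
        'cluster_template.flavor_id',         # rs[12]
        'cluster_template.image_id',          # rs[13]
    )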
@@ -151,9 +151,9 @@ class Connection(api.Connection):
         if filters is None:
             filters = {}
 
-        possible_filters = ["cluster_template_id", "name", "node_count",
-                            "master_count", "stack_id", "api_address",
-                            "node_addresses", "project_id", "user_id"]
+        possible_filters = ["cluster_template_id", "name", "stack_id",
+                            "api_address", "node_addresses", "project_id",
+                            "user_id"]
 
         filter_names = set(filters).intersection(possible_filters)
         filter_dict = {filter_name: filters[filter_name]
@@ -164,6 +164,26 @@ class Connection(api.Connection):
         if 'status' in filters:
             query = query.filter(models.Cluster.status.in_(filters['status']))
 
+        # Helper to filter based on node_count field from nodegroups
+        def filter_node_count(query, node_count, is_master=False):
+            nfunc = func.sum(models.NodeGroup.node_count)
+            nquery = model_query(models.NodeGroup)
+            if is_master:
+                nquery = nquery.filter(models.NodeGroup.role == 'master')
+            else:
+                nquery = nquery.filter(models.NodeGroup.role != 'master')
+            nquery = nquery.group_by(models.NodeGroup.cluster_id)
+            nquery = nquery.having(nfunc == node_count)
+            uuids = [ng.cluster_id for ng in nquery.all()]
+            return query.filter(models.Cluster.uuid.in_(uuids))
+
+        if 'node_count' in filters:
+            query = filter_node_count(
+                query, filters['node_count'], is_master=False)
+        if 'master_count' in filters:
+            query = filter_node_count(
+                query, filters['master_count'], is_master=True)
+
         return query
 
     def get_cluster_list(self, context, filters=None, limit=None, marker=None,
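
The filter_node_count helper replaces what used to be a plain column filter: since counts now live in nodegroup rows, a node_count or master_count filter becomes a GROUP BY over nodegroups whose summed node_count matches, and the outer cluster query keeps only the matching cluster uuids. Roughly the SQL the inner query should generate (a sketch; the real statement is emitted by SQLAlchemy):

    # Roughly what filter_node_count(query, n, is_master=False) asks of the
    # database for its inner nodegroup query (sketch, not captured output):
    EQUIVALENT_SQL = """
    SELECT nodegroup.cluster_id
    FROM nodegroup
    WHERE nodegroup.role != 'master'
    GROUP BY nodegroup.cluster_id
    HAVING SUM(nodegroup.node_count) = :n
    """
    # The outer query then keeps only clusters whose uuid is in that result.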
@@ -219,9 +239,8 @@ class Connection(api.Connection):
 
     def get_cluster_stats(self, context, project_id=None):
         query = model_query(models.Cluster)
-        node_count_col = models.Cluster.node_count
-        master_count_col = models.Cluster.master_count
-        ncfunc = func.sum(node_count_col + master_count_col)
+        node_count_col = models.NodeGroup.node_count
+        ncfunc = func.sum(node_count_col)
 
         if project_id:
             query = query.filter_by(project_id=project_id)
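
Note that dropping the master_count column does not change the stats semantics: master nodegroups store their size in node_count too, so summing NodeGroup.node_count across all nodegroups already covers what used to be node_count + master_count. A worked example matching the test_get_all_cluster_stats assertion further down (assuming, as the test helpers suggest, one worker and one master nodegroup of 3 nodes each per cluster):

    # Worked example matching the (2, 12) assertion in the db tests below.
    clusters = 2
    nodes_per_cluster = 3 + 3   # worker ng + master ng, both via node_count
    assert (clusters, clusters * nodes_per_cluster) == (2, 12)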
@@ -122,16 +122,12 @@ class Cluster(Base):
     flavor_id = Column(String(255))
     stack_id = Column(String(255))
     api_address = Column(String(255))
-    node_addresses = Column(JSONEncodedList)
-    node_count = Column(Integer())
-    master_count = Column(Integer())
     status = Column(String(20))
     status_reason = Column(Text)
     health_status = Column(String(20))
     health_status_reason = Column(JSONEncodedDict)
     create_timeout = Column(Integer())
     discovery_url = Column(String(255, mysql_ndb_type=TINYTEXT))
-    master_addresses = Column(JSONEncodedList)
     # TODO(wanghua): encrypt trust_id in db
     trust_id = Column(String(255))
     trustee_username = Column(String(255, mysql_ndb_type=TINYTEXT))
@@ -49,8 +49,10 @@ class Cluster(base.MagnumPersistentObject, base.MagnumObject,
     # Version 1.17: Added 'flavor_id' field
     # Version 1.18: Added 'health_status' and 'health_status_reason' field
     # Version 1.19: Added nodegroups, default_ng_worker, default_ng_master
+    # Version 1.20: Fields node_count, master_count, node_addresses,
+    #               master_addresses are now properties.
 
-    VERSION = '1.19'
+    VERSION = '1.20'
 
     dbapi = dbapi.get_instance()
 
@@ -73,11 +75,7 @@ class Cluster(base.MagnumPersistentObject, base.MagnumObject,
         'health_status_reason': fields.DictOfStringsField(nullable=True),
         'create_timeout': fields.IntegerField(nullable=True),
         'api_address': fields.StringField(nullable=True),
-        'node_addresses': fields.ListOfStringsField(nullable=True),
-        'node_count': fields.IntegerField(nullable=True),
-        'master_count': fields.IntegerField(nullable=True),
         'discovery_url': fields.StringField(nullable=True),
-        'master_addresses': fields.ListOfStringsField(nullable=True),
         'ca_cert_ref': fields.StringField(nullable=True),
         'magnum_cert_ref': fields.StringField(nullable=True),
         'cluster_template': fields.ObjectField('ClusterTemplate'),
@@ -128,6 +126,30 @@ class Cluster(base.MagnumPersistentObject, base.MagnumObject,
         filters = {'role': 'master', 'is_default': True}
         return NodeGroup.list(self._context, self.uuid, filters=filters)[0]
 
+    @property
+    def node_count(self):
+        return sum(n.node_count for n in self.nodegroups if n.role != 'master')
+
+    @property
+    def master_count(self):
+        return sum(n.node_count for n in self.nodegroups if n.role == 'master')
+
+    @property
+    def node_addresses(self):
+        node_addresses = []
+        for ng in self.nodegroups:
+            if ng.role != 'master':
+                node_addresses += ng.node_addresses
+        return node_addresses
+
+    @property
+    def master_addresses(self):
+        master_addresses = []
+        for ng in self.nodegroups:
+            if ng.role == 'master':
+                master_addresses += ng.node_addresses
+        return master_addresses
+
     @staticmethod
     def _from_db_object_list(db_objects, cls, context):
         """Converts a list of database entities to a list of formal objects."""
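
Because node_count and friends are now read-only properties, assigning to them on a Cluster object raises AttributeError; this is why the test changes below consistently replace `cluster.node_count = x` with updates to the default worker nodegroup. A plain-Python illustration of the behaviour being relied on (not Magnum code):

    # A property with no setter rejects assignment, so callers must go
    # through the nodegroup instead.
    class Cluster(object):
        @property
        def node_count(self):
            return 3

    c = Cluster()
    try:
        c.node_count = 5
    except AttributeError:
        print('node_count is read-only; update the nodegroup instead')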
@@ -299,3 +321,15 @@ class Cluster(base.MagnumPersistentObject, base.MagnumObject,
         for field in self.fields:
             if self.obj_attr_is_set(field) and self[field] != current[field]:
                 self[field] = current[field]
+
+    def as_dict(self):
+        dict_ = super(Cluster, self).as_dict()
+        # Update the dict with the attributes coming from
+        # the cluster's nodegroups.
+        dict_.update({
+            'node_count': self.node_count,
+            'master_count': self.master_count,
+            'node_addresses': self.node_addresses,
+            'master_addresses': self.master_addresses
+        })
+        return dict_
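
A note on the as_dict override: the base object's as_dict() walks only the declared fields, and the four names were just removed from that dict, so without this override they would silently vanish from serialized clusters (and hence from API responses and notifications). Re-injecting the property-derived values keeps the external payload shape unchanged.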
@@ -26,6 +26,7 @@ from magnum import objects
 from magnum.tests import base
 from magnum.tests.unit.api import base as api_base
 from magnum.tests.unit.api import utils as apiutils
+from magnum.tests.unit.db import utils as db_utils
 from magnum.tests.unit.objects import utils as obj_utils
 
 
@@ -245,8 +246,10 @@ class TestPatch(api_base.FunctionalTest):
 
     def _simulate_rpc_bay_update(self, bay, node_count, rollback=False):
         bay.status = 'UPDATE_IN_PROGRESS'
-        bay.node_count = node_count
         bay.save()
+        default_ng_worker = bay.default_ng_worker
+        default_ng_worker.node_count = node_count
+        default_ng_worker.save()
         return bay
 
     @mock.patch('oslo_utils.timeutils.utcnow')
@@ -461,9 +464,10 @@ class TestPost(api_base.FunctionalTest):
 
     def _simulate_rpc_bay_create(self, bay, master_count, node_count,
                                  bay_create_timeout):
-        bay.node_count = node_count
-        bay.master_count = master_count
         bay.create()
+        db_utils.create_nodegroups_for_cluster(
+            cluster_id=bay.uuid, node_count=node_count,
+            master_count=master_count)
         return bay
 
     @mock.patch('oslo_utils.timeutils.utcnow')
@@ -493,6 +497,9 @@ class TestPost(api_base.FunctionalTest):
             self.assertEqual(self.context.project_id, bay.project_id)
             self.assertEqual(self.context.user_id, bay.user_id)
             bay.create()
+            db_utils.create_nodegroups_for_cluster(
+                cluster_id=bay.uuid, node_count=node_count,
+                master_count=master_count)
             return bay
         self.mock_bay_create.side_effect = _simulate_rpc_bay_create
 
@@ -703,6 +710,9 @@ class TestPost(api_base.FunctionalTest):
                                      bay_create_timeout):
             self.assertEqual(60, bay_create_timeout)
             bay.create()
+            db_utils.create_nodegroups_for_cluster(
+                cluster_id=bay.uuid, node_count=node_count,
+                master_count=master_count)
             return bay
         self.mock_bay_create.side_effect = _simulate_rpc_bay_create
         bdict = apiutils.bay_post_data()
@@ -833,6 +843,9 @@ class TestDelete(api_base.FunctionalTest):
     def _simulate_rpc_bay_delete(self, bay_uuid):
         bay = objects.Cluster.get_by_uuid(self.context, bay_uuid)
         bay.destroy()
+        ngs = objects.NodeGroup.list(self.context, bay_uuid)
+        for ng in ngs:
+            ng.destroy()
 
     def test_delete_bay(self):
         self.delete('/bays/%s' % self.bay.uuid)
@@ -282,7 +282,9 @@ class TestPatch(api_base.FunctionalTest):
 
     def _sim_rpc_cluster_update(self, cluster, node_count, rollback=False):
         cluster.status = 'UPDATE_IN_PROGRESS'
-        cluster.node_count = node_count
+        default_ng_worker = cluster.default_ng_worker
+        default_ng_worker.node_count = node_count
+        default_ng_worker.save()
         cluster.save()
         return cluster
 
@@ -518,8 +520,6 @@ class TestPost(api_base.FunctionalTest):
 
     def _simulate_cluster_create(self, cluster, master_count, node_count,
                                  create_timeout):
-        cluster.node_count = node_count
-        cluster.master_count = master_count
         cluster.create()
         return cluster
 
@@ -1021,6 +1021,9 @@ class TestClusterPolicyEnforcement(api_base.FunctionalTest):
     def _simulate_cluster_delete(self, cluster_uuid):
         cluster = objects.Cluster.get_by_uuid(self.context, cluster_uuid)
         cluster.destroy()
+        ngs = objects.NodeGroup.list(self.context, cluster_uuid)
+        for ng in ngs:
+            ng.destroy()
 
     def test_policy_disallow_delete(self):
         p = mock.patch.object(rpcapi.API, 'cluster_delete')
@@ -33,8 +33,6 @@ class TestClusterActions(api_base.FunctionalTest):
 
     def _sim_rpc_cluster_resize(self, cluster, node_count, nodes_to_remove,
                                 nodegroup, rollback=False):
-        cluster.node_count = node_count
-        cluster.save()
         nodegroup.node_count = node_count
         nodegroup.save()
         return cluster
@@ -43,6 +43,7 @@ def cluster_template_post_data(**kw):
 
 
 def bay_post_data(**kw):
+    kw.update({'for_api_use': True})
     bay = utils.get_test_cluster(**kw)
     bay['baymodel_id'] = kw.get('baymodel_id', bay['cluster_template_id'])
     bay['bay_create_timeout'] = kw.get('bay_create_timeout', 15)
@@ -53,6 +54,7 @@ def bay_post_data(**kw):
 
 
 def cluster_post_data(**kw):
+    kw.update({'for_api_use': True})
     cluster = utils.get_test_cluster(**kw)
     cluster['create_timeout'] = kw.get('create_timeout', 15)
     internal = cluster_controller.ClusterPatchType.internal_attrs()
@@ -75,13 +75,9 @@ class TestClusterConductorWithK8s(base.TestCase):
             'name': 'cluster1',
             'stack_id': 'xx-xx-xx-xx',
             'api_address': '172.17.2.3',
-            'node_addresses': ['172.17.2.4'],
-            'node_count': 1,
-            'master_count': 1,
             'discovery_url': 'https://discovery.etcd.io/test',
             'docker_volume_size': 20,
             'flavor_id': 'flavor_id',
-            'master_addresses': ['172.17.2.18'],
             'ca_cert_ref': 'http://barbican/v1/containers/xx-xx-xx-xx',
             'magnum_cert_ref': 'http://barbican/v1/containers/xx-xx-xx-xx',
             'trustee_username': 'fake_trustee',
@@ -61,9 +61,6 @@ class TestClusterConductorWithMesos(base.TestCase):
             'name': 'cluster1',
             'stack_id': 'xx-xx-xx-xx',
             'api_address': '172.17.2.3',
-            'node_addresses': ['172.17.2.4'],
-            'node_count': 1,
-            'master_count': 1,
             'trustee_username': 'fake_trustee',
             'trustee_password': 'fake_trustee_password',
             'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656',
@@ -69,9 +69,6 @@ class TestClusterConductorWithSwarm(base.TestCase):
             'name': 'cluster1',
             'stack_id': 'xx-xx-xx-xx',
             'api_address': '172.17.2.3',
-            'node_addresses': ['172.17.2.4'],
-            'master_count': 1,
-            'node_count': 1,
             'discovery_url': 'https://discovery.test.io/123456789',
             'trustee_username': 'fake_trustee',
             'trustee_password': 'fake_trustee_password',
@@ -556,7 +553,6 @@ class TestClusterConductorWithSwarm(base.TestCase):
             mock_objects_cluster_template_get_by_uuid,
             mock_get):
         self.cluster_template_dict['master_lb_enabled'] = True
-        self.cluster_dict['master_count'] = 2
+        self.master_ng_dict['node_count'] = 2
         cluster_template = objects.ClusterTemplate(
             self.context, **self.cluster_template_dict)
@@ -337,8 +337,11 @@ class MonitorsTestCase(base.TestCase):
         self.assertEqual(self.mesos_monitor.data['cpu_used'],
                          expected_cpu_used)
 
+    @mock.patch('magnum.objects.NodeGroup.list')
     @mock.patch('magnum.common.urlfetch.get')
-    def test_mesos_monitor_pull_data_success(self, mock_url_get):
+    def test_mesos_monitor_pull_data_success(self, mock_url_get,
+                                             mock_ng_list):
+        mock_ng_list.return_value = self.nodegroups
         state_json = {
             'leader': 'master@10.0.0.6:5050',
             'pid': 'master@10.0.0.6:5050',
@@ -356,8 +359,11 @@ class MonitorsTestCase(base.TestCase):
         self._test_mesos_monitor_pull_data(mock_url_get, state_json,
                                            100, 50, 1, 0.2)
 
+    @mock.patch('magnum.objects.NodeGroup.list')
     @mock.patch('magnum.common.urlfetch.get')
-    def test_mesos_monitor_pull_data_success_not_leader(self, mock_url_get):
+    def test_mesos_monitor_pull_data_success_not_leader(self, mock_url_get,
+                                                        mock_ng_list):
+        mock_ng_list.return_value = self.nodegroups
         state_json = {
             'leader': 'master@10.0.0.6:5050',
             'pid': 'master@1.1.1.1:5050',
@@ -366,9 +372,11 @@ class MonitorsTestCase(base.TestCase):
         self._test_mesos_monitor_pull_data(mock_url_get, state_json,
                                            0, 0, 0, 0)
 
+    @mock.patch('magnum.objects.NodeGroup.list')
     @mock.patch('magnum.common.urlfetch.get')
-    def test_mesos_monitor_pull_data_success_no_master(self, mock_url_get):
-        self.cluster.master_addresses = []
+    def test_mesos_monitor_pull_data_success_no_master(self, mock_url_get,
+                                                       mock_ng_list):
+        mock_ng_list.return_value = []
         self._test_mesos_monitor_pull_data(mock_url_get, {}, 0, 0, 0, 0)
 
     def test_mesos_monitor_get_metric_names(self):
@@ -45,25 +45,27 @@ class SqlAlchemyCustomTypesTestCase(base.DbTestCase):
                            ['this is not a dict']})
 
     def test_JSONEncodedList_default_value(self):
-        # Create cluster w/o master_addresses
-        cluster1_id = uuidutils.generate_uuid()
-        self.dbapi.create_cluster({'uuid': cluster1_id})
-        cluster1 = sa_api.model_query(
-            models.Cluster).filter_by(uuid=cluster1_id).one()
-        self.assertEqual([], cluster1.master_addresses)
+        # Create nodegroup w/o node_addresses
+        nodegroup1_id = uuidutils.generate_uuid()
+        self.dbapi.create_nodegroup({'uuid': nodegroup1_id})
+        nodegroup1 = sa_api.model_query(
+            models.NodeGroup).filter_by(uuid=nodegroup1_id).one()
+        self.assertEqual([], nodegroup1.node_addresses)
 
-        # Create cluster with master_addresses
-        cluster2_id = uuidutils.generate_uuid()
-        self.dbapi.create_cluster({'uuid': cluster2_id,
-                                   'master_addresses': ['mymaster_address1',
-                                                        'mymaster_address2']})
-        cluster2 = sa_api.model_query(
-            models.Cluster).filter_by(uuid=cluster2_id).one()
-        self.assertEqual(['mymaster_address1', 'mymaster_address2'],
-                         cluster2.master_addresses)
+        # Create nodegroup with node_addresses
+        nodegroup2_id = uuidutils.generate_uuid()
+        self.dbapi.create_nodegroup({
+            'uuid': nodegroup2_id,
+            'node_addresses': ['mynode_address1',
+                               'mynode_address2']
+        })
+        nodegroup2 = sa_api.model_query(
+            models.NodeGroup).filter_by(uuid=nodegroup2_id).one()
+        self.assertEqual(['mynode_address1', 'mynode_address2'],
+                         nodegroup2.node_addresses)
 
     def test_JSONEncodedList_type_check(self):
         self.assertRaises(db_exc.DBError,
-                          self.dbapi.create_cluster,
-                          {'master_addresses':
+                          self.dbapi.create_nodegroup,
+                          {'node_addresses':
                            {'this is not a list': 'test'}})
@@ -79,22 +79,32 @@ class DbClusterTestCase(base.DbTestCase):
                           self.context, 'clusterone')
 
     def test_get_all_cluster_stats(self):
+        uuid1 = uuidutils.generate_uuid()
         utils.create_test_cluster(
             id=1, name='clusterone',
-            uuid=uuidutils.generate_uuid())
+            uuid=uuid1)
+        utils.create_nodegroups_for_cluster(cluster_id=uuid1)
+        uuid2 = uuidutils.generate_uuid()
         utils.create_test_cluster(
             id=2, name='clustertwo',
-            uuid=uuidutils.generate_uuid())
+            uuid=uuid2)
+        utils.create_nodegroups_for_cluster(cluster_id=uuid2)
         ret = self.dbapi.get_cluster_stats(self.context)
         self.assertEqual(ret, (2, 12))
 
     def test_get_one_tenant_cluster_stats(self):
+        uuid1 = uuidutils.generate_uuid()
         utils.create_test_cluster(
             id=1, name='clusterone', project_id='proj1',
-            uuid=uuidutils.generate_uuid())
+            uuid=uuid1)
+        utils.create_nodegroups_for_cluster(
+            cluster_id=uuid1, project_id='proj1')
+        uuid2 = uuidutils.generate_uuid()
         utils.create_test_cluster(
             id=2, name='clustertwo', project_id='proj2',
-            uuid=uuidutils.generate_uuid())
+            uuid=uuid2)
+        utils.create_nodegroups_for_cluster(
+            cluster_id=uuid2, project_id='proj2')
         ret = self.dbapi.get_cluster_stats(self.context, 'proj2')
         self.assertEqual(ret, (1, 6))
 
@@ -129,23 +139,26 @@ class DbClusterTestCase(base.DbTestCase):
         self.dbapi.create_cluster_template(ct1)
         self.dbapi.create_cluster_template(ct2)
 
+        uuid1 = uuidutils.generate_uuid()
         cluster1 = utils.create_test_cluster(
             name='cluster-one',
-            uuid=uuidutils.generate_uuid(),
+            uuid=uuid1,
             cluster_template_id=ct1['uuid'],
             status=cluster_status.CREATE_IN_PROGRESS)
+        utils.create_nodegroups_for_cluster(cluster_id=uuid1)
+        uuid2 = uuidutils.generate_uuid()
         cluster2 = utils.create_test_cluster(
             name='cluster-two',
-            uuid=uuidutils.generate_uuid(),
+            uuid=uuid2,
             cluster_template_id=ct2['uuid'],
-            node_count=1,
-            master_count=1,
             status=cluster_status.UPDATE_IN_PROGRESS)
+        utils.create_nodegroups_for_cluster(
+            cluster_id=uuid2, node_count=1, master_count=1)
         cluster3 = utils.create_test_cluster(
             name='cluster-three',
-            node_count=2,
-            master_count=5,
             status=cluster_status.DELETE_IN_PROGRESS)
+        utils.create_nodegroups_for_cluster(
+            node_count=2, master_count=5)
 
         res = self.dbapi.get_cluster_list(
             self.context, filters={'cluster_template_id': ct1['uuid']})
@@ -179,6 +192,15 @@ class DbClusterTestCase(base.DbTestCase):
                                           filters={'master_count': 1})
         self.assertEqual([cluster2.id], [r.id for r in res])
 
+        # Check that both filters have to be valid
+        filters = {'master_count': 1, 'node_count': 1}
+        res = self.dbapi.get_cluster_list(self.context, filters=filters)
+        self.assertEqual([cluster2.id], [r.id for r in res])
+
+        filters = {'master_count': 1, 'node_count': 2}
+        res = self.dbapi.get_cluster_list(self.context, filters=filters)
+        self.assertEqual(0, len(res))
+
         filters = {'status': [cluster_status.CREATE_IN_PROGRESS,
                               cluster_status.DELETE_IN_PROGRESS]}
         res = self.dbapi.get_cluster_list(self.context,
@@ -234,11 +256,11 @@ class DbClusterTestCase(base.DbTestCase):
 
     def test_update_cluster(self):
         cluster = utils.create_test_cluster()
-        old_nc = cluster.node_count
-        new_nc = 5
-        self.assertNotEqual(old_nc, new_nc)
-        res = self.dbapi.update_cluster(cluster.id, {'node_count': new_nc})
-        self.assertEqual(new_nc, res.node_count)
+        old_status = cluster.status
+        new_status = 'UPDATE_IN_PROGRESS'
+        self.assertNotEqual(old_status, new_status)
+        res = self.dbapi.update_cluster(cluster.id, {'status': new_status})
+        self.assertEqual(new_status, res.status)
 
     def test_update_cluster_not_found(self):
         cluster_uuid = uuidutils.generate_uuid()
@@ -92,10 +92,6 @@ def get_test_cluster(**kw):
         'status_reason': kw.get('status_reason', 'Completed successfully'),
         'create_timeout': kw.get('create_timeout', 60),
         'api_address': kw.get('api_address', '172.17.2.3'),
-        'node_addresses': kw.get('node_addresses', ['172.17.2.4']),
-        'node_count': kw.get('node_count', 3),
-        'master_count': kw.get('master_count', 3),
-        'master_addresses': kw.get('master_addresses', ['172.17.2.18']),
        'created_at': kw.get('created_at'),
         'updated_at': kw.get('updated_at'),
         'docker_volume_size': kw.get('docker_volume_size'),
@@ -104,6 +100,13 @@ def get_test_cluster(**kw):
         'flavor_id': kw.get('flavor_id', None),
     }
 
+    if kw.pop('for_api_use', False):
+        attrs.update({
+            'node_addresses': kw.get('node_addresses', ['172.17.2.4']),
+            'node_count': kw.get('node_count', 3),
+            'master_count': kw.get('master_count', 3),
+            'master_addresses': kw.get('master_addresses', ['172.17.2.18'])
+        })
     # Only add Keystone trusts related attributes on demand since they may
     # break other tests.
     for attr in ['trustee_username', 'trustee_password', 'trust_id']:
@@ -338,8 +341,10 @@ def create_nodegroups_for_cluster(**kw):
     nodegroups = get_nodegroups_for_cluster(**kw)
     # Create workers nodegroup
     worker = nodegroups['worker']
+    del worker['id']
     create_test_nodegroup(**worker)
 
     # Create masters nodegroup
     master = nodegroups['master']
+    del master['id']
     create_test_nodegroup(**master)
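
The for_api_use flag in get_test_cluster is what reconciles the two worlds: the DB model no longer has these columns, so plain DB tests must not pass them, while API-facing tests still need them because the API object continues to expose them (via the controllers' nodegroup_fields and the new Cluster properties). A usage sketch, assuming the helpers above:

    # Usage sketch (assuming magnum.tests.unit.db.utils from this commit):
    from magnum.tests.unit.db import utils

    db_attrs = utils.get_test_cluster()
    # -> no node_count/master_count/addresses; matches the slimmed DB model

    api_attrs = utils.get_test_cluster(for_api_use=True)
    # -> legacy keys included, as bay_post_data()/cluster_post_data() need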
@@ -171,13 +171,12 @@ class TestClusterObject(base.DbTestCase):
         with mock.patch.object(self.dbapi, 'update_cluster',
                                autospec=True) as mock_update_cluster:
             cluster = objects.Cluster.get_by_uuid(self.context, uuid)
-            cluster.node_count = 10
-            cluster.master_count = 5
             cluster.status = 'DELETE_IN_PROGRESS'
             cluster.save()
 
             mock_get_cluster.assert_called_once_with(self.context, uuid)
             mock_update_cluster.assert_called_once_with(
-                uuid, {'node_count': 10, 'master_count': 5,
+                uuid, {'status': 'DELETE_IN_PROGRESS',
                        'cluster_template': self.fake_cluster_template})
             self.assertEqual(self.context, cluster._context)
@@ -355,7 +355,7 @@ class TestObject(test_base.TestCase, _TestObject):
     # For more information on object version testing, read
     # https://docs.openstack.org/magnum/latest/contributor/objects.html
     object_data = {
-        'Cluster': '1.19-9f0dfcc3e898eef2b9a09647b612adb6',
+        'Cluster': '1.20-fcdb29a886bf9552cdac03470570024c',
         'ClusterTemplate': '1.19-3b0b2b3933d0955abf3ab40111744960',
         'Certificate': '1.1-1924dc077daa844f0f9076332ef96815',
         'MyObj': '1.0-34c4b1aadefd177b13f9a2f894cc23cd',