Cluster helpers now in object & tests

Related to blueprint nailgun-objects-flow

Moved all helpers from Cluster model to object

Removed unnecessary "with" wrappers for transactions

Change-Id: Id4a15c59eb6b10b7573ce0c559c59f6d99f54002
This commit is contained in:
Nikolay Markov 2014-03-20 18:42:00 +04:00
parent e568fb7dec
commit be6354584b
27 changed files with 386 additions and 401 deletions

View File

@ -24,9 +24,11 @@ from nailgun.api.handlers.base import BaseHandler
from nailgun.api.handlers.base import content_json
from nailgun.api.validators.assignment import NodeAssignmentValidator
from nailgun.api.validators.assignment import NodeUnassignmentValidator
from nailgun.db import db
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import Node
from nailgun import objects
from nailgun.logger import logger
from nailgun.network.manager import NetworkManager
from nailgun import notifier
@ -48,7 +50,7 @@ class NodeAssignmentHandler(BaseHandler):
cluster_id=cluster_id
)
nodes = self.get_objects_list_or_404(Node, data.keys())
cluster = self.get_object_or_404(Cluster, cluster_id)
cluster = self.get_object_or_404(objects.Cluster.model, cluster_id)
for node in nodes:
node.cluster = cluster
node.pending_roles = data[node.id]
@ -56,7 +58,12 @@ class NodeAssignmentHandler(BaseHandler):
try:
node.attributes.volumes = \
node.volume_manager.gen_volumes_info()
node.cluster.add_pending_changes("disks", node_id=node.id)
objects.Cluster.add_pending_changes(
node.cluster,
"disks",
node_id=node.id
)
network_manager = node.cluster.network_manager
network_manager.assign_networks_by_default(node)
@ -71,7 +78,6 @@ class NodeAssignmentHandler(BaseHandler):
),
node_id=node.id
)
db().commit()
class NodeUnassignmentHandler(BaseHandler):
@ -85,18 +91,20 @@ class NodeUnassignmentHandler(BaseHandler):
:http: * 204 (node successfully unassigned)
* 404 (node not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
cluster = self.get_object_or_404(objects.Cluster.model, cluster_id)
nodes = self.checked_data(
self.validator.validate_collection_update,
cluster_id=cluster.id
)
for node in nodes:
if node.status == "discover":
node.cluster.clear_pending_changes(node_id=node.id)
objects.Cluster.clear_pending_changes(
node.cluster,
node_id=node.id
)
node.pending_roles = []
node.cluster_id = None
node.pending_addition = False
NetworkManager.clear_assigned_networks(node)
else:
node.pending_deletion = True
db().commit()

View File

@ -148,8 +148,7 @@ class ClusterAttributesHandler(BaseHandler):
for key, value in data.iteritems():
setattr(cluster.attributes, key, value)
cluster.add_pending_changes("attributes")
db().commit()
objects.Cluster.add_pending_changes(cluster, "attributes")
return {"editable": cluster.attributes.editable}
@content_json
@ -174,8 +173,7 @@ class ClusterAttributesHandler(BaseHandler):
cluster.attributes.editable = utils.dict_merge(
cluster.attributes.editable, data['editable'])
cluster.add_pending_changes("attributes")
db().commit()
objects.Cluster.add_pending_changes(cluster, "attributes")
return {"editable": cluster.attributes.editable}
@ -227,7 +225,7 @@ class ClusterAttributesDefaultsHandler(BaseHandler):
"editable"
)
db().commit()
cluster.add_pending_changes("attributes")
objects.Cluster.add_pending_changes(cluster, "attributes")
logger.debug('ClusterAttributesDefaultsHandler:'
' editable attributes for cluster_id %s were reset'

View File

@ -21,6 +21,9 @@ Handlers dealing with disks
from nailgun.api.handlers.base import BaseHandler
from nailgun.api.handlers.base import content_json
from nailgun.api.validators.node import NodeDisksValidator
from nailgun import objects
from nailgun.db import db
from nailgun.db.sqlalchemy.models import Node
from nailgun.db.sqlalchemy.models import NodeAttributes
@ -54,7 +57,11 @@ class NodeDisksHandler(BaseHandler):
data = self.checked_data()
if node.cluster:
node.cluster.add_pending_changes('disks', node_id=node.id)
objects.Cluster.add_pending_changes(
node.cluster,
'disks',
node_id=node.id
)
volumes_data = DisksFormatConvertor.format_disks_to_full(node, data)
# For some reasons if we update node attributes like

View File

@ -31,12 +31,15 @@ from nailgun.api.handlers.base import content_json
from nailgun.api.serializers.node import NodeInterfacesSerializer
from nailgun.api.validators.network import NetAssignmentValidator
from nailgun.api.validators.node import NodeValidator
from nailgun import objects
from nailgun.db import db
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import Node
from nailgun.db.sqlalchemy.models import NodeAttributes
from nailgun.db.sqlalchemy.models import NodeNICInterface
from nailgun.logger import logger
from nailgun.network.manager import NetworkManager
from nailgun import notifier
@ -88,11 +91,17 @@ class NodeHandler(BaseHandler):
old_cluster_id = node.cluster_id
if data.get("pending_roles") == [] and node.cluster:
node.cluster.clear_pending_changes(node_id=node.id)
objects.Cluster.clear_pending_changes(
node.cluster,
node_id=node.id
)
if "cluster_id" in data:
if data["cluster_id"] is None and node.cluster:
node.cluster.clear_pending_changes(node_id=node.id)
objects.Cluster.clear_pending_changes(
node.cluster,
node_id=node.id
)
node.roles = node.pending_roles = []
node.reset_name_to_default()
node.cluster_id = data["cluster_id"]
@ -116,6 +125,7 @@ class NodeHandler(BaseHandler):
if key in ("id", "cluster_id"):
continue
setattr(node, key, value)
db().flush()
if not node.status in ('provisioning', 'deploying'
) and regenerate_volumes:
@ -132,7 +142,6 @@ class NodeHandler(BaseHandler):
)
logger.warning(traceback.format_exc())
notifier.notify("error", msg, node_id=node.id)
db().commit()
return self.render(node)
def DELETE(self, node_id):
@ -142,7 +151,6 @@ class NodeHandler(BaseHandler):
"""
node = self.get_object_or_404(Node, node_id)
db().delete(node)
db().commit()
raise self.http(204)
@ -233,7 +241,7 @@ class NodeCollectionHandler(BaseHandler):
# We need to assign cluster first
cluster_id = data.pop("cluster_id")
if cluster_id:
node.cluster = db.query(Cluster).get(cluster_id)
node.cluster = objects.Cluster.get_by_uid(cluster_id)
for key, value in data.iteritems():
if key == "id":
continue
@ -250,7 +258,8 @@ class NodeCollectionHandler(BaseHandler):
try:
node.attributes.volumes = node.volume_manager.gen_volumes_info()
if node.cluster:
node.cluster.add_pending_changes(
objects.Cluster.add_pending_changes(
node.cluster,
"disks",
node_id=node.id
)
@ -332,11 +341,17 @@ class NodeCollectionHandler(BaseHandler):
old_cluster_id = node.cluster_id
if nd.get("pending_roles") == [] and node.cluster:
node.cluster.clear_pending_changes(node_id=node.id)
objects.Cluster.clear_pending_changes(
node.cluster,
node_id=node.id
)
if "cluster_id" in nd:
if nd["cluster_id"] is None and node.cluster:
node.cluster.clear_pending_changes(node_id=node.id)
objects.Cluster.clear_pending_changes(
node.cluster,
node_id=node.id
)
node.roles = node.pending_roles = []
node.reset_name_to_default()
node.cluster_id = nd["cluster_id"]
@ -379,7 +394,8 @@ class NodeCollectionHandler(BaseHandler):
node.attributes.volumes = \
node.volume_manager.gen_volumes_info()
if node.cluster:
node.cluster.add_pending_changes(
objects.Cluster.add_pending_changes(
node.cluster,
"disks",
node_id=node.id
)
@ -499,8 +515,10 @@ class NodeAgentHandler(BaseHandler):
node.volume_manager.gen_volumes_info()
)
if node.cluster:
node.cluster.add_pending_changes(
"disks", node_id=node.id
objects.Cluster.add_pending_changes(
node.cluster,
"disks",
node_id=node.id
)
except Exception as exc:
msg = (
@ -515,7 +533,6 @@ class NodeAgentHandler(BaseHandler):
db().commit()
NetworkManager.update_interfaces_info(node)
db().commit()
return {"id": node.id}

View File

@ -14,26 +14,25 @@
# under the License.
from nailgun.api.validators.base import BasicValidator
from nailgun.db import db
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import Release
from nailgun.errors import errors
from nailgun.objects import ClusterCollection
from nailgun.objects import Release
class ClusterValidator(BasicValidator):
@classmethod
def _validate_common(cls, data):
d = cls.validate_json(data)
if d.get("name"):
if db().query(Cluster).filter_by(
name=d["name"]
).first():
if ClusterCollection.filter_by(name=d["name"]).first():
raise errors.AlreadyExists(
"Environment with this name already exists",
log_message=True
)
if d.get("release"):
release = db().query(Release).get(d.get("release"))
release_id = d.get("release", d.get("release_id", None))
if release_id:
release = Release.get_by_uid(release_id)
if not release:
raise errors.InvalidData(
"Invalid release ID",
@ -43,7 +42,14 @@ class ClusterValidator(BasicValidator):
@classmethod
def validate(cls, data):
return cls._validate_common(data)
d = cls._validate_common(data)
release_id = d.get("release", d.get("release_id", None))
if not release_id:
raise errors.InvalidData(
u"Release ID is required",
log_message=True
)
return d
@classmethod
def validate_update(cls, data, instance):

View File

@ -59,6 +59,11 @@ class ReleaseValidator(BasicValidator):
"No release version specified",
log_message=True
)
if "operating_system" not in d:
raise errors.InvalidData(
"No release operating system specified",
log_message=True
)
if db().query(Release).filter_by(
name=d["name"],
version=d["version"]

View File

@ -14,9 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from random import choice
import string
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import Enum
@ -31,9 +28,6 @@ from nailgun.db import db
from nailgun.db.sqlalchemy.models.base import Base
from nailgun.db.sqlalchemy.models.fields import JSON
from nailgun.db.sqlalchemy.models.node import Node
from nailgun.logger import logger
from nailgun.settings import settings
from nailgun.utils import dict_merge
class ClusterChanges(Base):
@ -165,36 +159,6 @@ class Cluster(Base):
return False
return True
def add_pending_changes(self, changes_type, node_id=None):
"""Record a pending change of the given type for this cluster.

:param changes_type: change category name (e.g. "disks",
    "attributes", "networks") stored in ClusterChanges.name
:param node_id: optional node the change is scoped to; when given,
    deduplication is per (cluster, name, node_id)
"""
# Look up already-queued changes of the same type for this cluster.
ex_chs = db().query(ClusterChanges).filter_by(
cluster=self,
name=changes_type
)
if not node_id:
ex_chs = ex_chs.first()
else:
ex_chs = ex_chs.filter_by(node_id=node_id).first()
# do nothing if changes with the same name already pending
if ex_chs:
return
ch = ClusterChanges(
cluster_id=self.id,
name=changes_type
)
if node_id:
ch.node_id = node_id
db().add(ch)
# Flush only — the caller is responsible for committing the session.
db().flush()
def clear_pending_changes(self, node_id=None):
"""Delete pending ClusterChanges rows for this cluster.

:param node_id: when given, only changes scoped to that node are
    removed; otherwise every pending change of the cluster is dropped
"""
chs = db().query(ClusterChanges).filter_by(
cluster_id=self.id
)
if node_id:
chs = chs.filter_by(node_id=node_id)
# Delete each matched row; flush without committing (caller commits).
map(db().delete, chs.all())
db().flush()
@property
def network_manager(self):
if self.net_provider == 'neutron':
@ -205,78 +169,9 @@ class Cluster(Base):
return NovaNetworkManager
class AttributesGenerators(object):
"""Value generators referenced by name from attributes metadata
("generator" keys) and resolved via getattr in Attributes.traverse.

NOTE(review): Python 2 only — uses string.letters and xrange.
"""
@classmethod
def password(cls, arg=None):
# arg is the desired length; fall back to 8 on any bad value.
try:
length = int(arg)
except Exception:
length = 8
chars = string.letters + string.digits
# NOTE(review): random.choice is not cryptographically secure —
# presumably acceptable for generated service passwords; confirm.
return u''.join([choice(chars) for _ in xrange(length)])
@classmethod
def ip(cls, arg=None):
# "admin"/"master" resolve to the master node IP from settings;
# anything else falls back to loopback.
if str(arg) in ("admin", "master"):
return settings.MASTER_IP
return "127.0.0.1"
@classmethod
def identical(cls, arg=None):
# Pass-through generator: returns the argument as a string.
return str(arg)
class Attributes(Base):
"""Per-cluster attributes: user-editable settings plus values
generated from the release's attributes metadata."""
__tablename__ = 'attributes'
id = Column(Integer, primary_key=True)
# Owning cluster (one attributes row per cluster).
cluster_id = Column(Integer, ForeignKey('clusters.id'))
# JSON blob of user-editable settings.
editable = Column(JSON)
# JSON blob of generator-produced values (passwords, IPs, ...).
generated = Column(JSON)
def generate_fields(self):
"""Resolve every "generator" entry in self.generated in place
and flush the result to the DB (no commit)."""
self.generated = self.traverse(self.generated)
db().add(self)
db().flush()
@classmethod
def traverse(cls, cdict):
"""Recursively rebuild *cdict*, replacing dicts of the form
{"generator": name, "generator_arg": arg} with the value produced
by the matching AttributesGenerators classmethod."""
new_dict = {}
if cdict:
for i, val in cdict.iteritems():
if isinstance(val, (str, unicode, int, float)):
# Scalars are copied through untouched.
new_dict[i] = val
elif isinstance(val, dict) and "generator" in val:
try:
generator = getattr(
AttributesGenerators,
val["generator"]
)
except AttributeError:
# Unknown generator name: log and re-raise.
logger.error("Attribute error: %s" % val["generator"])
raise
else:
new_dict[i] = generator(val.get("generator_arg"))
else:
# Nested dict without a generator — recurse.
new_dict[i] = cls.traverse(val)
return new_dict
def merged_attrs(self):
"""Return generated attrs overlaid with editable ones
(editable values win on conflict, per dict_merge argument order)."""
return dict_merge(self.generated, self.editable)
def merged_attrs_values(self):
"""Like merged_attrs but flattened for consumers: unwraps
{"value": x} leaves, hoists the "common" group to the top level,
and expands "additional_components" booleans into
{component: {"enabled": bool}} entries."""
attrs = self.merged_attrs()
for group_attrs in attrs.itervalues():
for attr, value in group_attrs.iteritems():
if isinstance(value, dict) and 'value' in value:
group_attrs[attr] = value['value']
if 'common' in attrs:
attrs.update(attrs.pop('common'))
if 'additional_components' in attrs:
for comp, enabled in attrs['additional_components'].iteritems():
if isinstance(enabled, bool):
attrs.setdefault(comp, {}).update({
"enabled": enabled
})
attrs.pop('additional_components')
return attrs

View File

@ -29,9 +29,10 @@ from netaddr import IPRange
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import not_
from nailgun import objects
from nailgun import consts
from nailgun.db import db
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import IPAddr
from nailgun.db.sqlalchemy.models import IPAddrRange
from nailgun.db.sqlalchemy.models import NetworkGroup
@ -242,7 +243,7 @@ class NetworkManager(object):
:returns: None
:raises: Exception
"""
cluster = db().query(Cluster).get(cluster_id)
cluster = objects.Cluster.get_by_uid(cluster_id)
if not cluster:
raise Exception(u"Cluster id='%s' not found" % cluster_id)
@ -886,7 +887,7 @@ class NetworkManager(object):
@classmethod
def get_end_point_ip(cls, cluster_id):
cluster_db = db().query(Cluster).get(cluster_id)
cluster_db = objects.Cluster.get_by_uid(cluster_id)
ip = None
if cluster_db.is_ha_mode:
ip = cls.assign_vip(cluster_db.id, "public")
@ -987,7 +988,7 @@ class NetworkManager(object):
:type cluster_id: int
:returns: None
"""
cluster_db = db().query(Cluster).get(cluster_id)
cluster_db = objects.Cluster.get_by_uid(cluster_id)
networks_metadata = cluster_db.release.networks_metadata
networks_list = networks_metadata[cluster_db.net_provider]["networks"]
used_nets = [IPNetwork(cls.get_admin_network_group().cidr)]
@ -1077,7 +1078,7 @@ class NetworkManager(object):
cls.update_cidr_from_gw_mask(ng_db, ng)
if ng_db.meta.get("notation"):
cls.cleanup_network_group(ng_db)
ng_db.cluster.add_pending_changes('networks')
objects.Cluster.add_pending_changes(ng_db.cluster, 'networks')
@classmethod
def cluster_has_bonds(cls, cluster_id):

View File

@ -20,6 +20,7 @@ from nailgun.objects.base import NailgunCollection
from nailgun.objects.release import Release
from nailgun.objects.release import ReleaseCollection
from nailgun.objects.cluster import Attributes
from nailgun.objects.cluster import Cluster
from nailgun.objects.cluster import ClusterCollection

View File

@ -27,6 +27,50 @@ from nailgun.errors import errors
from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject
from nailgun.utils import AttributesGenerator
from nailgun.utils import dict_merge
from nailgun.utils import traverse
class Attributes(NailgunObject):
"""Object-layer wrapper for cluster attributes; replaces the helper
methods formerly defined on the Attributes model."""
# Backing SQLAlchemy model.
model = models.Attributes
@classmethod
def generate_fields(cls, instance):
"""Resolve generator placeholders in instance.generated using the
shared traverse() util and AttributesGenerator, then flush
(no commit — caller owns the transaction)."""
instance.generated = traverse(
instance.generated,
AttributesGenerator
)
db().add(instance)
db().flush()
@classmethod
def merged_attrs(cls, instance):
"""Return generated attrs overlaid with editable ones (editable
wins on conflict, per dict_merge argument order)."""
return dict_merge(
instance.generated,
instance.editable
)
@classmethod
def merged_attrs_values(cls, instance):
"""Flattened view of merged_attrs for serializers: unwraps
{"value": x} leaves, hoists the "common" group to top level, and
expands "additional_components" booleans into
{component: {"enabled": bool}} entries."""
attrs = cls.merged_attrs(instance)
for group_attrs in attrs.itervalues():
for attr, value in group_attrs.iteritems():
if isinstance(value, dict) and 'value' in value:
group_attrs[attr] = value['value']
if 'common' in attrs:
attrs.update(attrs.pop('common'))
if 'additional_components' in attrs:
for comp, enabled in attrs['additional_components'].iteritems():
if isinstance(enabled, bool):
attrs.setdefault(comp, {}).update({
"enabled": enabled
})
attrs.pop('additional_components')
return attrs
class Cluster(NailgunObject):
@ -86,44 +130,89 @@ class Cluster(NailgunObject):
assign_nodes = data.pop("nodes", [])
with db().begin(subtransactions=True):
new_cluster = super(Cluster, cls).create(data)
new_cluster = super(Cluster, cls).create(data)
attributes = models.Attributes(
editable=new_cluster.release.attributes_metadata.get(
"editable"
),
generated=new_cluster.release.attributes_metadata.get(
"generated"
),
cluster=new_cluster
)
attributes.generate_fields()
cls.create_attributes(new_cluster)
netmanager = new_cluster.network_manager
netmanager = new_cluster.network_manager
try:
netmanager.create_network_groups(new_cluster.id)
if new_cluster.net_provider == 'neutron':
netmanager.create_neutron_config(new_cluster)
try:
netmanager.create_network_groups(new_cluster.id)
if new_cluster.net_provider == 'neutron':
netmanager.create_neutron_config(new_cluster)
new_cluster.add_pending_changes("attributes")
new_cluster.add_pending_changes("networks")
cls.add_pending_changes(new_cluster, "attributes")
cls.add_pending_changes(new_cluster, "networks")
if assign_nodes:
cls.update_nodes(new_cluster, assign_nodes)
if assign_nodes:
cls.update_nodes(new_cluster, assign_nodes)
except (
errors.OutOfVLANs,
errors.OutOfIPs,
errors.NoSuitableCIDR,
errors.InvalidNetworkPool
) as exc:
db().delete(new_cluster)
raise errors.CannotCreate(exc.message)
except (
errors.OutOfVLANs,
errors.OutOfIPs,
errors.NoSuitableCIDR,
errors.InvalidNetworkPool
) as exc:
db().delete(new_cluster)
raise errors.CannotCreate(exc.message)
db().flush()
return new_cluster
@classmethod
def create_attributes(cls, instance):
"""Create the Attributes row for a new cluster, seeding editable
and generated sections from the release's attributes metadata,
then resolve the generator placeholders."""
attributes = Attributes.create(
{
"editable": instance.release.attributes_metadata.get(
"editable"
),
"generated": instance.release.attributes_metadata.get(
"generated"
),
"cluster_id": instance.id
}
)
# Turn {"generator": ...} placeholders into concrete values.
Attributes.generate_fields(attributes)
@classmethod
def get_attributes(cls, instance):
    """Return the Attributes row bound to *instance*, or None."""
    query = db().query(models.Attributes)
    return query.filter_by(cluster_id=instance.id).first()
@classmethod
def add_pending_changes(cls, instance, changes_type, node_id=None):
    """Queue a pending change of *changes_type* for the cluster.

    Deduplicates on (cluster, name) — or (cluster, name, node_id)
    when a node is given — so an already-pending change is a no-op.
    Flushes without committing; the caller owns the transaction.
    """
    pending = db().query(models.ClusterChanges).filter_by(
        cluster=instance,
        name=changes_type
    )
    if node_id:
        pending = pending.filter_by(node_id=node_id)
    # An identical change is already queued — nothing to do.
    if pending.first():
        return
    new_change = models.ClusterChanges(
        cluster_id=instance.id,
        name=changes_type
    )
    if node_id:
        new_change.node_id = node_id
    db().add(new_change)
    db().flush()
@classmethod
def clear_pending_changes(cls, instance, node_id=None):
    """Drop pending ClusterChanges rows for the cluster.

    With *node_id*, only that node's changes are removed; otherwise
    all of the cluster's pending changes go. Flush only, no commit.
    """
    query = db().query(models.ClusterChanges).filter_by(
        cluster_id=instance.id
    )
    if node_id:
        query = query.filter_by(node_id=node_id)
    for change in query.all():
        db().delete(change)
    db().flush()
@classmethod
def update(cls, instance, data):
nodes = data.pop("nodes", None)
@ -134,37 +223,37 @@ class Cluster(NailgunObject):
@classmethod
def update_nodes(cls, instance, nodes_ids):
with db().begin(subtransactions=True):
# TODO(NAME): separate nodes
# for deletion and addition by set().
new_nodes = []
if nodes_ids:
new_nodes = db().query(models.Node).filter(
models.Node.id.in_(nodes_ids)
# TODO(NAME): separate nodes
# for deletion and addition by set().
new_nodes = []
if nodes_ids:
new_nodes = db().query(models.Node).filter(
models.Node.id.in_(nodes_ids)
)
nodes_to_remove = [n for n in instance.nodes
if n not in new_nodes]
nodes_to_add = [n for n in new_nodes
if n not in instance.nodes]
for node in nodes_to_add:
if not node.online:
raise errors.NodeOffline(
u"Cannot add offline node "
u"'{0}' to environment".format(node.id)
)
nodes_to_remove = [n for n in instance.nodes
if n not in new_nodes]
nodes_to_add = [n for n in new_nodes
if n not in instance.nodes]
for node in nodes_to_add:
if not node.online:
raise errors.NodeOffline(
u"Cannot add offline node "
u"'{0}' to environment".format(node.id)
)
map(instance.nodes.remove, nodes_to_remove)
map(instance.nodes.append, nodes_to_add)
map(
instance.network_manager.clear_assigned_networks,
nodes_to_remove
)
map(
instance.network_manager.assign_networks_by_default,
nodes_to_add
)
map(instance.nodes.remove, nodes_to_remove)
map(instance.nodes.append, nodes_to_add)
map(
instance.network_manager.clear_assigned_networks,
nodes_to_remove
)
map(
instance.network_manager.assign_networks_by_default,
nodes_to_add
)
db().flush()
class ClusterCollection(NailgunCollection):

View File

@ -21,6 +21,8 @@ from copy import deepcopy
from netaddr import IPNetwork
from sqlalchemy import and_
from nailgun import objects
from nailgun import consts
from nailgun.db import db
from nailgun.db.sqlalchemy.models import NetworkGroup
@ -80,7 +82,9 @@ class DeploymentMultinodeSerializer(object):
@classmethod
def get_common_attrs(cls, cluster):
"""Cluster attributes."""
attrs = cluster.attributes.merged_attrs_values()
attrs = objects.Attributes.merged_attrs_values(
cluster.attributes
)
attrs['deployment_mode'] = cluster.mode
attrs['deployment_id'] = cluster.id
attrs['nodes'] = cls.node_list(get_nodes_not_for_deletion(cluster))

View File

@ -16,6 +16,8 @@
"""Provisioning serializers for orchestrator"""
from nailgun import objects
from nailgun.logger import logger
from nailgun.network.manager import NetworkManager
from nailgun.settings import settings
@ -29,7 +31,9 @@ class ProvisioningSerializer(object):
def serialize(cls, cluster, nodes):
"""Serialize cluster for provisioning."""
cluster_attrs = cluster.attributes.merged_attrs_values()
cluster_attrs = objects.Attributes.merged_attrs_values(
cluster.attributes
)
serialized_nodes = cls.serialize_nodes(cluster_attrs, nodes)
return {

View File

@ -24,6 +24,7 @@ import traceback
from sqlalchemy import or_
from nailgun import notifier
from nailgun import objects
from nailgun.db import db
from nailgun.db.sqlalchemy.models import IPAddr
@ -517,11 +518,12 @@ class NailgunReceiver(object):
# restoring pending changes
task.cluster.status = "new"
task.cluster.add_pending_changes("attributes")
task.cluster.add_pending_changes("networks")
objects.Cluster.add_pending_changes(task.cluster, "attributes")
objects.Cluster.add_pending_changes(task.cluster, "networks")
for node in task.cluster.nodes:
task.cluster.add_pending_changes(
objects.Cluster.add_pending_changes(
task.cluster,
"disks",
node_id=node.id
)

View File

@ -19,6 +19,8 @@ import shutil
from sqlalchemy import or_
from nailgun import objects
from nailgun.db import db
from nailgun.db.sqlalchemy.models import IPAddr
from nailgun.db.sqlalchemy.models import Node
@ -252,7 +254,7 @@ class TaskHelper(object):
n.progress = 100
cls.__set_cluster_status(cluster, 'operational')
cluster.clear_pending_changes()
objects.Cluster.clear_pending_changes(cluster)
elif task.status == 'error' and \
not cls.__before_deployment_error(task):
# We don't want to set cluster status to

View File

@ -24,6 +24,8 @@ from sqlalchemy.orm import object_mapper
import nailgun.rpc as rpc
from nailgun import objects
from nailgun.db import db
from nailgun.db.sqlalchemy.models import CapacityLog
from nailgun.db.sqlalchemy.models import Cluster
@ -467,7 +469,9 @@ class CheckBeforeDeploymentTask(object):
@classmethod
def _check_ceph(cls, task):
storage = task.cluster.attributes.merged_attrs()['storage']
storage = objects.Attributes.merged_attrs(
task.cluster.attributes
)['storage']
for option in storage:
if '_ceph' in option and\
storage[option] and\
@ -490,7 +494,8 @@ class CheckBeforeDeploymentTask(object):
osd_count = len(filter(
lambda node: 'ceph-osd' in node.all_roles,
task.cluster.nodes))
osd_pool_size = int(task.cluster.attributes.merged_attrs(
osd_pool_size = int(objects.Attributes.merged_attrs(
task.cluster.attributes
)['storage']['osd_pool_size']['value'])
if osd_count < osd_pool_size:
raise errors.NotEnoughOsdNodes(

View File

@ -46,7 +46,6 @@ from nailgun.logger import logger
from nailgun.db.sqlalchemy.fixman import load_fixture
from nailgun.db.sqlalchemy.fixman import upload_fixture
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import Node
from nailgun.db.sqlalchemy.models import NodeAttributes
from nailgun.db.sqlalchemy.models import NodeNICInterface
@ -55,6 +54,7 @@ from nailgun.db.sqlalchemy.models import RedHatAccount
from nailgun.db.sqlalchemy.models import Task
# here come objects
from nailgun.objects import Cluster
from nailgun.objects import Release
from nailgun.consts import NETWORK_INTERFACE_TYPES
@ -122,7 +122,7 @@ class Environment(object):
'name': u"release_name_" + version,
'version': version,
'description': u"release_desc" + version,
'operating_system': 'CensOS',
'operating_system': 'CentOS',
'roles': self.get_default_roles(),
'networks_metadata': self.get_default_networks_metadata(),
'attributes_metadata': self.get_default_attributes_metadata(),
@ -166,16 +166,15 @@ class Environment(object):
def create_cluster(self, api=True, exclude=None, **kwargs):
cluster_data = {
'name': 'cluster-api-' + str(randint(0, 1000000))
'name': 'cluster-api-' + str(randint(0, 1000000)),
}
if api:
cluster_data['release'] = self.create_release(api=False).id
else:
cluster_data['release'] = self.create_release(api=False)
if kwargs:
cluster_data.update(kwargs)
if 'release_id' not in cluster_data:
cluster_data['release_id'] = self.create_release(api=False).id
if exclude and isinstance(exclude, list):
for ex in exclude:
try:
@ -186,19 +185,17 @@ class Environment(object):
resp = self.app.post(
reverse('ClusterCollectionHandler'),
json.dumps(cluster_data),
headers=self.default_headers
headers=self.default_headers,
expect_errors=True
)
self.tester.assertEquals(resp.status_code, 201)
cluster = json.loads(resp.body)
self.clusters.append(
self.db.query(Cluster).get(cluster['id'])
Cluster.get_by_uid(cluster['id'])
)
else:
cluster = Cluster()
for field, value in cluster_data.iteritems():
setattr(cluster, field, value)
self.db.add(cluster)
self.db.commit()
cluster = Cluster.create(cluster_data)
db().commit()
self.clusters.append(cluster)
return cluster

View File

@ -16,8 +16,8 @@
import json
from nailgun.db.sqlalchemy.models import Attributes
from nailgun.db.sqlalchemy.models import Cluster
from nailgun import objects
from nailgun.db.sqlalchemy.models import Release
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import reverse
@ -27,23 +27,20 @@ class TestAttributes(BaseIntegrationTest):
def test_attributes_creation(self):
cluster = self.env.create_cluster(api=True)
cluster_db = self.env.clusters[0]
resp = self.app.get(
reverse(
'ClusterAttributesHandler',
kwargs={'cluster_id': cluster['id']}),
headers=self.default_headers
)
release = self.db.query(Release).get(
cluster['release_id']
)
release = objects.Release.get_by_uid(cluster['release_id'])
self.assertEquals(200, resp.status_code)
self.assertEquals(
json.loads(resp.body)['editable'],
release.attributes_metadata['editable']
)
attrs = self.db.query(Attributes).filter(
Attributes.cluster_id == cluster['id']
).first()
attrs = objects.Cluster.get_attributes(cluster_db)
self._compare(
release.attributes_metadata['generated'],
attrs.generated
@ -51,6 +48,8 @@ class TestAttributes(BaseIntegrationTest):
def test_500_if_no_attributes(self):
cluster = self.env.create_cluster(api=False)
self.db.delete(cluster.attributes)
self.db.commit()
resp = self.app.put(
reverse(
'ClusterAttributesHandler',
@ -67,6 +66,7 @@ class TestAttributes(BaseIntegrationTest):
def test_attributes_update_put(self):
cluster_id = self.env.create_cluster(api=True)['id']
cluster_db = self.env.clusters[0]
resp = self.app.get(
reverse(
'ClusterAttributesHandler',
@ -86,9 +86,7 @@ class TestAttributes(BaseIntegrationTest):
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
attrs = self.db.query(Attributes).filter(
Attributes.cluster_id == cluster_id
).first()
attrs = objects.Cluster.get_attributes(cluster_db)
self.assertEquals("bar", attrs.editable["foo"])
attrs.editable.pop('foo')
self.assertEqual(attrs.editable, {})
@ -121,6 +119,7 @@ class TestAttributes(BaseIntegrationTest):
def test_attributes_update_patch(self):
cluster_id = self.env.create_cluster(api=True)['id']
cluster_db = self.env.clusters[0]
resp = self.app.get(
reverse(
'ClusterAttributesHandler',
@ -140,9 +139,7 @@ class TestAttributes(BaseIntegrationTest):
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
attrs = self.db.query(Attributes).filter(
Attributes.cluster_id == cluster_id
).first()
attrs = objects.Cluster.get_attributes(cluster_db)
self.assertEquals("bar", attrs.editable["foo"])
attrs.editable.pop('foo')
self.assertNotEqual(attrs.editable, {})
@ -166,6 +163,7 @@ class TestAttributes(BaseIntegrationTest):
def test_attributes_set_defaults(self):
cluster = self.env.create_cluster(api=True)
cluster_db = self.env.clusters[0]
# Change editable attributes.
resp = self.app.put(
reverse(
@ -180,9 +178,7 @@ class TestAttributes(BaseIntegrationTest):
expect_errors=True
)
self.assertEquals(200, resp.status_code)
attrs = self.db.query(Attributes).filter(
Attributes.cluster_id == cluster['id']
).first()
attrs = objects.Cluster.get_attributes(cluster_db)
self.assertEquals("bar", attrs.editable["foo"])
# Set attributes to defaults.
resp = self.app.put(
@ -202,9 +198,10 @@ class TestAttributes(BaseIntegrationTest):
def test_attributes_merged_values(self):
cluster = self.env.create_cluster(api=True)
cluster_db = self.db.query(Cluster).get(cluster['id'])
orig_attrs = cluster_db.attributes.merged_attrs()
attrs = cluster_db.attributes.merged_attrs_values()
cluster_db = objects.Cluster.get_by_uid(cluster['id'])
attrs = objects.Cluster.get_attributes(cluster_db)
orig_attrs = objects.Attributes.merged_attrs(attrs)
attrs = objects.Attributes.merged_attrs_values(attrs)
for group, group_attrs in orig_attrs.iteritems():
for attr, orig_value in group_attrs.iteritems():
if group == 'common':

View File

@ -16,6 +16,8 @@
import json
from nailgun import objects
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import ClusterChanges
from nailgun.test.base import BaseIntegrationTest
@ -93,7 +95,7 @@ class TestClusterChanges(BaseIntegrationTest):
def test_attributes_changing_adds_pending_changes(self):
cluster = self.env.create_cluster(api=True)
cluster_db = self.env.clusters[0]
cluster_db.clear_pending_changes()
objects.Cluster.clear_pending_changes(cluster_db)
all_changes = self.db.query(ClusterChanges).all()
self.assertEquals(len(all_changes), 0)
self.app.put(
@ -115,7 +117,7 @@ class TestClusterChanges(BaseIntegrationTest):
def test_default_attributes_adds_pending_changes(self):
cluster = self.env.create_cluster(api=True)
cluster_db = self.env.clusters[0]
cluster_db.clear_pending_changes()
objects.Cluster.clear_pending_changes(cluster_db)
all_changes = self.db.query(ClusterChanges).all()
self.assertEquals(len(all_changes), 0)
self.app.put(
@ -132,7 +134,7 @@ class TestClusterChanges(BaseIntegrationTest):
def test_network_changing_adds_pending_changes(self):
cluster = self.env.create_cluster(api=True)
cluster_db = self.env.clusters[0]
cluster_db.clear_pending_changes()
objects.Cluster.clear_pending_changes(cluster_db)
all_changes = self.db.query(ClusterChanges).all()
self.assertEquals(len(all_changes), 0)
resp = self.app.get(

View File

@ -21,6 +21,8 @@ from mock import patch
import nailgun
from nailgun import objects
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.network.manager import NetworkManager
from nailgun.settings import settings
@ -86,7 +88,9 @@ class TestHandlers(BaseIntegrationTest):
'deployment_id': cluster_db.id
}
cluster_attrs = cluster_db.attributes.merged_attrs_values()
cluster_attrs = objects.Attributes.merged_attrs_values(
cluster_db.attributes
)
common_attrs.update(cluster_attrs)
# Common attrs calculation
@ -341,7 +345,9 @@ class TestHandlers(BaseIntegrationTest):
'deployment_id': cluster_db.id
}
cluster_attrs = cluster_db.attributes.merged_attrs_values()
cluster_attrs = objects.Attributes.merged_attrs_values(
cluster_db.attributes
)
common_attrs.update(cluster_attrs)
L2 = {

View File

@ -66,7 +66,7 @@ class TestHandlers(BaseIntegrationTest):
cluster = self.env.create_cluster(api=True)
cluster_db = self.db.query(Cluster).get(cluster["id"])
cluster2 = self.env.create_cluster(api=True,
release=cluster_db.release.id)
release_id=cluster_db.release.id)
cluster2_db = self.db.query(Cluster).get(cluster2["id"])
for clstr in (cluster_db, cluster2_db):

View File

@ -16,9 +16,6 @@
import json
from sqlalchemy.sql import not_
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import fake_tasks
from nailgun.test.base import reverse
@ -30,27 +27,6 @@ class TestNetworkModels(BaseIntegrationTest):
self._wait_for_threads()
super(TestNetworkModels, self).tearDown()
def test_network_group_size_of_1_creates_1_network(self):
    """A NetworkGroup whose size covers its whole CIDR yields one network."""
    cluster = self.env.create_cluster(api=False)
    params = {
        'release': cluster.release_id,
        'cidr': '10.0.0.0/24',
        'netmask': '255.255.255.0',
        'network_size': 256,
        'name': 'fixed',
        'vlan_start': 200,
        'cluster_id': cluster.id,
    }
    group = NetworkGroup(**params)
    self.db.add(group)
    self.db.commit()
    self.env.network_manager.cleanup_network_group(group)
    # Ignore the admin network that every environment gets implicitly.
    networks = self.db.query(NetworkGroup).filter(
        not_(NetworkGroup.name == "fuelweb_admin")
    ).all()
    self.assertEquals(len(networks), 1)
    self.assertEquals(networks[0].vlan_start, params['vlan_start'])
    self.assertEquals(networks[0].name, params['name'])
    self.assertEquals(networks[0].cidr, params['cidr'])
@fake_tasks(godmode=True)
def test_cluster_locking_after_deployment(self):
self.env.create(
@ -97,101 +73,3 @@ class TestNetworkModels(BaseIntegrationTest):
# it's 400 because we used Nova network
self.assertEquals(resp_neutron_net.status_code, 400)
self.assertEquals(resp_cluster.status_code, 403)
def test_network_group_creates_several_networks(self):
    """A NetworkGroup with amount > 1 produces that many sibling networks."""
    cluster = self.env.create_cluster(api=False)
    params = {
        'release': cluster.release_id,
        'cidr': '10.0.0.0/8',
        'netmask': '255.0.0.0',
        'network_size': 256,
        'name': 'fixed',
        'vlan_start': 200,
        'amount': 25,
        'cluster_id': cluster.id,
    }
    group = NetworkGroup(**params)
    self.db.add(group)
    self.db.commit()
    self.env.network_manager.cleanup_network_group(group)
    # Ignore the admin network that every environment gets implicitly.
    networks = self.db.query(NetworkGroup).filter(
        not_(NetworkGroup.name == "fuelweb_admin")
    ).all()
    self.assertEquals(networks[0].amount, params['amount'])
    self.assertEquals(networks[0].vlan_start, params['vlan_start'])
    self.assertEquals(all(net.name == params['name'] for net in networks), True)
def test_network_group_slices_cidr_for_networks(self):
    """Sub-networks are carved from the CIDR without changing the base CIDR."""
    cluster = self.env.create_cluster(api=False)
    params = {
        'release': cluster.release_id,
        'cidr': '10.0.0.0/16',
        'netmask': '255.255.0.0',
        'network_size': 128,
        'name': 'fixed',
        'vlan_start': 200,
        'amount': 2,
        'cluster_id': cluster.id,
    }
    group = NetworkGroup(**params)
    self.db.add(group)
    self.db.commit()
    self.env.network_manager.cleanup_network_group(group)
    # Ignore the admin network that every environment gets implicitly.
    networks = self.db.query(NetworkGroup).filter(
        not_(NetworkGroup.name == "fuelweb_admin")
    ).all()
    self.assertEquals(networks[0].amount, params['amount'])
    self.assertEquals(networks[0].cidr, '10.0.0.0/16')
    # Reload from the DB to make sure the stored group kept its CIDR too.
    self.db.refresh(group)
    self.assertEquals(group.cidr, '10.0.0.0/16')
def test_network_group_does_not_squeezes_base_cidr(self):
    """Splitting into several networks must not shrink the group's base CIDR."""
    cluster = self.env.create_cluster(api=False)
    params = {
        'release': cluster.release_id,
        'cidr': '172.0.0.0/24',
        'netmask': '255.255.255.0',
        'network_size': 64,
        'name': 'fixed',
        'vlan_start': 200,
        'amount': 3,
        'cluster_id': cluster.id,
    }
    group = NetworkGroup(**params)
    self.db.add(group)
    self.db.commit()
    self.env.network_manager.cleanup_network_group(group)
    # Reload from the DB: the stored CIDR must be unchanged.
    self.db.refresh(group)
    self.assertEquals(group.cidr, "172.0.0.0/24")
def test_network_group_does_not_squeezes_base_cidr_if_amount_1(self):
    """With a single network the group's base CIDR stays untouched as well."""
    cluster = self.env.create_cluster(api=False)
    params = {
        'release': cluster.release_id,
        'cidr': '172.0.0.0/8',
        'netmask': '255.0.0.0',
        'network_size': 256,
        'name': 'public',
        'vlan_start': 200,
        'amount': 1,
        'cluster_id': cluster.id,
    }
    group = NetworkGroup(**params)
    self.db.add(group)
    self.db.commit()
    self.env.network_manager.cleanup_network_group(group)
    # Reload from the DB: the stored CIDR must be unchanged.
    self.db.refresh(group)
    self.assertEquals(group.cidr, "172.0.0.0/8")
def test_network_group_sets_correct_gateway_for_few_nets(self):
    """The configured gateway is propagated when the group spawns networks."""
    cluster = self.env.create_cluster(api=False)
    params = {
        'release': cluster.release_id,
        'cidr': '10.0.0.0/8',
        'netmask': '255.0.0.0',
        'network_size': 128,
        'name': 'fixed',
        'vlan_start': 200,
        'amount': 2,
        'gateway': "10.0.0.5",
        'cluster_id': cluster.id,
    }
    group = NetworkGroup(**params)
    self.db.add(group)
    self.db.commit()
    self.env.network_manager.cleanup_network_group(group)
    # Ignore the admin network that every environment gets implicitly.
    networks = self.db.query(NetworkGroup).filter(
        not_(NetworkGroup.name == "fuelweb_admin")
    ).all()
    self.assertEquals(networks[0].amount, params['amount'])
    self.assertEquals(networks[0].gateway, "10.0.0.5")

View File

@ -102,7 +102,7 @@ class TestHandlers(BaseIntegrationTest):
headers=self.default_headers
)
self.assertEquals(resp.status_code, 200)
self.db.refresh(node)
self.db.refresh(node)
node_from_db = self.db.query(Node).filter(
Node.id == node.id

View File

@ -16,6 +16,8 @@
from mock import patch
from nailgun import objects
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import fake_tasks
@ -64,7 +66,7 @@ class TestProvisioning(BaseIntegrationTest):
]
)
cluster = self.env.clusters[0]
cluster.clear_pending_changes()
objects.Cluster.clear_pending_changes(cluster)
nodes_ids = map(lambda n: n.id, self.env.nodes)
self.env.network_manager.assign_ips(nodes_ids, 'management')
self.env.network_manager.assign_ips(nodes_ids, 'storage')

View File

@ -15,7 +15,6 @@
# under the License.
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import Node
from nailgun.orchestrator.provisioning_serializers import serialize
from nailgun.test.base import BaseIntegrationTest
@ -25,17 +24,18 @@ class TestProvisioningSerializer(BaseIntegrationTest):
def test_ubuntu_serializer(self):
release = self.env.create_release(
api=False, operating_system='Ubuntu')
api=False,
operating_system='Ubuntu')
cluster = self.env.create(
self.env.create(
cluster_kwargs={
'mode': 'multinode',
'release': release.id},
'release_id': release.id},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['compute'], 'pending_addition': True}])
cluster_db = self.db.query(Cluster).get(cluster['id'])
cluster_db = self.env.clusters[0]
serialized_cluster = serialize(cluster_db, cluster_db.nodes)
for node in serialized_cluster['nodes']:

View File

@ -22,6 +22,7 @@ import time
from mock import patch
from nailgun import objects
from nailgun.settings import settings
from nailgun.db.sqlalchemy.models import Cluster
@ -128,7 +129,7 @@ class TestTaskManagers(BaseIntegrationTest):
self.env.nodes[0].pending_addition = False
self.db.commit()
cluster_db.clear_pending_changes()
objects.Cluster.clear_pending_changes(cluster_db)
supertask = self.env.launch_deployment()
self.assertEquals(supertask.name, 'deploy')
@ -398,7 +399,7 @@ class TestTaskManagers(BaseIntegrationTest):
]
)
cluster_db = self.env.clusters[0]
cluster_db.clear_pending_changes()
objects.Cluster.clear_pending_changes(cluster_db)
manager = ApplyChangesTaskManager(cluster_db.id)
self.assertRaises(errors.WrongNodeStatus, manager.execute)
@ -439,7 +440,7 @@ class TestTaskManagers(BaseIntegrationTest):
@fake_tasks()
def test_deletion_offline_node_when_cluster_has_only_one_node(self):
cluster = self.env.create_cluster()
self.env.clusters[0].clear_pending_changes()
objects.Cluster.clear_pending_changes(self.env.clusters[0])
self.env.create_node(
cluster_id=cluster['id'],
online=False,

View File

@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import nested
from mock import Mock
from mock import patch
@ -24,37 +26,44 @@ from nailgun.test.base import BaseTestCase
class TestClusterValidator(BaseTestCase):
def setUp(self):
super(TestClusterValidator, self).setUp()
self.cluster_data = '{"name": "test"}'
self.release_data = '{"release": 1}'
self.cluster_data = '{"name": "test", "release": 1}'
def test_cluster_exists_validation(self):
with patch('nailgun.api.validators.cluster.db', Mock()) as db:
db.return_value.query.return_value.filter_by.\
return_value.first.return_value = 'cluster'
with patch(
'nailgun.api.validators.cluster.ClusterCollection',
Mock()
) as cc:
cc.filter_by.return_value.first.return_value = 'cluster'
self.assertRaises(errors.AlreadyExists,
ClusterValidator.validate, self.cluster_data)
def test_cluster_non_exists_validation(self):
with patch('nailgun.api.validators.cluster.db', Mock()) as db:
with nested(
patch('nailgun.api.validators.cluster.ClusterCollection', Mock()),
patch('nailgun.api.validators.cluster.Release', Mock())
) as (cc, r):
try:
db.return_value.query.return_value.filter_by.\
return_value.first.return_value = None
cc.filter_by.return_value.first.return_value = None
r.get_by_uuid.return_value = 'release'
ClusterValidator.validate(self.cluster_data)
except errors.AlreadyExists as e:
self.fail('Cluster exists validation failed: {0}'.format(e))
self.fail(
'Cluster exists validation failed: {0}'.format(e)
)
def test_release_exists_validation(self):
with patch('nailgun.api.validators.cluster.db', Mock()) as db:
db.return_value.query.return_value.get.\
return_value = None
with patch(
'nailgun.api.validators.cluster.ClusterCollection',
Mock()
) as cc:
cc.filter_by.return_value.first.return_value = None
self.assertRaises(errors.InvalidData,
ClusterValidator.validate, self.release_data)
ClusterValidator.validate, self.cluster_data)
def test_release_non_exists_validation(self):
with patch('nailgun.api.validators.cluster.db', Mock()) as db:
with patch('nailgun.api.validators.cluster.Release', Mock()) as r:
try:
db.return_value.query.return_value.get.\
return_value = 'release'
ClusterValidator.validate(self.release_data)
r.get_by_uuid.return_value = None
ClusterValidator.validate(self.cluster_data)
except errors.InvalidData as e:
self.fail('Release exists validation failed: {0}'.format(e))

View File

@ -12,7 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
import string
from copy import deepcopy
from random import choice
from nailgun.logger import logger
from nailgun.settings import settings
def dict_merge(a, b):
@ -29,3 +35,46 @@ def dict_merge(a, b):
else:
result[k] = deepcopy(v)
return result
def traverse(cdict, generator_class):
    """Recursively resolve a config dict, expanding generator directives.

    Scalars (strings and numbers) are copied as-is.  A dict of the form
    ``{"generator": <name>, "generator_arg": <arg>}`` is replaced by the
    result of calling ``getattr(generator_class, <name>)(<arg>)``.  Any
    other value is traversed recursively (it is assumed to be dict-like;
    NOTE(review): lists would break here — confirm callers never pass them).

    :param cdict: source dictionary (may be None/empty → empty result)
    :param generator_class: class providing generator methods by name
    :returns: a new dict with all generators expanded
    :raises AttributeError: when a named generator does not exist
    """
    result = {}
    if not cdict:
        return result
    for key, val in cdict.iteritems():
        if isinstance(val, (str, unicode, int, float)):
            result[key] = val
        elif isinstance(val, dict) and "generator" in val:
            try:
                generator = getattr(generator_class, val["generator"])
            except AttributeError:
                # Log the missing generator name before re-raising so the
                # failure is visible in server logs, not just the traceback.
                logger.error("Attribute error: %s" % val["generator"])
                raise
            else:
                result[key] = generator(val.get("generator_arg"))
        else:
            result[key] = traverse(val, generator_class)
    return result
class AttributesGenerator(object):
    """Named value generators referenced by "generator" config directives."""

    @classmethod
    def password(cls, arg=None):
        """Return a random alphanumeric password.

        ``arg`` is the desired length; anything that is not a valid
        integer falls back to the default of 8 characters.
        """
        try:
            size = int(arg)
        except Exception:
            size = 8
        alphabet = string.letters + string.digits
        return u''.join(choice(alphabet) for _ in xrange(size))

    @classmethod
    def ip(cls, arg=None):
        """Return the master node IP for "admin"/"master", else loopback."""
        if str(arg) in ("admin", "master"):
            return settings.MASTER_IP
        return "127.0.0.1"

    @classmethod
    def identical(cls, arg=None):
        """Return the argument itself, coerced to a string."""
        return str(arg)