Merge NodeAttributes model into Node model

* Move vms_conf from NodeAttributes to Node.
* Drop the interfaces field, which is not used anymore.
* Fix all references to vms_conf across all sources.
* Remove the explicit changed() call, since JSON fields
  can detect updates at all levels.
* Replace objects.Node.set_vms_conf() function
  with direct attribute assignment

Change-Id: I2112a6dba30b49e3d658ef1503ee48ad1f92e693
Implements: blueprint support-numa-cpu-pinning
This commit is contained in:
Alexander Saprykin 2016-02-08 14:43:29 +02:00
parent 65f59a458a
commit 5a2d4f0d3e
16 changed files with 117 additions and 85 deletions

View File

@ -87,7 +87,7 @@ class NodeVMsHandler(BaseHandler):
* 404 (node not found in db)
"""
node = self.get_object_or_404(objects.Node, node_id)
node_vms = node.attributes.vms_conf
node_vms = node.vms_conf
return {"vms_conf": node_vms}
@content
@ -101,5 +101,5 @@ class NodeVMsHandler(BaseHandler):
node = self.get_object_or_404(objects.Node, node_id)
data = self.checked_data()
objects.Node.set_vms_conf(node, data.get("vms_conf"))
return {"vms_conf": node.attributes.vms_conf}
node.vms_conf = data.get("vms_conf")
return {"vms_conf": node.vms_conf}

View File

@ -28,6 +28,7 @@ import sqlalchemy as sa
from oslo_serialization import jsonutils
from nailgun import consts
from nailgun.db.sqlalchemy.models import fields
revision = '11a9adc6d36a'
down_revision = '43b2cb64dae6'
@ -38,9 +39,11 @@ def upgrade():
upgrade_ip_address()
update_vips_from_network_roles()
upgrade_node_roles_metadata()
merge_node_attributes_with_nodes()
def downgrade():
downgrade_merge_node_attributes_with_nodes()
downgrade_node_roles_metadata()
remove_foreign_key_ondelete()
downgrade_ip_address()
@ -588,3 +591,41 @@ def downgrade_node_roles_metadata():
id=id,
roles_metadata=jsonutils.dumps(roles_metadata),
)
def merge_node_attributes_with_nodes():
    """Fold the ``node_attributes`` table into ``nodes``.

    Adds a ``vms_conf`` JSON column to ``nodes``, copies every node's
    ``vms_conf`` value over from ``node_attributes``, and finally drops
    the now-obsolete ``node_attributes`` table.
    """
    conn = op.get_bind()

    # New destination column; '[]' keeps existing rows valid (NOT NULL).
    op.add_column(
        'nodes',
        sa.Column(
            'vms_conf',
            fields.JSON(),
            nullable=False,
            server_default='[]',
        ),
    )

    select_rows = sa.sql.text(
        'SELECT node_id, vms_conf FROM node_attributes')
    copy_row = sa.sql.text(
        'UPDATE nodes SET vms_conf = :vms_conf WHERE id = :node_id')
    for node_id, vms_conf in conn.execute(select_rows):
        conn.execute(copy_row, node_id=node_id, vms_conf=vms_conf)

    op.drop_table('node_attributes')
def downgrade_merge_node_attributes_with_nodes():
    """Recreate ``node_attributes`` and drop ``nodes.vms_conf``.

    NOTE: data is not copied back into the recreated table, so
    ``vms_conf`` values are lost on downgrade.
    """
    columns = (
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('node_id', sa.Integer(), nullable=True),
        sa.Column('interfaces', fields.JSON(), nullable=True),
        sa.Column(
            'vms_conf', fields.JSON(),
            nullable=False, server_default='[]'),
    )
    op.create_table(
        'node_attributes',
        *columns,
        sa.ForeignKeyConstraint(['node_id'], ['nodes.id']),
        sa.PrimaryKeyConstraint('id')
    )
    op.drop_column('nodes', 'vms_conf')

View File

@ -197,7 +197,6 @@ def upload_fixture(fileobj, loader=None):
# UGLY HACK for testing
if new_obj.__class__.__name__ == 'Node':
objects.Node.create_attributes(new_obj)
objects.Node.update_interfaces(new_obj)
fire_callback_on_node_create(new_obj)
db().commit()

View File

@ -27,7 +27,6 @@ from nailgun.db.sqlalchemy.models.cluster import VmwareAttributes
from nailgun.db.sqlalchemy.models.release import Release
from nailgun.db.sqlalchemy.models.node import Node
from nailgun.db.sqlalchemy.models.node import NodeAttributes
from nailgun.db.sqlalchemy.models.node import NodeBondInterface
from nailgun.db.sqlalchemy.models.node import NodeNICInterface
from nailgun.db.sqlalchemy.models.node import NodeGroup

View File

@ -26,9 +26,10 @@ from sqlalchemy import String
from sqlalchemy import Text
from sqlalchemy import Unicode
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import relationship, backref
from sqlalchemy.dialects import postgresql as psql
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.orm import relationship
from nailgun import consts
from nailgun.db.sqlalchemy.models.base import Base
@ -104,10 +105,7 @@ class Node(Base):
default=[], nullable=False, server_default='{}')
primary_roles = Column(psql.ARRAY(String(consts.ROLE_NAME_MAX_SIZE)),
default=[], nullable=False, server_default='{}')
attributes = relationship("NodeAttributes",
backref=backref("node"),
uselist=False,
cascade="all,delete")
nic_interfaces = relationship("NodeNICInterface", backref="node",
cascade="all, delete-orphan",
order_by="NodeNICInterface.name")
@ -125,6 +123,8 @@ class Node(Base):
server_default=None, nullable=True)
extensions = Column(psql.ARRAY(String(consts.EXTENSION_NAME_MAX_SIZE)),
default=[], nullable=False, server_default='{}')
vms_conf = Column(MutableList.as_mutable(JSON),
default=[], server_default='[]', nullable=False)
@property
def interfaces(self):
@ -227,15 +227,6 @@ class Node(Base):
self.meta = data
class NodeAttributes(Base):
__tablename__ = 'node_attributes'
id = Column(Integer, primary_key=True)
node_id = Column(Integer, ForeignKey('nodes.id', ondelete='CASCADE'))
interfaces = Column(MutableDict.as_mutable(JSON), default={})
vms_conf = Column(MutableList.as_mutable(JSON),
default=[], server_default='[]')
class NodeNICInterface(Base):
__tablename__ = 'node_nic_interfaces'
id = Column(Integer, primary_key=True)

View File

@ -82,8 +82,6 @@ class NodeDefaultsDisksHandler(BaseHandler):
* 404 (node or its attributes not found in db)
"""
node = self.get_object_or_404(objects.Node, node_id)
if not node.attributes:
raise self.http(404)
volumes = DisksFormatConvertor.format_disks_to_simple(
node.volume_manager.gen_volumes_info())

View File

@ -1124,7 +1124,7 @@ class Cluster(NailgunObject):
nodes = []
for node in cls.get_nodes_by_role(instance,
consts.VIRTUAL_NODE_TYPES.virt):
for vm in node.attributes.vms_conf:
for vm in node.vms_conf:
if not vm.get('created'):
nodes.append(node)
return nodes
@ -1133,11 +1133,9 @@ class Cluster(NailgunObject):
def set_vms_created_state(cls, instance):
nodes = cls.get_nodes_by_role(instance, consts.VIRTUAL_NODE_TYPES.virt)
for node in nodes:
for vm in node.attributes.vms_conf:
for vm in node.vms_conf:
if not vm.get('created'):
vm['created'] = True
# Second level data was changed in 'vms_conf'.
node.attributes.vms_conf.changed()
db().flush()
@classmethod

View File

@ -245,7 +245,6 @@ class Node(NailgunObject):
cls.update_primary_roles(new_node, primary_roles)
# creating attributes
cls.create_attributes(new_node)
cls.create_discover_notification(new_node)
if new_node.ip:
@ -332,20 +331,6 @@ class Node(NailgunObject):
db().add(instance)
db().flush()
@classmethod
def create_attributes(cls, instance):
    """Attach a fresh NodeAttributes record to a Node.

    :param instance: Node instance
    :returns: the newly created NodeAttributes instance
    """
    attrs = models.NodeAttributes()
    instance.attributes = attrs
    # Register both objects and flush so the new row gets persisted
    # (and primary keys assigned) right away.
    db().add(instance)
    db().add(attrs)
    db().flush()
    return attrs
@classmethod
def is_interfaces_configuration_locked(cls, instance):
"""Returns true if update of network configuration is not allowed.
@ -391,19 +376,6 @@ class Node(NailgunObject):
)
logger.warning(traceback.format_exc())
@classmethod
def set_vms_conf(cls, instance, vms_conf):
    """Set vms_conf for Node instance from JSON data.

    :param instance: Node instance
    :param vms_conf: JSON with new vms_conf data
    :returns: None
    """
    # Bulk UPDATE bypasses the ORM unit of work, so refresh the
    # instance afterwards to make the new value visible on it.
    db().query(models.NodeAttributes).filter_by(
        node_id=instance.id).update({'vms_conf': vms_conf})
    db().flush()
    db().refresh(instance)
@classmethod
def create_discover_notification(cls, instance):
"""Create notification about discovering new Node
@ -1006,10 +978,8 @@ class Node(NailgunObject):
if consts.VIRTUAL_NODE_TYPES.virt not in node.all_roles:
return
for vm in node.attributes.vms_conf:
for vm in node.vms_conf:
vm['created'] = False
# Was changed second level data in 'vms_conf'
node.attributes.vms_conf.changed()
class NodeCollection(NailgunCollection):

View File

@ -19,10 +19,8 @@
from copy import deepcopy
from itertools import groupby
import sqlalchemy as sa
from sqlalchemy.orm import joinedload
import six
import sqlalchemy as sa
from nailgun import consts
from nailgun.db import db
@ -171,7 +169,7 @@ class DeploymentMultinodeSerializer(object):
).filter(sa.or_(
Node.roles.any('ceph-osd'),
Node.pending_roles.any('ceph-osd')
)).options(joinedload('attributes'))
))
for node in nodes:
for disk in node_extension_call('get_node_volumes', node):
@ -250,7 +248,7 @@ class DeploymentMultinodeSerializer(object):
'fqdn': objects.Node.get_node_fqdn(node),
'status': node.status,
'role': role,
'vms_conf': node.attributes.vms_conf,
'vms_conf': node.vms_conf,
# TODO(eli): need to remove, requried for the fake thread only
'online': node.online
}

View File

@ -56,7 +56,6 @@ from nailgun.db.sqlalchemy.fixman import load_fake_deployment_tasks
from nailgun.db.sqlalchemy.fixman import load_fixture
from nailgun.db.sqlalchemy.fixman import upload_fixture
from nailgun.db.sqlalchemy.models import ClusterPluginLink
from nailgun.db.sqlalchemy.models import NodeAttributes
from nailgun.db.sqlalchemy.models import NodeNICInterface
from nailgun.db.sqlalchemy.models import Notification
from nailgun.db.sqlalchemy.models import PluginLink
@ -393,9 +392,6 @@ class EnvironmentManager(object):
self.db.commit()
return task
def create_attributes(self):
return NodeAttributes()
def create_notification(self, **kwargs):
notif_data = {
"topic": "discover",

View File

@ -249,7 +249,7 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
node_db = self.db.query(Node).get(node['id'])
vms_conf = [{'id': 1, 'cluster_id': self.cluster.id}]
objects.Node.set_vms_conf(node_db, vms_conf)
node_db.vms_conf = vms_conf
serialized_data = self.serializer.serialize_node(node_db, 'controller')
self.assertEqual(serialized_data['vms_conf'], vms_conf)

View File

@ -118,16 +118,16 @@ class TestProvisioning(BaseIntegrationTest):
)
nodes = self.env.nodes
nodes[0].attributes.vms_conf = [
nodes[0].vms_conf = [
{'id': 1, 'cpu': 1, 'mem': 2, 'created': True},
{'id': 2, 'cpu': 1, 'mem': 2, 'created': True}
]
nodes[1].attributes.vms_conf = [
nodes[1].vms_conf = [
{'id': 1, 'cpu': 2, 'mem': 4}
]
self.db.commit()
self.env.launch_provisioning_selected([str(nodes[0].id)])
for conf in nodes[0].attributes.vms_conf:
for conf in nodes[0].vms_conf:
self.assertFalse(conf['created'])

View File

@ -34,8 +34,7 @@ class TestSpawnVMs(BaseIntegrationTest):
]
)
cluster = self.env.clusters[0]
objects.Node.set_vms_conf(cluster.nodes[0],
[{'id': 1, 'cluster_id': cluster.id}])
cluster.nodes[0].vms_conf = [{'id': 1, 'cluster_id': cluster.id}]
resp = self.app.put(
reverse(

View File

@ -67,13 +67,10 @@ class TestLoadDbDriverWithSAExceptions(unittest.TestCase):
def test_sa_relationship_constraint(self):
def handler():
node = models.Node(
mac='60:a4:4c:35:28:95',
timestamp=datetime.datetime.now()
)
ip_addr = models.IPAddr()
node.attributes = models.IPAddr()
db.add(node)
ip_addr.network_data = models.IPAddr()
db.add(ip_addr)
db.flush()
self.assertRaises(AssertionError, load_db_driver, handler)

View File

@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import alembic
from oslo_serialization import jsonutils
import sqlalchemy as sa
@ -141,6 +143,31 @@ def prepare():
'fuel_version': '8.0',
}])
db.execute(
meta.tables['nodes'].insert(),
[{
'uuid': '26b508d0-0d76-4159-bce9-f67ec2765480',
'cluster_id': None,
'group_id': None,
'status': 'discover',
'meta': '{}',
'mac': 'aa:aa:aa:aa:aa:aa',
'timestamp': datetime.datetime.utcnow(),
}]
)
node_id = result.inserted_primary_key[0]
db.execute(
meta.tables['node_attributes'].insert(),
[{
'node_id': node_id,
'vms_conf': jsonutils.dumps([
{'cpu': 1, 'mem': 2},
{'cpu': 1, 'mem': 2},
])
}]
)
db.execute(
meta.tables['ip_addrs'].insert(),
[
@ -377,3 +404,23 @@ class TestNodeRolesMigration(base.BaseAlembicMigrationTest):
role_group,
role_groups.get(role_name, consts.NODE_ROLE_GROUPS.other)
)
class TestMergeNodeAttributes(base.BaseAlembicMigrationTest):
    """Checks for the node_attributes -> nodes merge migration."""

    def test_node_attributes_not_exists(self):
        # The old table must be gone after the upgrade.
        self.assertNotIn('node_attributes', self.meta.tables)

    def test_data_moved_into_nodes_table(self):
        expected_conf = [
            {'cpu': 1, 'mem': 2},
            {'cpu': 1, 'mem': 2},
        ]
        vms_conf_col = self.meta.tables['nodes'].c.vms_conf
        for row in db.execute(sa.select([vms_conf_col])):
            self.assertEqual(jsonutils.loads(row[0]), expected_conf)

View File

@ -729,8 +729,7 @@ class TestTaskObject(BaseIntegrationTest):
kvm_node = self.cluster.nodes[0]
kvm_node.roles = [consts.VIRTUAL_NODE_TYPES.virt]
self.db.flush()
objects.Node.set_vms_conf(kvm_node,
[{'id': 1, 'cluster_id': self.cluster.id}])
kvm_node.vms_conf = [{'id': 1, 'cluster_id': self.cluster.id}]
task = Task(name=consts.TASK_NAMES.spawn_vms,
cluster=self.cluster,
status=consts.TASK_STATUSES.ready)
@ -742,7 +741,7 @@ class TestTaskObject(BaseIntegrationTest):
for node in self.cluster.nodes:
if consts.VIRTUAL_NODE_TYPES.virt in node.roles:
self.assertTrue(node.attributes.vms_conf[0].get('created'))
self.assertTrue(node.vms_conf[0].get('created'))
else:
self.assertNotEquals(node.status, consts.NODE_STATUSES.ready)
@ -1432,12 +1431,12 @@ class TestClusterObjectVirtRoles(BaseTestCase):
]
)
self.env.nodes[0].attributes.vms_conf = [
self.env.nodes[0].vms_conf = [
{'id': 1, 'cpu': 1, 'mem': 2},
{'id': 2, 'cpu': 1, 'mem': 2},
]
self.env.nodes[1].attributes.vms_conf = [
self.env.nodes[1].vms_conf = [
{'id': 1, 'cpu': 1, 'mem': 2},
{'id': 2, 'cpu': 1, 'mem': 2},
]
@ -1446,7 +1445,7 @@ class TestClusterObjectVirtRoles(BaseTestCase):
objects.Cluster.set_vms_created_state(self.env.clusters[0])
for node in self.env.nodes:
for conf in node.attributes.vms_conf:
for conf in node.vms_conf:
self.assertTrue(conf['created'])
def test_reset_vms_created_state(self):
@ -1454,10 +1453,10 @@ class TestClusterObjectVirtRoles(BaseTestCase):
objects.Node.reset_vms_created_state(self.env.nodes[0])
for conf in self.env.nodes[0].attributes.vms_conf:
for conf in self.env.nodes[0].vms_conf:
self.assertFalse(conf['created'])
for conf in self.env.nodes[1].attributes.vms_conf:
for conf in self.env.nodes[1].vms_conf:
self.assertTrue(conf['created'])