Merge "Refactor stats to avoid bad join"

Authored by Jenkins on 2014-02-12 01:09:53 +00:00; committed by Gerrit Code Review
commit 6d593641a0
18 changed files with 258 additions and 239 deletions
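In short: the per-key compute_node_stats table (and the join needed to load it) is replaced by a single JSON-encoded stats Text column on compute_nodes. A minimal sketch of the data-shape change, using the stdlib json module as a stand-in for nova's jsonutils wrapper (row values hypothetical):

import json

# Old shape: one compute_node_stats row per key, joined onto each node.
old_rows = [
    {'compute_node_id': 1, 'key': 'num_instances', 'value': '3'},
    {'compute_node_id': 1, 'key': 'io_workload', 'value': '2'},
]

# New shape: a single JSON string stored on the compute_nodes row itself.
stats = {row['key']: row['value'] for row in old_rows}
column_value = json.dumps(stats)  # Text column, default '{}'

# Readers decode it back, tolerating NULL/empty columns.
assert json.loads(column_value or '{}') == stats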

View File

@@ -242,7 +242,7 @@ class ResourceTracker(object):
self.pci_tracker.update_pci_for_migration(instance,
sign=-1)
self._update_usage(self.compute_node, itype, sign=-1)
self.compute_node['stats'] = self.stats
self.compute_node['stats'] = jsonutils.dumps(self.stats)
ctxt = context.get_admin_context()
self._update(ctxt, self.compute_node)
@@ -381,7 +381,7 @@ class ResourceTracker(object):
else:
# just update the record:
self._update(context, resources, prune_stats=True)
self._update(context, resources)
LOG.info(_('Compute_service record updated for %(host)s:%(node)s')
% {'host': self.host, 'node': self.nodename})
@@ -448,12 +448,12 @@ class ResourceTracker(object):
if 'pci_devices' in resources:
LOG.audit(_("Free PCI devices: %s") % resources['pci_devices'])
def _update(self, context, values, prune_stats=False):
def _update(self, context, values):
"""Persist the compute node updates to the DB."""
if "service" in self.compute_node:
del self.compute_node['service']
self.compute_node = self.conductor_api.compute_node_update(
context, self.compute_node, values, prune_stats)
context, self.compute_node, values)
if self.pci_tracker:
self.pci_tracker.save(context)
@@ -521,7 +521,7 @@ class ResourceTracker(object):
if self.pci_tracker:
self.pci_tracker.update_pci_for_migration(instance)
self._update_usage(resources, itype)
resources['stats'] = self.stats
resources['stats'] = jsonutils.dumps(self.stats)
if self.pci_tracker:
resources['pci_stats'] = jsonutils.dumps(
self.pci_tracker.stats)
@@ -594,7 +594,7 @@ class ResourceTracker(object):
self._update_usage(resources, instance, sign=sign)
resources['current_workload'] = self.stats.calculate_workload()
resources['stats'] = self.stats
resources['stats'] = jsonutils.dumps(self.stats)
if self.pci_tracker:
resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
else:

View File

@@ -237,8 +237,8 @@ class LocalAPI(object):
return self._manager.compute_node_create(context, values)
def compute_node_update(self, context, node, values, prune_stats=False):
return self._manager.compute_node_update(context, node, values,
prune_stats)
# NOTE(belliott) ignore prune_stats param, it's no longer relevant
return self._manager.compute_node_update(context, node, values)
def compute_node_delete(self, context, node):
return self._manager.compute_node_delete(context, node)

View File

@@ -453,8 +453,9 @@ class ConductorManager(manager.Manager):
return jsonutils.to_primitive(result)
def compute_node_update(self, context, node, values, prune_stats=False):
result = self.db.compute_node_update(context, node['id'], values,
prune_stats)
# NOTE(belliott) prune_stats is no longer relevant and will be
# ignored
result = self.db.compute_node_update(context, node['id'], values)
return jsonutils.to_primitive(result)
def compute_node_delete(self, context, node):

View File

@@ -153,7 +153,7 @@ def compute_node_get(context, compute_id):
:param compute_id: ID of the compute node
:returns: Dictionary-like object containing properties of the compute node,
including its corresponding service and statistics
including its corresponding service
Raises ComputeHostNotFound if compute node with the given ID doesn't exist.
"""
@@ -184,7 +184,7 @@ def compute_node_get_all(context, no_date_fields=False):
Set to False by default
:returns: List of dictionaries each containing compute node properties,
including corresponding service and stats
including corresponding service
"""
return IMPL.compute_node_get_all(context, no_date_fields)
@@ -213,23 +213,19 @@ def compute_node_create(context, values):
return IMPL.compute_node_create(context, values)
def compute_node_update(context, compute_id, values, prune_stats=False):
def compute_node_update(context, compute_id, values):
"""Set the given properties on a compute node and update it.
:param context: The security context
:param compute_id: ID of the compute node
:param values: Dictionary containing compute node properties to be updated
:param prune_stats: If set to True, forces the compute node statistics
entries corresponding to the given compute node with
keys not present in the values['stats'] dictionary to
be deleted from the database. Set to False by default
:returns: Dictionary-like object containing the properties of the updated
compute node, including its corresponding service and statistics
Raises ComputeHostNotFound if compute node with the given ID doesn't exist.
"""
return IMPL.compute_node_update(context, compute_id, values, prune_stats)
return IMPL.compute_node_update(context, compute_id, values)
def compute_node_delete(context, compute_id):
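With prune_stats gone from the DB API, callers pass values whose stats entry is already a JSON string. A hedged sketch of the new call shape (identifiers hypothetical; the real call needs a nova context and compute_id):

import json

values = {'vcpus': 4, 'stats': json.dumps({'num_instances': '8'})}
# db.compute_node_update(context, compute_id, values)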

View File

@@ -21,7 +21,6 @@ import collections
import copy
import datetime
import functools
import itertools
import sys
import time
import uuid
@@ -508,7 +507,6 @@ def _compute_node_get(context, compute_id, session=None):
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
options(joinedload('service')).\
options(joinedload('stats')).\
first()
if not result:
@@ -538,10 +536,9 @@ def compute_node_get_all(context, no_date_fields):
engine = get_engine()
# Retrieve ComputeNode, Service, Stat.
# Retrieve ComputeNode, Service
compute_node = models.ComputeNode.__table__
service = models.Service.__table__
stat = models.ComputeNodeStat.__table__
with engine.begin() as conn:
redundant_columns = set(['deleted_at', 'created_at', 'updated_at',
@@ -561,14 +558,6 @@ def compute_node_get_all(context, no_date_fields):
order_by(service.c.id)
service_rows = conn.execute(service_query).fetchall()
stat_query = select(filter_columns(stat)).\
where(stat.c.deleted == 0).\
order_by(stat.c.compute_node_id)
stat_rows = conn.execute(stat_query).fetchall()
# NOTE(msdubov): Transferring sqla.RowProxy objects to dicts.
stats = [dict(proxy.items()) for proxy in stat_rows]
# Join ComputeNode & Service manually.
services = {}
for proxy in service_rows:
@@ -581,21 +570,6 @@ def compute_node_get_all(context, no_date_fields):
compute_nodes.append(node)
# Join ComputeNode & ComputeNodeStat manually.
# NOTE(msdubov): ComputeNode and ComputeNodeStat map 1-to-Many.
# Running time is (asymptotically) optimal due to the use
# of iterators (itertools.groupby() for ComputeNodeStat and
# iter() for ComputeNode) - we handle each record only once.
compute_nodes.sort(key=lambda node: node['id'])
compute_nodes_iter = iter(compute_nodes)
for nid, nsts in itertools.groupby(stats, lambda s: s['compute_node_id']):
for node in compute_nodes_iter:
if node['id'] == nid:
node['stats'] = list(nsts)
break
else:
node['stats'] = []
return compute_nodes
@@ -608,78 +582,28 @@ def compute_node_search_by_hypervisor(context, hypervisor_match):
all()
def _prep_stats_dict(values):
"""Make list of ComputeNodeStats."""
stats = []
d = values.get('stats', {})
for k, v in d.iteritems():
stat = models.ComputeNodeStat()
stat['key'] = k
stat['value'] = v
stats.append(stat)
values['stats'] = stats
@require_admin_context
def compute_node_create(context, values):
"""Creates a new ComputeNode and populates the capacity fields
with the most recent data.
"""
_prep_stats_dict(values)
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
convert_objects_related_datetimes(values, *datetime_keys)
compute_node_ref = models.ComputeNode()
compute_node_ref.update(values)
compute_node_ref.save()
return compute_node_ref
def _update_stats(context, new_stats, compute_id, session, prune_stats=False):
existing = model_query(context, models.ComputeNodeStat, session=session,
read_deleted="no").filter_by(compute_node_id=compute_id).all()
statmap = {}
for stat in existing:
key = stat['key']
statmap[key] = stat
stats = []
for k, v in new_stats.iteritems():
old_stat = statmap.pop(k, None)
if old_stat:
if old_stat['value'] != unicode(v):
# update existing value:
old_stat.update({'value': v})
stats.append(old_stat)
else:
# add new stat:
stat = models.ComputeNodeStat()
stat['compute_node_id'] = compute_id
stat['key'] = k
stat['value'] = v
stats.append(stat)
if prune_stats:
# prune un-touched old stats:
for stat in statmap.values():
session.add(stat)
stat.soft_delete(session=session)
# add new and updated stats
for stat in stats:
session.add(stat)
@require_admin_context
@_retry_on_deadlock
def compute_node_update(context, compute_id, values, prune_stats=False):
def compute_node_update(context, compute_id, values):
"""Updates the ComputeNode record with the most recent data."""
stats = values.pop('stats', {})
session = get_session()
with session.begin():
_update_stats(context, stats, compute_id, session, prune_stats)
compute_ref = _compute_node_get(context, compute_id, session=session)
# Always update this, even if there's going to be no other
# changes in data. This ensures that we invalidate the
@@ -688,16 +612,15 @@ def compute_node_update(context, compute_id, values, prune_stats=False):
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
convert_objects_related_datetimes(values, *datetime_keys)
compute_ref.update(values)
return compute_ref
@require_admin_context
def compute_node_delete(context, compute_id):
"""Delete a ComputeNode record and prune its stats."""
"""Delete a ComputeNode record."""
session = get_session()
with session.begin():
# Prune the compute node's stats
_update_stats(context, {}, compute_id, session, True)
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
soft_delete(synchronize_session=False)

View File

@@ -0,0 +1,95 @@
# Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import Text
from nova.openstack.common import timeutils
def upgrade(engine):
meta = MetaData()
meta.bind = engine
# Drop the compute_node_stats table and add a 'stats' column to
# compute_nodes directly. The data itself is transient and doesn't
# need to be copied over.
table_names = ('compute_node_stats', 'shadow_compute_node_stats')
for table_name in table_names:
table = Table(table_name, meta, autoload=True)
table.drop()
# Add a new stats column to compute nodes
table_names = ('compute_nodes', 'shadow_compute_nodes')
for table_name in table_names:
table = Table(table_name, meta, autoload=True)
stats = Column('stats', Text, default='{}')
table.create_column(stats)
def downgrade(engine):
meta = MetaData()
meta.bind = engine
table_names = ('compute_nodes', 'shadow_compute_nodes')
for table_name in table_names:
table = Table(table_name, meta, autoload=True)
table.drop_column('stats')
if engine.name == 'mysql':
fk_name = 'fk_compute_node_stats_compute_node_id'
else:
fk_name = 'compute_node_stats_compute_node_id_fkey'
table = Table('compute_node_stats', meta,
Column('created_at', DateTime, default=timeutils.utcnow),
Column('updated_at', DateTime, onupdate=timeutils.utcnow),
Column('deleted_at', DateTime),
Column('deleted', Integer, default=0),
Column('id', Integer, nullable=False),
Column('key', String(255), nullable=False),
Column('value', String(255), nullable=True),
Column('compute_node_id', Integer,
ForeignKey('compute_nodes.id', name=fk_name),
index=True),
Index('compute_node_stats_node_id_and_deleted_idx',
'compute_node_id', 'deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
table.create()
# shadow version has no fkey or index
table = Table('shadow_compute_node_stats', meta,
Column('created_at', DateTime, default=timeutils.utcnow),
Column('updated_at', DateTime, onupdate=timeutils.utcnow),
Column('deleted_at', DateTime),
Column('deleted', Integer, default=0),
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(255), nullable=False),
Column('value', String(255), nullable=True),
Column('compute_node_id', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
table.create()

View File

@@ -120,32 +120,8 @@ class ComputeNode(BASE, NovaBase):
# data about additional resources.
extra_resources = Column(Text)
class ComputeNodeStat(BASE, NovaBase):
"""Stats related to the current workload of a compute host that are
intended to aid in making scheduler decisions.
"""
__tablename__ = 'compute_node_stats'
__table_args__ = (
Index('ix_compute_node_stats_compute_node_id', 'compute_node_id'),
Index('compute_node_stats_node_id_and_deleted_idx',
'compute_node_id', 'deleted')
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255))
compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'),
nullable=False)
compute_node = relationship(ComputeNode, backref=backref('stats'),
foreign_keys=compute_node_id,
primaryjoin='and_('
'ComputeNodeStat.compute_node_id == '
'ComputeNode.id,'
'ComputeNodeStat.deleted == 0)')
def __str__(self):
return "{%d: %s = %s}" % (self.compute_node_id, self.key, self.value)
# json-encode string containing compute node statistics
stats = Column(Text, default='{}')
class Certificate(BASE, NovaBase):

View File

@@ -1,4 +1,4 @@
# Copyright 2013 IBM Corp.
# Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -15,13 +15,15 @@
from nova import db
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import jsonutils
class ComputeNode(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added get_by_service_id()
# Version 1.2: String attributes updated to support unicode
VERSION = '1.2'
# Version 1.3: Added stats field
VERSION = '1.3'
fields = {
'id': fields.IntegerField(),
@@ -42,12 +44,29 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
'cpu_info': fields.StringField(nullable=True),
'disk_available_least': fields.IntegerField(nullable=True),
'metrics': fields.StringField(nullable=True),
'stats': fields.DictOfNullableStringsField(nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
target_version = (int(target_version.split('.')[0]),
int(target_version.split('.')[1]))
if target_version < (1, 3):
# pre 1.3 version does not have a stats field
del primitive['stats']
@staticmethod
def _from_db_object(context, compute, db_compute):
for key in compute.fields:
fields = set(compute.fields) - set(['stats'])
for key in fields:
compute[key] = db_compute[key]
stats = db_compute['stats']
if stats:
compute['stats'] = jsonutils.loads(stats)
else:
compute['stats'] = {}
compute._context = context
compute.obj_reset_changes()
return compute
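A toy rendering of the obj_make_compatible step above: backlevelling a version 1.3 primitive for a pre-1.3 consumer drops the stats field. The helper name and values are mine, and pop() stands in for the diff's del so the sketch tolerates a missing key:

def make_compatible(primitive, target_version):
    # ComputeNode versions before 1.3 have no 'stats' field, so strip
    # it when downgrading the primitive for an older consumer.
    major, minor = (int(p) for p in target_version.split('.'))
    if (major, minor) < (1, 3):
        primitive.pop('stats', None)
    return primitive

p = {'id': 1, 'stats': {'num_instances': '5'}}
assert 'stats' not in make_compatible(dict(p), '1.2')
assert 'stats' in make_compatible(dict(p), '1.3')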
@@ -70,10 +89,15 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
@base.remotable
def save(self, context, prune_stats=False):
# NOTE(belliott) ignore prune_stats param, no longer relevant
updates = self.obj_get_changes()
updates.pop('id', None)
db_compute = db.compute_node_update(context, self.id, updates,
prune_stats=prune_stats)
stats = updates.pop('stats', None)
if stats is not None:
updates['stats'] = jsonutils.dumps(stats)
db_compute = db.compute_node_update(context, self.id, updates)
self._from_db_object(context, self, db_compute)
@base.remotable
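The caller keeps working with a plain dict; save() encodes it just before the DB call. A stand-alone sketch of that pop-and-dumps step (the helper name is mine, not nova's; stdlib json replaces jsonutils):

import json

def prepare_updates(changes):
    # Mirrors ComputeNode.save() above: pull 'stats' out of the pending
    # changes and store its JSON encoding in the Text column instead.
    updates = dict(changes)
    stats = updates.pop('stats', None)
    if stats is not None:
        updates['stats'] = json.dumps(stats)
    return updates

print(prepare_updates({'vcpus_used': 3, 'stats': {'num_instances': '5'}}))
# {'vcpus_used': 3, 'stats': '{"num_instances": "5"}'}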
@@ -93,13 +117,15 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
class ComputeNodeList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# ComputeNode <= version 1.2
VERSION = '1.0'
# Version 1.1 ComputeNode version 1.3
VERSION = '1.1'
fields = {
'objects': fields.ListOfObjectsField('ComputeNode'),
}
child_versions = {
'1.0': '1.2',
# NOTE(danms): ComputeNode was at 1.2 before we added this
'1.1': '1.3',
}
@base.remotable_classmethod

View File

@@ -215,7 +215,9 @@ class HostState(object):
# Don't store stats directly in host_state to make sure these don't
# overwrite any values, or get overwritten themselves. Store in self so
# filters can schedule with them.
self.stats = self._statmap(compute.get('stats', []))
stats = compute.get('stats', None) or '{}'
self.stats = jsonutils.loads(stats)
self.hypervisor_version = compute['hypervisor_version']
# Track number of instances on host
@@ -303,9 +305,6 @@ class HostState(object):
task_states.IMAGE_BACKUP]:
self.num_io_ops += 1
def _statmap(self, stats):
return dict((st['key'], st['value']) for st in stats)
def __repr__(self):
return ("(%s, %s) ram:%s disk:%s io_ops:%s instances:%s" %
(self.host, self.nodename, self.free_ram_mb, self.free_disk_mb,
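HostState now decodes the column directly instead of folding key/value rows through _statmap. A small sketch of the defensive load it performs (stdlib json standing in for jsonutils):

import json

def load_stats(compute):
    # Mirrors HostState above: a missing, NULL, or empty 'stats'
    # column degrades to an empty dict rather than a decode error.
    return json.loads(compute.get('stats') or '{}')

assert load_stats({}) == {}
assert load_stats({'stats': None}) == {}
assert load_stats({'stats': '{"io_workload": "42"}'}) == {'io_workload': '42'}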

View File

@@ -305,7 +305,7 @@ class HostTestCase(test.TestCase):
'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
'cpu_info': '', 'stats': {}}
'cpu_info': '', 'stats': ''}
db.compute_node_create(ctxt, dic)
return db.service_get(ctxt, s_ref['id'])

View File

@@ -356,7 +356,7 @@ class HostTestCase(test.TestCase):
'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
'cpu_info': '', 'stats': {}}
'cpu_info': '', 'stats': ''}
db.compute_node_create(ctxt, dic)
return db.service_get(ctxt, s_ref['id'])

View File

@@ -347,8 +347,8 @@ class _BaseTestCase(object):
def test_compute_node_update(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(self.context, node['id'], 'fake-values',
False).AndReturn('fake-result')
db.compute_node_update(self.context, node['id'], 'fake-values').\
AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_update(self.context, node,
'fake-values', False)

View File

@@ -46,6 +46,7 @@ from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import quota
@@ -5465,31 +5466,21 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
supported_instances='',
pci_stats='',
metrics='',
extra_resources='')
extra_resources='',
stats='')
# add some random stats
self.stats = dict(num_instances=3, num_proj_12345=2,
num_proj_23456=2, num_vm_building=3)
self.compute_node_dict['stats'] = self.stats
self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
self.flags(reserved_host_memory_mb=0)
self.flags(reserved_host_disk_mb=0)
self.item = db.compute_node_create(self.ctxt, self.compute_node_dict)
def _stats_as_dict(self, stats):
d = {}
for s in stats:
key = s['key']
d[key] = s['value']
return d
def _stats_equal(self, stats, new_stats):
for k, v in stats.iteritems():
self.assertEqual(v, int(new_stats[k]))
def test_compute_node_create(self):
self._assertEqualObjects(self.compute_node_dict, self.item,
ignored_keys=self._ignored_keys + ['stats'])
new_stats = self._stats_as_dict(self.item['stats'])
self._stats_equal(self.stats, new_stats)
new_stats = jsonutils.loads(self.item['stats'])
self.assertEqual(self.stats, new_stats)
def test_compute_node_get_all(self):
date_fields = set(['created_at', 'updated_at',
@@ -5506,8 +5497,8 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
self.assertFalse(date_fields & node_fields)
else:
self.assertTrue(date_fields <= node_fields)
new_stats = self._stats_as_dict(node['stats'])
self._stats_equal(self.stats, new_stats)
new_stats = jsonutils.loads(node['stats'])
self.assertEqual(self.stats, new_stats)
def test_compute_node_get_all_deleted_compute_node(self):
# Create a service and compute node and ensure we can find its stats;
@@ -5521,7 +5512,7 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
# Create a compute node
compute_node_data = self.compute_node_dict.copy()
compute_node_data['service_id'] = service['id']
compute_node_data['stats'] = self.stats.copy()
compute_node_data['stats'] = jsonutils.dumps(self.stats.copy())
compute_node_data['hypervisor_hostname'] = 'hypervisor-%s' % x
node = db.compute_node_create(self.ctxt, compute_node_data)
@@ -5535,7 +5526,7 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
break
self.assertIsNotNone(found)
# Now ensure the match has stats!
self.assertNotEqual(self._stats_as_dict(found['stats']), {})
self.assertNotEqual(jsonutils.loads(found['stats']), {})
# Now delete the newly-created compute node to ensure the related
# compute node stats are wiped in a cascaded fashion
@@ -5556,7 +5547,7 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
for name in ['bm_node1', 'bm_node2']:
compute_node_data = self.compute_node_dict.copy()
compute_node_data['service_id'] = service['id']
compute_node_data['stats'] = self.stats
compute_node_data['stats'] = jsonutils.dumps(self.stats)
compute_node_data['hypervisor_hostname'] = 'bm_node_1'
node = db.compute_node_create(self.ctxt, compute_node_data)
@@ -5576,24 +5567,24 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
node = db.compute_node_get(self.ctxt, compute_node_id)
self._assertEqualObjects(self.compute_node_dict, node,
ignored_keys=self._ignored_keys + ['stats', 'service'])
new_stats = self._stats_as_dict(node['stats'])
self._stats_equal(self.stats, new_stats)
new_stats = jsonutils.loads(node['stats'])
self.assertEqual(self.stats, new_stats)
def test_compute_node_update(self):
compute_node_id = self.item['id']
stats = self._stats_as_dict(self.item['stats'])
stats = jsonutils.loads(self.item['stats'])
# change some values:
stats['num_instances'] = 8
stats['num_tribbles'] = 1
values = {
'vcpus': 4,
'stats': stats,
'stats': jsonutils.dumps(stats),
}
item_updated = db.compute_node_update(self.ctxt, compute_node_id,
values)
self.assertEqual(4, item_updated['vcpus'])
new_stats = self._stats_as_dict(item_updated['stats'])
self._stats_equal(stats, new_stats)
new_stats = jsonutils.loads(item_updated['stats'])
self.assertEqual(stats, new_stats)
def test_compute_node_delete(self):
compute_node_id = self.item['id']
@@ -5610,7 +5601,7 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
service = db.service_create(self.ctxt, new_service)
self.compute_node_dict['service_id'] = service['id']
self.compute_node_dict['hypervisor_hostname'] = 'testhost' + str(i)
self.compute_node_dict['stats'] = self.stats
self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
node = db.compute_node_create(self.ctxt, self.compute_node_dict)
nodes_created.append(node)
nodes = db.compute_node_search_by_hypervisor(self.ctxt, 'host')
@@ -5647,39 +5638,6 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
'free_ram_mb': '13'})
self.assertNotEqual(first['updated_at'], second['updated_at'])
def test_compute_node_stat_unchanged(self):
# don't update unchanged stat values:
stats = self.item['stats']
stats_updated_at = dict([(stat['key'], stat['updated_at'])
for stat in stats])
stats_values = self._stats_as_dict(stats)
new_values = {'stats': stats_values}
compute_node_id = self.item['id']
db.compute_node_update(self.ctxt, compute_node_id, new_values)
updated_node = db.compute_node_get(self.ctxt, compute_node_id)
updated_stats = updated_node['stats']
for stat in updated_stats:
self.assertEqual(stat['updated_at'], stats_updated_at[stat['key']])
def test_compute_node_stat_prune(self):
for stat in self.item['stats']:
if stat['key'] == 'num_instances':
num_instance_stat = stat
break
values = {
'stats': dict(num_instances=1)
}
db.compute_node_update(self.ctxt, self.item['id'], values,
prune_stats=True)
item_updated = db.compute_node_get_all(self.ctxt)[0]
self.assertEqual(1, len(item_updated['stats']))
stat = item_updated['stats'][0]
self.assertEqual(num_instance_stat['id'], stat['id'])
self.assertEqual(num_instance_stat['key'], stat['key'])
self.assertEqual(1, int(stat['value']))
class ProviderFwRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):

View File

@@ -681,6 +681,22 @@ class TestNovaMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
for table_name in table_names:
self.assertTableNotExists(engine, 'dump_' + table_name)
def _check_233(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'stats')
compute_nodes = db_utils.get_table(engine, 'compute_nodes')
self.assertTrue(isinstance(compute_nodes.c.stats.type,
sqlalchemy.types.Text))
self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table,
engine, 'compute_node_stats')
def _post_downgrade_233(self, engine):
self.assertColumnNotExists(engine, 'compute_nodes', 'stats')
# confirm compute_node_stats exists
db_utils.get_table(engine, 'compute_node_stats')
class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
"""Test sqlalchemy-migrate migrations."""

View File

@@ -42,6 +42,7 @@ fake_compute_node = {
'cpu_info': 'Schmintel i786',
'disk_available_least': 256,
'metrics': '',
'stats': '{}',
}
@@ -51,7 +52,8 @@ class _TestComputeNodeObject(object):
db.compute_node_get(self.context, 123).AndReturn(fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode.get_by_id(self.context, 123)
self.compare_obj(compute, fake_compute_node)
self.compare_obj(compute, fake_compute_node,
comparators={'stats': self.json_comparator})
def test_get_by_service_id(self):
self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id')
@@ -59,7 +61,8 @@ class _TestComputeNodeObject(object):
fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode.get_by_service_id(self.context, 456)
self.compare_obj(compute, fake_compute_node)
self.compare_obj(compute, fake_compute_node,
comparators={'stats': self.json_comparator})
def test_create(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
@@ -69,19 +72,20 @@ class _TestComputeNodeObject(object):
compute = compute_node.ComputeNode()
compute.service_id = 456
compute.create(self.context)
self.compare_obj(compute, fake_compute_node)
self.compare_obj(compute, fake_compute_node,
comparators={'stats': self.json_comparator})
def test_save(self):
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(self.context, 123, {'vcpus_used': 3},
prune_stats=False
).AndReturn(fake_compute_node)
db.compute_node_update(self.context, 123, {'vcpus_used': 3}).\
AndReturn(fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode()
compute.id = 123
compute.vcpus_used = 3
compute.save(self.context)
self.compare_obj(compute, fake_compute_node)
self.compare_obj(compute, fake_compute_node,
comparators={'stats': self.json_comparator})
def test_destroy(self):
self.mox.StubOutWithMock(db, 'compute_node_delete')
@@ -109,7 +113,8 @@ class _TestComputeNodeObject(object):
self.mox.ReplayAll()
computes = compute_node.ComputeNodeList.get_all(self.context)
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node)
self.compare_obj(computes[0], fake_compute_node,
comparators={'stats': self.json_comparator})
def test_get_by_hypervisor(self):
self.mox.StubOutWithMock(db, 'compute_node_search_by_hypervisor')
@@ -119,7 +124,8 @@ class _TestComputeNodeObject(object):
computes = compute_node.ComputeNodeList.get_by_hypervisor(self.context,
'hyper')
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node)
self.compare_obj(computes[0], fake_compute_node,
comparators={'stats': self.json_comparator})
class TestComputeNodeObject(test_objects._LocalTest,

View File

@@ -27,6 +27,7 @@ from nova import exception
from nova.objects import base
from nova.objects import fields
from nova.objects import utils
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import rpc
from nova import test
@@ -294,7 +295,8 @@ class TestUtils(test.TestCase):
self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None):
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
"""Compare a NovaObject and a dict-like database object.
This automatically converts TZ-aware datetimes and iterates over
@@ -305,12 +307,15 @@ def compare_obj(test, obj, db_obj, subs=None, allow_missing=None):
:param:db_obj: The dict-like database object to use as reference
:param:subs: A dict of objkey=dbkey field substitutions
:param:allow_missing: A list of fields that may not be in db_obj
:param:comparators: Map of comparator functions to use for certain fields
"""
if subs is None:
subs = {}
if allow_missing is None:
allow_missing = []
if comparators is None:
comparators = {}
for key in obj.fields:
if key in allow_missing and not obj.obj_attr_is_set(key):
@@ -320,7 +325,12 @@ def compare_obj(test, obj, db_obj, subs=None, allow_missing=None):
db_val = db_obj[db_key]
if isinstance(obj_val, datetime.datetime):
obj_val = obj_val.replace(tzinfo=None)
test.assertEqual(db_val, obj_val)
if key in comparators:
comparator = comparators[key]
comparator(db_val, obj_val)
else:
test.assertEqual(db_val, obj_val)
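The comparators hook exists because stats is a dict on the object but a JSON string in the DB fixture, so plain equality can never hold for that field. A runnable toy version of the pattern (plain assert stands in for the test assertions):

import json

def json_comparator(db_val, obj_val):
    # The object field is a dict; its DB counterpart is the JSON encoding.
    assert db_val == json.dumps(obj_val)

obj_fields = {'vcpus': 4, 'stats': {'num_instances': '3'}}
db_row = {'vcpus': 4, 'stats': '{"num_instances": "3"}'}
comparators = {'stats': json_comparator}

for key, obj_val in obj_fields.items():
    if key in comparators:
        comparators[key](db_row[key], obj_val)
    else:
        assert db_row[key] == obj_val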
class _BaseTestCase(test.TestCase):
@@ -329,8 +339,15 @@ class _BaseTestCase(test.TestCase):
self.remote_object_calls = list()
self.context = context.RequestContext('fake-user', 'fake-project')
def compare_obj(self, obj, db_obj, subs=None, allow_missing=None):
compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing)
def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
comparators=comparators)
def json_comparator(self, expected, obj_val):
# json-ify an object field for comparison with its db str
# equivalent
self.assertEqual(expected, jsonutils.dumps(obj_val))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""

View File

@@ -73,7 +73,8 @@ class _TestServiceObject(object):
self.assertTrue(service_obj.obj_attr_is_set('compute_node'))
self.compare_obj(service_obj.compute_node,
test_compute_node.fake_compute_node,
allow_missing=OPTIONAL)
allow_missing=OPTIONAL,
comparators={'stats': self.json_comparator})
def test_create(self):
self.mox.StubOutWithMock(db, 'service_create')
@@ -162,7 +163,8 @@ class _TestServiceObject(object):
service_obj.id = 123
self.compare_obj(service_obj.compute_node,
test_compute_node.fake_compute_node,
allow_missing=OPTIONAL)
allow_missing=OPTIONAL,
comparators={'stats': self.json_comparator})
# Make sure it doesn't re-fetch this
service_obj.compute_node

View File

@@ -377,18 +377,20 @@ class HostStateTestCase(test.NoDBTestCase):
# in HostManagerTestCase.test_get_all_host_states()
def test_stat_consumption_from_compute_node(self):
stats = [
dict(key='num_instances', value='5'),
dict(key='num_proj_12345', value='3'),
dict(key='num_proj_23456', value='1'),
dict(key='num_vm_%s' % vm_states.BUILDING, value='2'),
dict(key='num_vm_%s' % vm_states.SUSPENDED, value='1'),
dict(key='num_task_%s' % task_states.RESIZE_MIGRATING, value='1'),
dict(key='num_task_%s' % task_states.MIGRATING, value='2'),
dict(key='num_os_type_linux', value='4'),
dict(key='num_os_type_windoze', value='1'),
dict(key='io_workload', value='42'),
]
stats = {
'num_instances': '5',
'num_proj_12345': '3',
'num_proj_23456': '1',
'num_vm_%s' % vm_states.BUILDING: '2',
'num_vm_%s' % vm_states.SUSPENDED: '1',
'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
'num_task_%s' % task_states.MIGRATING: '2',
'num_os_type_linux': '4',
'num_os_type_windoze': '1',
'io_workload': '42',
}
stats = jsonutils.dumps(stats)
hyper_ver_int = utils.convert_version_to_int('6.0.0')
compute = dict(stats=stats, memory_mb=1, free_disk_gb=0, local_gb=0,
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
@@ -421,18 +423,20 @@ class HostStateTestCase(test.NoDBTestCase):
self.assertEqual(hyper_ver_int, host.hypervisor_version)
def test_stat_consumption_from_compute_node_non_pci(self):
stats = [
dict(key='num_instances', value='5'),
dict(key='num_proj_12345', value='3'),
dict(key='num_proj_23456', value='1'),
dict(key='num_vm_%s' % vm_states.BUILDING, value='2'),
dict(key='num_vm_%s' % vm_states.SUSPENDED, value='1'),
dict(key='num_task_%s' % task_states.RESIZE_MIGRATING, value='1'),
dict(key='num_task_%s' % task_states.MIGRATING, value='2'),
dict(key='num_os_type_linux', value='4'),
dict(key='num_os_type_windoze', value='1'),
dict(key='io_workload', value='42'),
]
stats = {
'num_instances': '5',
'num_proj_12345': '3',
'num_proj_23456': '1',
'num_vm_%s' % vm_states.BUILDING: '2',
'num_vm_%s' % vm_states.SUSPENDED: '1',
'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
'num_task_%s' % task_states.MIGRATING: '2',
'num_os_type_linux': '4',
'num_os_type_windoze': '1',
'io_workload': '42',
}
stats = jsonutils.dumps(stats)
hyper_ver_int = utils.convert_version_to_int('6.0.0')
compute = dict(stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,