diff --git a/.gitignore b/.gitignore
index 33f4d24b..6558502c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,7 @@
 .*.swp
 .coverage
 .installed.cfg
+.stestr
 .testrepository
 .tox
 .venv
diff --git a/nfv/.coveragerc b/nfv/.coveragerc
new file mode 100644
index 00000000..dc6f21c9
--- /dev/null
+++ b/nfv/.coveragerc
@@ -0,0 +1,7 @@
+[run]
+branch = True
+source = nfv_common,nfv_client,nfv_vim,nfv_plugins
+omit = nfv-tests/*
+
+[report]
+ignore_errors = True
diff --git a/nfv/.stestr.conf b/nfv/.stestr.conf
new file mode 100644
index 00000000..1150b35a
--- /dev/null
+++ b/nfv/.stestr.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+test_path=./nfv-tests/nfv_unit_tests/tests
+top_dir=.
+
diff --git a/nfv/coverage.ini b/nfv/coverage.ini
deleted file mode 100755
index ae9df785..00000000
--- a/nfv/coverage.ini
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-[run]
-branch = True
-source =
-    nfv-common
-    nfv-plugins
-    nfv-vim
-
-[report]
-include =
-    *nfv_vim/directors/_instance_director.py
diff --git a/nfv/nfv-tests/__init__.py b/nfv/nfv-tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/nfv/nfv-tests/nfv_scenario_tests/tests/_alarms.py b/nfv/nfv-tests/nfv_scenario_tests/tests/_alarms.py
index f3894a3e..d0cc3e97 100755
--- a/nfv/nfv-tests/nfv_scenario_tests/tests/_alarms.py
+++ b/nfv/nfv-tests/nfv_scenario_tests/tests/_alarms.py
@@ -71,7 +71,7 @@ def is_instance_reboot_alarm(alarms, instance, guest_hb=False):
     """
     Check if an instance reboot alarm has been raised
     """
-    expected_alarm = {'alarm_id': fm_constants.FM_ALARM_ID_VM_REBOOT,
+    expected_alarm = {'alarm_id': fm_constants.FM_ALARM_ID_VM_REBOOTING,
                       'severity': fm_constants.FM_ALARM_SEVERITY_CRITICAL}

     return _instance_alarm_raised(alarms, expected_alarm, instance)
@@ -81,7 +81,7 @@ def is_instance_rebuild_alarm(alarms, instance, guest_hb=False):
     """
     Check if an instance rebuild alarm has been raised
     """
-    expected_alarm = {'alarm_id': fm_constants.FM_ALARM_ID_VM_REBUILD,
+    expected_alarm = {'alarm_id': fm_constants.FM_ALARM_ID_VM_REBUILDING,
                       'severity': fm_constants.FM_ALARM_SEVERITY_CRITICAL}

     return _instance_alarm_raised(alarms, expected_alarm, instance)
diff --git a/nfv/nfv-tests/nfv_unit_tests/tests/__init__.py b/nfv/nfv-tests/nfv_unit_tests/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/nfv/nfv-tests/nfv_unit_tests/tests/test_database_upgrades.py b/nfv/nfv-tests/nfv_unit_tests/tests/test_database_upgrades.py
index 9340b7f2..9875f7bd 100755
--- a/nfv/nfv-tests/nfv_unit_tests/tests/test_database_upgrades.py
+++ b/nfv/nfv-tests/nfv_unit_tests/tests/test_database_upgrades.py
@@ -9,25 +9,29 @@
 import subprocess

 from nfv_vim import database
 from nfv_vim import tables
+from . 
import testcase -def test_nfv_vim_database_upgrade_from_18_03(): - """ - Test VIM database upgrades from 18.03 GA - """ - root_dir = os.environ['VIRTUAL_ENV'] - devnull = open(os.devnull, 'w') - try: - vim_cmd = ("nfv-vim-manage db-load-data -d %s " - "-f %s/nfv_vim_db_18.03_GA" % (root_dir, root_dir)) +class TestNFVDatabaseUpgrade(testcase.NFVTestCase): - subprocess.check_call([vim_cmd], shell=True, stderr=devnull) + def test_nfv_vim_database_upgrade_from_18_03(self): + """ + Test VIM database upgrades from 18.03 GA + """ + root_dir = os.environ['VIRTUAL_ENV'] - except subprocess.CalledProcessError: - raise + devnull = open(os.devnull, 'w') + try: + vim_cmd = ("nfv-vim-manage db-load-data -d %s " + "-f %s/nfv_vim_db_18.03_GA" % (root_dir, root_dir)) - config = dict() - config['database_dir'] = root_dir - database.database_initialize(config) - database.database_migrate_data() - tables.tables_initialize() + subprocess.check_call([vim_cmd], shell=True, stderr=devnull) + + except subprocess.CalledProcessError: + raise + + config = dict() + config['database_dir'] = root_dir + database.database_initialize(config) + database.database_migrate_data() + tables.tables_initialize() diff --git a/nfv/nfv-tests/nfv_unit_tests/tests/test_instance.py b/nfv/nfv-tests/nfv_unit_tests/tests/test_instance.py index c3550b32..4f9bc86b 100755 --- a/nfv/nfv-tests/nfv_unit_tests/tests/test_instance.py +++ b/nfv/nfv-tests/nfv_unit_tests/tests/test_instance.py @@ -3,8 +3,8 @@ # # SPDX-License-Identifier: Apache-2.0 # +import fixtures import uuid -import mock from nfv_common import config @@ -24,342 +24,324 @@ from nfv_vim.tables._instance_group_table import InstanceGroupTable from nfv_vim.nfvi.objects import v1 as nfvi_objects -import utils - -# Constants - -# Globals -_tenant_table = Table() -_instance_type_table = InstanceTypeTable() -_image_table = ImageTable() -_instance_table = InstanceTable() -_instance_group_table = InstanceGroupTable() -_host_table = HostTable() -_host_group_table = HostGroupTable() -_host_aggregate_table = HostAggregateTable() - -# Don't attempt to write to the database while unit testing -_tenant_table.persist = False -_image_table.persist = False -_instance_type_table.persist = False -_instance_table.persist = False -_instance_group_table.persist = False -_host_table.persist = False -_host_group_table.persist = False -_host_aggregate_table.persist = False - - -def create_instance(instance_name, instance_type_name, image_name, host_name, - admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.UNLOCKED, - live_migration_timeout=None): - """ - Create an instance - """ - global _tenant_table, _instance_table, _image_table - - tenant_uuid = str(uuid.uuid4()) - - tenant = objects.Tenant(tenant_uuid, "%s_name" % tenant_uuid, '', True) - _tenant_table[tenant_uuid] = tenant - - for instance_type in _instance_type_table.values(): - if instance_type.name == instance_type_name: - for image in _image_table.values(): - if image.name == image_name: - instance_uuid = str(uuid.uuid4()) - - nfvi_instance = nfvi.objects.v1.Instance( - instance_uuid, instance_name, tenant_uuid, - admin_state=admin_state, - oper_state=nfvi.objects.v1.INSTANCE_OPER_STATE.ENABLED, - avail_status=list(), - action=nfvi.objects.v1.INSTANCE_ACTION.NONE, - host_name=host_name, - instance_type=utils.instance_type_to_flavor_dict( - instance_type), - image_uuid=image.uuid, - live_migration_timeout=live_migration_timeout) - - instance = objects.Instance(nfvi_instance) - _instance_table[instance.uuid] = instance - return instance - - assert 0, 
"Unknown instance_type_name: %s" % instance_type_name - - -def create_instance_type(instance_type_name, live_migration_timeout=None): - """ - Create an instance type - """ - global _instance_type_table - - instance_type_uuid = str(uuid.uuid4()) - - instance_type = objects.InstanceType(instance_type_uuid, - instance_type_name) - instance_type.update_details( - vcpus=1, mem_mb=64, disk_gb=1, ephemeral_gb=0, - swap_gb=0, guest_services=None, - auto_recovery=True, - live_migration_timeout=live_migration_timeout, - live_migration_max_downtime=500, - storage_type='local_image') - _instance_type_table[instance_type_uuid] = instance_type - - -def create_image(image_name, properties=None): - """ - Create an instance type - """ - global _image_table - - image_uuid = str(uuid.uuid4()) - - nfvi_image = nfvi_objects.Image(image_uuid, image_name, 'description', - nfvi_objects.IMAGE_AVAIL_STATUS.AVAILABLE, - nfvi_objects.IMAGE_ACTION.NONE, - 'BARE', - 'QCOW2', - 1, - 64, - 'public', - False, - properties=properties) - image = objects.Image(nfvi_image) - _image_table[image_uuid] = image - - -def create_instance_group(name, members, policies): - """ - Create an instance group - """ - global _instance_group_table - - member_uuids = [] - - for instance_uuid, instance in _instance_table.iteritems(): - if instance.name in members: - member_uuids.append(instance_uuid) - - nfvi_instance_group = nfvi.objects.v1.InstanceGroup( - uuid=str(uuid.uuid4()), - name=name, - member_uuids=member_uuids, - policies=policies - ) - - instance_group = objects.InstanceGroup(nfvi_instance_group) - _instance_group_table[instance_group.uuid] = instance_group - - -def create_host(host_name, - cpe=False, - admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.UNLOCKED, - software_load='12.01', - target_load='12.01'): - """ - Create a host - """ - global _host_table - personality = '' - - if host_name.startswith('controller'): - personality = HOST_PERSONALITY.CONTROLLER - if cpe: - personality = personality + ',' + HOST_PERSONALITY.COMPUTE - elif host_name.startswith('compute'): - personality = HOST_PERSONALITY.COMPUTE - elif host_name.startswith('storage'): - personality = HOST_PERSONALITY.STORAGE - else: - assert 0, "Invalid host_name: %s" % host_name - - nfvi_host = nfvi.objects.v1.Host( - uuid=str(uuid.uuid4()), - name=host_name, - personality=personality, - admin_state=admin_state, - oper_state=nfvi.objects.v1.HOST_OPER_STATE.ENABLED, - avail_status=nfvi.objects.v1.HOST_AVAIL_STATUS.AVAILABLE, - action=nfvi.objects.v1.HOST_ACTION.NONE, - software_load=software_load, - target_load=target_load, - uptime='1000' - ) - - host = objects.Host(nfvi_host, - initial_state=host_fsm.HOST_STATE.ENABLED) - _host_table[host.name] = host - - -def create_host_group(name, members, policies): - """ - Create a host group - """ - global _host_group_table - - member_uuids = [] - - for instance_uuid, instance in _instance_table.iteritems(): - if instance.name in members: - member_uuids.append(instance_uuid) - - nfvi_host_group = nfvi.objects.v1.HostGroup( - name=name, - member_names=members, - policies=policies - ) - - host_group = objects.HostGroup(nfvi_host_group) - _host_group_table[host_group.name] = host_group - - -def create_host_aggregate(name, host_names): - """ - Create a host aggregate - """ - global _host_aggregate_table - - nfvi_host_aggregate = nfvi.objects.v1.HostAggregate( - name=name, - host_names=host_names, - availability_zone='' - ) - - host_aggregate = objects.HostAggregate(nfvi_host_aggregate) - _host_aggregate_table[host_aggregate.name] = 
host_aggregate +from . import testcase +from . import utils def fake_event_issue(a, b, c, d): """ Mock out the _event_issue function because it is being called when instance objects are created. It ends up trying to communicate with another thread - (that doesn't exist) and this eventually leads to nosetests hanging if + (that doesn't exist) and this eventually leads to unit tests hanging if enough events are issued. """ return None -@mock.patch('nfv_vim.event_log._instance._event_issue', fake_event_issue) -@mock.patch('nfv_vim.tables._tenant_table._tenant_table', _tenant_table) -@mock.patch('nfv_vim.tables._host_table._host_table', _host_table) -@mock.patch('nfv_vim.tables._instance_group_table._instance_group_table', _instance_group_table) -@mock.patch('nfv_vim.tables._host_group_table._host_group_table', _host_group_table) -@mock.patch('nfv_vim.tables._host_aggregate_table._host_aggregate_table', _host_aggregate_table) -@mock.patch('nfv_vim.tables._instance_table._instance_table', _instance_table) -@mock.patch('nfv_vim.tables._instance_type_table._instance_type_table', _instance_type_table) -@mock.patch('nfv_vim.tables._image_table._image_table', _image_table) -class TestInstance(object): +# NOTE: The following testcases test the same scenarios as the testcases in +# nova/tests/unit/virt/libvirt/test_driver.py +class TestInstance(testcase.NFVTestCase): - def setup(self): + _tenant_table = Table() + _instance_type_table = InstanceTypeTable() + _image_table = ImageTable() + _instance_table = InstanceTable() + _instance_group_table = InstanceGroupTable() + _host_table = HostTable() + _host_group_table = HostGroupTable() + _host_aggregate_table = HostAggregateTable() + + # Don't attempt to write to the database while unit testing + _tenant_table.persist = False + _image_table.persist = False + _instance_type_table.persist = False + _instance_table.persist = False + _instance_group_table.persist = False + _host_table.persist = False + _host_group_table.persist = False + _host_aggregate_table.persist = False + + def setUp(self): """ Setup for testing. """ - pass + super(TestInstance, self).setUp() + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._tenant_table._tenant_table', + self._tenant_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_table._host_table', + self._host_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_group_table._instance_group_table', + self._instance_group_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_group_table._host_group_table', + self._host_group_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_aggregate_table._host_aggregate_table', + self._host_aggregate_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_table._instance_table', + self._instance_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_type_table._instance_type_table', + self._instance_type_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._image_table._image_table', + self._image_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.event_log._instance._event_issue', + fake_event_issue)) - def teardown(self): + def tearDown(self): """ Cleanup testing setup. 
""" - _tenant_table.clear() - _instance_type_table.clear() - _image_table.clear() - _instance_table.clear() - _instance_group_table.clear() - _host_table.clear() - _host_group_table.clear() - _host_aggregate_table.clear() + super(TestInstance, self).tearDown() + self._tenant_table.clear() + self._instance_type_table.clear() + self._image_table.clear() + self._instance_table.clear() + self._instance_group_table.clear() + self._host_table.clear() + self._host_group_table.clear() + self._host_aggregate_table.clear() - # NOTE: The following testcases test the same scenarios as the testcases in - # nova/tests/unit/virt/libvirt/test_driver.py + def create_instance(self, instance_name, instance_type_name, image_name, host_name, + admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.UNLOCKED, + live_migration_timeout=None): + """ + Create an instance + """ + tenant_uuid = str(uuid.uuid4()) + + tenant = objects.Tenant(tenant_uuid, "%s_name" % tenant_uuid, '', True) + self._tenant_table[tenant_uuid] = tenant + + for instance_type in self._instance_type_table.values(): + if instance_type.name == instance_type_name: + for image in self._image_table.values(): + if image.name == image_name: + instance_uuid = str(uuid.uuid4()) + + nfvi_instance = nfvi.objects.v1.Instance( + instance_uuid, instance_name, tenant_uuid, + admin_state=admin_state, + oper_state=nfvi.objects.v1.INSTANCE_OPER_STATE.ENABLED, + avail_status=list(), + action=nfvi.objects.v1.INSTANCE_ACTION.NONE, + host_name=host_name, + instance_type=utils.instance_type_to_flavor_dict( + instance_type), + image_uuid=image.uuid, + live_migration_timeout=live_migration_timeout) + + instance = objects.Instance(nfvi_instance) + self._instance_table[instance.uuid] = instance + return instance + + assert 0, "Unknown instance_type_name: %s" % instance_type_name + + def create_instance_type(self, instance_type_name, live_migration_timeout=None): + """ + Create an instance type + """ + instance_type_uuid = str(uuid.uuid4()) + instance_type = objects.InstanceType(instance_type_uuid, + instance_type_name) + instance_type.update_details( + vcpus=1, mem_mb=64, disk_gb=1, ephemeral_gb=0, + swap_gb=0, guest_services=None, + auto_recovery=True, + live_migration_timeout=live_migration_timeout, + live_migration_max_downtime=500, + storage_type='local_image') + self._instance_type_table[instance_type_uuid] = instance_type + + def create_image(self, image_name, properties=None): + """ + Create an image + """ + image_uuid = str(uuid.uuid4()) + nfvi_image = nfvi_objects.Image(image_uuid, image_name, 'description', + nfvi_objects.IMAGE_AVAIL_STATUS.AVAILABLE, + nfvi_objects.IMAGE_ACTION.NONE, + 'BARE', + 'QCOW2', + 1, + 64, + 'public', + False, + properties=properties) + image = objects.Image(nfvi_image) + self._image_table[image_uuid] = image + + def create_instance_group(self, name, members, policies): + """ + Create an instance group + """ + member_uuids = [] + for instance_uuid, instance in self._instance_table.iteritems(): + if instance.name in members: + member_uuids.append(instance_uuid) + + nfvi_instance_group = nfvi.objects.v1.InstanceGroup( + uuid=str(uuid.uuid4()), + name=name, + member_uuids=member_uuids, + policies=policies + ) + + instance_group = objects.InstanceGroup(nfvi_instance_group) + self._instance_group_table[instance_group.uuid] = instance_group + + def create_host(self, host_name, + cpe=False, + admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.UNLOCKED, + software_load='12.01', + target_load='12.01'): + """ + Create a host + """ + personality = '' + if 
host_name.startswith('controller'): + personality = HOST_PERSONALITY.CONTROLLER + if cpe: + personality = personality + ',' + HOST_PERSONALITY.COMPUTE + elif host_name.startswith('compute'): + personality = HOST_PERSONALITY.COMPUTE + elif host_name.startswith('storage'): + personality = HOST_PERSONALITY.STORAGE + else: + assert 0, "Invalid host_name: %s" % host_name + + nfvi_host = nfvi.objects.v1.Host( + uuid=str(uuid.uuid4()), + name=host_name, + personality=personality, + admin_state=admin_state, + oper_state=nfvi.objects.v1.HOST_OPER_STATE.ENABLED, + avail_status=nfvi.objects.v1.HOST_AVAIL_STATUS.AVAILABLE, + action=nfvi.objects.v1.HOST_ACTION.NONE, + software_load=software_load, + target_load=target_load, + uptime='1000' + ) + + host = objects.Host(nfvi_host, + initial_state=host_fsm.HOST_STATE.ENABLED) + self._host_table[host.name] = host + + def create_host_group(self, name, members, policies): + """ + Create a host group + """ + member_uuids = [] + for instance_uuid, instance in self._instance_table.iteritems(): + if instance.name in members: + member_uuids.append(instance_uuid) + + nfvi_host_group = nfvi.objects.v1.HostGroup( + name=name, + member_names=members, + policies=policies + ) + + host_group = objects.HostGroup(nfvi_host_group) + self._host_group_table[host_group.name] = host_group + + def create_host_aggregate(self, name, host_names): + """ + Create a host aggregate + """ + nfvi_host_aggregate = nfvi.objects.v1.HostAggregate( + name=name, + host_names=host_names, + availability_zone='' + ) + + host_aggregate = objects.HostAggregate(nfvi_host_aggregate) + self._host_aggregate_table[host_aggregate.name] = host_aggregate def test_live_migration_completion_timeout(self): - create_instance_type('small') - create_image('image_0') - instance = create_instance('test_instance_0', - 'small', - 'image_0', - 'compute-0') + self.create_instance_type('small') + self.create_image('image_0') + instance = self.create_instance('test_instance_0', + 'small', + 'image_0', + 'compute-0') assert 800 == instance.max_live_migrate_wait_in_secs def test_live_migration_completion_timeout_from_flavor(self): - create_instance_type('small', live_migration_timeout=300) - create_image('image_0') - instance = create_instance('test_instance_0', - 'small', - 'image_0', - 'compute-0') + self.create_instance_type('small', live_migration_timeout=300) + self.create_image('image_0') + instance = self.create_instance('test_instance_0', + 'small', + 'image_0', + 'compute-0') assert 300 == instance.max_live_migrate_wait_in_secs def test_live_migration_completion_timeout_from_image(self): - create_instance_type('small') - create_image('image_0', - properties={nfvi_objects.IMAGE_PROPERTY.LIVE_MIGRATION_TIMEOUT: "400"}) - instance = create_instance('test_instance_0', - 'small', - 'image_0', - 'compute-0') + self.create_instance_type('small') + self.create_image('image_0', + properties={nfvi_objects.IMAGE_PROPERTY.LIVE_MIGRATION_TIMEOUT: "400"}) + instance = self.create_instance('test_instance_0', + 'small', + 'image_0', + 'compute-0') assert 400 == instance.max_live_migrate_wait_in_secs def test_live_migration_completion_timeout_from_instance(self): - create_instance_type('small') - create_image('image_0') - instance = create_instance('test_instance_0', - 'small', - 'image_0', - 'compute-0', - live_migration_timeout=200) + self.create_instance_type('small') + self.create_image('image_0') + instance = self.create_instance('test_instance_0', + 'small', + 'image_0', + 'compute-0', + live_migration_timeout=200) assert 
200 == instance.max_live_migrate_wait_in_secs def test_live_migration_completion_timeout_flavor_overwrite_image(self): - create_instance_type('small', live_migration_timeout=300) - create_image('image_0', - properties={nfvi_objects.IMAGE_PROPERTY.LIVE_MIGRATION_TIMEOUT: "400"}) - instance = create_instance('test_instance_0', - 'small', - 'image_0', - 'compute-0') + self.create_instance_type('small', live_migration_timeout=300) + self.create_image('image_0', + properties={nfvi_objects.IMAGE_PROPERTY.LIVE_MIGRATION_TIMEOUT: "400"}) + instance = self.create_instance('test_instance_0', + 'small', + 'image_0', + 'compute-0') assert 300 == instance.max_live_migrate_wait_in_secs def test_live_migration_completion_timeout_image_overwrite_flavor(self): - create_instance_type('small', live_migration_timeout=300) - create_image('image_0', - properties={nfvi_objects.IMAGE_PROPERTY.LIVE_MIGRATION_TIMEOUT: "200"}) - instance = create_instance('test_instance_0', - 'small', - 'image_0', - 'compute-0') + self.create_instance_type('small', live_migration_timeout=300) + self.create_image('image_0', + properties={nfvi_objects.IMAGE_PROPERTY.LIVE_MIGRATION_TIMEOUT: "200"}) + instance = self.create_instance('test_instance_0', + 'small', + 'image_0', + 'compute-0') assert 200 == instance.max_live_migrate_wait_in_secs def test_live_migration_completion_timeout_instance_overwrite_all(self): - create_instance_type('small', live_migration_timeout=300) - create_image('image_0', - properties={nfvi_objects.IMAGE_PROPERTY.LIVE_MIGRATION_TIMEOUT: "200"}) - instance = create_instance('test_instance_0', - 'small', - 'image_0', - 'compute-0', - live_migration_timeout=100) + self.create_instance_type('small', live_migration_timeout=300) + self.create_image('image_0', + properties={nfvi_objects.IMAGE_PROPERTY.LIVE_MIGRATION_TIMEOUT: "200"}) + instance = self.create_instance('test_instance_0', + 'small', + 'image_0', + 'compute-0', + live_migration_timeout=100) assert 100 == instance.max_live_migrate_wait_in_secs def test_live_migration_completion_timeout_overwrite_zero(self): - create_instance_type('small', live_migration_timeout=300) - create_image('image_0', - properties={nfvi_objects.IMAGE_PROPERTY.LIVE_MIGRATION_TIMEOUT: "0"}) - instance = create_instance('test_instance_0', - 'small', - 'image_0', - 'compute-0', - live_migration_timeout=400) + self.create_instance_type('small', live_migration_timeout=300) + self.create_image('image_0', + properties={nfvi_objects.IMAGE_PROPERTY.LIVE_MIGRATION_TIMEOUT: "0"}) + instance = self.create_instance('test_instance_0', + 'small', + 'image_0', + 'compute-0', + live_migration_timeout=400) assert 300 == instance.max_live_migrate_wait_in_secs - create_instance_type('small_2', live_migration_timeout=0) - create_image('image_1', - properties={nfvi_objects.IMAGE_PROPERTY.LIVE_MIGRATION_TIMEOUT: "200"}) - instance = create_instance('test_instance_0', - 'small_2', - 'image_1', - 'compute-0') + self.create_instance_type('small_2', live_migration_timeout=0) + self.create_image('image_1', + properties={nfvi_objects.IMAGE_PROPERTY.LIVE_MIGRATION_TIMEOUT: "200"}) + instance = self.create_instance('test_instance_0', + 'small_2', + 'image_1', + 'compute-0') assert 200 == instance.max_live_migrate_wait_in_secs # NOTE: End of tests from nova @@ -367,18 +349,18 @@ class TestInstance(object): def test_live_migration_completion_timeout_out_of_range(self): config.CONF = {'instance-configuration': {}} - create_instance_type('small', live_migration_timeout=1000) - create_image('image_0') - instance = 
create_instance('test_instance_0', - 'small', - 'image_0', - 'compute-0') + self.create_instance_type('small', live_migration_timeout=1000) + self.create_image('image_0') + instance = self.create_instance('test_instance_0', + 'small', + 'image_0', + 'compute-0') assert 800 == instance.max_live_migrate_wait_in_secs - create_instance_type('small_2', live_migration_timeout=10) - create_image('image_0') - instance = create_instance('test_instance_1', - 'small_2', - 'image_0', - 'compute-0') + self.create_instance_type('small_2', live_migration_timeout=10) + self.create_image('image_0') + instance = self.create_instance('test_instance_1', + 'small_2', + 'image_0', + 'compute-0') assert 120 == instance.max_live_migrate_wait_in_secs diff --git a/nfv/nfv-tests/nfv_unit_tests/tests/test_instance_director.py b/nfv/nfv-tests/nfv_unit_tests/tests/test_instance_director.py index f2f2c4d8..53ca1ae0 100755 --- a/nfv/nfv-tests/nfv_unit_tests/tests/test_instance_director.py +++ b/nfv/nfv-tests/nfv_unit_tests/tests/test_instance_director.py @@ -3,17 +3,18 @@ # # SPDX-License-Identifier: Apache-2.0 # -import uuid import mock -import nose +import uuid from nfv_vim import nfvi from nfv_vim import objects +from nfv_vim.tables._image_table import ImageTable from nfv_vim.tables._table import Table from nfv_vim.directors._instance_director import InstanceDirector -import utils +from . import testcase +from . import utils # Constants _audit_interval = 330 @@ -23,369 +24,375 @@ _rebuild_timeout = 300 _reboot_timeout = 300 _migrate_timeout = 600 -# Globals -_tenant_table = Table() -_instance_table = Table() -_instance_type_table = Table() -_director = None + +def fake_timer(a, b, c, d): + return 1234 -def create_instance(instance_type_name, instance_name, recovery_priority=None): - """ - Create an instance - """ - global _tenant_table +class TestInstanceDirector(testcase.NFVTestCase): + _image_table = ImageTable() + _instance_table = Table() + _instance_type_table = Table() + _tenant_table = Table() - tenant_uuid = uuid.uuid4() - image_uuid = uuid.uuid4() + # Don't attempt to write to the database while unit testing + _image_table.persist = False + _instance_table.persist = False + _instance_type_table.persist = False + _tenant_table.persist = False - tenant = objects.Tenant(tenant_uuid, "%s_name" % tenant_uuid, '', True) - _tenant_table[tenant_uuid] = tenant + _director = None - for instance_type in _instance_type_table.values(): - if instance_type.name == instance_type_name: - instance_uuid = uuid.uuid4() + def setUp(self): + super(TestInstanceDirector, self).setUp() + self.instance_setup_func() - nfvi_instance = nfvi.objects.v1.Instance( - instance_uuid, instance_name, tenant_uuid, - admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.UNLOCKED, - oper_state=nfvi.objects.v1.INSTANCE_OPER_STATE.ENABLED, - avail_status=list(), - action=nfvi.objects.v1.INSTANCE_ACTION.NONE, - host_name='compute-0', - instance_type=utils.instance_type_to_flavor_dict( - instance_type), - image_uuid=image_uuid, - recovery_priority=recovery_priority) + def tearDown(self): + super(TestInstanceDirector, self).tearDown() + self._tenant_table.clear() + self._instance_table.clear() + self._instance_type_table.clear() + self._director = None - return objects.Instance(nfvi_instance) + def create_instance(self, instance_type_name, instance_name, recovery_priority=None): + """ + Create an instance + """ + tenant_uuid = uuid.uuid4() + image_uuid = uuid.uuid4() - return None + tenant = objects.Tenant(tenant_uuid, "%s_name" % tenant_uuid, '', True) + 
self._tenant_table[tenant_uuid] = tenant + for instance_type in self._instance_type_table.values(): + if instance_type.name == instance_type_name: + instance_uuid = uuid.uuid4() -@mock.patch('nfv_common.timers.timers_create_timer') -def instance_setup_func(timers_create_timer_mock): - """ - Setup for testing. - """ - global _instance_table, _instance_type_table, _director + nfvi_instance = nfvi.objects.v1.Instance( + instance_uuid, instance_name, tenant_uuid, + admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.UNLOCKED, + oper_state=nfvi.objects.v1.INSTANCE_OPER_STATE.ENABLED, + avail_status=list(), + action=nfvi.objects.v1.INSTANCE_ACTION.NONE, + host_name='compute-0', + instance_type=utils.instance_type_to_flavor_dict( + instance_type), + image_uuid=image_uuid, + recovery_priority=recovery_priority) - instance_type_uuid = uuid.uuid4() + return objects.Instance(nfvi_instance) - if 0 == len(_instance_type_table): - instance_type = objects.InstanceType(instance_type_uuid, 'small') - instance_type.update_details(vcpus=1, mem_mb=64, disk_gb=1, ephemeral_gb=0, - swap_gb=0, guest_services=None, - auto_recovery=True, - live_migration_timeout=800, - live_migration_max_downtime=500, - storage_type='local_image') - _instance_type_table[instance_type_uuid] = instance_type + return None - _instance_table.clear() + @mock.patch('nfv_common.timers.timers_create_timer', fake_timer) + def instance_setup_func(self): + """ + Setup for testing. + """ + instance_type_uuid = uuid.uuid4() - if _director is None: - _director = InstanceDirector( - max_concurrent_recovering_instances=4, - max_concurrent_migrates_per_host=1, - max_concurrent_evacuates_per_host=1, - recovery_audit_interval=_audit_interval, - recovery_audit_cooldown=_audit_cooldown, - recovery_audit_batch_interval=2, - recovery_cooldown=_recovery_cooldown, - rebuild_timeout=_rebuild_timeout, - reboot_timeout=_reboot_timeout, - migrate_timeout=_migrate_timeout, - single_hypervisor=False, - recovery_threshold=250, - max_throttled_recovering_instances=2 - ) + if 0 == len(self._instance_type_table): + instance_type = objects.InstanceType(instance_type_uuid, 'small') + instance_type.update_details(vcpus=1, mem_mb=64, disk_gb=1, ephemeral_gb=0, + swap_gb=0, guest_services=None, + auto_recovery=True, + live_migration_timeout=800, + live_migration_max_downtime=500, + storage_type='local_image') + self._instance_type_table[instance_type_uuid] = instance_type + self._instance_table.clear() -def instance_teardown_func(): - """ - Cleanup testing setup. 
- """ - pass + if self._director is None: + self._director = InstanceDirector( + max_concurrent_recovering_instances=4, + max_concurrent_migrates_per_host=1, + max_concurrent_evacuates_per_host=1, + recovery_audit_interval=_audit_interval, + recovery_audit_cooldown=_audit_cooldown, + recovery_audit_batch_interval=2, + recovery_cooldown=_recovery_cooldown, + rebuild_timeout=_rebuild_timeout, + reboot_timeout=_reboot_timeout, + migrate_timeout=_migrate_timeout, + single_hypervisor=False, + recovery_threshold=250, + max_throttled_recovering_instances=2 + ) + @mock.patch('nfv_vim.tables.tables_get_tenant_table') + @mock.patch('nfv_vim.tables.tables_get_instance_type_table') + @mock.patch('nfv_vim.tables.tables_get_instance_table') + def test_instance_director_recovery_list(self, + tables_get_instance_table_mock, + tables_get_instance_type_table_mock, + tables_get_tenant_table_mock): + """ + Test the instance director recovery list logic + """ + tables_get_tenant_table_mock.return_value = self._tenant_table + tables_get_instance_table_mock.return_value = self._instance_table + tables_get_instance_type_table_mock.return_value = self._instance_type_table -@nose.with_setup(instance_setup_func, instance_teardown_func) -@mock.patch('nfv_vim.tables.tables_get_tenant_table') -@mock.patch('nfv_vim.tables.tables_get_instance_type_table') -@mock.patch('nfv_vim.tables.tables_get_instance_table') -def test_instance_director_recovery_list( - tables_get_instance_table_mock, tables_get_instance_type_table_mock, - tables_get_tenant_table_mock): - """ - Test the instance director recovery list logic - """ - global _tenant_table, _instance_table, _instance_type_table, _director + instance_1 = self.create_instance('small', 'instance_1') + self._instance_table[instance_1.uuid] = instance_1 - tables_get_tenant_table_mock.return_value = _tenant_table - tables_get_instance_table_mock.return_value = _instance_table - tables_get_instance_type_table_mock.return_value = _instance_type_table + # Validate that the Instance Director recovery_list creation + # -- with no instances in the failed state, verify that the list is + # empty and the normal audit interval is returned + (next_audit_interval, instance_recovery_list, instance_failed_list, + instance_rebuilding_list, instance_migrating_list, + instance_rebooting_list) = self._director._get_instance_recovery_list() + assert next_audit_interval == _audit_interval + assert 0 == len(instance_recovery_list) - instance_1 = create_instance('small', 'instance_1') - _instance_table[instance_1.uuid] = instance_1 + instance_1._nfvi_instance.avail_status.append( + nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) - # Validate that the Instance Director recovery_list creation - # -- with no instances in the failed state, verify that the list is - # empty and the normal audit interval is returned - (next_audit_interval, instance_recovery_list, instance_failed_list, - instance_rebuilding_list, instance_migrating_list, - instance_rebooting_list) = _director._get_instance_recovery_list() - assert next_audit_interval == _audit_interval - assert 0 == len(instance_recovery_list) + # -- with one instance in the failed state, but elapsed time is less + # than the recovery cooldown, verify that the list is empty, but + # the audit interval is set to the recovery cooldown period + instance_1._elapsed_time_in_state = _recovery_cooldown - 1 + (next_audit_interval, instance_recovery_list, instance_failed_list, + instance_rebuilding_list, instance_migrating_list, + instance_rebooting_list) = 
self._director._get_instance_recovery_list() + assert next_audit_interval == _recovery_cooldown + assert 0 == len(instance_recovery_list) - instance_1._nfvi_instance.avail_status.append( - nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) + # -- with one instance in the failed state, but elapsed time is greater + # than or equal to the recovery cooldown, verify that the list contains + # one instance and the audit interval is set to the recovery cooldown + # period + instance_1._elapsed_time_in_state = _recovery_cooldown + (next_audit_interval, instance_recovery_list, instance_failed_list, + instance_rebuilding_list, instance_migrating_list, + instance_rebooting_list) = self._director._get_instance_recovery_list() + assert next_audit_interval == _recovery_cooldown + assert 1 == len(instance_recovery_list) + assert instance_recovery_list[0].name == instance_1.name - # -- with one instance in the failed state, but elapsed time is less - # than the recovery cooldown, verify that the list is empty, but - # the audit interval is set to the recovery cooldown period - instance_1._elapsed_time_in_state = _recovery_cooldown - 1 - (next_audit_interval, instance_recovery_list, instance_failed_list, - instance_rebuilding_list, instance_migrating_list, - instance_rebooting_list) = _director._get_instance_recovery_list() - assert next_audit_interval == _recovery_cooldown - assert 0 == len(instance_recovery_list) + @mock.patch('nfv_vim.tables.tables_get_tenant_table') + @mock.patch('nfv_vim.tables.tables_get_instance_type_table') + @mock.patch('nfv_vim.tables.tables_get_instance_table') + def test_instance_director_recovery_list_order(self, + tables_get_instance_table_mock, + tables_get_instance_type_table_mock, + tables_get_tenant_table_mock): + """ + Test the instance director recovery list ordering + """ + tables_get_tenant_table_mock.return_value = self._tenant_table + tables_get_instance_table_mock.return_value = self._instance_table + tables_get_instance_type_table_mock.return_value = self._instance_type_table - # -- with one instance in the failed state, but elapsed time is greater - # than or equal to the recovery cooldown, verify that the list contains - # one instance and the audit interval is set to the recovery cooldown - # period - instance_1._elapsed_time_in_state = _recovery_cooldown - (next_audit_interval, instance_recovery_list, instance_failed_list, - instance_rebuilding_list, instance_migrating_list, - instance_rebooting_list) = _director._get_instance_recovery_list() - assert next_audit_interval == _recovery_cooldown - assert 1 == len(instance_recovery_list) - assert instance_recovery_list[0].name == instance_1.name + instance_1 = self.create_instance('small', 'instance_1') + instance_1._elapsed_time_in_state = _recovery_cooldown + instance_1._nfvi_instance.avail_status.append( + nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) + instance_1._nfvi_instance['instance_type']['vcpus'] = 1 + instance_1._nfvi_instance['instance_type']['ram'] = 32 + instance_1._nfvi_instance['instance_type']['disk'] = 2 + instance_1._nfvi_instance['instance_type']['swap'] = 0 + self._instance_table[instance_1.uuid] = instance_1 + # Validate the Instance Director recovery_list order + # -- with one instance in the failed state + (next_audit_interval, instance_recovery_list, instance_failed_list, + instance_rebuilding_list, instance_migrating_list, + instance_rebooting_list) = self._director._get_instance_recovery_list() + assert next_audit_interval == _recovery_cooldown + assert 1 == len(instance_recovery_list) + 
assert instance_recovery_list[0].name == instance_1.name -@nose.with_setup(instance_setup_func, instance_teardown_func) -@mock.patch('nfv_vim.tables.tables_get_tenant_table') -@mock.patch('nfv_vim.tables.tables_get_instance_type_table') -@mock.patch('nfv_vim.tables.tables_get_instance_table') -def test_instance_director_recovery_list_order( - tables_get_instance_table_mock, tables_get_instance_type_table_mock, - tables_get_tenant_table_mock): - """ - Test the instance director recovery list ordering - """ - global _tenant_table, _instance_table, _instance_type_table, _director + instance_2 = self.create_instance('small', 'instance_2') + instance_2._elapsed_time_in_state = _recovery_cooldown + instance_2._nfvi_instance.avail_status.append( + nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) + instance_2._nfvi_instance['instance_type']['vcpus'] = 2 + instance_2._nfvi_instance['instance_type']['ram'] = 32 + instance_2._nfvi_instance['instance_type']['disk'] = 1 + instance_2._nfvi_instance['instance_type']['swap'] = 0 + self._instance_table[instance_2.uuid] = instance_2 - tables_get_tenant_table_mock.return_value = _tenant_table - tables_get_instance_table_mock.return_value = _instance_table - tables_get_instance_type_table_mock.return_value = _instance_type_table + # -- with two instances in the failed state + # vcpus takes precedence over disk_gb + (next_audit_interval, instance_recovery_list, instance_failed_list, + instance_rebuilding_list, instance_migrating_list, + instance_rebooting_list) = self._director._get_instance_recovery_list() + assert next_audit_interval == _recovery_cooldown + assert 2 == len(instance_recovery_list) + assert instance_recovery_list[0].name == instance_2.name + assert instance_recovery_list[1].name == instance_1.name - instance_1 = create_instance('small', 'instance_1') - instance_1._elapsed_time_in_state = _recovery_cooldown - instance_1._nfvi_instance.avail_status.append( - nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) - instance_1._nfvi_instance['instance_type']['vcpus'] = 1 - instance_1._nfvi_instance['instance_type']['ram'] = 32 - instance_1._nfvi_instance['instance_type']['disk'] = 2 - instance_1._nfvi_instance['instance_type']['swap'] = 0 - _instance_table[instance_1.uuid] = instance_1 + instance_3 = self.create_instance('small', 'instance_3', recovery_priority=5) + instance_3._elapsed_time_in_state = _recovery_cooldown + instance_3._nfvi_instance.avail_status.append( + nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) + instance_3._nfvi_instance['instance_type']['vcpus'] = 1 + instance_3._nfvi_instance['instance_type']['ram'] = 32 + instance_3._nfvi_instance['instance_type']['disk'] = 0 + instance_3._nfvi_instance['instance_type']['swap'] = 0 + self._instance_table[instance_3.uuid] = instance_3 - # Validate the Instance Director recovery_list order - # -- with one instance in the failed state - (next_audit_interval, instance_recovery_list, instance_failed_list, - instance_rebuilding_list, instance_migrating_list, - instance_rebooting_list) = _director._get_instance_recovery_list() - assert next_audit_interval == _recovery_cooldown - assert 1 == len(instance_recovery_list) - assert instance_recovery_list[0].name == instance_1.name + # -- with three instances in the failed state + # recovery_priority takes precedence over instance size + (next_audit_interval, instance_recovery_list, instance_failed_list, + instance_rebuilding_list, instance_migrating_list, + instance_rebooting_list) = self._director._get_instance_recovery_list() + assert next_audit_interval == 
_recovery_cooldown + assert 3 == len(instance_recovery_list) + assert instance_recovery_list[0].name == instance_3.name + assert instance_recovery_list[1].name == instance_2.name + assert instance_recovery_list[2].name == instance_1.name - instance_2 = create_instance('small', 'instance_2') - instance_2._elapsed_time_in_state = _recovery_cooldown - instance_2._nfvi_instance.avail_status.append( - nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) - instance_2._nfvi_instance['instance_type']['vcpus'] = 2 - instance_2._nfvi_instance['instance_type']['ram'] = 32 - instance_2._nfvi_instance['instance_type']['disk'] = 1 - instance_2._nfvi_instance['instance_type']['swap'] = 0 - _instance_table[instance_2.uuid] = instance_2 + instance_4 = self.create_instance('small', 'instance_4', recovery_priority=1) + instance_4._elapsed_time_in_state = _recovery_cooldown + instance_4._nfvi_instance.avail_status.append( + nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) + instance_4._nfvi_instance['instance_type']['vcpus'] = 1 + instance_4._nfvi_instance['instance_type']['ram'] = 32 + instance_4._nfvi_instance['instance_type']['disk'] = 0 + instance_4._nfvi_instance['instance_type']['swap'] = 0 + self._instance_table[instance_4.uuid] = instance_4 - # -- with two instances in the failed state - # vcpus takes precedence over disk_gb - (next_audit_interval, instance_recovery_list, instance_failed_list, - instance_rebuilding_list, instance_migrating_list, - instance_rebooting_list) = _director._get_instance_recovery_list() - assert next_audit_interval == _recovery_cooldown - assert 2 == len(instance_recovery_list) - assert instance_recovery_list[0].name == instance_2.name - assert instance_recovery_list[1].name == instance_1.name + # -- with four instances in the failed state + # recovery_priority sorts instances + (next_audit_interval, instance_recovery_list, instance_failed_list, + instance_rebuilding_list, instance_migrating_list, + instance_rebooting_list) = self._director._get_instance_recovery_list() + assert next_audit_interval == _recovery_cooldown + assert 4 == len(instance_recovery_list) + assert instance_recovery_list[0].name == instance_4.name + assert instance_recovery_list[1].name == instance_3.name + assert instance_recovery_list[2].name == instance_2.name + assert instance_recovery_list[3].name == instance_1.name - instance_3 = create_instance('small', 'instance_3', recovery_priority=5) - instance_3._elapsed_time_in_state = _recovery_cooldown - instance_3._nfvi_instance.avail_status.append( - nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) - instance_3._nfvi_instance['instance_type']['vcpus'] = 1 - instance_3._nfvi_instance['instance_type']['ram'] = 32 - instance_3._nfvi_instance['instance_type']['disk'] = 0 - instance_3._nfvi_instance['instance_type']['swap'] = 0 - _instance_table[instance_3.uuid] = instance_3 + instance_5 = self.create_instance('small', 'instance_5', recovery_priority=10) + instance_5._elapsed_time_in_state = _recovery_cooldown + instance_5._nfvi_instance.avail_status.append( + nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) + instance_5._nfvi_instance['instance_type']['vcpus'] = 2 + instance_5._nfvi_instance['instance_type']['ram'] = 32 + instance_5._nfvi_instance['instance_type']['disk'] = 0 + instance_5._nfvi_instance['instance_type']['swap'] = 0 + self._instance_table[instance_5.uuid] = instance_5 - # -- with three instances in the failed state - # recovery_priority takes precedence over instance size - (next_audit_interval, instance_recovery_list, instance_failed_list, - 
instance_rebuilding_list, instance_migrating_list, - instance_rebooting_list) = _director._get_instance_recovery_list() - assert next_audit_interval == _recovery_cooldown - assert 3 == len(instance_recovery_list) - assert instance_recovery_list[0].name == instance_3.name - assert instance_recovery_list[1].name == instance_2.name - assert instance_recovery_list[2].name == instance_1.name + # -- with five instances in the failed state + # no recovery_priority treated the same as priority 10 + (next_audit_interval, instance_recovery_list, instance_failed_list, + instance_rebuilding_list, instance_migrating_list, + instance_rebooting_list) = self._director._get_instance_recovery_list() + assert next_audit_interval == _recovery_cooldown + assert 5 == len(instance_recovery_list) + assert instance_recovery_list[0].name == instance_4.name + assert instance_recovery_list[1].name == instance_3.name + assert instance_recovery_list[2].name == instance_2.name + assert instance_recovery_list[3].name == instance_5.name + assert instance_recovery_list[4].name == instance_1.name - instance_4 = create_instance('small', 'instance_4', recovery_priority=1) - instance_4._elapsed_time_in_state = _recovery_cooldown - instance_4._nfvi_instance.avail_status.append( - nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) - instance_4._nfvi_instance['instance_type']['vcpus'] = 1 - instance_4._nfvi_instance['instance_type']['ram'] = 32 - instance_4._nfvi_instance['instance_type']['disk'] = 0 - instance_4._nfvi_instance['instance_type']['swap'] = 0 - _instance_table[instance_4.uuid] = instance_4 + @mock.patch('nfv_vim.tables.tables_get_image_table') + @mock.patch('nfv_vim.tables.tables_get_tenant_table') + @mock.patch('nfv_vim.tables.tables_get_instance_type_table') + @mock.patch('nfv_vim.tables.tables_get_instance_table') + @mock.patch('nfv_vim.dor.system_is_stabilized') + @mock.patch('nfv_vim.dor.dor_is_complete') + def test_instance_director_recover_instance(self, + dor_is_complete_mock, + system_is_stabilized_mock, + tables_get_instance_table_mock, + tables_get_instance_type_table_mock, + tables_get_tenant_table_mock, + tables_get_image_table_mock): + """ + Test the instance director recover instance logic + """ + tables_get_tenant_table_mock.return_value = self._tenant_table + tables_get_instance_table_mock.return_value = self._instance_table + tables_get_instance_type_table_mock.return_value = self._instance_type_table + tables_get_image_table_mock.return_value = self._image_table - # -- with four instances in the failed state - # recovery_priority sorts instances - (next_audit_interval, instance_recovery_list, instance_failed_list, - instance_rebuilding_list, instance_migrating_list, - instance_rebooting_list) = _director._get_instance_recovery_list() - assert next_audit_interval == _recovery_cooldown - assert 4 == len(instance_recovery_list) - assert instance_recovery_list[0].name == instance_4.name - assert instance_recovery_list[1].name == instance_3.name - assert instance_recovery_list[2].name == instance_2.name - assert instance_recovery_list[3].name == instance_1.name + system_is_stabilized_mock.return_value = True + dor_is_complete_mock.return_value = True - instance_5 = create_instance('small', 'instance_5', recovery_priority=10) - instance_5._elapsed_time_in_state = _recovery_cooldown - instance_5._nfvi_instance.avail_status.append( - nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) - instance_5._nfvi_instance['instance_type']['vcpus'] = 2 - instance_5._nfvi_instance['instance_type']['ram'] = 32 - 
instance_5._nfvi_instance['instance_type']['disk'] = 0 - instance_5._nfvi_instance['instance_type']['swap'] = 0 - _instance_table[instance_5.uuid] = instance_5 + instance_1 = self.create_instance('small', 'instance_1') + instance_1.fail = mock.Mock() + instance_1.do_action = mock.Mock() + instance_1._nfvi_instance.avail_status.append( + nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) - # -- with five instances in the failed state - # no recovery_priority treated the same as priority 10 - (next_audit_interval, instance_recovery_list, instance_failed_list, - instance_rebuilding_list, instance_migrating_list, - instance_rebooting_list) = _director._get_instance_recovery_list() - assert next_audit_interval == _recovery_cooldown - assert 5 == len(instance_recovery_list) - assert instance_recovery_list[0].name == instance_4.name - assert instance_recovery_list[1].name == instance_3.name - assert instance_recovery_list[2].name == instance_2.name - assert instance_recovery_list[3].name == instance_5.name - assert instance_recovery_list[4].name == instance_1.name + self._director._is_host_enabled = mock.Mock(return_value=True) + self._director._is_hypervisor_enabled = mock.Mock(return_value=True) + self._director.upgrade_inprogress = mock.Mock(return_value=False) + # Set_A + # -- the first attempt to recover an instance that is failed on an + # enabled host, verify a reboot is attempted + self._director.instance_recovered(instance_1) + self._director.recover_instance(instance_1) + instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBOOT, + initiated_by='director') -@nose.with_setup(instance_setup_func, instance_teardown_func) -@mock.patch('nfv_vim.tables.tables_get_tenant_table') -@mock.patch('nfv_vim.tables.tables_get_instance_type_table') -@mock.patch('nfv_vim.tables.tables_get_instance_table') -@mock.patch('nfv_vim.dor.system_is_stabilized') -@mock.patch('nfv_vim.dor.dor_is_complete') -def test_instance_director_recover_instance( - dor_is_complete_mock, system_is_stabilized_mock, - tables_get_instance_table_mock, tables_get_instance_type_table_mock, - tables_get_tenant_table_mock): - """ - Test the instance director recover instance logic - """ - global _instance_table, _instance_type_table, _director + # -- a subsequent attempt to recover an instance that is failed on an + # enabled host and the instance has an image to rebuild from, verify + # a rebuild is attempted + self._director.recover_instance(instance_1) + instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBUILD, + initiated_by='director') - tables_get_tenant_table_mock.return_value = _tenant_table - tables_get_instance_table_mock.return_value = _instance_table - tables_get_instance_type_table_mock.return_value = _instance_type_table + # Set_B + # -- the first attempt to recover an instance that is failed on an + # enabled host, verify a reboot is attempted + original_image_uuid = instance_1._nfvi_instance.image_uuid + instance_1._nfvi_instance.image_uuid = None + self._director.instance_recovered(instance_1) + self._director.recover_instance(instance_1) + instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBOOT, + initiated_by='director') - system_is_stabilized_mock.return_value = True - dor_is_complete_mock.return_value = True + # -- a subsequent attempt to recover an instance that is failed on an + # enabled host and the instance does not have an image to rebuild from, + # verify a reboot is attempted + self._director.recover_instance(instance_1) + 
instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBOOT, + initiated_by='director') + instance_1._nfvi_instance.image_uuid = original_image_uuid - instance_1 = create_instance('small', 'instance_1') - instance_1.fail = mock.Mock() - instance_1.do_action = mock.Mock() - instance_1._nfvi_instance.avail_status.append( - nfvi.objects.v1.INSTANCE_AVAIL_STATUS.FAILED) + # Set_C + # -- instance is rebuilding or evacuating and the instance has an image + # to rebuild from, verify that a rebuild is attempted + self._director.instance_recovered(instance_1) + instance_1._nfvi_instance.action = nfvi.objects.v1.INSTANCE_ACTION.REBUILDING + self._director.recover_instance(instance_1) + instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBUILD, + initiated_by='director') - _director._is_host_enabled = mock.Mock(return_value=True) - _director._is_hypervisor_enabled = mock.Mock(return_value=True) - _director.upgrade_inprogress = mock.Mock(return_value=False) + # Set_D + # -- instance is rebuilding or evacuating and the instance does not have an + # image to rebuild from, verify that a reboot is attempted + original_image_uuid = instance_1._nfvi_instance.image_uuid + instance_1._nfvi_instance.image_uuid = None + self._director.recover_instance(instance_1) + instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBOOT, + initiated_by='director') + instance_1._nfvi_instance.image_uuid = original_image_uuid + instance_1._nfvi_instance.action = nfvi.objects.v1.INSTANCE_ACTION.NONE - # Set_A - # -- the first attempt to recover an instance that is failed on an - # enabled host, verify a reboot is attempted - _director.instance_recovered(instance_1) - _director.recover_instance(instance_1) - instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBOOT, - initiated_by='director') + # Set_E + # -- instance is migrating and the instance has an image to rebuild from, + # verify that a rebuild is attempted + self._director.instance_recovered(instance_1) + instance_1._nfvi_instance.action = nfvi.objects.v1.INSTANCE_ACTION.MIGRATING + self._director.recover_instance(instance_1) + instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBUILD, + initiated_by='director') - # -- a subsequent attempt to recover an instance that is failed on an - # enabled host and the instance has an image to rebuild from, verify - # a rebuild is attempted - _director.recover_instance(instance_1) - instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBUILD, - initiated_by='director') - - # Set_B - # -- the first attempt to recover an instance that is failed on an - # enabled host, verify a reboot is attempted - original_image_uuid = instance_1._nfvi_instance.image_uuid - instance_1._nfvi_instance.image_uuid = None - _director.instance_recovered(instance_1) - _director.recover_instance(instance_1) - instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBOOT, - initiated_by='director') - - # -- a subsequent attempt to recover an instance that is failed on an - # enabled host and the instance does not have an image to rebuild from, - # verify a reboot is attempted - _director.recover_instance(instance_1) - instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBOOT, - initiated_by='director') - instance_1._nfvi_instance.image_uuid = original_image_uuid - - # Set_C - # -- instance is rebuilding or evacuating and the instance has an image - # to rebuild from, verify that a rebuild is attempted - 
_director.instance_recovered(instance_1) - instance_1._nfvi_instance.action = nfvi.objects.v1.INSTANCE_ACTION.REBUILDING - _director.recover_instance(instance_1) - instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBUILD, - initiated_by='director') - - # Set_D - # -- instance is rebuilding or evacuating and the instance does not have an - # image to rebuild from, verify that a reboot is attempted - original_image_uuid = instance_1._nfvi_instance.image_uuid - instance_1._nfvi_instance.image_uuid = None - _director.recover_instance(instance_1) - instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBOOT, - initiated_by='director') - instance_1._nfvi_instance.image_uuid = original_image_uuid - instance_1._nfvi_instance.action = nfvi.objects.v1.INSTANCE_ACTION.NONE - - # Set_E - # -- instance is migrating and the instance has an image to rebuild from, - # verify that a rebuild is attempted - _director.instance_recovered(instance_1) - instance_1._nfvi_instance.action = nfvi.objects.v1.INSTANCE_ACTION.MIGRATING - _director.recover_instance(instance_1) - instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBUILD, - initiated_by='director') - - # Set_F - # -- instance is migrating and the instance does not have an image to - # rebuild from, verify that a reboot is attempted - original_image_uuid = instance_1._nfvi_instance.image_uuid - instance_1._nfvi_instance.image_uuid = None - _director.recover_instance(instance_1) - instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBOOT, - initiated_by='director') - instance_1._nfvi_instance.image_uuid = original_image_uuid - instance_1._nfvi_instance.action = nfvi.objects.v1.INSTANCE_ACTION.NONE + # Set_F + # -- instance is migrating and the instance does not have an image to + # rebuild from, verify that a reboot is attempted + original_image_uuid = instance_1._nfvi_instance.image_uuid + instance_1._nfvi_instance.image_uuid = None + self._director.recover_instance(instance_1) + instance_1.do_action.assert_called_with(objects.INSTANCE_ACTION_TYPE.REBOOT, + initiated_by='director') + instance_1._nfvi_instance.image_uuid = original_image_uuid + instance_1._nfvi_instance.action = nfvi.objects.v1.INSTANCE_ACTION.NONE diff --git a/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py b/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py index c4ce8624..e173fd05 100755 --- a/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py +++ b/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py @@ -3,9 +3,10 @@ # # SPDX-License-Identifier: Apache-2.0 # -import uuid +import fixtures import mock import pprint +import uuid from nfv_common import strategy as common_strategy from nfv_vim import host_fsm @@ -23,161 +24,11 @@ from nfv_vim.tables._instance_table import InstanceTable from nfv_vim.tables._instance_group_table import InstanceGroupTable from nfv_vim.strategy._strategy import SwPatchStrategy, strategy_rebuild_from_dict -import utils - -# Constants - -# Globals -_tenant_table = Table() -_instance_type_table = Table() -_instance_table = InstanceTable() -_instance_group_table = InstanceGroupTable() -_host_table = HostTable() -_host_group_table = HostGroupTable() -_host_aggregate_table = HostAggregateTable() - -# Don't attempt to write to the database while unit testing -_tenant_table.persist = False -_instance_type_table.persist = False -_instance_table.persist = False -_instance_group_table.persist = False -_host_table.persist = False -_host_group_table.persist = 
False -_host_aggregate_table.persist = False +from . import testcase +from . import utils -def create_instance(instance_type_name, instance_name, host_name, - admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.UNLOCKED): - """ - Create an instance - """ - global _tenant_table, _instance_table - - tenant_uuid = str(uuid.uuid4()) - image_uuid = str(uuid.uuid4()) - - tenant = objects.Tenant(tenant_uuid, "%s_name" % tenant_uuid, '', True) - _tenant_table[tenant_uuid] = tenant - - for instance_type in _instance_type_table.values(): - if instance_type.name == instance_type_name: - instance_uuid = str(uuid.uuid4()) - - nfvi_instance = nfvi.objects.v1.Instance( - instance_uuid, instance_name, tenant_uuid, - admin_state=admin_state, - oper_state=nfvi.objects.v1.INSTANCE_OPER_STATE.ENABLED, - avail_status=list(), - action=nfvi.objects.v1.INSTANCE_ACTION.NONE, - host_name=host_name, - instance_type=utils.instance_type_to_flavor_dict( - instance_type), - image_uuid=image_uuid) - - instance = objects.Instance(nfvi_instance) - _instance_table[instance.uuid] = instance - return - - assert 0, "Unknown instance_type_name: %s" % instance_type_name - - -def create_instance_group(name, members, policies): - """ - Create an instance group - """ - global _instance_group_table - - member_uuids = [] - - for instance_uuid, instance in _instance_table.iteritems(): - if instance.name in members: - member_uuids.append(instance_uuid) - - nfvi_instance_group = nfvi.objects.v1.InstanceGroup( - uuid=str(uuid.uuid4()), - name=name, - member_uuids=member_uuids, - policies=policies - ) - - instance_group = objects.InstanceGroup(nfvi_instance_group) - _instance_group_table[instance_group.uuid] = instance_group - - -def create_host(host_name, - cpe=False, - admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.UNLOCKED): - """ - Create a host - """ - global _host_table - personality = '' - - if host_name.startswith('controller'): - personality = HOST_PERSONALITY.CONTROLLER - if cpe: - personality = personality + ',' + HOST_PERSONALITY.COMPUTE - elif host_name.startswith('compute'): - personality = HOST_PERSONALITY.COMPUTE - elif host_name.startswith('storage'): - personality = HOST_PERSONALITY.STORAGE - else: - assert 0, "Invalid host_name: %s" % host_name - - nfvi_host = nfvi.objects.v1.Host( - uuid=str(uuid.uuid4()), - name=host_name, - personality=personality, - admin_state=admin_state, - oper_state=nfvi.objects.v1.HOST_OPER_STATE.ENABLED, - avail_status=nfvi.objects.v1.HOST_AVAIL_STATUS.AVAILABLE, - action=nfvi.objects.v1.HOST_ACTION.NONE, - software_load='12.01', - target_load='12.01', - uptime='1000' - ) - - host = objects.Host(nfvi_host, - initial_state=host_fsm.HOST_STATE.ENABLED) - _host_table[host.name] = host - - -def create_host_group(name, members, policies): - """ - Create a host group - """ - global _host_group_table - - member_uuids = [] - - for instance_uuid, instance in _instance_table.iteritems(): - if instance.name in members: - member_uuids.append(instance_uuid) - - nfvi_host_group = nfvi.objects.v1.HostGroup( - name=name, - member_names=members, - policies=policies - ) - - host_group = objects.HostGroup(nfvi_host_group) - _host_group_table[host_group.name] = host_group - - -def create_host_aggregate(name, host_names): - """ - Create a host aggregate - """ - global _host_aggregate_table - - nfvi_host_aggregate = nfvi.objects.v1.HostAggregate( - name=name, - host_names=host_names, - availability_zone='' - ) - - host_aggregate = objects.HostAggregate(nfvi_host_aggregate) - _host_aggregate_table[host_aggregate.name] = 
host_aggregate +DEBUG_PRINTING = False def create_sw_patch_strategy( @@ -216,11 +67,12 @@ def validate_strategy_persists(strategy): strategy_dict = strategy.as_dict() new_strategy = strategy_rebuild_from_dict(strategy_dict) - if strategy.as_dict() != new_strategy.as_dict(): - print("==================== Strategy ====================") - pprint.pprint(strategy.as_dict()) - print("============== Converted Strategy ================") - pprint.pprint(new_strategy.as_dict()) + if DEBUG_PRINTING: + if strategy.as_dict() != new_strategy.as_dict(): + print("==================== Strategy ====================") + pprint.pprint(strategy.as_dict()) + print("============== Converted Strategy ================") + pprint.pprint(new_strategy.as_dict()) assert strategy.as_dict() == new_strategy.as_dict(), \ "Strategy changed when converting to/from dict" @@ -231,10 +83,11 @@ def validate_phase(phase, expected_results): Note: there is probably a super generic, pythonic way to do this, but this is good enough (tm). """ - print("====================== Phase Results ========================") - pprint.pprint(phase) - print("===================== Expected Results ======================") - pprint.pprint(expected_results) + if DEBUG_PRINTING: + print("====================== Phase Results ========================") + pprint.pprint(phase) + print("===================== Expected Results ======================") + pprint.pprint(expected_results) for key in expected_results: if key == 'stages': @@ -287,27 +140,51 @@ def fake_event_issue(a, b, c, d): return None -@mock.patch('nfv_vim.event_log._instance._event_issue', fake_event_issue) @mock.patch('nfv_vim.objects._sw_update.SwUpdate.save', fake_save) @mock.patch('nfv_vim.objects._sw_update.timers.timers_create_timer', fake_timer) -@mock.patch('nfv_vim.tables._tenant_table._tenant_table', _tenant_table) -@mock.patch('nfv_vim.tables._host_table._host_table', _host_table) -@mock.patch('nfv_vim.tables._instance_group_table._instance_group_table', _instance_group_table) -@mock.patch('nfv_vim.tables._host_group_table._host_group_table', _host_group_table) -@mock.patch('nfv_vim.tables._host_aggregate_table._host_aggregate_table', _host_aggregate_table) -@mock.patch('nfv_vim.tables._instance_table._instance_table', _instance_table) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', fake_host_name) -class TestSwPatchStrategy(object): +@mock.patch('nfv_vim.event_log._instance._event_issue', fake_event_issue) +class TestSwPatchStrategy(testcase.NFVTestCase): - def setup(self): + _tenant_table = Table() + _instance_type_table = Table() + _instance_table = InstanceTable() + _instance_group_table = InstanceGroupTable() + _host_table = HostTable() + _host_group_table = HostGroupTable() + _host_aggregate_table = HostAggregateTable() + + # Don't attempt to write to the database while unit testing + _tenant_table.persist = False + _instance_type_table.persist = False + _instance_table.persist = False + _instance_group_table.persist = False + _host_table.persist = False + _host_group_table.persist = False + _host_aggregate_table.persist = False + + def setUp(self): """ Setup for testing. 
""" - global _instance_type_table + super(TestSwPatchStrategy, self).setUp() + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._tenant_table._tenant_table', + self._tenant_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_table._host_table', + self._host_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_group_table._instance_group_table', + self._instance_group_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_group_table._host_group_table', + self._host_group_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_aggregate_table._host_aggregate_table', + self._host_aggregate_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_table._instance_table', + self._instance_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_type_table._instance_type_table', + self._instance_type_table)) instance_type_uuid = str(uuid.uuid4()) - - if 0 == len(_instance_type_table): + if 0 == len(self._instance_type_table): instance_type = objects.InstanceType(instance_type_uuid, 'small') instance_type.update_details(vcpus=1, mem_mb=64, disk_gb=1, ephemeral_gb=0, swap_gb=0, guest_services=None, @@ -315,19 +192,141 @@ class TestSwPatchStrategy(object): live_migration_timeout=800, live_migration_max_downtime=500, storage_type='local_image') - _instance_type_table[instance_type_uuid] = instance_type + self._instance_type_table[instance_type_uuid] = instance_type - def teardown(self): + def tearDown(self): """ Cleanup testing setup. """ - _tenant_table.clear() - _instance_type_table.clear() - _instance_table.clear() - _instance_group_table.clear() - _host_table.clear() - _host_group_table.clear() - _host_aggregate_table.clear() + super(TestSwPatchStrategy, self).tearDown() + self._tenant_table.clear() + self._instance_type_table.clear() + self._instance_table.clear() + self._instance_group_table.clear() + self._host_table.clear() + self._host_group_table.clear() + self._host_aggregate_table.clear() + + def create_instance(self, instance_type_name, instance_name, host_name, + admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.UNLOCKED): + """ + Create an instance + """ + tenant_uuid = str(uuid.uuid4()) + image_uuid = str(uuid.uuid4()) + + tenant = objects.Tenant(tenant_uuid, "%s_name" % tenant_uuid, '', True) + self._tenant_table[tenant_uuid] = tenant + + for instance_type in self._instance_type_table.values(): + if instance_type.name == instance_type_name: + instance_uuid = str(uuid.uuid4()) + + nfvi_instance = nfvi.objects.v1.Instance( + instance_uuid, instance_name, tenant_uuid, + admin_state=admin_state, + oper_state=nfvi.objects.v1.INSTANCE_OPER_STATE.ENABLED, + avail_status=list(), + action=nfvi.objects.v1.INSTANCE_ACTION.NONE, + host_name=host_name, + instance_type=utils.instance_type_to_flavor_dict( + instance_type), + image_uuid=image_uuid) + + instance = objects.Instance(nfvi_instance) + self._instance_table[instance.uuid] = instance + return + + assert 0, "Unknown instance_type_name: %s" % instance_type_name + + def create_instance_group(self, name, members, policies): + """ + Create an instance group + """ + member_uuids = [] + + for instance_uuid, instance in self._instance_table.iteritems(): + if instance.name in members: + member_uuids.append(instance_uuid) + + nfvi_instance_group = nfvi.objects.v1.InstanceGroup( + uuid=str(uuid.uuid4()), + name=name, + member_uuids=member_uuids, + policies=policies + ) + + instance_group = objects.InstanceGroup(nfvi_instance_group) + 
self._instance_group_table[instance_group.uuid] = instance_group + + def create_host(self, + host_name, + cpe=False, + admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.UNLOCKED): + """ + Create a host + """ + personality = '' + + if host_name.startswith('controller'): + personality = HOST_PERSONALITY.CONTROLLER + if cpe: + personality = personality + ',' + HOST_PERSONALITY.COMPUTE + elif host_name.startswith('compute'): + personality = HOST_PERSONALITY.COMPUTE + elif host_name.startswith('storage'): + personality = HOST_PERSONALITY.STORAGE + else: + assert 0, "Invalid host_name: %s" % host_name + + nfvi_host = nfvi.objects.v1.Host( + uuid=str(uuid.uuid4()), + name=host_name, + personality=personality, + admin_state=admin_state, + oper_state=nfvi.objects.v1.HOST_OPER_STATE.ENABLED, + avail_status=nfvi.objects.v1.HOST_AVAIL_STATUS.AVAILABLE, + action=nfvi.objects.v1.HOST_ACTION.NONE, + software_load='12.01', + target_load='12.01', + uptime='1000' + ) + + host = objects.Host(nfvi_host, + initial_state=host_fsm.HOST_STATE.ENABLED) + self._host_table[host.name] = host + + def create_host_group(self, name, members, policies): + """ + Create a host group + """ + member_uuids = [] + + for instance_uuid, instance in self._instance_table.iteritems(): + if instance.name in members: + member_uuids.append(instance_uuid) + + nfvi_host_group = nfvi.objects.v1.HostGroup( + name=name, + member_names=members, + policies=policies + ) + + host_group = objects.HostGroup(nfvi_host_group) + self._host_group_table[host_group.name] = host_group + + def create_host_aggregate(self, name, host_names): + """ + Create a host aggregate + """ + nfvi_host_aggregate = nfvi.objects.v1.HostAggregate( + name=name, + host_names=host_names, + availability_zone='' + ) + + host_aggregate = objects.HostAggregate(nfvi_host_aggregate) + self._host_aggregate_table[host_aggregate.name] = host_aggregate def test_sw_patch_strategy_compute_stages_ignore(self): """ @@ -337,24 +336,24 @@ class TestSwPatchStrategy(object): Verify: - stages not created """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - create_instance_group('instance_group_1', - ['test_instance_0', 'test_instance_1'], - [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + self.create_instance_group('instance_group_1', + ['test_instance_0', 'test_instance_1'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -389,24 +388,24 @@ class TestSwPatchStrategy(object): - hosts with no instances patched first - anti-affinity policy enforced """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 
'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - create_instance_group('instance_group_1', - ['test_instance_0', 'test_instance_1'], - [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + self.create_instance_group('instance_group_1', + ['test_instance_0', 'test_instance_1'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -495,28 +494,28 @@ class TestSwPatchStrategy(object): - hosts with no instances patched first - instances migrated """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') - create_host('compute-4') - create_host('compute-5') - create_host('compute-6') - create_host('compute-7') - create_host('compute-8') - create_host('compute-9') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + self.create_host('compute-5') + self.create_host('compute-6') + self.create_host('compute-7') + self.create_host('compute-8') + self.create_host('compute-9') - create_instance('small', "test_instance_0", 'compute-0') - create_instance('small', "test_instance_2", 'compute-2') - create_instance('small', "test_instance_3", 'compute-3') - create_instance('small', "test_instance_4", 'compute-4') - create_instance('small', "test_instance_6", 'compute-6') - create_instance('small', "test_instance_7", 'compute-7') - create_instance('small', "test_instance_8", 'compute-8') - create_instance('small', "test_instance_9", 'compute-9') + self.create_instance('small', "test_instance_0", 'compute-0') + self.create_instance('small', "test_instance_2", 'compute-2') + self.create_instance('small', "test_instance_3", 'compute-3') + self.create_instance('small', "test_instance_4", 'compute-4') + self.create_instance('small', "test_instance_6", 'compute-6') + self.create_instance('small', "test_instance_7", 'compute-7') + self.create_instance('small', "test_instance_8", 'compute-8') + self.create_instance('small', "test_instance_9", 'compute-9') compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -647,39 +646,39 @@ class TestSwPatchStrategy(object): - hosts with no instances patched first - host aggregate limits enforced """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') - create_host('compute-4') - create_host('compute-5') - create_host('compute-6') - create_host('compute-7') - create_host('compute-8') - create_host('compute-9') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + self.create_host('compute-5') + self.create_host('compute-6') + self.create_host('compute-7') + self.create_host('compute-8') + self.create_host('compute-9') - create_host_aggregate('aggregate-1', ['compute-0', - 'compute-1', - 'compute-2', - 'compute-3', - 'compute-4']) - create_host_aggregate('aggregate-2', ['compute-5', - 'compute-6', - 'compute-7', - 'compute-8', - 
'compute-9']) + self.create_host_aggregate('aggregate-1', ['compute-0', + 'compute-1', + 'compute-2', + 'compute-3', + 'compute-4']) + self.create_host_aggregate('aggregate-2', ['compute-5', + 'compute-6', + 'compute-7', + 'compute-8', + 'compute-9']) - create_instance('small', "test_instance_0", 'compute-0') - create_instance('small', "test_instance_2", 'compute-2') - create_instance('small', "test_instance_3", 'compute-3') - create_instance('small', "test_instance_4", 'compute-4') - create_instance('small', "test_instance_6", 'compute-6') - create_instance('small', "test_instance_7", 'compute-7') - create_instance('small', "test_instance_8", 'compute-8') - create_instance('small', "test_instance_9", 'compute-9') + self.create_instance('small', "test_instance_0", 'compute-0') + self.create_instance('small', "test_instance_2", 'compute-2') + self.create_instance('small', "test_instance_3", 'compute-3') + self.create_instance('small', "test_instance_4", 'compute-4') + self.create_instance('small', "test_instance_6", 'compute-6') + self.create_instance('small', "test_instance_7", 'compute-7') + self.create_instance('small', "test_instance_8", 'compute-8') + self.create_instance('small', "test_instance_9", 'compute-9') compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -810,49 +809,49 @@ class TestSwPatchStrategy(object): - hosts with no instances patched first - host aggregate limits enforced """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') - create_host('compute-4') - create_host('compute-5') - create_host('compute-6') - create_host('compute-7') - create_host('compute-8') - create_host('compute-9') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + self.create_host('compute-5') + self.create_host('compute-6') + self.create_host('compute-7') + self.create_host('compute-8') + self.create_host('compute-9') - create_host_aggregate('aggregate-1', ['compute-0', - 'compute-1', - 'compute-2', - 'compute-3', - 'compute-4']) - create_host_aggregate('aggregate-2', ['compute-5', - 'compute-6', - 'compute-7', - 'compute-8', - 'compute-9']) - create_host_aggregate('aggregate-3', ['compute-0', - 'compute-1', - 'compute-2', - 'compute-3', - 'compute-4', - 'compute-5', - 'compute-6', - 'compute-7', - 'compute-8', - 'compute-9']) + self.create_host_aggregate('aggregate-1', ['compute-0', + 'compute-1', + 'compute-2', + 'compute-3', + 'compute-4']) + self.create_host_aggregate('aggregate-2', ['compute-5', + 'compute-6', + 'compute-7', + 'compute-8', + 'compute-9']) + self.create_host_aggregate('aggregate-3', ['compute-0', + 'compute-1', + 'compute-2', + 'compute-3', + 'compute-4', + 'compute-5', + 'compute-6', + 'compute-7', + 'compute-8', + 'compute-9']) - create_instance('small', "test_instance_0", 'compute-0') - create_instance('small', "test_instance_2", 'compute-2') - create_instance('small', "test_instance_3", 'compute-3') - create_instance('small', "test_instance_4", 'compute-4') - create_instance('small', "test_instance_6", 'compute-6') - create_instance('small', "test_instance_7", 'compute-7') - create_instance('small', "test_instance_8", 'compute-8') - create_instance('small', "test_instance_9", 'compute-9') + self.create_instance('small', 
"test_instance_0", 'compute-0') + self.create_instance('small', "test_instance_2", 'compute-2') + self.create_instance('small', "test_instance_3", 'compute-3') + self.create_instance('small', "test_instance_4", 'compute-4') + self.create_instance('small', "test_instance_6", 'compute-6') + self.create_instance('small', "test_instance_7", 'compute-7') + self.create_instance('small', "test_instance_8", 'compute-8') + self.create_instance('small', "test_instance_9", 'compute-9') compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -983,41 +982,41 @@ class TestSwPatchStrategy(object): - hosts with no instances patched first - small host aggregate handled """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') - create_host('compute-4') - create_host('compute-5') - create_host('compute-6') - create_host('compute-7') - create_host('compute-8') - create_host('compute-9') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + self.create_host('compute-5') + self.create_host('compute-6') + self.create_host('compute-7') + self.create_host('compute-8') + self.create_host('compute-9') - create_host_aggregate('aggregate-1', ['compute-0', - 'compute-1']) - create_host_aggregate('aggregate-2', ['compute-2', - 'compute-3', - 'compute-4', - 'compute-5', - 'compute-6']) - create_host_aggregate('aggregate-3', ['compute-7', - 'compute-8', - 'compute-9']) + self.create_host_aggregate('aggregate-1', ['compute-0', + 'compute-1']) + self.create_host_aggregate('aggregate-2', ['compute-2', + 'compute-3', + 'compute-4', + 'compute-5', + 'compute-6']) + self.create_host_aggregate('aggregate-3', ['compute-7', + 'compute-8', + 'compute-9']) - create_instance('small', "test_instance_0", 'compute-0') - create_instance('small', "test_instance_1", 'compute-1') - create_instance('small', "test_instance_2", 'compute-2') - create_instance('small', "test_instance_3", 'compute-3') - create_instance('small', "test_instance_4", 'compute-4') - create_instance('small', "test_instance_5", 'compute-5') - create_instance('small', "test_instance_6", 'compute-6') - create_instance('small', "test_instance_7", 'compute-7') - create_instance('small', "test_instance_8", 'compute-8') - create_instance('small', "test_instance_9", 'compute-9') + self.create_instance('small', "test_instance_0", 'compute-0') + self.create_instance('small', "test_instance_1", 'compute-1') + self.create_instance('small', "test_instance_2", 'compute-2') + self.create_instance('small', "test_instance_3", 'compute-3') + self.create_instance('small', "test_instance_4", 'compute-4') + self.create_instance('small', "test_instance_5", 'compute-5') + self.create_instance('small', "test_instance_6", 'compute-6') + self.create_instance('small', "test_instance_7", 'compute-7') + self.create_instance('small', "test_instance_8", 'compute-8') + self.create_instance('small', "test_instance_9", 'compute-9') compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -1152,24 +1151,24 @@ class TestSwPatchStrategy(object): - hosts with no instances patched first - 
anti-affinity policy enforced """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - create_instance_group('instance_group_1', - ['test_instance_0', 'test_instance_1'], - [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + self.create_instance_group('instance_group_1', + ['test_instance_0', 'test_instance_1'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -1259,25 +1258,25 @@ class TestSwPatchStrategy(object): Verify: - stage creation fails """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1', - admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED) + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1', + admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED) - create_instance_group('instance_group_1', - ['test_instance_0', 'test_instance_1'], - [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + self.create_instance_group('instance_group_1', + ['test_instance_0', 'test_instance_1'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -1304,22 +1303,22 @@ class TestSwPatchStrategy(object): - hosts with no instances patched first - host aggregate limits enforced """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_host_aggregate('aggregate-1', ['compute-0', 'compute-1']) + self.create_host_aggregate('aggregate-1', ['compute-0', 'compute-1']) - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -1453,21 +1452,21 @@ class TestSwPatchStrategy(object): - hosts with no instances patched first - locked host patched and rebooted """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3', - 
admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED) + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3', + admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED) - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -1538,23 +1537,23 @@ class TestSwPatchStrategy(object): - host aggregate limits enforced - locked instance not stopped or started """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_host_aggregate('aggregate-1', ['compute-0', 'compute-1']) + self.create_host_aggregate('aggregate-1', ['compute-0', 'compute-1']) - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1', - admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED) + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1', + admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -1640,21 +1639,21 @@ class TestSwPatchStrategy(object): Verify: - host aggregates with a single host are patched in parallel """ - create_host('compute-0') - create_host('compute-1') + self.create_host('compute-0') + self.create_host('compute-1') - create_host_aggregate('aggregate-1', ['compute-0']) - create_host_aggregate('aggregate-2', ['compute-1']) + self.create_host_aggregate('aggregate-1', ['compute-0']) + self.create_host_aggregate('aggregate-2', ['compute-1']) - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -1708,32 +1707,32 @@ class TestSwPatchStrategy(object): - hosts with no instances patched first - anti-affinity policy and host aggregates enforced at same time """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_host_aggregate('aggregate-1', ['compute-1', 'compute-2']) + self.create_host_aggregate('aggregate-1', ['compute-1', 'compute-2']) - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') - create_instance('small', - "test_instance_2", - 
'compute-2') - create_instance('small', - "test_instance_3", - 'compute-3') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') + self.create_instance('small', + "test_instance_2", + 'compute-2') + self.create_instance('small', + "test_instance_3", + 'compute-3') - create_instance_group('instance_group_1', - ['test_instance_0', 'test_instance_1'], - [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + self.create_instance_group('instance_group_1', + ['test_instance_0', 'test_instance_1'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -1807,24 +1806,24 @@ class TestSwPatchStrategy(object): Verify: - hosts with no instances patched first """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - create_instance_group('instance_group_1', - ['test_instance_0', 'test_instance_1'], - [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + self.create_instance_group('instance_group_1', + ['test_instance_0', 'test_instance_1'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -1990,28 +1989,28 @@ class TestSwPatchStrategy(object): - hosts with no instances patched first - locked host patched and rebooted """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2', - admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED) - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2', + admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED) + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') - create_instance('small', - "test_instance_2", - 'compute-3') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') + self.create_instance('small', + "test_instance_2", + 'compute-3') - create_instance_group('instance_group_1', - ['test_instance_0', 'test_instance_1'], - [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + self.create_instance_group('instance_group_1', + ['test_instance_0', 'test_instance_1'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -2180,10 +2179,10 @@ class TestSwPatchStrategy(object): - maximum host limit enforced """ for x in range(0, 13): - 
create_host('compute-%02d' % x) + self.create_host('compute-%02d' % x) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -2296,24 +2295,24 @@ class TestSwPatchStrategy(object): Verify: - hosts with no instances patched first """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - create_instance_group('instance_group_1', - ['test_instance_0', 'test_instance_1'], - [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + self.create_instance_group('instance_group_1', + ['test_instance_0', 'test_instance_1'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -2480,25 +2479,25 @@ class TestSwPatchStrategy(object): - hosts with no instances patched first - locked instance is not migrated """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0', - admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED) - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0', + admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED) + self.create_instance('small', + "test_instance_1", + 'compute-1') - create_instance_group('instance_group_1', - ['test_instance_0', 'test_instance_1'], - [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + self.create_instance_group('instance_group_1', + ['test_instance_0', 'test_instance_1'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -2583,20 +2582,20 @@ class TestSwPatchStrategy(object): Verify: - stages not created """ - create_host('storage-0') - create_host('storage-1') - create_host('storage-2') - create_host('storage-3') + self.create_host('storage-0') + self.create_host('storage-1') + self.create_host('storage-2') + self.create_host('storage-3') - create_host_group('group-0', - ['storage-0', 'storage-1'], - [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) - create_host_group('group-1', - ['storage-2', 'storage-3'], - [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) + self.create_host_group('group-0', + ['storage-0', 'storage-1'], + [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) + self.create_host_group('group-1', + ['storage-2', 'storage-3'], + [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) storage_hosts 
= [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.STORAGE in host.personality: storage_hosts.append(host) # Sort hosts so the order of the steps is deterministic @@ -2630,20 +2629,20 @@ class TestSwPatchStrategy(object): Verify: - host groups enforced """ - create_host('storage-0') - create_host('storage-1') - create_host('storage-2') - create_host('storage-3') + self.create_host('storage-0') + self.create_host('storage-1') + self.create_host('storage-2') + self.create_host('storage-3') - create_host_group('group-0', - ['storage-0', 'storage-1'], - [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) - create_host_group('group-1', - ['storage-2', 'storage-3'], - [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) + self.create_host_group('group-0', + ['storage-0', 'storage-1'], + [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) + self.create_host_group('group-1', + ['storage-2', 'storage-3'], + [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) storage_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.STORAGE in host.personality: storage_hosts.append(host) # Sort hosts so the order of the steps is deterministic @@ -2754,20 +2753,20 @@ class TestSwPatchStrategy(object): Test the sw_patch strategy add storage strategy stages: - serial apply """ - create_host('storage-0') - create_host('storage-1') - create_host('storage-2') - create_host('storage-3') + self.create_host('storage-0') + self.create_host('storage-1') + self.create_host('storage-2') + self.create_host('storage-3') - create_host_group('group-0', - ['storage-0', 'storage-1'], - [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) - create_host_group('group-1', - ['storage-2', 'storage-3'], - [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) + self.create_host_group('group-0', + ['storage-0', 'storage-1'], + [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) + self.create_host_group('group-1', + ['storage-2', 'storage-3'], + [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) storage_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.STORAGE in host.personality: storage_hosts.append(host) # Sort hosts so the order of the steps is deterministic @@ -2886,11 +2885,11 @@ class TestSwPatchStrategy(object): Verify: - stages not created """ - create_host('controller-0') - create_host('controller-1') + self.create_host('controller-0') + self.create_host('controller-1') controller_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.CONTROLLER in host.personality: controller_hosts.append(host) @@ -2921,11 +2920,11 @@ class TestSwPatchStrategy(object): Verify: - patch mate controller first """ - create_host('controller-0') - create_host('controller-1') + self.create_host('controller-0') + self.create_host('controller-1') controller_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.CONTROLLER in host.personality: controller_hosts.append(host) @@ -3031,18 +3030,18 @@ class TestSwPatchStrategy(object): - stop start instance action - test both reboot and no reboot cases """ - create_host('controller-0', cpe=True) - create_host('controller-1', cpe=True) + self.create_host('controller-0', cpe=True) + self.create_host('controller-1', cpe=True) - create_instance('small', - "test_instance_0", - 'controller-0') - 
create_instance('small', - "test_instance_1", - 'controller-1') + self.create_instance('small', + "test_instance_0", + 'controller-0') + self.create_instance('small', + "test_instance_1", + 'controller-1') compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -3157,18 +3156,18 @@ class TestSwPatchStrategy(object): - serial apply - stop start instance action """ - create_host('controller-0', cpe=True) - create_host('controller-1', cpe=True) + self.create_host('controller-0', cpe=True) + self.create_host('controller-1', cpe=True) - create_instance('small', - "test_instance_0", - 'controller-0') - create_instance('small', - "test_instance_1", - 'controller-1') + self.create_instance('small', + "test_instance_0", + 'controller-0') + self.create_instance('small', + "test_instance_1", + 'controller-1') compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -3245,11 +3244,11 @@ class TestSwPatchStrategy(object): - serial apply - stop start instance action """ - create_host('controller-0', cpe=True) - create_host('controller-1', cpe=True) + self.create_host('controller-0', cpe=True) + self.create_host('controller-1', cpe=True) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic @@ -3319,17 +3318,17 @@ class TestSwPatchStrategy(object): Verify: - stage creation fails """ - create_host('controller-0', cpe=True) + self.create_host('controller-0', cpe=True) - create_instance('small', - "test_instance_0", - 'controller-0') - create_instance('small', - "test_instance_1", - 'controller-0') + self.create_instance('small', + "test_instance_0", + 'controller-0') + self.create_instance('small', + "test_instance_1", + 'controller-0') compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) @@ -3352,14 +3351,14 @@ class TestSwPatchStrategy(object): - serial apply - stop start instance action """ - create_host('controller-0', cpe=True) + self.create_host('controller-0', cpe=True) - create_instance('small', - "test_instance_0", - 'controller-0') + self.create_instance('small', + "test_instance_0", + 'controller-0') compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) @@ -3411,10 +3410,10 @@ class TestSwPatchStrategy(object): - serial apply - stop start instance action """ - create_host('controller-0', cpe=True) + self.create_host('controller-0', cpe=True) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) @@ -3463,12 +3462,12 @@ class TestSwPatchStrategy(object): - hosts with no instances patched first - anti-affinity policy enforced """ - create_host('compute-0') - create_host('compute-1') + self.create_host('compute-0') + self.create_host('compute-1') - create_instance('small', - "test_instance_0", - 'compute-0') + 
self.create_instance('small', + "test_instance_0", + 'compute-0') strategy = create_sw_patch_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL, diff --git a/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_upgrade_strategy.py b/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_upgrade_strategy.py index 4f934952..593e3b92 100755 --- a/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_upgrade_strategy.py +++ b/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_upgrade_strategy.py @@ -3,10 +3,11 @@ # # SPDX-License-Identifier: Apache-2.0 # -import uuid +import fixtures import mock import pprint -from nose.tools import nottest +import testtools +import uuid from nfv_common import strategy as common_strategy from nfv_vim import host_fsm @@ -26,188 +27,11 @@ from nfv_vim.strategy._strategy import SwUpgradeStrategy, strategy_rebuild_from_ from nfv_vim.nfvi.objects.v1 import UPGRADE_STATE -import utils - -# Constants - -# Globals -_tenant_table = Table() -_instance_type_table = Table() -_instance_table = InstanceTable() -_instance_group_table = InstanceGroupTable() -_host_table = HostTable() -_host_group_table = HostGroupTable() -_host_aggregate_table = HostAggregateTable() - -# Don't attempt to write to the database while unit testing -_tenant_table.persist = False -_instance_type_table.persist = False -_instance_table.persist = False -_instance_group_table.persist = False -_host_table.persist = False -_host_group_table.persist = False -_host_aggregate_table.persist = False +from . import testcase +from . import utils -def create_instance(instance_type_name, instance_name, host_name, - admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.UNLOCKED): - """ - Create an instance - """ - global _tenant_table, _instance_table - - tenant_uuid = str(uuid.uuid4()) - image_uuid = str(uuid.uuid4()) - - tenant = objects.Tenant(tenant_uuid, "%s_name" % tenant_uuid, '', True) - _tenant_table[tenant_uuid] = tenant - - for instance_type in _instance_type_table.values(): - if instance_type.name == instance_type_name: - instance_uuid = str(uuid.uuid4()) - - nfvi_instance = nfvi.objects.v1.Instance( - instance_uuid, instance_name, tenant_uuid, - admin_state=admin_state, - oper_state=nfvi.objects.v1.INSTANCE_OPER_STATE.ENABLED, - avail_status=list(), - action=nfvi.objects.v1.INSTANCE_ACTION.NONE, - host_name=host_name, - instance_type=utils.instance_type_to_flavor_dict(instance_type), - image_uuid=image_uuid) - - instance = objects.Instance(nfvi_instance) - _instance_table[instance.uuid] = instance - return - - assert 0, "Unknown instance_type_name: %s" % instance_type_name - - -def create_instance_group(name, members, policies): - """ - Create an instance group - """ - global _instance_group_table - - member_uuids = [] - - for instance_uuid, instance in _instance_table.iteritems(): - if instance.name in members: - member_uuids.append(instance_uuid) - - nfvi_instance_group = nfvi.objects.v1.InstanceGroup( - uuid=str(uuid.uuid4()), - name=name, - member_uuids=member_uuids, - policies=policies - ) - - instance_group = objects.InstanceGroup(nfvi_instance_group) - _instance_group_table[instance_group.uuid] = instance_group - - -def create_host(host_name, - cpe=False, - admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.UNLOCKED, - software_load='12.01', - target_load='12.01'): - """ - Create a host - """ - global _host_table - personality = '' - - if host_name.startswith('controller'): - personality = HOST_PERSONALITY.CONTROLLER - if cpe: - personality = personality + ',' + HOST_PERSONALITY.COMPUTE - elif host_name.startswith('compute'): - 
personality = HOST_PERSONALITY.COMPUTE - elif host_name.startswith('storage'): - personality = HOST_PERSONALITY.STORAGE - else: - assert 0, "Invalid host_name: %s" % host_name - - nfvi_host = nfvi.objects.v1.Host( - uuid=str(uuid.uuid4()), - name=host_name, - personality=personality, - admin_state=admin_state, - oper_state=nfvi.objects.v1.HOST_OPER_STATE.ENABLED, - avail_status=nfvi.objects.v1.HOST_AVAIL_STATUS.AVAILABLE, - action=nfvi.objects.v1.HOST_ACTION.NONE, - software_load=software_load, - target_load=target_load, - uptime='1000' - ) - - host = objects.Host(nfvi_host, - initial_state=host_fsm.HOST_STATE.ENABLED) - _host_table[host.name] = host - - -def create_host_group(name, members, policies): - """ - Create a host group - """ - global _host_group_table - - member_uuids = [] - - for instance_uuid, instance in _instance_table.iteritems(): - if instance.name in members: - member_uuids.append(instance_uuid) - - nfvi_host_group = nfvi.objects.v1.HostGroup( - name=name, - member_names=members, - policies=policies - ) - - host_group = objects.HostGroup(nfvi_host_group) - _host_group_table[host_group.name] = host_group - - -def create_host_aggregate(name, host_names): - """ - Create a host aggregate - """ - global _host_aggregate_table - - nfvi_host_aggregate = nfvi.objects.v1.HostAggregate( - name=name, - host_names=host_names, - availability_zone='' - ) - - host_aggregate = objects.HostAggregate(nfvi_host_aggregate) - _host_aggregate_table[host_aggregate.name] = host_aggregate - - -def create_sw_upgrade_strategy( - storage_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE, - compute_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE, - max_parallel_compute_hosts=10, - alarm_restrictions=SW_UPDATE_ALARM_RESTRICTION.STRICT, - start_upgrade=False, - complete_upgrade=False, - nfvi_upgrade=None -): - """ - Create a software update strategy - """ - strategy = SwUpgradeStrategy( - uuid=str(uuid.uuid4()), - storage_apply_type=storage_apply_type, - compute_apply_type=compute_apply_type, - max_parallel_compute_hosts=max_parallel_compute_hosts, - alarm_restrictions=alarm_restrictions, - start_upgrade=start_upgrade, - complete_upgrade=complete_upgrade, - ignore_alarms=[] - ) - strategy.nfvi_upgrade = nfvi_upgrade - return strategy +DEBUG_PRINTING = False def validate_strategy_persists(strategy): @@ -220,11 +44,12 @@ def validate_strategy_persists(strategy): strategy_dict = strategy.as_dict() new_strategy = strategy_rebuild_from_dict(strategy_dict) - if strategy.as_dict() != new_strategy.as_dict(): - print("==================== Strategy ====================") - pprint.pprint(strategy.as_dict()) - print("============== Converted Strategy ================") - pprint.pprint(new_strategy.as_dict()) + if DEBUG_PRINTING: + if strategy.as_dict() != new_strategy.as_dict(): + print("==================== Strategy ====================") + pprint.pprint(strategy.as_dict()) + print("============== Converted Strategy ================") + pprint.pprint(new_strategy.as_dict()) assert strategy.as_dict() == new_strategy.as_dict(), \ "Strategy changed when converting to/from dict" @@ -235,10 +60,11 @@ def validate_phase(phase, expected_results): Note: there is probably a super generic, pythonic way to do this, but this is good enough (tm). 
""" - print("====================== Phase Results ========================") - pprint.pprint(phase) - print("===================== Expected Results ======================") - pprint.pprint(expected_results) + if DEBUG_PRINTING: + print("====================== Phase Results ========================") + pprint.pprint(phase) + print("===================== Expected Results ======================") + pprint.pprint(expected_results) for key in expected_results: if key == 'stages': @@ -298,23 +124,44 @@ def fake_event_issue(a, b, c, d): @mock.patch('nfv_vim.event_log._instance._event_issue', fake_event_issue) @mock.patch('nfv_vim.objects._sw_update.SwUpdate.save', fake_save) @mock.patch('nfv_vim.objects._sw_update.timers.timers_create_timer', fake_timer) -@mock.patch('nfv_vim.tables._tenant_table._tenant_table', _tenant_table) -@mock.patch('nfv_vim.tables._host_table._host_table', _host_table) -@mock.patch('nfv_vim.tables._instance_group_table._instance_group_table', _instance_group_table) -@mock.patch('nfv_vim.tables._host_group_table._host_group_table', _host_group_table) -@mock.patch('nfv_vim.tables._host_aggregate_table._host_aggregate_table', _host_aggregate_table) -@mock.patch('nfv_vim.tables._instance_table._instance_table', _instance_table) -class TestSwUpgradeStrategy(object): +class TestSwUpgradeStrategy(testcase.NFVTestCase): + _tenant_table = Table() + _instance_type_table = Table() + _instance_table = InstanceTable() + _instance_group_table = InstanceGroupTable() + _host_table = HostTable() + _host_group_table = HostGroupTable() + _host_aggregate_table = HostAggregateTable() - def setup(self): - """ - Setup for testing. - """ - global _instance_type_table + # Don't attempt to write to the database while unit testing + _tenant_table.persist = False + _instance_type_table.persist = False + _instance_table.persist = False + _instance_group_table.persist = False + _host_table.persist = False + _host_group_table.persist = False + _host_aggregate_table.persist = False + + def setUp(self): + super(TestSwUpgradeStrategy, self).setUp() + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_aggregate_table._host_aggregate_table', + self._host_aggregate_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_group_table._host_group_table', + self._host_group_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_table._host_table', + self._host_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_group_table._instance_group_table', + self._instance_group_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_table._instance_table', + self._instance_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_type_table._instance_type_table', + self._instance_type_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._tenant_table._tenant_table', + self._tenant_table)) instance_type_uuid = str(uuid.uuid4()) - if 0 == len(_instance_type_table): + if 0 == len(self._instance_type_table): instance_type = objects.InstanceType(instance_type_uuid, 'small') instance_type.update_details(vcpus=1, mem_mb=64, disk_gb=1, ephemeral_gb=0, swap_gb=0, guest_services=None, @@ -322,19 +169,166 @@ class TestSwUpgradeStrategy(object): live_migration_timeout=800, live_migration_max_downtime=500, storage_type='local_image') - _instance_type_table[instance_type_uuid] = instance_type + self._instance_type_table[instance_type_uuid] = instance_type - def teardown(self): + def tearDown(self): """ Cleanup testing setup. 
""" - _tenant_table.clear() - _instance_type_table.clear() - _instance_table.clear() - _instance_group_table.clear() - _host_table.clear() - _host_group_table.clear() - _host_aggregate_table.clear() + super(TestSwUpgradeStrategy, self).tearDown() + self._tenant_table.clear() + self._instance_type_table.clear() + self._instance_table.clear() + self._instance_group_table.clear() + self._host_table.clear() + self._host_group_table.clear() + self._host_aggregate_table.clear() + + def create_instance(self, instance_type_name, instance_name, host_name, + admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.UNLOCKED): + """ + Create an instance + """ + tenant_uuid = str(uuid.uuid4()) + image_uuid = str(uuid.uuid4()) + + tenant = objects.Tenant(tenant_uuid, "%s_name" % tenant_uuid, '', True) + self._tenant_table[tenant_uuid] = tenant + + for instance_type in self._instance_type_table.values(): + if instance_type.name == instance_type_name: + instance_uuid = str(uuid.uuid4()) + + nfvi_instance = nfvi.objects.v1.Instance( + instance_uuid, instance_name, tenant_uuid, + admin_state=admin_state, + oper_state=nfvi.objects.v1.INSTANCE_OPER_STATE.ENABLED, + avail_status=list(), + action=nfvi.objects.v1.INSTANCE_ACTION.NONE, + host_name=host_name, + instance_type=utils.instance_type_to_flavor_dict(instance_type), + image_uuid=image_uuid) + + instance = objects.Instance(nfvi_instance) + self._instance_table[instance.uuid] = instance + return + + assert 0, "Unknown instance_type_name: %s" % instance_type_name + + def create_instance_group(self, name, members, policies): + """ + Create an instance group + """ + member_uuids = [] + + for instance_uuid, instance in self._instance_table.iteritems(): + if instance.name in members: + member_uuids.append(instance_uuid) + + nfvi_instance_group = nfvi.objects.v1.InstanceGroup( + uuid=str(uuid.uuid4()), + name=name, + member_uuids=member_uuids, + policies=policies + ) + + instance_group = objects.InstanceGroup(nfvi_instance_group) + self._instance_group_table[instance_group.uuid] = instance_group + + def create_host(self, + host_name, + cpe=False, + admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.UNLOCKED, + software_load='12.01', + target_load='12.01'): + """ + Create a host + """ + personality = '' + if host_name.startswith('controller'): + personality = HOST_PERSONALITY.CONTROLLER + if cpe: + personality = personality + ',' + HOST_PERSONALITY.COMPUTE + elif host_name.startswith('compute'): + personality = HOST_PERSONALITY.COMPUTE + elif host_name.startswith('storage'): + personality = HOST_PERSONALITY.STORAGE + else: + assert 0, "Invalid host_name: %s" % host_name + + nfvi_host = nfvi.objects.v1.Host( + uuid=str(uuid.uuid4()), + name=host_name, + personality=personality, + admin_state=admin_state, + oper_state=nfvi.objects.v1.HOST_OPER_STATE.ENABLED, + avail_status=nfvi.objects.v1.HOST_AVAIL_STATUS.AVAILABLE, + action=nfvi.objects.v1.HOST_ACTION.NONE, + software_load=software_load, + target_load=target_load, + uptime='1000' + ) + + host = objects.Host(nfvi_host, + initial_state=host_fsm.HOST_STATE.ENABLED) + self._host_table[host.name] = host + + def create_host_group(self, name, members, policies): + """ + Create a host group + """ + member_uuids = [] + + for instance_uuid, instance in self._instance_table.iteritems(): + if instance.name in members: + member_uuids.append(instance_uuid) + + nfvi_host_group = nfvi.objects.v1.HostGroup( + name=name, + member_names=members, + policies=policies + ) + + host_group = objects.HostGroup(nfvi_host_group) + 
self._host_group_table[host_group.name] = host_group + + def create_host_aggregate(self, name, host_names): + """ + Create a host aggregate + """ + nfvi_host_aggregate = nfvi.objects.v1.HostAggregate( + name=name, + host_names=host_names, + availability_zone='' + ) + + host_aggregate = objects.HostAggregate(nfvi_host_aggregate) + self._host_aggregate_table[host_aggregate.name] = host_aggregate + + def create_sw_upgrade_strategy(self, + storage_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE, + compute_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE, + max_parallel_compute_hosts=10, + alarm_restrictions=SW_UPDATE_ALARM_RESTRICTION.STRICT, + start_upgrade=False, + complete_upgrade=False, + nfvi_upgrade=None + ): + """ + Create a software update strategy + """ + strategy = SwUpgradeStrategy( + uuid=str(uuid.uuid4()), + storage_apply_type=storage_apply_type, + compute_apply_type=compute_apply_type, + max_parallel_compute_hosts=max_parallel_compute_hosts, + alarm_restrictions=alarm_restrictions, + start_upgrade=start_upgrade, + complete_upgrade=complete_upgrade, + ignore_alarms=[] + ) + strategy.nfvi_upgrade = nfvi_upgrade + return strategy @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', fake_host_name_controller_1) @@ -345,30 +339,30 @@ class TestSwUpgradeStrategy(object): Verify: - stages not created """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - create_instance_group('instance_group_1', + self.create_instance_group('instance_group_1', ['test_instance_0', 'test_instance_1'], [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic sorted_compute_hosts = sorted(compute_hosts, key=lambda host: host.name) - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE ) @@ -398,30 +392,30 @@ class TestSwUpgradeStrategy(object): - hosts with no instances upgraded first - anti-affinity policy enforced """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - create_instance_group('instance_group_1', - ['test_instance_0', 'test_instance_1'], - [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + self.create_instance_group('instance_group_1', + ['test_instance_0', 'test_instance_1'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort 
compute hosts so the order of the steps is deterministic sorted_compute_hosts = sorted(compute_hosts, key=lambda host: host.name) - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL, max_parallel_compute_hosts=2 ) @@ -497,34 +491,34 @@ class TestSwUpgradeStrategy(object): - hosts with no instances upgraded first - instances migrated """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') - create_host('compute-4') - create_host('compute-5') - create_host('compute-6') - create_host('compute-7') - create_host('compute-8') - create_host('compute-9') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + self.create_host('compute-5') + self.create_host('compute-6') + self.create_host('compute-7') + self.create_host('compute-8') + self.create_host('compute-9') - create_instance('small', "test_instance_0", 'compute-0') - create_instance('small', "test_instance_2", 'compute-2') - create_instance('small', "test_instance_3", 'compute-3') - create_instance('small', "test_instance_4", 'compute-4') - create_instance('small', "test_instance_6", 'compute-6') - create_instance('small', "test_instance_7", 'compute-7') - create_instance('small', "test_instance_8", 'compute-8') - create_instance('small', "test_instance_9", 'compute-9') + self.create_instance('small', "test_instance_0", 'compute-0') + self.create_instance('small', "test_instance_2", 'compute-2') + self.create_instance('small', "test_instance_3", 'compute-3') + self.create_instance('small', "test_instance_4", 'compute-4') + self.create_instance('small', "test_instance_6", 'compute-6') + self.create_instance('small', "test_instance_7", 'compute-7') + self.create_instance('small', "test_instance_8", 'compute-8') + self.create_instance('small', "test_instance_9", 'compute-9') compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic sorted_compute_hosts = sorted(compute_hosts, key=lambda host: host.name) - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL, max_parallel_compute_hosts=3 ) @@ -622,26 +616,26 @@ class TestSwUpgradeStrategy(object): - host aggregate limits enforced """ for x in range(0, 50): - create_host('compute-%02d' % x) + self.create_host('compute-%02d' % x) for x in range(2, 47): - create_instance('small', - "test_instance_%02d" % x, - 'compute-%02d' % x) + self.create_instance('small', + "test_instance_%02d" % x, + 'compute-%02d' % x) - create_host_aggregate('aggregate-1', - ["compute-%02d" % x for x in range(0, 25)]) - create_host_aggregate('aggregate-2', - ["compute-%02d" % x for x in range(25, 50)]) + self.create_host_aggregate('aggregate-1', + ["compute-%02d" % x for x in range(0, 25)]) + self.create_host_aggregate('aggregate-2', + ["compute-%02d" % x for x in range(25, 50)]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic sorted_compute_hosts = sorted(compute_hosts, key=lambda host: host.name) - strategy = create_sw_upgrade_strategy( + 
strategy = self.create_sw_upgrade_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL, max_parallel_compute_hosts=5 ) @@ -729,30 +723,30 @@ class TestSwUpgradeStrategy(object): Verify: - hosts with no instances upgraded first """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - create_instance_group('instance_group_1', - ['test_instance_0', 'test_instance_1'], - [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + self.create_instance_group('instance_group_1', + ['test_instance_0', 'test_instance_1'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic sorted_compute_hosts = sorted(compute_hosts, key=lambda host: host.name) - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL ) @@ -837,31 +831,31 @@ class TestSwUpgradeStrategy(object): Verify: - stages not created """ - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0', - admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED) - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0', + admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED) + self.create_instance('small', + "test_instance_1", + 'compute-1') - create_instance_group('instance_group_1', - ['test_instance_0', 'test_instance_1'], - [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + self.create_instance_group('instance_group_1', + ['test_instance_0', 'test_instance_1'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) compute_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.COMPUTE in host.personality: compute_hosts.append(host) # Sort compute hosts so the order of the steps is deterministic sorted_compute_hosts = sorted(compute_hosts, key=lambda host: host.name) - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL ) @@ -880,26 +874,26 @@ class TestSwUpgradeStrategy(object): Verify: - stages not created """ - create_host('storage-0') - create_host('storage-1') - create_host('storage-2') - create_host('storage-3') + self.create_host('storage-0') + self.create_host('storage-1') + self.create_host('storage-2') + self.create_host('storage-3') - create_host_group('group-0', - ['storage-0', 'storage-1'], - [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) - create_host_group('group-1', - ['storage-2', 'storage-3'], - [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) + self.create_host_group('group-0', + ['storage-0', 'storage-1'], + 
[nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) + self.create_host_group('group-1', + ['storage-2', 'storage-3'], + [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) storage_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.STORAGE in host.personality: storage_hosts.append(host) # Sort hosts so the order of the steps is deterministic sorted_storage_hosts = sorted(storage_hosts, key=lambda host: host.name) - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( storage_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE ) @@ -928,26 +922,26 @@ class TestSwUpgradeStrategy(object): - storage-0 upgraded first - host groups enforced """ - create_host('storage-0') - create_host('storage-1') - create_host('storage-2') - create_host('storage-3') + self.create_host('storage-0') + self.create_host('storage-1') + self.create_host('storage-2') + self.create_host('storage-3') - create_host_group('group-0', - ['storage-0', 'storage-1'], - [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) - create_host_group('group-1', - ['storage-2', 'storage-3'], - [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) + self.create_host_group('group-0', + ['storage-0', 'storage-1'], + [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) + self.create_host_group('group-1', + ['storage-2', 'storage-3'], + [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) storage_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.STORAGE in host.personality: storage_hosts.append(host) # Sort hosts so the order of the steps is deterministic sorted_storage_hosts = sorted(storage_hosts, key=lambda host: host.name) - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( storage_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL ) @@ -1014,26 +1008,26 @@ class TestSwUpgradeStrategy(object): Test the sw_upgrade strategy add storage strategy stages: - serial apply """ - create_host('storage-0') - create_host('storage-1') - create_host('storage-2') - create_host('storage-3') + self.create_host('storage-0') + self.create_host('storage-1') + self.create_host('storage-2') + self.create_host('storage-3') - create_host_group('group-0', - ['storage-0', 'storage-1'], - [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) - create_host_group('group-1', - ['storage-2', 'storage-3'], - [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) + self.create_host_group('group-0', + ['storage-0', 'storage-1'], + [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) + self.create_host_group('group-1', + ['storage-2', 'storage-3'], + [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION]) storage_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if HOST_PERSONALITY.STORAGE in host.personality: storage_hosts.append(host) # Sort hosts so the order of the steps is deterministic sorted_storage_hosts = sorted(storage_hosts, key=lambda host: host.name) - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( storage_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL ) @@ -1120,16 +1114,16 @@ class TestSwUpgradeStrategy(object): Verify: - controller-0 upgraded """ - create_host('controller-0') - create_host('controller-1') + self.create_host('controller-0') + self.create_host('controller-1') controller_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if 
(HOST_PERSONALITY.CONTROLLER in host.personality and HOST_NAME.CONTROLLER_0 == host.name): controller_hosts.append(host) - strategy = create_sw_upgrade_strategy() + strategy = self.create_sw_upgrade_strategy() strategy._add_controller_strategy_stages(controllers=controller_hosts, reboot=True) @@ -1169,15 +1163,15 @@ class TestSwUpgradeStrategy(object): Verify: - controller-1 and controller-0 upgraded """ - create_host('controller-0') - create_host('controller-1') + self.create_host('controller-0') + self.create_host('controller-1') controller_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if (HOST_PERSONALITY.CONTROLLER in host.personality): controller_hosts.append(host) - strategy = create_sw_upgrade_strategy() + strategy = self.create_sw_upgrade_strategy() strategy._add_controller_strategy_stages(controllers=controller_hosts, reboot=True) @@ -1235,16 +1229,16 @@ class TestSwUpgradeStrategy(object): Verify: - controller-0 upgraded """ - create_host('controller-0', cpe=True) - create_host('controller-1', cpe=True) + self.create_host('controller-0', cpe=True) + self.create_host('controller-1', cpe=True) controller_hosts = [] - for host in _host_table.values(): + for host in self._host_table.values(): if (HOST_PERSONALITY.CONTROLLER in host.personality and HOST_NAME.CONTROLLER_0 == host.name): controller_hosts.append(host) - strategy = create_sw_upgrade_strategy() + strategy = self.create_sw_upgrade_strategy() success, reason = strategy._add_controller_strategy_stages( controllers=controller_hosts, @@ -1254,7 +1248,7 @@ class TestSwUpgradeStrategy(object): assert reason == "cannot apply software upgrades to CPE configuration", \ "Invalid failure reason" - @nottest + @testtools.skip('No support for start_upgrade') def test_sw_upgrade_strategy_build_complete_serial_migrate_start_complete(self): """ Test the sw_upgrade strategy build_complete: @@ -1264,18 +1258,18 @@ class TestSwUpgradeStrategy(object): Verify: - hosts with no instances upgraded first """ - create_host('controller-0') - create_host('controller-1') - create_host('storage-0') - create_host('storage-1') - create_host('compute-0') - create_host('compute-1') + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('storage-0') + self.create_host('storage-1') + self.create_host('compute-0') + self.create_host('compute-1') - create_instance('small', - "test_instance_0", - 'compute-0') + self.create_instance('small', + "test_instance_0", + 'compute-0') - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( storage_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, start_upgrade=True, @@ -1422,16 +1416,16 @@ class TestSwUpgradeStrategy(object): Verify: - hosts with no instances upgraded first """ - create_host('controller-0') - create_host('controller-1') - create_host('compute-0') - create_host('compute-1') + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('compute-0') + self.create_host('compute-1') - create_instance('small', - "test_instance_0", - 'compute-0') + self.create_instance('small', + "test_instance_0", + 'compute-0') - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, nfvi_upgrade=nfvi.objects.v1.Upgrade( UPGRADE_STATE.UPGRADING_CONTROLLERS, @@ -1525,21 +1519,21 @@ class TestSwUpgradeStrategy(object): Verify: - build fails """ - create_host('controller-0') - 
create_host('controller-1') - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, nfvi_upgrade=nfvi.objects.v1.Upgrade( UPGRADE_STATE.DATA_MIGRATION_COMPLETE, @@ -1570,21 +1564,21 @@ class TestSwUpgradeStrategy(object): Verify: - build fails """ - create_host('controller-0') - create_host('controller-1') - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL ) @@ -1611,21 +1605,21 @@ class TestSwUpgradeStrategy(object): Verify: - build fails """ - create_host('controller-0') - create_host('controller-1') - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, nfvi_upgrade=nfvi.objects.v1.Upgrade( UPGRADE_STATE.DATA_MIGRATION_COMPLETE, @@ -1656,21 +1650,21 @@ class TestSwUpgradeStrategy(object): Verify: - build fails """ - create_host('controller-0') - create_host('controller-1') - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, nfvi_upgrade=nfvi.objects.v1.Upgrade( UPGRADE_STATE.UPGRADING_CONTROLLERS, @@ -1702,22 +1696,22 @@ class 
TestSwUpgradeStrategy(object): Verify: - build fails """ - create_host('controller-0', - admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED) - create_host('controller-1') - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3') + self.create_host('controller-0', + admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED) + self.create_host('controller-1') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, nfvi_upgrade=nfvi.objects.v1.Upgrade( UPGRADE_STATE.UPGRADING_CONTROLLERS, @@ -1748,22 +1742,22 @@ class TestSwUpgradeStrategy(object): Verify: - build fails """ - create_host('controller-0') - create_host('controller-1') - create_host('compute-0') - create_host('compute-1') - create_host('compute-2') - create_host('compute-3', - admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED) + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3', + admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED) - create_instance('small', - "test_instance_0", - 'compute-0') - create_instance('small', - "test_instance_1", - 'compute-1') + self.create_instance('small', + "test_instance_0", + 'compute-0') + self.create_instance('small', + "test_instance_1", + 'compute-1') - strategy = create_sw_upgrade_strategy( + strategy = self.create_sw_upgrade_strategy( compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, nfvi_upgrade=nfvi.objects.v1.Upgrade( UPGRADE_STATE.UPGRADING_CONTROLLERS, diff --git a/nfv/nfv-tests/nfv_unit_tests/tests/testcase.py b/nfv/nfv-tests/nfv_unit_tests/tests/testcase.py new file mode 100644 index 00000000..d0071ea6 --- /dev/null +++ b/nfv/nfv-tests/nfv_unit_tests/tests/testcase.py @@ -0,0 +1,12 @@ +# +# Copyright (c) 2018 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +import testtools + + +class NFVTestCase(testtools.TestCase): + + def setUp(self): + super(NFVTestCase, self).setUp() diff --git a/nfv/tox.ini b/nfv/tox.ini index a3ee683a..97340dd9 100755 --- a/nfv/tox.ini +++ b/nfv/tox.ini @@ -25,12 +25,14 @@ nfv_client_dir = ./nfv-client nfv_common_dir = ./nfv-common nfv_plugins_dir = ./nfv-plugins nfv_vim_dir = ./nfv-vim +nfv_test_dir = ./nfv-tests stx_fault_dir = ../../stx-fault nfv_client_src_dir = {[nfv]nfv_client_dir}/nfv_client nfv_common_src_dir = {[nfv]nfv_common_dir}/nfv_common nfv_plugins_src_dir = {[nfv]nfv_plugins_dir}/nfv_plugins nfv_vim_src_dir = {[nfv]nfv_vim_dir}/nfv_vim +nfv_test_src_dir = {[nfv]nfv_test_dir} deps = {[nfv]nfv_client_dir} {[nfv]nfv_common_dir} @@ -100,12 +102,17 @@ commands = [testenv:pylint] deps = {[nfv]deps} + fixtures + jinja2 + mock + testtools pylint basepython = python2.7 commands = pylint {[nfv]nfv_client_src_dir} \ {[nfv]nfv_common_src_dir} \ {[nfv]nfv_plugins_src_dir} \ {[nfv]nfv_vim_src_dir} \ + {[nfv]nfv_test_src_dir} \ --rcfile=./pylint.rc [bandit] @@ -127,30 +134,44 @@ commands = bandit --ini tox.ini -f txt -r {[nfv]nfv_base_dir} [testenv:py27] basepython = python2.7 deps = {[nfv]deps} - mock - nose coverage + fixtures + mock + stestr + testtools setenv = PYTHONDONTWRITEBYTECODE=True commands = cp -v {[nfv]nfv_base_dir}/nfv-tests/nfv_unit_tests/test_data/nfv_vim_db_18.03_GA {envdir}/ - {envbindir}/nosetests --exe -w {[nfv]nfv_base_dir}/nfv-tests/nfv_unit_tests/tests/ '{posargs}' + stestr --test-path={[nfv]nfv_base_dir}/nfv-tests/nfv_unit_tests/tests run '{posargs}' + stestr slowest [testenv:py35] basepython = python3.5 deps = {[nfv]deps} - mock - nose coverage + fixtures + mock + stestr + testtools setenv = PYTHONDONTWRITEBYTECODE=True commands = cp -v {[nfv]nfv_base_dir}/nfv-tests/nfv_unit_tests/test_data/nfv_vim_db_18.03_GA {envdir}/ - {envbindir}/nosetests --exe -w {[nfv]nfv_base_dir}/nfv-tests/nfv_unit_tests/tests/ '{posargs}' + stestr --test-path={[nfv]nfv_base_dir}/nfv-tests/nfv_unit_tests/tests run '{posargs}' + stestr slowest [testenv:cover] basepython = python2.7 deps = {[nfv]deps} - mock - nose coverage -setenv = PYTHONDONTWRITEBYTECODE=True + fixtures + mock + stestr + testtools +setenv = + PYTHON=coverage run --parallel-mode + PYTHONDONTWRITEBYTECODE=True commands = cp -v {[nfv]nfv_base_dir}/nfv-tests/nfv_unit_tests/test_data/nfv_vim_db_18.03_GA {envdir}/ - {envbindir}/nosetests --exe --cover-erase --cover-branches \ - --cover-package=nfv_common,nfv_vim --with-coverage -w {[nfv]nfv_base_dir}/nfv-tests/nfv_unit_tests/tests/ + coverage erase + stestr --test-path={[nfv]nfv_base_dir}/nfv-tests/nfv_unit_tests/tests run '{posargs}' + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report
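
For reference, a minimal sketch of the test pattern this change adopts: per-class table instances installed with fixtures.MonkeyPatch from a testtools.TestCase setUp(), instead of module-level globals wrapped in stacked @mock.patch class decorators. The FakeTable and ExampleStrategyTestCase names below are illustrative only, and the patch target assumes the nfv packages are importable in the test virtualenv (as the tox environments above arrange); it is a sketch of the approach, not code from the tree.

import fixtures
import testtools


class FakeTable(dict):
    # Illustrative stand-in for the VIM table classes; the real tests reuse
    # HostTable, InstanceTable, etc. and set persist = False on each instance
    # so nothing is written to the database during unit testing.
    persist = False


class ExampleStrategyTestCase(testtools.TestCase):

    _host_table = FakeTable()

    def setUp(self):
        super(ExampleStrategyTestCase, self).setUp()
        # Swap the module-level table for the test-local instance; the fixture
        # restores the original module attribute automatically during cleanup.
        self.useFixture(fixtures.MonkeyPatch(
            'nfv_vim.tables._host_table._host_table', self._host_table))

    def tearDown(self):
        super(ExampleStrategyTestCase, self).tearDown()
        self._host_table.clear()

    def test_host_table_is_patched(self):
        # While the test runs, code that reads the module-level table sees
        # the per-class instance created above.
        from nfv_vim.tables import _host_table
        self.assertIs(self._host_table, _host_table._host_table)

A test written this way needs no per-test patch bookkeeping: MonkeyPatch undoes itself when the fixture is cleaned up at the end of each test, which is why the table-level @mock.patch decorators could be dropped from the class.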