change assert(Not)Equals to assert(Not)Equal
According to http://docs.python.org/2/library/unittest.html assert(Not)Equals is a deprecated alias of assert(Not)Equal. Change-Id: If90709756ccb6022641b915e47a5c7af4fe1fc1c Closes-Bug: #1329757
This commit is contained in:
parent
eaabb2c9bb
commit
f78b4a0854
@ -46,15 +46,15 @@ class FakeFile(StringIO):
|
||||
class TestUtils(BaseTestCase):
|
||||
|
||||
def test_byte_to_megabyte(self):
|
||||
self.assertEquals(byte_to_megabyte(0), 0)
|
||||
self.assertEquals(byte_to_megabyte(1048576), 1)
|
||||
self.assertEqual(byte_to_megabyte(0), 0)
|
||||
self.assertEqual(byte_to_megabyte(1048576), 1)
|
||||
|
||||
def test_calculate_free_space(self):
|
||||
dev_info = mock.Mock()
|
||||
dev_info.f_bsize = 1048576
|
||||
dev_info.f_bavail = 2
|
||||
with patch.object(os, 'statvfs', return_value=dev_info) as st_mock:
|
||||
self.assertEquals(calculate_free_space('/tmp/dir/file'), 2)
|
||||
self.assertEqual(calculate_free_space('/tmp/dir/file'), 2)
|
||||
|
||||
st_mock.assert_called_once_with('/tmp/dir')
|
||||
|
||||
@ -63,7 +63,7 @@ class TestUtils(BaseTestCase):
|
||||
file_path = '/tmp/file'
|
||||
|
||||
with mock.patch('__builtin__.open', open_mock):
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
calculate_md5sum(file_path),
|
||||
'199df6f47108545693b5c9cb5344bf13')
|
||||
|
||||
@ -84,4 +84,4 @@ class TestUtils(BaseTestCase):
|
||||
|
||||
file_mock.assert_called_once_with(dst_path, 'wb')
|
||||
url_fake.assert_called_once_with(src_path)
|
||||
self.assertEquals(fake_file.getvalue(), content)
|
||||
self.assertEqual(fake_file.getvalue(), content)
|
||||
|
@ -63,7 +63,7 @@ class TestDockerUpgrader(BaseTestCase):
|
||||
retry_interval=1,
|
||||
retries_count=retries_count)
|
||||
|
||||
self.assertEquals(sleep.call_count, retries_count)
|
||||
self.assertEqual(sleep.call_count, retries_count)
|
||||
self.called_once(self.docker_mock.create_container)
|
||||
|
||||
def test_run_without_errors(self, exec_cmd):
|
||||
@ -127,7 +127,7 @@ class TestDockerUpgrader(BaseTestCase):
|
||||
self.upgrader.clean_docker_iptables_rules = mock.MagicMock()
|
||||
self.docker_mock.containers.return_value = all_images
|
||||
self.upgrader.stop_fuel_containers()
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
self.docker_mock.stop.call_args_list, [((3, 10),), ((4, 10),)])
|
||||
self.upgrader.clean_docker_iptables_rules.assert_called_once_with(
|
||||
ports)
|
||||
@ -140,7 +140,7 @@ class TestDockerUpgrader(BaseTestCase):
|
||||
{'docker_image': 'image2'}]
|
||||
|
||||
self.upgrader.upload_images()
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
exec_mock.call_args_list,
|
||||
[(('docker load < "image1"',),),
|
||||
(('docker load < "image2"',),)])
|
||||
@ -182,10 +182,10 @@ class TestDockerUpgrader(BaseTestCase):
|
||||
'binds': None, 'port_bindings': None,
|
||||
'privileged': False, 'links': []})]
|
||||
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
self.upgrader.create_container.call_args_list,
|
||||
create_container_calls)
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
self.upgrader.start_container.call_args_list,
|
||||
start_container_calls)
|
||||
self.called_once(self.upgrader.run_after_container_creation_command)
|
||||
@ -223,7 +223,7 @@ class TestDockerUpgrader(BaseTestCase):
|
||||
'2': [],
|
||||
'3': ['2']}
|
||||
|
||||
self.assertEquals(actual_graph, expected_graph)
|
||||
self.assertEqual(actual_graph, expected_graph)
|
||||
|
||||
def test_get_container_links(self, _):
|
||||
fake_containers = [
|
||||
@ -232,16 +232,16 @@ class TestDockerUpgrader(BaseTestCase):
|
||||
{'id': 'id2', 'container_name': 'container_name2'}]
|
||||
self.upgrader.new_release_containers = fake_containers
|
||||
links = self.upgrader.get_container_links(fake_containers[0])
|
||||
self.assertEquals(links, [('container_name2', 'alias2')])
|
||||
self.assertEqual(links, [('container_name2', 'alias2')])
|
||||
|
||||
def test_get_port_bindings(self, _):
|
||||
port_bindings = {'port_bindings': {'53/udp': ['0.0.0.0', 53]}}
|
||||
bindings = self.upgrader.get_port_bindings(port_bindings)
|
||||
self.assertEquals({'53/udp': ('0.0.0.0', 53)}, bindings)
|
||||
self.assertEqual({'53/udp': ('0.0.0.0', 53)}, bindings)
|
||||
|
||||
def test_get_ports(self, _):
|
||||
ports = self.upgrader.get_ports({'ports': [[53, 'udp'], 100]})
|
||||
self.assertEquals([(53, 'udp'), 100], ports)
|
||||
self.assertEqual([(53, 'udp'), 100], ports)
|
||||
|
||||
def test_generate_configs(self, _):
|
||||
fake_containers = [
|
||||
|
@ -62,7 +62,7 @@ class TestSupervisorClient(BaseTestCase):
|
||||
self.supervisor.generate_config = mock.MagicMock()
|
||||
self.supervisor.generate_configs([1, 2, 3])
|
||||
args = self.supervisor.generate_config.call_args_list
|
||||
self.assertEquals(args, [((1,),), ((2,),), ((3,),)])
|
||||
self.assertEqual(args, [((1,),), ((2,),), ((3,),)])
|
||||
|
||||
def test_generate_config(self, _):
|
||||
config_path = '/config/path'
|
||||
|
@ -105,7 +105,7 @@ class TestUtils(BaseTestCase):
|
||||
urllib2, 'urlopen', return_value=response) as urlopen:
|
||||
|
||||
resp = get_request(url)
|
||||
self.assertEquals(({'key': 'value'}, 200), resp)
|
||||
self.assertEqual(({'key': 'value'}, 200), resp)
|
||||
|
||||
urlopen.assert_called_once_with(url)
|
||||
|
||||
@ -120,7 +120,7 @@ class TestUtils(BaseTestCase):
|
||||
}
|
||||
|
||||
order = topological_sorting(graph)
|
||||
self.assertEquals(order, ['A', 'B', 'C', 'G', 'D', 'E'])
|
||||
self.assertEqual(order, ['A', 'B', 'C', 'G', 'D', 'E'])
|
||||
|
||||
def test_topological_sorting_raises_cycle_dependencies_error(self):
|
||||
graph = {
|
||||
@ -160,7 +160,7 @@ class TestUtils(BaseTestCase):
|
||||
mock_makedirs.called_once(path)
|
||||
|
||||
def test_wait_for_true_does_not_raise_errors(self):
|
||||
self.assertEquals(wait_for_true(lambda: True, timeout=0), True)
|
||||
self.assertEqual(wait_for_true(lambda: True, timeout=0), True)
|
||||
|
||||
def test_wait_for_true_raises_timeout_error(self):
|
||||
self.assertRaisesRegexp(
|
||||
|
@ -140,7 +140,7 @@ class Environment(object):
|
||||
params=jsonutils.dumps(release_data),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.tester.assertEquals(resp.status_code, 201)
|
||||
self.tester.assertEqual(resp.status_code, 201)
|
||||
release = jsonutils.loads(resp.body)
|
||||
self.releases.append(
|
||||
self.db.query(Release).get(release['id'])
|
||||
@ -164,7 +164,7 @@ class Environment(object):
|
||||
params=jsonutils.dumps(release_data),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.tester.assertEquals(resp.status_code, 200)
|
||||
self.tester.assertEqual(resp.status_code, 200)
|
||||
download_task = jsonutils.loads(resp.body)
|
||||
return self.db.query(Task).get(download_task['id'])
|
||||
|
||||
@ -192,7 +192,7 @@ class Environment(object):
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.tester.assertEquals(resp.status_code, 201)
|
||||
self.tester.assertEqual(resp.status_code, 201)
|
||||
cluster = jsonutils.loads(resp.body)
|
||||
self.clusters.append(
|
||||
Cluster.get_by_uid(cluster['id'])
|
||||
@ -248,12 +248,12 @@ class Environment(object):
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.tester.assertEquals(resp.status_code, expect_http)
|
||||
self.tester.assertEqual(resp.status_code, expect_http)
|
||||
if expect_message:
|
||||
self.tester.assertEquals(resp.body, expect_message)
|
||||
self.tester.assertEqual(resp.body, expect_message)
|
||||
if str(expect_http)[0] != "2":
|
||||
return None
|
||||
self.tester.assertEquals(resp.status_code, expect_http)
|
||||
self.tester.assertEqual(resp.status_code, expect_http)
|
||||
node = jsonutils.loads(resp.body)
|
||||
node_db = Node.get_by_uid(node['id'])
|
||||
if 'interfaces' not in node_data['meta'] \
|
||||
@ -493,7 +493,7 @@ class Environment(object):
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.tester.assertEquals(202, resp.status_code)
|
||||
self.tester.assertEqual(202, resp.status_code)
|
||||
response = jsonutils.loads(resp.body)
|
||||
return self.db.query(Task).filter_by(
|
||||
uuid=response['uuid']
|
||||
@ -511,7 +511,7 @@ class Environment(object):
|
||||
kwargs={'cluster_id': self.clusters[0].id}),
|
||||
headers=self.default_headers)
|
||||
|
||||
self.tester.assertEquals(202, resp.status_code)
|
||||
self.tester.assertEqual(202, resp.status_code)
|
||||
response = jsonutils.loads(resp.body)
|
||||
return self.db.query(Task).filter_by(
|
||||
uuid=response['uuid']
|
||||
@ -529,7 +529,7 @@ class Environment(object):
|
||||
kwargs={'cluster_id': self.clusters[0].id}),
|
||||
expect_errors=True,
|
||||
headers=self.default_headers)
|
||||
self.tester.assertEquals(expect_http, resp.status_code)
|
||||
self.tester.assertEqual(expect_http, resp.status_code)
|
||||
if not str(expect_http).startswith("2"):
|
||||
return resp.body
|
||||
response = jsonutils.loads(resp.body)
|
||||
@ -549,7 +549,7 @@ class Environment(object):
|
||||
kwargs={'cluster_id': self.clusters[0].id}),
|
||||
expect_errors=True,
|
||||
headers=self.default_headers)
|
||||
self.tester.assertEquals(resp.status_code, expect_http)
|
||||
self.tester.assertEqual(resp.status_code, expect_http)
|
||||
if not str(expect_http).startswith("2"):
|
||||
return resp.body
|
||||
response = jsonutils.loads(resp.body)
|
||||
@ -584,7 +584,7 @@ class Environment(object):
|
||||
),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.tester.assertEquals(200, resp.status_code)
|
||||
self.tester.assertEqual(200, resp.status_code)
|
||||
nets = resp.body
|
||||
|
||||
resp = self.app.put(
|
||||
@ -594,7 +594,7 @@ class Environment(object):
|
||||
nets,
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.tester.assertEquals(202, resp.status_code)
|
||||
self.tester.assertEqual(202, resp.status_code)
|
||||
response = jsonutils.loads(resp.body)
|
||||
task_uuid = response['uuid']
|
||||
return self.db.query(Task).filter_by(uuid=task_uuid).first()
|
||||
@ -610,7 +610,7 @@ class Environment(object):
|
||||
reverse("NodeNICsHandler",
|
||||
kwargs={"node_id": node_id}),
|
||||
headers=self.default_headers)
|
||||
self.tester.assertEquals(resp.status_code, 200)
|
||||
self.tester.assertEqual(resp.status_code, 200)
|
||||
data = jsonutils.loads(resp.body)
|
||||
|
||||
nics = self.db.query(NodeNICInterface).filter(
|
||||
@ -618,7 +618,7 @@ class Environment(object):
|
||||
).filter(
|
||||
NodeNICInterface.node_id == node_id
|
||||
)
|
||||
self.tester.assertEquals(nics.count(), len(nic_names))
|
||||
self.tester.assertEqual(nics.count(), len(nic_names))
|
||||
|
||||
assigned_nets, slaves = [], []
|
||||
for nic in data:
|
||||
@ -634,7 +634,7 @@ class Environment(object):
|
||||
"assigned_networks": assigned_nets
|
||||
})
|
||||
resp = self.node_nics_put(node_id, data)
|
||||
self.tester.assertEquals(resp.status_code, 200)
|
||||
self.tester.assertEqual(resp.status_code, 200)
|
||||
|
||||
def refresh_nodes(self):
|
||||
for n in self.nodes[:]:
|
||||
@ -662,19 +662,19 @@ class Environment(object):
|
||||
)
|
||||
)
|
||||
time.sleep(1)
|
||||
self.tester.assertEquals(task.progress, 100)
|
||||
self.tester.assertEqual(task.progress, 100)
|
||||
if isinstance(message, type(re.compile("regexp"))):
|
||||
self.tester.assertIsNotNone(re.match(message, task.message))
|
||||
elif isinstance(message, str):
|
||||
self.tester.assertEquals(task.message, message)
|
||||
self.tester.assertEqual(task.message, message)
|
||||
|
||||
def wait_ready(self, task, timeout=60, message=None):
|
||||
self._wait_task(task, timeout, message)
|
||||
self.tester.assertEquals(task.status, 'ready')
|
||||
self.tester.assertEqual(task.status, 'ready')
|
||||
|
||||
def wait_error(self, task, timeout=60, message=None):
|
||||
self._wait_task(task, timeout, message)
|
||||
self.tester.assertEquals(task.status, 'error')
|
||||
self.tester.assertEqual(task.status, 'error')
|
||||
|
||||
def wait_for_nodes_status(self, nodes, status):
|
||||
def check_statuses():
|
||||
|
@ -34,8 +34,8 @@ class TestAttributes(BaseIntegrationTest):
|
||||
headers=self.default_headers
|
||||
)
|
||||
release = objects.Release.get_by_uid(cluster['release_id'])
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEquals(
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.assertEqual(
|
||||
jsonutils.loads(resp.body)['editable'],
|
||||
release.attributes_metadata['editable']
|
||||
)
|
||||
@ -61,7 +61,7 @@ class TestAttributes(BaseIntegrationTest):
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.assertEquals(500, resp.status_code)
|
||||
self.assertEqual(500, resp.status_code)
|
||||
|
||||
def test_attributes_update_put(self):
|
||||
cluster_id = self.env.create_cluster(api=True)['id']
|
||||
@ -72,7 +72,7 @@ class TestAttributes(BaseIntegrationTest):
|
||||
kwargs={'cluster_id': cluster_id}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
resp = self.app.put(
|
||||
reverse(
|
||||
'ClusterAttributesHandler',
|
||||
@ -84,9 +84,9 @@ class TestAttributes(BaseIntegrationTest):
|
||||
}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
attrs = objects.Cluster.get_attributes(cluster_db)
|
||||
self.assertEquals("bar", attrs.editable["foo"])
|
||||
self.assertEqual("bar", attrs.editable["foo"])
|
||||
attrs.editable.pop('foo')
|
||||
self.assertEqual(attrs.editable, {})
|
||||
# 400 on generated update
|
||||
@ -102,7 +102,7 @@ class TestAttributes(BaseIntegrationTest):
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.assertEquals(400, resp.status_code)
|
||||
self.assertEqual(400, resp.status_code)
|
||||
# 400 if editable is not dict
|
||||
resp = self.app.put(
|
||||
reverse(
|
||||
@ -114,7 +114,7 @@ class TestAttributes(BaseIntegrationTest):
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.assertEquals(400, resp.status_code)
|
||||
self.assertEqual(400, resp.status_code)
|
||||
|
||||
def test_attributes_update_patch(self):
|
||||
cluster_id = self.env.create_cluster(api=True)['id']
|
||||
@ -125,7 +125,7 @@ class TestAttributes(BaseIntegrationTest):
|
||||
kwargs={'cluster_id': cluster_id}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
resp = self.app.patch(
|
||||
reverse(
|
||||
'ClusterAttributesHandler',
|
||||
@ -137,9 +137,9 @@ class TestAttributes(BaseIntegrationTest):
|
||||
}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
attrs = objects.Cluster.get_attributes(cluster_db)
|
||||
self.assertEquals("bar", attrs.editable["foo"])
|
||||
self.assertEqual("bar", attrs.editable["foo"])
|
||||
attrs.editable.pop('foo')
|
||||
self.assertNotEqual(attrs.editable, {})
|
||||
|
||||
@ -154,8 +154,8 @@ class TestAttributes(BaseIntegrationTest):
|
||||
kwargs={'cluster_id': cluster['id']}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEquals(
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.assertEqual(
|
||||
jsonutils.loads(resp.body)['editable'],
|
||||
release.attributes_metadata['editable']
|
||||
)
|
||||
@ -176,9 +176,9 @@ class TestAttributes(BaseIntegrationTest):
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
attrs = objects.Cluster.get_attributes(cluster_db)
|
||||
self.assertEquals("bar", attrs.editable["foo"])
|
||||
self.assertEqual("bar", attrs.editable["foo"])
|
||||
# Set attributes to defaults.
|
||||
resp = self.app.put(
|
||||
reverse(
|
||||
@ -186,11 +186,11 @@ class TestAttributes(BaseIntegrationTest):
|
||||
kwargs={'cluster_id': cluster['id']}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
release = self.db.query(Release).get(
|
||||
cluster['release_id']
|
||||
)
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
jsonutils.loads(resp.body)['editable'],
|
||||
release.attributes_metadata['editable']
|
||||
)
|
||||
@ -209,15 +209,15 @@ class TestAttributes(BaseIntegrationTest):
|
||||
for c, val in group_attrs.iteritems():
|
||||
self.assertIn(c, attrs)
|
||||
if 'value' in val:
|
||||
self.assertEquals(val["value"],
|
||||
attrs[c]["enabled"])
|
||||
self.assertEqual(val["value"],
|
||||
attrs[c]["enabled"])
|
||||
continue
|
||||
else:
|
||||
value = attrs[group][attr]
|
||||
if isinstance(orig_value, dict) and 'value' in orig_value:
|
||||
self.assertEquals(orig_value['value'], value)
|
||||
self.assertEqual(orig_value['value'], value)
|
||||
else:
|
||||
self.assertEquals(orig_value, value)
|
||||
self.assertEqual(orig_value, value)
|
||||
|
||||
def _compare(self, d1, d2):
|
||||
if isinstance(d1, dict) and isinstance(d2, dict):
|
||||
|
@ -31,7 +31,7 @@ class TestHandlers(BaseIntegrationTest):
|
||||
resp = self.app.put(
|
||||
reverse('CapacityLogHandler'),
|
||||
headers=self.default_headers)
|
||||
self.assertEquals(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
|
||||
capacity_task = self.db.query(Task).filter_by(
|
||||
name="capacity_log"
|
||||
@ -62,15 +62,15 @@ class TestHandlers(BaseIntegrationTest):
|
||||
for field in report_fields:
|
||||
self.assertTrue(field in report)
|
||||
|
||||
self.assertEquals(report['allocation_stats']['allocated'], 0)
|
||||
self.assertEquals(report['allocation_stats']['unallocated'], 1)
|
||||
self.assertEqual(report['allocation_stats']['allocated'], 0)
|
||||
self.assertEqual(report['allocation_stats']['unallocated'], 1)
|
||||
|
||||
@patch('nailgun.api.v1.handlers.version.settings.VERSION', {
|
||||
'release': '0.1b'})
|
||||
def test_capacity_csv_checksum(self):
|
||||
self._create_capacity_log()
|
||||
resp = self.app.get(reverse('CapacityLogCsvHandler'))
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
|
||||
response_stream = StringIO(resp.body)
|
||||
checksum = md5(''.join(response_stream.readlines()[:-2])).hexdigest()
|
||||
@ -114,19 +114,19 @@ class TestHandlers(BaseIntegrationTest):
|
||||
capacity_log = self._get_capacity_log_json()
|
||||
report = capacity_log['report']
|
||||
|
||||
self.assertEquals(report['allocation_stats']['allocated'], 6)
|
||||
self.assertEquals(report['allocation_stats']['unallocated'], 0)
|
||||
self.assertEqual(report['allocation_stats']['allocated'], 6)
|
||||
self.assertEqual(report['allocation_stats']['unallocated'], 0)
|
||||
|
||||
self.assertEquals(report['roles_stat']['controller'], 2)
|
||||
self.assertEquals(report['roles_stat']['cinder+controller'], 1)
|
||||
self.assertEquals(report['roles_stat']['cinder+compute'], 1)
|
||||
self.assertEquals(report['roles_stat']['compute'], 1)
|
||||
self.assertEquals(report['roles_stat']['cinder'], 1)
|
||||
self.assertEqual(report['roles_stat']['controller'], 2)
|
||||
self.assertEqual(report['roles_stat']['cinder+controller'], 1)
|
||||
self.assertEqual(report['roles_stat']['cinder+compute'], 1)
|
||||
self.assertEqual(report['roles_stat']['compute'], 1)
|
||||
self.assertEqual(report['roles_stat']['cinder'], 1)
|
||||
|
||||
self.assertEquals(len(report['environment_stats']), 1)
|
||||
self.assertEqual(len(report['environment_stats']), 1)
|
||||
test_env = report['environment_stats'][0]
|
||||
self.assertEquals(test_env['cluster'], 'test_name')
|
||||
self.assertEquals(test_env['nodes'], 6)
|
||||
self.assertEqual(test_env['cluster'], 'test_name')
|
||||
self.assertEqual(test_env['nodes'], 6)
|
||||
|
||||
@fake_tasks(godmode=True)
|
||||
def test_capacity_csv_log_with_unicode(self):
|
||||
@ -143,4 +143,4 @@ class TestHandlers(BaseIntegrationTest):
|
||||
|
||||
self._create_capacity_log()
|
||||
resp = self.app.get(reverse('CapacityLogCsvHandler'))
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
|
@ -35,13 +35,13 @@ class TestClusterChanges(BaseIntegrationTest):
|
||||
attributes_changes = self.db.query(ClusterChanges).filter_by(
|
||||
name="attributes"
|
||||
).all()
|
||||
self.assertEquals(len(attributes_changes), 1)
|
||||
self.assertEqual(len(attributes_changes), 1)
|
||||
networks_changes = self.db.query(ClusterChanges).filter_by(
|
||||
name="networks"
|
||||
).all()
|
||||
self.assertEquals(len(networks_changes), 1)
|
||||
self.assertEqual(len(networks_changes), 1)
|
||||
all_changes = self.db.query(ClusterChanges).all()
|
||||
self.assertEquals(len(all_changes), 2)
|
||||
self.assertEqual(len(all_changes), 2)
|
||||
|
||||
def test_node_volumes_modification_adds_pending_changes(self):
|
||||
cluster = self.env.create_cluster(api=True)
|
||||
@ -54,7 +54,7 @@ class TestClusterChanges(BaseIntegrationTest):
|
||||
name="disks",
|
||||
node_id=node_db.id
|
||||
).all()
|
||||
self.assertEquals(len(node_disks_changes), 1)
|
||||
self.assertEqual(len(node_disks_changes), 1)
|
||||
resp = self.app.get(
|
||||
reverse(
|
||||
'ClusterHandler',
|
||||
@ -78,7 +78,7 @@ class TestClusterChanges(BaseIntegrationTest):
|
||||
name="disks",
|
||||
node_id=node_db.id
|
||||
).all()
|
||||
self.assertEquals(len(node_disks_changes), 1)
|
||||
self.assertEqual(len(node_disks_changes), 1)
|
||||
self.app.put(
|
||||
reverse('NodeCollectionHandler'),
|
||||
jsonutils.dumps([{"id": node_db.id, "cluster_id": None}]),
|
||||
@ -89,14 +89,14 @@ class TestClusterChanges(BaseIntegrationTest):
|
||||
name="disks",
|
||||
node_id=node_db.id
|
||||
).all()
|
||||
self.assertEquals(len(node_disks_changes), 0)
|
||||
self.assertEqual(len(node_disks_changes), 0)
|
||||
|
||||
def test_attributes_changing_adds_pending_changes(self):
|
||||
cluster = self.env.create_cluster(api=True)
|
||||
cluster_db = self.env.clusters[0]
|
||||
objects.Cluster.clear_pending_changes(cluster_db)
|
||||
all_changes = self.db.query(ClusterChanges).all()
|
||||
self.assertEquals(len(all_changes), 0)
|
||||
self.assertEqual(len(all_changes), 0)
|
||||
self.app.put(
|
||||
reverse(
|
||||
'ClusterAttributesHandler',
|
||||
@ -111,14 +111,14 @@ class TestClusterChanges(BaseIntegrationTest):
|
||||
pending_changes = self.db.query(ClusterChanges).filter_by(
|
||||
name="attributes"
|
||||
).all()
|
||||
self.assertEquals(len(pending_changes), 1)
|
||||
self.assertEqual(len(pending_changes), 1)
|
||||
|
||||
def test_default_attributes_adds_pending_changes(self):
|
||||
cluster = self.env.create_cluster(api=True)
|
||||
cluster_db = self.env.clusters[0]
|
||||
objects.Cluster.clear_pending_changes(cluster_db)
|
||||
all_changes = self.db.query(ClusterChanges).all()
|
||||
self.assertEquals(len(all_changes), 0)
|
||||
self.assertEqual(len(all_changes), 0)
|
||||
self.app.put(
|
||||
reverse(
|
||||
'ClusterAttributesDefaultsHandler',
|
||||
@ -128,14 +128,14 @@ class TestClusterChanges(BaseIntegrationTest):
|
||||
pending_changes = self.db.query(ClusterChanges).filter_by(
|
||||
name="attributes"
|
||||
).all()
|
||||
self.assertEquals(len(pending_changes), 1)
|
||||
self.assertEqual(len(pending_changes), 1)
|
||||
|
||||
def test_network_changing_adds_pending_changes(self):
|
||||
cluster = self.env.create_cluster(api=True)
|
||||
cluster_db = self.env.clusters[0]
|
||||
objects.Cluster.clear_pending_changes(cluster_db)
|
||||
all_changes = self.db.query(ClusterChanges).all()
|
||||
self.assertEquals(len(all_changes), 0)
|
||||
self.assertEqual(len(all_changes), 0)
|
||||
resp = self.app.get(
|
||||
reverse(
|
||||
'NovaNetworkConfigurationHandler',
|
||||
@ -155,7 +155,7 @@ class TestClusterChanges(BaseIntegrationTest):
|
||||
pending_changes = self.db.query(ClusterChanges).filter_by(
|
||||
name="networks"
|
||||
).all()
|
||||
self.assertEquals(len(pending_changes), 1)
|
||||
self.assertEqual(len(pending_changes), 1)
|
||||
|
||||
@fake_tasks(godmode=True)
|
||||
def test_successful_deployment_drops_all_changes(self):
|
||||
@ -169,7 +169,7 @@ class TestClusterChanges(BaseIntegrationTest):
|
||||
cluster_db = self.db.query(Cluster).get(
|
||||
self.env.clusters[0].id
|
||||
)
|
||||
self.assertEquals(list(cluster_db.changes), [])
|
||||
self.assertEqual(list(cluster_db.changes), [])
|
||||
|
||||
@fake_tasks()
|
||||
def test_failed_deployment_does_nothing_with_changes(self):
|
||||
@ -184,17 +184,17 @@ class TestClusterChanges(BaseIntegrationTest):
|
||||
attributes_changes = self.db.query(ClusterChanges).filter_by(
|
||||
name="attributes"
|
||||
).all()
|
||||
self.assertEquals(len(attributes_changes), 1)
|
||||
self.assertEqual(len(attributes_changes), 1)
|
||||
networks_changes = self.db.query(ClusterChanges).filter_by(
|
||||
name="networks"
|
||||
).all()
|
||||
self.assertEquals(len(networks_changes), 1)
|
||||
self.assertEqual(len(networks_changes), 1)
|
||||
disks_changes = self.db.query(ClusterChanges).filter_by(
|
||||
name="disks"
|
||||
).all()
|
||||
self.assertEquals(len(disks_changes), 1)
|
||||
self.assertEqual(len(disks_changes), 1)
|
||||
all_changes = self.db.query(ClusterChanges).all()
|
||||
self.assertEquals(len(all_changes), 3)
|
||||
self.assertEqual(len(all_changes), 3)
|
||||
|
||||
@fake_tasks(godmode=True)
|
||||
def test_role_unassignment_drops_changes(self):
|
||||
@ -224,4 +224,4 @@ class TestClusterChanges(BaseIntegrationTest):
|
||||
cluster_id=self.env.clusters[0].id,
|
||||
node_id=new_node["id"]
|
||||
).all()
|
||||
self.assertEquals(all_changes, [])
|
||||
self.assertEqual(all_changes, [])
|
||||
|
@ -39,13 +39,13 @@ class TestCharsetIssues(BaseIntegrationTest):
|
||||
]
|
||||
)
|
||||
supertask = self.env.launch_deployment()
|
||||
self.assertEquals(supertask.name, 'deploy')
|
||||
self.assertEqual(supertask.name, 'deploy')
|
||||
self.assertIn(supertask.status, ('running', 'ready'))
|
||||
# we have three subtasks here
|
||||
# deletion
|
||||
# provision
|
||||
# deployment
|
||||
self.assertEquals(len(supertask.subtasks), 3)
|
||||
self.assertEqual(len(supertask.subtasks), 3)
|
||||
|
||||
self.env.wait_for_nodes_status(self.env.nodes, 'provisioning')
|
||||
self.env.wait_ready(supertask, 60)
|
||||
|
@ -330,8 +330,8 @@ class TestHandlers(BaseIntegrationTest):
|
||||
'nodes': provision_nodes}}}
|
||||
|
||||
args, kwargs = nailgun.task.manager.rpc.cast.call_args
|
||||
self.assertEquals(len(args), 2)
|
||||
self.assertEquals(len(args[1]), 2)
|
||||
self.assertEqual(len(args), 2)
|
||||
self.assertEqual(len(args[1]), 2)
|
||||
|
||||
self.datadiff(args[1][0], provision_msg)
|
||||
self.datadiff(args[1][1], deployment_msg)
|
||||
@ -729,8 +729,8 @@ class TestHandlers(BaseIntegrationTest):
|
||||
'nodes': provision_nodes}}}
|
||||
|
||||
args, kwargs = nailgun.task.manager.rpc.cast.call_args
|
||||
self.assertEquals(len(args), 2)
|
||||
self.assertEquals(len(args[1]), 2)
|
||||
self.assertEqual(len(args), 2)
|
||||
self.assertEqual(len(args[1]), 2)
|
||||
|
||||
self.datadiff(args[1][0], provision_msg)
|
||||
self.datadiff(args[1][1], deployment_msg)
|
||||
@ -766,8 +766,8 @@ class TestHandlers(BaseIntegrationTest):
|
||||
# remove_nodes method call [0][0][1]
|
||||
n_rpc_remove = nailgun.task.task.rpc.cast. \
|
||||
call_args_list[0][0][1]['args']['nodes']
|
||||
self.assertEquals(len(n_rpc_remove), 1)
|
||||
self.assertEquals(n_rpc_remove[0]['uid'], self.env.nodes[1].id)
|
||||
self.assertEqual(len(n_rpc_remove), 1)
|
||||
self.assertEqual(n_rpc_remove[0]['uid'], self.env.nodes[1].id)
|
||||
|
||||
# provision method call [1][0][1][0]
|
||||
n_rpc_provision = nailgun.task.manager.rpc.cast. \
|
||||
@ -778,8 +778,8 @@ class TestHandlers(BaseIntegrationTest):
|
||||
# 'status' = 'error' and 'error_type' = 'provision'
|
||||
# So, only one node from our list will be appended to
|
||||
# provision list.
|
||||
self.assertEquals(len(n_rpc_provision), 1)
|
||||
self.assertEquals(
|
||||
self.assertEqual(len(n_rpc_provision), 1)
|
||||
self.assertEqual(
|
||||
n_rpc_provision[0]['name'],
|
||||
TaskHelper.make_slave_name(self.env.nodes[0].id)
|
||||
)
|
||||
@ -787,8 +787,8 @@ class TestHandlers(BaseIntegrationTest):
|
||||
# deploy method call [1][0][1][1]
|
||||
n_rpc_deploy = nailgun.task.manager.rpc.cast.call_args_list[
|
||||
1][0][1][1]['args']['deployment_info']
|
||||
self.assertEquals(len(n_rpc_deploy), 1)
|
||||
self.assertEquals(n_rpc_deploy[0]['uid'], str(self.env.nodes[0].id))
|
||||
self.assertEqual(len(n_rpc_deploy), 1)
|
||||
self.assertEqual(n_rpc_deploy[0]['uid'], str(self.env.nodes[0].id))
|
||||
|
||||
@fake_tasks(fake_rpc=False, mock_rpc=False)
|
||||
@patch('nailgun.rpc.cast')
|
||||
@ -812,23 +812,23 @@ class TestHandlers(BaseIntegrationTest):
|
||||
['172.16.0.20', '172.16.0.22']]})
|
||||
|
||||
resp = self.env.neutron_networks_put(self.env.clusters[0].id, net_data)
|
||||
self.assertEquals(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
task = jsonutils.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
|
||||
self.env.launch_deployment()
|
||||
|
||||
args, kwargs = nailgun.task.manager.rpc.cast.call_args
|
||||
self.assertEquals(len(args), 2)
|
||||
self.assertEquals(len(args[1]), 2)
|
||||
self.assertEqual(len(args), 2)
|
||||
self.assertEqual(len(args[1]), 2)
|
||||
|
||||
n_rpc_deploy = args[1][1]['args']['deployment_info']
|
||||
self.assertEquals(len(n_rpc_deploy), 5)
|
||||
self.assertEqual(len(n_rpc_deploy), 5)
|
||||
pub_ips = ['172.16.0.11', '172.16.0.12', '172.16.0.13',
|
||||
'172.16.0.20', '172.16.0.21']
|
||||
for n in n_rpc_deploy:
|
||||
for i, n_common_args in enumerate(n['nodes']):
|
||||
self.assertEquals(n_common_args['public_address'], pub_ips[i])
|
||||
self.assertEqual(n_common_args['public_address'], pub_ips[i])
|
||||
|
||||
@fake_tasks(fake_rpc=False, mock_rpc=False)
|
||||
@patch('nailgun.rpc.cast')
|
||||
@ -852,24 +852,24 @@ class TestHandlers(BaseIntegrationTest):
|
||||
['172.16.0.20', '172.16.0.22']]})
|
||||
|
||||
resp = self.env.neutron_networks_put(self.env.clusters[0].id, net_data)
|
||||
self.assertEquals(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
task = jsonutils.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
|
||||
self.env.launch_deployment()
|
||||
|
||||
args, kwargs = nailgun.task.manager.rpc.cast.call_args
|
||||
self.assertEquals(len(args), 2)
|
||||
self.assertEquals(len(args[1]), 2)
|
||||
self.assertEqual(len(args), 2)
|
||||
self.assertEqual(len(args[1]), 2)
|
||||
|
||||
n_rpc_deploy = args[1][1]['args']['deployment_info']
|
||||
self.assertEquals(len(n_rpc_deploy), 5)
|
||||
self.assertEqual(len(n_rpc_deploy), 5)
|
||||
pub_ips = ['172.16.0.11', '172.16.0.12',
|
||||
'172.16.0.20', '172.16.0.21', '172.16.0.22']
|
||||
for n in n_rpc_deploy:
|
||||
self.assertEquals(n['public_vip'], '172.16.0.10')
|
||||
self.assertEqual(n['public_vip'], '172.16.0.10')
|
||||
for i, n_common_args in enumerate(n['nodes']):
|
||||
self.assertEquals(n_common_args['public_address'], pub_ips[i])
|
||||
self.assertEqual(n_common_args['public_address'], pub_ips[i])
|
||||
|
||||
@fake_tasks(fake_rpc=False, mock_rpc=False)
|
||||
@patch('nailgun.rpc.cast')
|
||||
@ -892,22 +892,22 @@ class TestHandlers(BaseIntegrationTest):
|
||||
[['172.16.10.130', '172.16.10.254']]
|
||||
|
||||
resp = self.env.neutron_networks_put(self.env.clusters[0].id, net_data)
|
||||
self.assertEquals(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
task = jsonutils.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
|
||||
self.env.launch_deployment()
|
||||
|
||||
args, kwargs = nailgun.task.manager.rpc.cast.call_args
|
||||
self.assertEquals(len(args), 2)
|
||||
self.assertEquals(len(args[1]), 2)
|
||||
self.assertEqual(len(args), 2)
|
||||
self.assertEqual(len(args[1]), 2)
|
||||
|
||||
n_rpc_deploy = args[1][1]['args']['deployment_info']
|
||||
self.assertEquals(len(n_rpc_deploy), 2)
|
||||
self.assertEqual(len(n_rpc_deploy), 2)
|
||||
pub_ips = ['172.16.10.11', '172.16.10.12']
|
||||
for n in n_rpc_deploy:
|
||||
for i, n_common_args in enumerate(n['nodes']):
|
||||
self.assertEquals(n_common_args['public_address'], pub_ips[i])
|
||||
self.assertEqual(n_common_args['public_address'], pub_ips[i])
|
||||
|
||||
@fake_tasks(fake_rpc=False, mock_rpc=False)
|
||||
@patch('nailgun.rpc.cast')
|
||||
@ -927,14 +927,14 @@ class TestHandlers(BaseIntegrationTest):
|
||||
pub.update({'ip_ranges': [['172.16.0.10', '172.16.0.11']]})
|
||||
|
||||
resp = self.env.neutron_networks_put(self.env.clusters[0].id, net_data)
|
||||
self.assertEquals(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
task = jsonutils.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
|
||||
task = self.env.launch_deployment()
|
||||
|
||||
self.assertEquals(task.status, 'error')
|
||||
self.assertEquals(
|
||||
self.assertEqual(task.status, 'error')
|
||||
self.assertEqual(
|
||||
task.message,
|
||||
'Not enough IP addresses. Public network must have at least '
|
||||
'3 IP addresses for the current environment.')
|
||||
@ -971,8 +971,8 @@ class TestHandlers(BaseIntegrationTest):
|
||||
|
||||
task = self.env.launch_deployment()
|
||||
|
||||
self.assertEquals(task.status, 'error')
|
||||
self.assertEquals(
|
||||
self.assertEqual(task.status, 'error')
|
||||
self.assertEqual(
|
||||
task.message,
|
||||
'Not enough IP addresses. Public network must have at least '
|
||||
'3 IP addresses for the current environment.')
|
||||
@ -995,8 +995,8 @@ class TestHandlers(BaseIntegrationTest):
|
||||
|
||||
task = self.env.launch_deployment()
|
||||
|
||||
self.assertEquals(task.status, 'error')
|
||||
self.assertEquals(
|
||||
self.assertEqual(task.status, 'error')
|
||||
self.assertEqual(
|
||||
task.message,
|
||||
"Node '%s' has insufficient disk space" %
|
||||
node_db.human_readable_name)
|
||||
@ -1012,8 +1012,8 @@ class TestHandlers(BaseIntegrationTest):
|
||||
|
||||
task = self.env.launch_deployment()
|
||||
|
||||
self.assertEquals(task.status, 'error')
|
||||
self.assertEquals(
|
||||
self.assertEqual(task.status, 'error')
|
||||
self.assertEqual(
|
||||
task.message,
|
||||
"Not enough controllers, multinode mode requires at least 1 "
|
||||
"controller")
|
||||
@ -1025,8 +1025,8 @@ class TestHandlers(BaseIntegrationTest):
|
||||
|
||||
task = self.env.launch_deployment()
|
||||
|
||||
self.assertEquals(task.status, 'error')
|
||||
self.assertEquals(
|
||||
self.assertEqual(task.status, 'error')
|
||||
self.assertEqual(
|
||||
task.message,
|
||||
'Not enough controllers, ha_compact '
|
||||
'mode requires at least 1 controller')
|
||||
@ -1049,8 +1049,8 @@ class TestHandlers(BaseIntegrationTest):
|
||||
|
||||
task = self.env.launch_deployment()
|
||||
|
||||
self.assertEquals(task.status, 'error')
|
||||
self.assertEquals(
|
||||
self.assertEqual(task.status, 'error')
|
||||
self.assertEqual(
|
||||
task.message,
|
||||
'Number of OSD nodes (1) cannot be less than '
|
||||
'the Ceph object replication factor (3). '
|
||||
|
@ -41,9 +41,9 @@ class TestHandlers(BaseIntegrationTest):
|
||||
reverse('ClusterCollectionHandler'),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
response = jsonutils.loads(resp.body)
|
||||
self.assertEquals([], response)
|
||||
self.assertEqual([], response)
|
||||
|
||||
def test_cluster_create(self):
|
||||
release_id = self.env.create_release(api=False).id
|
||||
@ -55,7 +55,7 @@ class TestHandlers(BaseIntegrationTest):
|
||||
}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(201, resp.status_code)
|
||||
self.assertEqual(201, resp.status_code)
|
||||
|
||||
def test_cluster_create_no_ip_addresses(self):
|
||||
"""In this test we check that no error is occured
|
||||
@ -100,7 +100,7 @@ class TestHandlers(BaseIntegrationTest):
|
||||
del net1[f]
|
||||
del net2[f]
|
||||
|
||||
self.assertEquals(cluster1_nets, cluster2_nets)
|
||||
self.assertEqual(cluster1_nets, cluster2_nets)
|
||||
|
||||
def test_cluster_creation_same_networks(self):
|
||||
cluster1_id = self.env.create_cluster(api=True)["id"]
|
||||
@ -116,7 +116,7 @@ class TestHandlers(BaseIntegrationTest):
|
||||
cluster1_nets = sorted(cluster1_nets, key=lambda n: n['vlan_start'])
|
||||
cluster2_nets = sorted(cluster2_nets, key=lambda n: n['vlan_start'])
|
||||
|
||||
self.assertEquals(cluster1_nets, cluster2_nets)
|
||||
self.assertEqual(cluster1_nets, cluster2_nets)
|
||||
|
||||
def test_if_cluster_creates_correct_networks(self):
|
||||
release = Release()
|
||||
@ -147,7 +147,7 @@ class TestHandlers(BaseIntegrationTest):
|
||||
}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(201, resp.status_code)
|
||||
self.assertEqual(201, resp.status_code)
|
||||
nets = self.db.query(NetworkGroup).filter(
|
||||
not_(NetworkGroup.name == "fuelweb_admin")
|
||||
).all()
|
||||
@ -198,6 +198,6 @@ class TestHandlers(BaseIntegrationTest):
|
||||
nets = jsonutils.loads(self.env.nova_networks_get(cluster['id']).body)
|
||||
|
||||
resp = self.env.nova_networks_put(cluster['id'], nets)
|
||||
self.assertEquals(202, resp.status_code)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
task = jsonutils.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
|
@ -41,11 +41,11 @@ class TestHandlers(BaseIntegrationTest):
|
||||
reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
response = jsonutils.loads(resp.body)
|
||||
self.assertEquals(cluster.id, response['id'])
|
||||
self.assertEquals(cluster.name, response['name'])
|
||||
self.assertEquals(cluster.release.id, response['release_id'])
|
||||
self.assertEqual(cluster.id, response['id'])
|
||||
self.assertEqual(cluster.name, response['name'])
|
||||
self.assertEqual(cluster.release.id, response['release_id'])
|
||||
|
||||
def test_cluster_creation(self):
|
||||
release = self.env.create_release(api=False)
|
||||
@ -58,10 +58,10 @@ class TestHandlers(BaseIntegrationTest):
|
||||
}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(201, resp.status_code)
|
||||
self.assertEqual(201, resp.status_code)
|
||||
response = jsonutils.loads(resp.body)
|
||||
self.assertEquals(yet_another_cluster_name, response['name'])
|
||||
self.assertEquals(release.id, response['release_id'])
|
||||
self.assertEqual(yet_another_cluster_name, response['name'])
|
||||
self.assertEqual(release.id, response['release_id'])
|
||||
|
||||
def test_cluster_update(self):
|
||||
updated_name = u'Updated cluster'
|
||||
@ -75,27 +75,27 @@ class TestHandlers(BaseIntegrationTest):
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.db.refresh(cluster)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
clusters = self.db.query(Cluster).filter(
|
||||
Cluster.name == updated_name
|
||||
).all()
|
||||
self.assertEquals(len(clusters), 1)
|
||||
self.assertEquals(clusters[0].name, updated_name)
|
||||
self.assertEqual(len(clusters), 1)
|
||||
self.assertEqual(clusters[0].name, updated_name)
|
||||
|
||||
clusters_after = len(self.db.query(Cluster).all())
|
||||
self.assertEquals(clusters_before, clusters_after)
|
||||
self.assertEqual(clusters_before, clusters_after)
|
||||
|
||||
def test_cluster_update_fails_on_net_provider_change(self):
|
||||
cluster = self.env.create_cluster(api=False)
|
||||
self.assertEquals(cluster.net_provider, "nova_network")
|
||||
self.assertEqual(cluster.net_provider, "nova_network")
|
||||
resp = self.app.put(
|
||||
reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
|
||||
jsonutils.dumps({'net_provider': 'neutron'}),
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.assertEquals(resp.status_code, 400)
|
||||
self.assertEquals(
|
||||
self.assertEqual(resp.status_code, 400)
|
||||
self.assertEqual(
|
||||
resp.body,
|
||||
"Changing 'net_provider' for environment is prohibited"
|
||||
)
|
||||
@ -110,29 +110,29 @@ class TestHandlers(BaseIntegrationTest):
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
|
||||
nodes = self.db.query(Node).filter(Node.cluster == cluster).all()
|
||||
self.assertEquals(1, len(nodes))
|
||||
self.assertEquals(nodes[0].id, node1.id)
|
||||
self.assertEqual(1, len(nodes))
|
||||
self.assertEqual(nodes[0].id, node1.id)
|
||||
|
||||
resp = self.app.put(
|
||||
reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
|
||||
jsonutils.dumps({'nodes': [node2.id]}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
|
||||
nodes = self.db.query(Node).filter(Node.cluster == cluster)
|
||||
self.assertEquals(1, nodes.count())
|
||||
self.assertEqual(1, nodes.count())
|
||||
|
||||
def test_empty_cluster_deletion(self):
|
||||
cluster = self.env.create_cluster(api=True)
|
||||
resp = self.delete(cluster['id'])
|
||||
|
||||
self.assertEquals(resp.status_code, 202)
|
||||
self.assertEquals(self.db.query(Node).count(), 0)
|
||||
self.assertEquals(self.db.query(Cluster).count(), 0)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
self.assertEqual(self.db.query(Node).count(), 0)
|
||||
self.assertEqual(self.db.query(Cluster).count(), 0)
|
||||
|
||||
@fake_tasks()
|
||||
def test_cluster_deletion(self):
|
||||
@ -143,7 +143,7 @@ class TestHandlers(BaseIntegrationTest):
|
||||
{"status": "ready"}])
|
||||
|
||||
resp = self.delete(self.env.clusters[0].id)
|
||||
self.assertEquals(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
|
||||
def cluster_is_empty():
|
||||
return self.db.query(Cluster).count() == 0
|
||||
@ -152,10 +152,10 @@ class TestHandlers(BaseIntegrationTest):
|
||||
self._wait_for_threads()
|
||||
|
||||
# Nodes should be in discover status
|
||||
self.assertEquals(self.db.query(Node).count(), 2)
|
||||
self.assertEqual(self.db.query(Node).count(), 2)
|
||||
for node in self.db.query(Node):
|
||||
self.assertEquals(node.status, 'discover')
|
||||
self.assertEquals(node.cluster_id, None)
|
||||
self.assertEqual(node.status, 'discover')
|
||||
self.assertEqual(node.cluster_id, None)
|
||||
|
||||
@fake_tasks()
|
||||
def test_cluster_deletion_with_offline_nodes(self):
|
||||
@ -166,7 +166,7 @@ class TestHandlers(BaseIntegrationTest):
|
||||
{'online': False, 'status': 'ready'}])
|
||||
|
||||
resp = self.delete(self.env.clusters[0].id)
|
||||
self.assertEquals(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
|
||||
def cluster_is_empty_and_in_db_one_node():
|
||||
return self.db.query(Cluster).count() == 0 and \
|
||||
@ -176,8 +176,8 @@ class TestHandlers(BaseIntegrationTest):
|
||||
self._wait_for_threads()
|
||||
|
||||
node = self.db.query(Node).first()
|
||||
self.assertEquals(node.status, 'discover')
|
||||
self.assertEquals(node.cluster_id, None)
|
||||
self.assertEqual(node.status, 'discover')
|
||||
self.assertEqual(node.cluster_id, None)
|
||||
|
||||
def test_cluster_deletion_delete_networks(self):
|
||||
cluster = self.env.create_cluster(api=True)
|
||||
@ -228,6 +228,6 @@ class TestHandlers(BaseIntegrationTest):
|
||||
kwargs={'cluster_id': cluster.id}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(get_resp.status_code, 200)
|
||||
self.assertEqual(get_resp.status_code, 200)
|
||||
self.datadiff(jsonutils.loads(get_resp.body),
|
||||
cluster.attributes.generated)
|
||||
|
@ -38,10 +38,10 @@ class TestClusterScaling(BaseIntegrationTest):
|
||||
{'roles': ['controller'], 'pending_addition': True}])
|
||||
|
||||
supertask = self.env.launch_deployment()
|
||||
self.assertEquals(supertask.name, 'deploy')
|
||||
self.assertEqual(supertask.name, 'deploy')
|
||||
|
||||
self.env.wait_ready(supertask)
|
||||
self.assertEquals(supertask.status, 'ready')
|
||||
self.assertEqual(supertask.status, 'ready')
|
||||
|
||||
@fake_tasks()
|
||||
def test_deploy_grow_controllers(self):
|
||||
@ -53,20 +53,20 @@ class TestClusterScaling(BaseIntegrationTest):
|
||||
|
||||
# We have to build 2 new controllers
|
||||
n_nodes = TaskHelper.nodes_to_provision(cluster)
|
||||
self.assertEquals(len(n_nodes), 2)
|
||||
self.assertEqual(len(n_nodes), 2)
|
||||
|
||||
# All controllers must re-deploy (run puppet)
|
||||
r_nodes = TaskHelper.nodes_to_deploy(cluster)
|
||||
self.assertEquals(len(r_nodes), 3)
|
||||
self.assertEqual(len(r_nodes), 3)
|
||||
|
||||
supertask = self.env.launch_deployment()
|
||||
self.assertEquals(supertask.name, 'deploy')
|
||||
self.assertEqual(supertask.name, 'deploy')
|
||||
|
||||
self.env.wait_ready(supertask)
|
||||
self.assertEquals(supertask.status, 'ready')
|
||||
self.assertEqual(supertask.status, 'ready')
|
||||
|
||||
controllers = self.filter_by_role(cluster.nodes, 'controller')
|
||||
self.assertEquals(len(controllers), 3)
|
||||
self.assertEqual(len(controllers), 3)
|
||||
|
||||
@fake_tasks()
|
||||
def test_deploy_shrink_controllers(self):
|
||||
@ -78,13 +78,13 @@ class TestClusterScaling(BaseIntegrationTest):
|
||||
|
||||
# Check that we are deleting 2 controllers
|
||||
d_nodes = TaskHelper.nodes_to_delete(cluster)
|
||||
self.assertEquals(len(d_nodes), 2)
|
||||
self.assertEqual(len(d_nodes), 2)
|
||||
|
||||
supertask = self.env.launch_deployment()
|
||||
self.assertEquals(supertask.name, 'deploy')
|
||||
self.assertEqual(supertask.name, 'deploy')
|
||||
|
||||
self.env.wait_ready(supertask)
|
||||
self.assertEquals(supertask.status, 'ready')
|
||||
self.assertEqual(supertask.status, 'ready')
|
||||
|
||||
controllers = self.filter_by_role(cluster.nodes, 'controller')
|
||||
self.assertEquals(len(controllers), 1)
|
||||
self.assertEqual(len(controllers), 1)
|
||||
|
@ -58,4 +58,4 @@ class TestDBRefresh(TestCase):
|
||||
self.db.query(Node).filter(
|
||||
Node.id == node.id
|
||||
).first()
|
||||
self.assertEquals(node.mac, u"12345678")
|
||||
self.assertEqual(node.mac, u"12345678")
|
||||
|
@ -56,7 +56,7 @@ class TestErrors(BaseIntegrationTest):
|
||||
sum(map(n_error, self.env.nodes)),
|
||||
1
|
||||
)
|
||||
self.assertEquals(supertask.cluster.status, 'error')
|
||||
self.assertEqual(supertask.cluster.status, 'error')
|
||||
|
||||
@fake_tasks(error="provisioning", error_msg="Terrible error")
|
||||
def test_deployment_error_from_orchestrator(self):
|
||||
@ -94,7 +94,7 @@ class TestErrors(BaseIntegrationTest):
|
||||
sum(map(n_error, self.env.nodes)),
|
||||
[1, 2]
|
||||
)
|
||||
self.assertEquals(supertask.cluster.status, 'error')
|
||||
self.assertEqual(supertask.cluster.status, 'error')
|
||||
|
||||
@fake_tasks(error="deployment")
|
||||
def test_deployment_error_during_deployment(self):
|
||||
@ -114,7 +114,7 @@ class TestErrors(BaseIntegrationTest):
|
||||
n_error = lambda n: (n.status, n.error_type) == ('error', 'deploy')
|
||||
|
||||
self.assertEqual(len(map(n_error, self.env.nodes)), 2)
|
||||
self.assertEquals(supertask.cluster.status, 'error')
|
||||
self.assertEqual(supertask.cluster.status, 'error')
|
||||
|
||||
@fake_tasks(error="deployment", task_ready=True)
|
||||
def test_task_ready_node_error(self):
|
||||
|
@ -47,9 +47,9 @@ class TestHorizonURL(BaseIntegrationTest):
|
||||
network=network.id,
|
||||
node=None
|
||||
).all()
|
||||
self.assertEquals(len(lost_ips), 1)
|
||||
self.assertEqual(len(lost_ips), 1)
|
||||
|
||||
self.assertEquals(supertask.message, (
|
||||
self.assertEqual(supertask.message, (
|
||||
u"Deployment of environment '{0}' is done. "
|
||||
"Access the OpenStack dashboard (Horizon) at http://{1}/"
|
||||
).format(
|
||||
|
@ -37,8 +37,8 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
||||
data = jsonutils.loads(response.body)
|
||||
cluster = self.db.query(Cluster).get(self.cluster.id)
|
||||
|
||||
self.assertEquals(data['networking_parameters']['net_manager'],
|
||||
self.cluster.network_config.net_manager)
|
||||
self.assertEqual(data['networking_parameters']['net_manager'],
|
||||
self.cluster.network_config.net_manager)
|
||||
for network_group in cluster.network_groups:
|
||||
network = [i for i in data['networks']
|
||||
if i['id'] == network_group.id][0]
|
||||
@ -51,16 +51,16 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
||||
'id']
|
||||
|
||||
for key in keys:
|
||||
self.assertEquals(network[key], getattr(network_group, key))
|
||||
self.assertEqual(network[key], getattr(network_group, key))
|
||||
|
||||
def test_not_found_cluster(self):
|
||||
resp = self.env.nova_networks_get(self.cluster.id + 999,
|
||||
expect_errors=True)
|
||||
self.assertEquals(404, resp.status_code)
|
||||
self.assertEqual(404, resp.status_code)
|
||||
|
||||
def test_change_net_manager(self):
|
||||
self.assertEquals(self.cluster.network_config.net_manager,
|
||||
'FlatDHCPManager')
|
||||
self.assertEqual(self.cluster.network_config.net_manager,
|
||||
'FlatDHCPManager')
|
||||
|
||||
new_net_manager = {
|
||||
'networking_parameters': {'net_manager': 'VlanManager'}
|
||||
@ -68,7 +68,7 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
||||
self.env.nova_networks_put(self.cluster.id, new_net_manager)
|
||||
|
||||
self.db.refresh(self.cluster)
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
self.cluster.network_config.net_manager,
|
||||
new_net_manager['networking_parameters']['net_manager'])
|
||||
|
||||
@ -84,7 +84,7 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
||||
self.env.nova_networks_put(self.cluster.id, new_dns_nameservers)
|
||||
|
||||
self.db.refresh(self.cluster)
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
self.cluster.network_config.dns_nameservers,
|
||||
new_dns_nameservers['networking_parameters']['dns_nameservers']
|
||||
)
|
||||
@ -99,14 +99,14 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
||||
mgmt['cidr'] = cidr
|
||||
|
||||
resp = self.env.nova_networks_put(self.cluster.id, data)
|
||||
self.assertEquals(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
task = jsonutils.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
|
||||
self.db.refresh(self.cluster)
|
||||
mgmt_ng = [ng for ng in self.cluster.network_groups
|
||||
if ng.name == 'management'][0]
|
||||
self.assertEquals(mgmt_ng.cidr, cidr)
|
||||
self.assertEqual(mgmt_ng.cidr, cidr)
|
||||
|
||||
def test_wrong_net_provider(self):
|
||||
resp = self.app.put(
|
||||
@ -117,8 +117,8 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.assertEquals(resp.status_code, 400)
|
||||
self.assertEquals(
|
||||
self.assertEqual(resp.status_code, 400)
|
||||
self.assertEqual(
|
||||
resp.body,
|
||||
u"Wrong net provider - environment uses 'nova_network'"
|
||||
)
|
||||
@ -132,7 +132,7 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
||||
expect_errors=True)
|
||||
|
||||
self.db.refresh(self.cluster)
|
||||
self.assertNotEquals(
|
||||
self.assertNotEqual(
|
||||
self.cluster.network_config.net_manager,
|
||||
new_net_manager['networking_parameters']['net_manager'])
|
||||
|
||||
@ -146,9 +146,9 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
||||
'vlan_start': new_vlan_id}]}
|
||||
|
||||
resp = self.env.nova_networks_put(self.cluster.id, new_nets)
|
||||
self.assertEquals(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
self.db.refresh(network)
|
||||
self.assertEquals(network.vlan_start, 500)
|
||||
self.assertEqual(network.vlan_start, 500)
|
||||
|
||||
def test_update_networks_and_net_manager(self):
|
||||
network = self.db.query(NetworkGroup).filter(
|
||||
@ -161,10 +161,10 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
||||
|
||||
self.db.refresh(self.cluster)
|
||||
self.db.refresh(network)
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
self.cluster.network_config.net_manager,
|
||||
new_net['networking_parameters']['net_manager'])
|
||||
self.assertEquals(network.vlan_start, new_vlan_id)
|
||||
self.assertEqual(network.vlan_start, new_vlan_id)
|
||||
|
||||
def test_networks_update_fails_with_wrong_net_id(self):
|
||||
new_nets = {'networks': [{'id': 500,
|
||||
@ -172,10 +172,10 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
||||
|
resp = self.env.nova_networks_put(self.cluster.id, new_nets,
expect_errors=True)
self.assertEquals(202, resp.status_code)
self.assertEqual(202, resp.status_code)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'error')
self.assertEquals(
self.assertEqual(task['status'], 'error')
self.assertEqual(
task['message'],
'Invalid network ID: 500'
)
@ -191,7 +191,7 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):

def test_mgmt_storage_networks_have_no_gateway(self):
resp = self.env.nova_networks_get(self.cluster.id)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
data = jsonutils.loads(resp.body)
for net in data['networks']:
if net['name'] in ['management', 'storage']:
@ -217,7 +217,7 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
data = jsonutils.loads(resp.body)
mgmt = filter(lambda n: n['name'] == 'management',
data['networks'])[0]
self.assertEquals(mgmt['gateway'], '192.168.0.1')
self.assertEqual(mgmt['gateway'], '192.168.0.1')
strg = filter(lambda n: n['name'] == 'storage',
data['networks'])[0]
self.assertIsNone(strg['gateway'])
@ -227,7 +227,7 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
mgmt = filter(lambda n: n['name'] == 'management',
net_meta['nova_network']['networks'])[0]
mgmt['gateway'] = '192.168.0.1'
self.assertEquals(mgmt['use_gateway'], False)
self.assertEqual(mgmt['use_gateway'], False)

def get_new_networks_metadata():
return net_meta
@ -260,8 +260,8 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
data = jsonutils.loads(response.body)
cluster = self.db.query(Cluster).get(self.cluster.id)

self.assertEquals(data['networking_parameters']['segmentation_type'],
self.cluster.network_config.segmentation_type)
self.assertEqual(data['networking_parameters']['segmentation_type'],
self.cluster.network_config.segmentation_type)
for network_group in cluster.network_groups:
network = [i for i in data['networks']
if i['id'] == network_group.id][0]
@ -274,7 +274,7 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
'id']

for key in keys:
self.assertEquals(network[key], getattr(network_group, key))
self.assertEqual(network[key], getattr(network_group, key))

def test_get_request_should_return_vips(self):
response = self.env.neutron_networks_get(self.cluster.id)
@ -286,7 +286,7 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
def test_not_found_cluster(self):
resp = self.env.neutron_networks_get(self.cluster.id + 999,
expect_errors=True)
self.assertEquals(404, resp.status_code)
self.assertEqual(404, resp.status_code)

def test_refresh_mask_on_cidr_change(self):
response = self.env.neutron_networks_get(self.cluster.id)
@ -298,14 +298,14 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
mgmt['cidr'] = cidr

resp = self.env.neutron_networks_put(self.cluster.id, data)
self.assertEquals(202, resp.status_code)
self.assertEqual(202, resp.status_code)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'ready')
self.assertEqual(task['status'], 'ready')

self.db.refresh(self.cluster)
mgmt_ng = [ng for ng in self.cluster.network_groups
if ng.name == 'management'][0]
self.assertEquals(mgmt_ng.cidr, cidr)
self.assertEqual(mgmt_ng.cidr, cidr)

def test_do_not_update_net_segmentation_type(self):
resp = self.env.neutron_networks_get(self.cluster.id)
@ -314,10 +314,10 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):

resp = self.env.neutron_networks_put(self.cluster.id, data,
expect_errors=True)
self.assertEquals(202, resp.status_code)
self.assertEqual(202, resp.status_code)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'error')
self.assertEquals(
self.assertEqual(task['status'], 'error')
self.assertEqual(
task['message'],
"Change of 'segmentation_type' is prohibited"
)
@ -331,10 +331,10 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
data['networks'][0]['vlan_start'] = 500 # non-used vlan id

resp = self.env.neutron_networks_put(self.cluster.id, data)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)

self.db.refresh(network)
self.assertEquals(network.vlan_start, 500)
self.assertEqual(network.vlan_start, 500)

def test_update_networks_fails_if_change_net_segmentation_type(self):
resp = self.env.neutron_networks_get(self.cluster.id)
@ -347,10 +347,10 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):

resp = self.env.neutron_networks_put(self.cluster.id, data,
expect_errors=True)
self.assertEquals(202, resp.status_code)
self.assertEqual(202, resp.status_code)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'error')
self.assertEquals(
self.assertEqual(task['status'], 'error')
self.assertEqual(
task['message'],
"Change of 'segmentation_type' is prohibited"
)
@ -362,10 +362,10 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):

resp = self.env.neutron_networks_put(self.cluster.id, new_nets,
expect_errors=True)
self.assertEquals(202, resp.status_code)
self.assertEqual(202, resp.status_code)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'error')
self.assertEquals(
self.assertEqual(task['status'], 'error')
self.assertEqual(
task['message'],
'Invalid network ID: 500'
)
@ -374,7 +374,7 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
data = jsonutils.loads(self.env.neutron_networks_get(
self.cluster.id).body)
publ = filter(lambda ng: ng['name'] == 'public', data['networks'])[0]
self.assertEquals(publ['cidr'], '172.16.0.0/24')
self.assertEqual(publ['cidr'], '172.16.0.0/24')

publ['cidr'] = '199.61.0.0/24'
publ['gateway'] = '199.61.0.1'
@ -384,14 +384,14 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
[['199.61.0.111', '199.61.0.122']]

resp = self.env.neutron_networks_put(self.cluster.id, data)
self.assertEquals(202, resp.status_code)
self.assertEqual(202, resp.status_code)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'ready')
self.assertEqual(task['status'], 'ready')

self.db.refresh(self.cluster)
publ_ng = filter(lambda ng: ng.name == 'public',
self.cluster.network_groups)[0]
self.assertEquals(publ_ng.cidr, '199.61.0.0/24')
self.assertEqual(publ_ng.cidr, '199.61.0.0/24')

def test_admin_public_untagged_others_tagged(self):
resp = self.env.neutron_networks_get(self.cluster.id)
@ -404,7 +404,7 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):

def test_mgmt_storage_networks_have_no_gateway(self):
resp = self.env.neutron_networks_get(self.cluster.id)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
data = jsonutils.loads(resp.body)
for net in data['networks']:
if net['name'] in ['management', 'storage']:
@ -430,7 +430,7 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
data = jsonutils.loads(resp.body)
mgmt = filter(lambda n: n['name'] == 'management',
data['networks'])[0]
self.assertEquals(mgmt['gateway'], '192.168.0.1')
self.assertEqual(mgmt['gateway'], '192.168.0.1')
strg = filter(lambda n: n['name'] == 'storage',
data['networks'])[0]
self.assertIsNone(strg['gateway'])
@ -447,11 +447,11 @@ class TestNovaNetworkConfigurationHandlerHA(BaseIntegrationTest):
resp = jsonutils.loads(
self.env.nova_networks_get(self.cluster.id).body)

self.assertEquals(
self.assertEqual(
resp['management_vip'],
self.net_manager.assign_vip(self.cluster.id, 'management'))

self.assertEquals(
self.assertEqual(
resp['public_vip'],
self.net_manager.assign_vip(self.cluster.id, 'public'))

@ -480,11 +480,11 @@ class TestAdminNetworkConfiguration(BaseIntegrationTest):
nets = jsonutils.loads(resp.body)
resp = self.env.nova_networks_put(self.cluster['id'], nets,
expect_errors=True)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'error')
self.assertEquals(task['progress'], 100)
self.assertEquals(task['name'], 'check_networks')
self.assertEqual(task['status'], 'error')
self.assertEqual(task['progress'], 100)
self.assertEqual(task['name'], 'check_networks')
self.assertIn("Address space intersection between networks:\n"
"admin (PXE), management.",
task['message'])
@ -492,11 +492,11 @@ class TestAdminNetworkConfiguration(BaseIntegrationTest):
def test_deploy_error_when_admin_cidr_match_other_network_cidr(self):
resp = self.env.cluster_changes_put(self.cluster['id'],
expect_errors=True)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'error')
self.assertEquals(task['progress'], 100)
self.assertEquals(task['name'], 'deploy')
self.assertEqual(task['status'], 'error')
self.assertEqual(task['progress'], 100)
self.assertEqual(task['name'], 'deploy')
self.assertIn("Address space intersection between networks:\n"
"admin (PXE), management.",
task['message'])
|
@ -70,8 +70,8 @@ class TestNetworkManager(BaseIntegrationTest):
filter_by(node=node.id).\
filter_by(network=management_net.id).all()

self.assertEquals(1, len(ips))
self.assertEquals(
self.assertEqual(1, len(ips))
self.assertEqual(
True,
self.env.network_manager.check_ip_belongs_to_net(
ips[0].ip_addr,
@ -81,15 +81,15 @@ class TestNetworkManager(BaseIntegrationTest):
assigned_ips.append(ips[0].ip_addr)

# check for uniqueness of IPs:
self.assertEquals(len(assigned_ips), len(list(set(assigned_ips))))
self.assertEqual(len(assigned_ips), len(list(set(assigned_ips))))

# check it doesn't contain broadcast and other special IPs
net_ip = IPNetwork(management_net.cidr)[0]
gateway = management_net.gateway
broadcast = IPNetwork(management_net.cidr)[-1]
self.assertEquals(False, net_ip in assigned_ips)
self.assertEquals(False, gateway in assigned_ips)
self.assertEquals(False, broadcast in assigned_ips)
self.assertEqual(False, net_ip in assigned_ips)
self.assertEqual(False, gateway in assigned_ips)
self.assertEqual(False, broadcast in assigned_ips)

@fake_tasks(fake_rpc=False, mock_rpc=False)
@patch('nailgun.rpc.cast')
@ -118,7 +118,7 @@ class TestNetworkManager(BaseIntegrationTest):

self.db.refresh(node_db)

self.assertEquals(
self.assertEqual(
len(
filter(
lambda n: n['name'] == 'management',
@ -140,7 +140,7 @@ class TestNetworkManager(BaseIntegrationTest):
cluster['id'],
"management"
)
self.assertEquals(vip, vip2)
self.assertEqual(vip, vip2)

def test_get_node_networks_for_vlan_manager(self):
cluster = self.env.create(
@ -153,14 +153,14 @@ class TestNetworkManager(BaseIntegrationTest):
{'networking_parameters': {'net_manager': 'VlanManager'}}
resp = self.env.nova_networks_put(cluster['id'], networks_data)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'ready')
self.assertEqual(task['status'], 'ready')
network_data = self.env.network_manager.get_node_networks(
self.env.nodes[0].id
)

self.assertEquals(len(network_data), 4)
self.assertEqual(len(network_data), 4)
fixed_nets = filter(lambda net: net['name'] == 'fixed', network_data)
self.assertEquals(fixed_nets, [])
self.assertEqual(fixed_nets, [])

def test_ipaddr_joinedload_relations(self):
self.env.create(
@ -241,7 +241,7 @@ class TestNetworkManager(BaseIntegrationTest):
def test_nets_empty_list_if_node_does_not_belong_to_cluster(self):
node = self.env.create_node(api=False)
network_data = self.env.network_manager.get_node_networks(node.id)
self.assertEquals(network_data, [])
self.assertEqual(network_data, [])

def test_assign_admin_ips(self):
node = self.env.create_node()
@ -253,7 +253,7 @@ class TestNetworkManager(BaseIntegrationTest):
admin_ips = self.db.query(IPAddr).\
filter_by(node=node.id).\
filter_by(network=admin_ng_id).all()
self.assertEquals(len(admin_ips), 2)
self.assertEqual(len(admin_ips), 2)
map(
lambda x: self.assertIn(
IPAddress(x.ip_addr),
@ -288,7 +288,7 @@ class TestNetworkManager(BaseIntegrationTest):
n, c = x
l = len(self.db.query(IPAddr).filter_by(network=admin_ng_id).
filter_by(node=n).all())
self.assertEquals(l, c)
self.assertEqual(l, c)
map(asserter, nc)

def test_assign_admin_ips_idempotent(self):
@ -302,7 +302,7 @@ class TestNetworkManager(BaseIntegrationTest):
admin_ips2 = set([i.ip_addr for i in self.db.query(IPAddr).
filter_by(node=node.id).
filter_by(network=admin_net_id).all()])
self.assertEquals(admin_ips, admin_ips2)
self.assertEqual(admin_ips, admin_ips2)

def test_assign_admin_ips_only_one(self):
map(self.db.delete, self.db.query(IPAddrRange).all())
@ -323,8 +323,8 @@ class TestNetworkManager(BaseIntegrationTest):
admin_ips = self.db.query(IPAddr).\
filter_by(node=node.id).\
filter_by(network=admin_net_id).all()
self.assertEquals(len(admin_ips), 1)
self.assertEquals(admin_ips[0].ip_addr, '10.0.0.1')
self.assertEqual(len(admin_ips), 1)
self.assertEqual(admin_ips[0].ip_addr, '10.0.0.1')

@fake_tasks(fake_rpc=False, mock_rpc=False)
@patch('nailgun.rpc.cast')
@ -412,12 +412,12 @@ class TestNovaNetworkManager(BaseIntegrationTest):
def_admin_nic = [n for n in nics if n['id'] == admin_nic_id]
def_other_nic = [n for n in nics if n['id'] == other_nic.id]

self.assertEquals(len(def_admin_nic), 1)
self.assertEquals(len(def_other_nic), 1)
self.assertEquals(
self.assertEqual(len(def_admin_nic), 1)
self.assertEqual(len(def_other_nic), 1)
self.assertEqual(
set(admin_nets),
set([n['name'] for n in def_admin_nic[0]['assigned_networks']]))
self.assertEquals(
self.assertEqual(
set(other_nets),
set([n['name'] for n in def_other_nic[0]['assigned_networks']]))

|
@ -73,10 +73,10 @@ class TestNetworkModels(BaseIntegrationTest):
headers=self.default_headers,
expect_errors=True
)
self.assertEquals(resp_nova_net.status_code, 403)
self.assertEqual(resp_nova_net.status_code, 403)
# it's 400 because we used Nova network
self.assertEquals(resp_neutron_net.status_code, 400)
self.assertEquals(resp_cluster.status_code, 403)
self.assertEqual(resp_neutron_net.status_code, 400)
self.assertEqual(resp_cluster.status_code, 403)

def test_nova_net_networking_parameters(self):
cluster = self.env.create_cluster(api=False)
@ -100,7 +100,7 @@ class TestNetworkModels(BaseIntegrationTest):
serialize_network_params(cluster)

kw.pop("cluster_id")
self.assertEquals(nw_params, kw)
self.assertEqual(nw_params, kw)

def test_neutron_networking_parameters(self):
cluster = self.env.create_cluster(api=False,
@ -127,4 +127,4 @@ class TestNetworkModels(BaseIntegrationTest):
serialize_network_params(cluster)

kw.pop("cluster_id")
self.assertEquals(nw_params, kw)
self.assertEqual(nw_params, kw)
|
@ -53,52 +53,52 @@ class TestNetworkChecking(BaseIntegrationTest):
def set_cluster_changes_w_error(self, cluster_id):
resp = self.env.cluster_changes_put(cluster_id,
expect_errors=True)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'error')
self.assertEquals(task['progress'], 100)
self.assertEquals(task['name'], 'deploy')
self.assertEqual(task['status'], 'error')
self.assertEqual(task['progress'], 100)
self.assertEqual(task['name'], 'deploy')
self.check_result_format(task, cluster_id)
return task

def update_nova_networks_w_error(self, cluster_id, nets):
resp = self.env.nova_networks_put(cluster_id, nets,
expect_errors=True)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'error')
self.assertEquals(task['progress'], 100)
self.assertEquals(task['name'], 'check_networks')
self.assertEqual(task['status'], 'error')
self.assertEqual(task['progress'], 100)
self.assertEqual(task['name'], 'check_networks')
self.check_result_format(task, cluster_id)
return task

def update_nova_networks_success(self, cluster_id, nets):
resp = self.env.nova_networks_put(cluster_id, nets)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'ready')
self.assertEquals(task['progress'], 100)
self.assertEquals(task['name'], 'check_networks')
self.assertEqual(task['status'], 'ready')
self.assertEqual(task['progress'], 100)
self.assertEqual(task['name'], 'check_networks')
return task

def update_neutron_networks_w_error(self, cluster_id, nets):
resp = self.env.neutron_networks_put(cluster_id, nets,
expect_errors=True)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'error')
self.assertEquals(task['progress'], 100)
self.assertEquals(task['name'], 'check_networks')
self.assertEqual(task['status'], 'error')
self.assertEqual(task['progress'], 100)
self.assertEqual(task['name'], 'check_networks')
self.check_result_format(task, cluster_id)
return task

def update_neutron_networks_success(self, cluster_id, nets):
resp = self.env.neutron_networks_put(cluster_id, nets)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'ready')
self.assertEquals(task['progress'], 100)
self.assertEquals(task['name'], 'check_networks')
self.assertEqual(task['status'], 'ready')
self.assertEqual(task['progress'], 100)
self.assertEqual(task['name'], 'check_networks')
return task


@ -128,7 +128,7 @@ class TestNovaHandlers(TestNetworkChecking):
ngs_created = self.db.query(NetworkGroup).filter(
NetworkGroup.name.in_([n['name'] for n in self.nets['networks']])
).all()
self.assertEquals(len(ngs_created), len(self.nets['networks']))
self.assertEqual(len(ngs_created), len(self.nets['networks']))

def test_network_checking_fails_if_admin_intersection(self):
admin_ng = self.env.network_manager.get_admin_network_group()
@ -151,7 +151,7 @@ class TestNovaHandlers(TestNetworkChecking):
[[flt_r0, flt_r1]]

task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
"Address space intersection between floating range '{0}-{1}' and "
"'admin (PXE)' network.".format(flt_r0, flt_r1),
task['message'])
@ -214,7 +214,7 @@ class TestNovaHandlers(TestNetworkChecking):
self.find_net_by_name('public')["cidr"] = '192.18.17.0/24'

task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Address space intersection between ranges of public network."
)
@ -227,7 +227,7 @@ class TestNovaHandlers(TestNetworkChecking):
self.find_net_by_name('public')["cidr"] = '192.18.18.0/24'

task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Public gateway and public ranges are not in one CIDR."
)
@ -240,7 +240,7 @@ class TestNovaHandlers(TestNetworkChecking):
self.find_net_by_name('public')["cidr"] = '192.18.17.0/24'

task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Address intersection between public gateway and IP range of "
"public network."
@ -251,7 +251,7 @@ class TestNovaHandlers(TestNetworkChecking):
self.find_net_by_name('public')["gateway"] = '192.18.17.55'

task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Address intersection between public gateway and IP range of "
"public network."
@ -263,7 +263,7 @@ class TestNovaHandlers(TestNetworkChecking):
['192.18.17.133', '192.18.17.190']]

task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Address space intersection between ranges of floating network."
)
@ -274,7 +274,7 @@ class TestNovaHandlers(TestNetworkChecking):
"10.10.0.0/23"

task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Network amount for 'fixed' is more than 1 "
"while using FlatDHCP manager."
@ -311,7 +311,7 @@ class TestNovaHandlers(TestNetworkChecking):
self.find_net_by_name('public')["vlan_start"] = 5555

task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"VLAN ID(s) is out of range for public network."
)
@ -327,7 +327,7 @@ class TestNovaHandlers(TestNetworkChecking):
self.nets['networking_parameters']['fixed_network_size'] = \
"512"
task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Number of fixed networks (1) doesn't fit into "
"fixed CIDR (10.10.0.0/24) and size of one fixed network (512)."
@ -341,7 +341,7 @@ class TestNovaHandlers(TestNetworkChecking):

self.nets['networking_parameters']['fixed_networks_amount'] = 32
task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Number of fixed networks (32) doesn't fit into "
"fixed CIDR (10.10.0.0/24) and size of one fixed network (32)."
@ -350,7 +350,7 @@ class TestNovaHandlers(TestNetworkChecking):
def test_network_fit_abc_classes_exclude_loopback(self):
self.find_net_by_name('management')['cidr'] = '127.19.216.0/24'
task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"management network address space is inside loopback range "
"(127.0.0.0/8). It must have no intersection with "
@ -359,7 +359,7 @@ class TestNovaHandlers(TestNetworkChecking):

self.find_net_by_name('management')['cidr'] = '227.19.216.0/24'
task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"management network address space does not belong to "
"A, B, C network classes. It must belong to either "
@ -369,7 +369,7 @@ class TestNovaHandlers(TestNetworkChecking):
def test_network_gw_and_ranges_intersect_w_subnet_or_broadcast(self):
self.find_net_by_name('public')['gateway'] = '172.16.0.0'
task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"public network gateway address is equal to either subnet address "
"or broadcast address of the network."
@ -377,7 +377,7 @@ class TestNovaHandlers(TestNetworkChecking):

self.find_net_by_name('public')['gateway'] = '172.16.0.255'
task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"public network gateway address is equal to either subnet address "
"or broadcast address of the network."
@ -387,7 +387,7 @@ class TestNovaHandlers(TestNetworkChecking):
self.find_net_by_name('public')['ip_ranges'] = [['172.16.0.0',
'172.16.0.122']]
task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"public network IP range [172.16.0.0-172.16.0.122] intersect "
"with either subnet address or broadcast address of the network."
@ -396,7 +396,7 @@ class TestNovaHandlers(TestNetworkChecking):
self.find_net_by_name('public')['ip_ranges'] = [['172.16.0.255',
'172.16.0.255']]
task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"public network IP range [172.16.0.255-172.16.0.255] intersect "
"with either subnet address or broadcast address of the network."
@ -436,7 +436,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
ngs_created = self.db.query(NetworkGroup).filter(
NetworkGroup.name.in_([n['name'] for n in self.nets['networks']])
).all()
self.assertEquals(len(ngs_created), len(self.nets['networks']))
self.assertEqual(len(ngs_created), len(self.nets['networks']))

# TODO(adanin) Provide a positive test that it's allowed to move any
# network to the Admin interface.
@ -477,7 +477,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
[['172.16.10.130', '172.16.10.254']]

task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Public gateway and public ranges are not in one CIDR."
)
@ -489,7 +489,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
self.find_net_by_name('public')["gateway"] = '172.16.0.77'

task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Address intersection between public gateway and IP range of "
"public network."
@ -500,7 +500,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
self.find_net_by_name('public')["gateway"] = '172.16.0.55'

task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Address intersection between public gateway and IP range of "
"public network."
@ -511,7 +511,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
self.find_net_by_name('public')['gateway'] = '172.16.10.1'

task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Floating address range 172.16.0.130:172.16.0.254 is not in "
"public address space 172.16.10.0/24."
@ -532,7 +532,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
self.find_net_by_name('public')['gateway'] = '172.16.0.11'

task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Address intersection between public gateway "
"and IP range of public network."
@ -544,7 +544,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
['172.16.0.55', '172.16.0.121']]

task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Address space intersection between ranges "
"of public network."
@ -556,7 +556,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
['172.16.0.55', '172.16.0.222']]

task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Address space intersection between ranges "
"of public and external network."
@ -573,8 +573,8 @@ class TestNeutronHandlersGre(TestNetworkChecking):
self.update_neutron_networks_success(self.cluster.id, self.nets)
resp = self.env.neutron_networks_get(self.cluster.id)
self.nets = jsonutils.loads(resp.body)
self.assertEquals(self.find_net_by_name('public')['cidr'],
'172.16.0.0/25')
self.assertEqual(self.find_net_by_name('public')['cidr'],
'172.16.0.0/25')

def test_network_checking_fails_on_network_vlan_match(self):
self.find_net_by_name('management')['vlan_start'] = '111'
@ -593,7 +593,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
self.nets['networking_parameters']['internal_gateway'] = '172.16.10.1'

task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Internal gateway 172.16.10.1 is not in "
"internal address space 192.168.111.0/24."
@ -604,7 +604,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
self.nets['networking_parameters']['internal_gateway'] = '172.16.0.129'

task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Intersection between internal CIDR and floating range."
)
@ -612,7 +612,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
def test_network_fit_abc_classes_exclude_loopback(self):
self.find_net_by_name('management')['cidr'] = '127.19.216.0/24'
task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"management network address space is inside loopback range "
"(127.0.0.0/8). It must have no intersection with "
@ -621,7 +621,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):

self.find_net_by_name('management')['cidr'] = '227.19.216.0/24'
task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"management network address space does not belong to "
"A, B, C network classes. It must belong to either "
@ -631,7 +631,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
def test_network_gw_and_ranges_intersect_w_subnet_or_broadcast(self):
self.find_net_by_name('public')['gateway'] = '172.16.0.0'
task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"public network gateway address is equal to either subnet address "
"or broadcast address of the network."
@ -639,7 +639,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):

self.find_net_by_name('public')['gateway'] = '172.16.0.255'
task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"public network gateway address is equal to either subnet address "
"or broadcast address of the network."
@ -649,7 +649,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
self.find_net_by_name('public')['ip_ranges'] = [['172.16.0.0',
'172.16.0.122']]
task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"public network IP range [172.16.0.0-172.16.0.122] intersect "
"with either subnet address or broadcast address of the network."
@ -658,7 +658,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
self.find_net_by_name('public')['ip_ranges'] = [['172.16.0.255',
'172.16.0.255']]
task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"public network IP range [172.16.0.255-172.16.0.255] intersect "
"with either subnet address or broadcast address of the network."
@ -669,7 +669,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
self.nets['networking_parameters']['floating_ranges'] = \
[['172.16.0.0', '172.16.0.33']]
task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Neutron L3 external floating range [172.16.0.0-172.16.0.33] "
"intersect with either subnet address or broadcast address "
@ -679,7 +679,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
self.nets['networking_parameters']['floating_ranges'] = \
[['172.16.0.155', '172.16.0.255']]
task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Neutron L3 external floating range [172.16.0.155-172.16.0.255] "
"intersect with either subnet address or broadcast address "
@ -693,7 +693,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
self.nets['networking_parameters']['internal_gateway'] = \
'192.168.111.0'
task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Neutron L3 internal network gateway address is equal to "
"either subnet address or broadcast address of the network."
@ -702,7 +702,7 @@ class TestNeutronHandlersGre(TestNetworkChecking):
self.nets['networking_parameters']['internal_gateway'] = \
'192.168.111.255'
task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"Neutron L3 internal network gateway address is equal to "
"either subnet address or broadcast address of the network."
@ -738,13 +738,13 @@ class TestNeutronHandlersVlan(TestNetworkChecking):
ngs_created = self.db.query(NetworkGroup).filter(
NetworkGroup.name.in_([n['name'] for n in self.nets['networks']])
).all()
self.assertEquals(len(ngs_created), len(self.nets['networks']))
self.assertEqual(len(ngs_created), len(self.nets['networks']))

def test_network_checking_failed_if_networks_tags_in_neutron_range(self):
self.find_net_by_name('storage')['vlan_start'] = 1000

task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertEquals(
self.assertEqual(
task['message'],
"VLAN tags of storage network(s) intersect with "
"VLAN ID range defined for Neutron L2. "
|
@ -28,8 +28,8 @@ class TestHandlers(BaseIntegrationTest):
def test_allocation_stats_unallocated(self):
self.env.create_node(api=False)
stats = self._get_allocation_stats()
self.assertEquals(stats['total'], 1)
self.assertEquals(stats['unallocated'], 1)
self.assertEqual(stats['total'], 1)
self.assertEqual(stats['unallocated'], 1)

def test_allocation_stats_total(self):
self.env.create_node(api=False)
@ -43,5 +43,5 @@ class TestHandlers(BaseIntegrationTest):
)

stats = self._get_allocation_stats()
self.assertEquals(stats['total'], 2)
self.assertEquals(stats['unallocated'], 1)
self.assertEqual(stats['total'], 2)
self.assertEqual(stats['unallocated'], 1)
|
@ -27,9 +27,9 @@ class TestHandlers(BaseIntegrationTest):
reverse('NodeCollectionHandler'),
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals([], response)
self.assertEqual([], response)

def test_notification_node_id(self):
node = self.env.create_node(
@ -60,10 +60,10 @@ class TestHandlers(BaseIntegrationTest):
params={'cluster_id': cluster.id},
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals(1, len(response))
self.assertEquals(
self.assertEqual(1, len(response))
self.assertEqual(
self.env.nodes[1].id,
response[0]['id']
)
@ -82,10 +82,10 @@ class TestHandlers(BaseIntegrationTest):
params={'cluster_id': ''},
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals(1, len(response))
self.assertEquals(self.env.nodes[0].id, response[0]['id'])
self.assertEqual(1, len(response))
self.assertEqual(self.env.nodes[0].id, response[0]['id'])

def test_node_get_without_cluster_specification(self):
self.env.create(
@ -100,9 +100,9 @@ class TestHandlers(BaseIntegrationTest):
reverse('NodeCollectionHandler'),
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals(2, len(response))
self.assertEqual(2, len(response))

def test_node_get_with_cluster_and_assigned_ip_addrs(self):
self.env.create(
@ -123,9 +123,9 @@ class TestHandlers(BaseIntegrationTest):
headers=self.default_headers
)

self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals(2, len(response))
self.assertEqual(2, len(response))

def test_node_creation(self):
resp = self.app.post(
@ -134,9 +134,9 @@ class TestHandlers(BaseIntegrationTest):
'meta': self.env.default_metadata(),
'status': 'discover'}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 201)
self.assertEqual(resp.status_code, 201)
response = jsonutils.loads(resp.body)
self.assertEquals('discover', response['status'])
self.assertEqual('discover', response['status'])

def test_node_update(self):
node = self.env.create_node(api=False)
@ -144,13 +144,13 @@ class TestHandlers(BaseIntegrationTest):
reverse('NodeCollectionHandler'),
jsonutils.dumps([{'mac': node.mac, 'manufacturer': 'new'}]),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeCollectionHandler'),
headers=self.default_headers
)
node = self.db.query(Node).get(node.id)
self.assertEquals('new', node.manufacturer)
self.assertEqual('new', node.manufacturer)

def test_node_update_empty_mac_or_id(self):
node = self.env.create_node(api=False)
@ -160,8 +160,8 @@ class TestHandlers(BaseIntegrationTest):
jsonutils.dumps([{'manufacturer': 'man0'}]),
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status_code, 400)
self.assertEquals(resp.body, "Neither MAC nor ID is specified")
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.body, "Neither MAC nor ID is specified")

resp = self.app.put(
reverse('NodeCollectionHandler'),
@ -169,8 +169,8 @@ class TestHandlers(BaseIntegrationTest):
'manufacturer': 'man1'}]),
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status_code, 400)
self.assertEquals(resp.body, "Neither MAC nor ID is specified")
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.body, "Neither MAC nor ID is specified")

resp = self.app.put(
reverse('NodeCollectionHandler'),
@ -178,8 +178,8 @@ class TestHandlers(BaseIntegrationTest):
'manufacturer': 'man2'}]),
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status_code, 400)
self.assertEquals(resp.body, "Neither MAC nor ID is specified")
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.body, "Neither MAC nor ID is specified")

resp = self.app.put(
reverse('NodeCollectionHandler'),
@ -188,8 +188,8 @@ class TestHandlers(BaseIntegrationTest):
'manufacturer': 'man3'}]),
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status_code, 400)
self.assertEquals(resp.body, "Neither MAC nor ID is specified")
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.body, "Neither MAC nor ID is specified")

resp = self.app.put(
reverse('NodeCollectionHandler'),
@ -198,8 +198,8 @@ class TestHandlers(BaseIntegrationTest):
'manufacturer': 'man4'}]),
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status_code, 400)
self.assertEquals(resp.body, "Null MAC is specified")
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.body, "Null MAC is specified")

resp = self.app.put(
reverse('NodeCollectionHandler'),
@ -208,7 +208,7 @@ class TestHandlers(BaseIntegrationTest):
'manufacturer': 'man5'}]),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

resp = self.app.put(
reverse('NodeCollectionHandler'),
@ -216,14 +216,14 @@ class TestHandlers(BaseIntegrationTest):
'manufacturer': 'man6'}]),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

resp = self.app.put(
reverse('NodeCollectionHandler'),
jsonutils.dumps([{'mac': node.mac,
'manufacturer': 'man7'}]),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

resp = self.app.put(
reverse('NodeCollectionHandler'),
@ -231,7 +231,7 @@ class TestHandlers(BaseIntegrationTest):
'mac': node.mac,
'manufacturer': 'man8'}]),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

def node_update_with_invalid_id(self):
node = self.env.create_node(api=False)
@ -242,8 +242,8 @@ class TestHandlers(BaseIntegrationTest):
'mac': node.mac}]),
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status_code, 400)
self.assertEquals(resp.body, "Invalid ID specified")
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.body, "Invalid ID specified")

def test_node_update_agent_discover(self):
self.env.create_node(
@ -260,14 +260,14 @@ class TestHandlers(BaseIntegrationTest):
),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeCollectionHandler'),
headers=self.default_headers
)
node_db = self.db.query(Node).get(node_db.id)
self.assertEquals('new', node_db.manufacturer)
self.assertEquals('provisioning', node_db.status)
self.assertEqual('new', node_db.manufacturer)
self.assertEqual('provisioning', node_db.status)

def test_node_timestamp_updated_only_by_agent(self):
node = self.env.create_node(api=False)
@ -279,9 +279,9 @@ class TestHandlers(BaseIntegrationTest):
'manufacturer': 'old'}
]),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
node = self.db.query(Node).get(node.id)
self.assertEquals(node.timestamp, timestamp)
self.assertEqual(node.timestamp, timestamp)

resp = self.app.put(
reverse('NodeAgentHandler'),
@ -290,10 +290,10 @@ class TestHandlers(BaseIntegrationTest):
'manufacturer': 'new'}
),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
node = self.db.query(Node).get(node.id)
self.assertNotEquals(node.timestamp, timestamp)
self.assertEquals('new', node.manufacturer)
self.assertNotEqual(node.timestamp, timestamp)
self.assertEqual('new', node.manufacturer)

def test_agent_caching(self):
node = self.env.create_node(api=False)
@ -306,7 +306,7 @@ class TestHandlers(BaseIntegrationTest):
}),
headers=self.default_headers)
response = jsonutils.loads(resp.body)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
self.assertFalse('cached' in response and response['cached'])
resp = self.app.put(
reverse('NodeAgentHandler'),
@ -317,7 +317,7 @@ class TestHandlers(BaseIntegrationTest):
}),
headers=self.default_headers)
response = jsonutils.loads(resp.body)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
self.assertTrue('cached' in response and response['cached'])

def test_agent_updates_node_by_interfaces(self):
@ -333,7 +333,7 @@ class TestHandlers(BaseIntegrationTest):
}),
headers=self.default_headers)

self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

def test_node_create_ip_not_in_admin_range(self):
node = self.env.create_node(api=False)
@ -343,7 +343,7 @@ class TestHandlers(BaseIntegrationTest):
self.env.network_manager.update_interfaces_info(node)

# node.mac == eth0 mac so eth0 should now be admin interface
self.assertEquals(node.admin_interface.name, 'eth0')
self.assertEqual(node.admin_interface.name, 'eth0')

def test_node_create_ext_mac(self):
node1 = self.env.create_node(
@ -360,7 +360,7 @@ class TestHandlers(BaseIntegrationTest):
jsonutils.dumps(node2_json),
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status_code, 409)
self.assertEqual(resp.status_code, 409)

def test_node_create_without_mac(self):
node = self.env.create_node(
@ -369,7 +369,7 @@ class TestHandlers(BaseIntegrationTest):
expect_http=400,
expect_message="No mac address specified"
)
self.assertEquals(node, None)
self.assertEqual(node, None)

def test_node_create_with_invalid_disk_model(self):
meta = self.env.default_metadata()
@ -408,7 +408,7 @@ class TestHandlers(BaseIntegrationTest):
expect_errors=(http_code != 201)
)

self.assertEquals(response.status_code, http_code)
self.assertEqual(response.status_code, http_code)

def test_node_update_ext_mac(self):
meta = self.env.default_metadata()
@ -444,7 +444,7 @@ class TestHandlers(BaseIntegrationTest):
jsonutils.dumps({'mac': node.mac, 'status': 'discover'}),
headers=self.default_headers,
expect_errors=True)
self.assertEquals(409, resp.status_code)
self.assertEqual(409, resp.status_code)

def test_node_creation_fail(self):
resp = self.app.post(
@ -454,7 +454,7 @@ class TestHandlers(BaseIntegrationTest):
'status': 'error'}),
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status_code, 403)
self.assertEqual(resp.status_code, 403)

def test_reset_cluster_name_when_unassign_node(self):
self.env.create(
@ -472,13 +472,13 @@ class TestHandlers(BaseIntegrationTest):
'cluster_id': None,
'pending_roles': []}]),
headers=self.default_headers)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals(1, len(response))
self.assertEquals(node.id, response[0]['id'])
self.assertEquals(node.name, default_name)
self.assertEquals(node.cluster, None)
self.assertEquals(node.pending_role_list, [])
self.assertEqual(1, len(response))
self.assertEqual(node.id, response[0]['id'])
self.assertEqual(node.name, default_name)
self.assertEqual(node.cluster, None)
self.assertEqual(node.pending_role_list, [])

def test_discovered_node_unified_name(self):
node_mac = self.env.generate_random_mac()
|
@ -29,22 +29,22 @@ class TestHandlers(BaseIntegrationTest):
resp = self.app.get(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
headers=self.default_headers)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals(node.id, response['id'])
self.assertEquals(node.name, response['name'])
self.assertEquals(node.mac, response['mac'])
self.assertEquals(
self.assertEqual(node.id, response['id'])
self.assertEqual(node.name, response['name'])
self.assertEqual(node.mac, response['mac'])
self.assertEqual(
node.pending_addition, response['pending_addition'])
self.assertEquals(
self.assertEqual(
node.pending_deletion, response['pending_deletion'])
self.assertEquals(node.status, response['status'])
self.assertEquals(
self.assertEqual(node.status, response['status'])
self.assertEqual(
node.meta['cpu']['total'],
response['meta']['cpu']['total']
)
self.assertEquals(node.meta['disks'], response['meta']['disks'])
self.assertEquals(node.meta['memory'], response['meta']['memory'])
self.assertEqual(node.meta['disks'], response['meta']['disks'])
self.assertEqual(node.meta['memory'], response['meta']['memory'])

def test_node_creation_with_id(self):
node_id = '080000000003'
@ -56,7 +56,7 @@ class TestHandlers(BaseIntegrationTest):
headers=self.default_headers,
expect_errors=True)
# we now just ignore 'id' if present
self.assertEquals(201, resp.status_code)
self.assertEqual(201, resp.status_code)

def test_node_deletion(self):
node = self.env.create_node(api=False)
@ -66,7 +66,7 @@ class TestHandlers(BaseIntegrationTest):
headers=self.default_headers,
expect_errors=True
)
self.assertEquals(resp.status_code, 204)
self.assertEqual(resp.status_code, 204)

def test_node_valid_metadata_gets_updated(self):
new_metadata = self.env.default_metadata()
@ -75,14 +75,14 @@ class TestHandlers(BaseIntegrationTest):
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'meta': new_metadata}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
self.db.refresh(node)

nodes = self.db.query(Node).filter(
Node.id == node.id
).all()
self.assertEquals(len(nodes), 1)
self.assertEquals(nodes[0].meta, new_metadata)
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].meta, new_metadata)

def test_node_valid_status_gets_updated(self):
node = self.env.create_node(api=False)
@ -91,7 +91,7 @@ class TestHandlers(BaseIntegrationTest):
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps(params),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

def test_node_action_flags_are_set(self):
flags = ['pending_addition', 'pending_deletion']
@ -102,14 +102,14 @@ class TestHandlers(BaseIntegrationTest):
jsonutils.dumps({flag: True}),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
self.db.refresh(node)

node_from_db = self.db.query(Node).filter(
Node.id == node.id
).first()
for flag in flags:
self.assertEquals(getattr(node_from_db, flag), True)
self.assertEqual(getattr(node_from_db, flag), True)

def test_put_returns_400_if_no_body(self):
node = self.env.create_node(api=False)
@ -118,7 +118,7 @@ class TestHandlers(BaseIntegrationTest):
"",
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status_code, 400)
self.assertEqual(resp.status_code, 400)

def test_put_returns_400_if_wrong_status(self):
node = self.env.create_node(api=False)
@ -128,7 +128,7 @@ class TestHandlers(BaseIntegrationTest):
jsonutils.dumps(params),
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status_code, 400)
self.assertEqual(resp.status_code, 400)

def test_do_not_create_notification_if_disks_meta_is_empty(self):
def get_notifications_count(**kwargs):
@ -165,7 +165,7 @@ class TestHandlers(BaseIntegrationTest):
jsonutils.dumps(node),
headers=self.default_headers,
)
self.assertEquals(response.status_code, 200)
self.assertEqual(response.status_code, 200)

# check there's not create notification
after_count = get_notifications_count(node_id=node['id'])
|
@ -41,7 +41,7 @@ class TestClusterHandlers(BaseIntegrationTest):
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)

self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

response = jsonutils.loads(resp.body)

@ -68,15 +68,15 @@ class TestClusterHandlers(BaseIntegrationTest):
jsonutils.dumps({'nodes': []}),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
for resp_nic in response:
self.assertEquals(resp_nic['assigned_networks'], [])
self.assertEqual(resp_nic['assigned_networks'], [])

def test_assignment_is_removed_when_delete_cluster(self):
mac = self.env.generate_random_mac()
@ -92,7 +92,7 @@ class TestClusterHandlers(BaseIntegrationTest):
self.db.commit()

net_assignment = self.db.query(NetworkNICAssignment).all()
self.assertEquals(len(net_assignment), 0)
self.assertEqual(len(net_assignment), 0)


class TestNodeHandlers(BaseIntegrationTest):
@ -110,7 +110,7 @@ class TestNodeHandlers(BaseIntegrationTest):
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
for resp_nic in response:
net_names = [net['name'] for net in resp_nic['assigned_networks']]
@ -134,12 +134,12 @@ class TestNodeHandlers(BaseIntegrationTest):
jsonutils.dumps([{'id': node['id'], 'cluster_id': cluster['id']}]),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
for resp_nic in response:
net_names = [net['name'] for net in resp_nic['assigned_networks']]
@ -158,9 +158,9 @@ class TestNodeHandlers(BaseIntegrationTest):
if net['name'] == 'management':
net['vlan_start'] = None
resp = self.env.nova_networks_put(cluster['id'], nets)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'ready')
self.assertEqual(task['status'], 'ready')

mac = self.env.generate_random_mac()
meta = self.env.default_metadata()
@ -175,12 +175,12 @@ class TestNodeHandlers(BaseIntegrationTest):
jsonutils.dumps([{'id': node['id'], 'cluster_id': cluster['id']}]),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
net_name_per_nic = [['fuelweb_admin', 'storage', 'fixed'],
['public'],
@ -195,9 +195,9 @@ class TestNodeHandlers(BaseIntegrationTest):
if net['name'] == 'management':
net['vlan_start'] = 112
resp = self.env.nova_networks_put(cluster['id'], nets)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'ready')
self.assertEqual(task['status'], 'ready')

mac = self.env.generate_random_mac()
meta = self.env.default_metadata()
@ -212,12 +212,12 @@ class TestNodeHandlers(BaseIntegrationTest):
jsonutils.dumps([{'id': node['id'], 'cluster_id': cluster['id']}]),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
net_name_per_nic = [['fuelweb_admin', 'storage', 'fixed',
'public', 'management'],
@ -236,9 +236,9 @@ class TestNodeHandlers(BaseIntegrationTest):
if net['name'] == 'management':
net['vlan_start'] = None
resp = self.env.neutron_networks_put(cluster['id'], nets)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'ready')
self.assertEqual(task['status'], 'ready')

mac = self.env.generate_random_mac()
meta = self.env.default_metadata()
@ -253,12 +253,12 @@ class TestNodeHandlers(BaseIntegrationTest):
jsonutils.dumps([{'id': node['id'], 'cluster_id': cluster['id']}]),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
net_name_per_nic = [['fuelweb_admin', 'storage', 'private'],
['public'],
@ -273,9 +273,9 @@ class TestNodeHandlers(BaseIntegrationTest):
if net['name'] == 'management':
net['vlan_start'] = 112
resp = self.env.neutron_networks_put(cluster['id'], nets)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)
task = jsonutils.loads(resp.body)
self.assertEquals(task['status'], 'ready')
self.assertEqual(task['status'], 'ready')

mac = self.env.generate_random_mac()
meta = self.env.default_metadata()
@ -290,12 +290,12 @@ class TestNodeHandlers(BaseIntegrationTest):
jsonutils.dumps([{'id': node['id'], 'cluster_id': cluster['id']}]),
headers=self.default_headers
||||
)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
|
||||
resp = self.app.get(
|
||||
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
|
||||
headers=self.default_headers)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
response = jsonutils.loads(resp.body)
|
||||
net_name_per_nic = [['fuelweb_admin', 'storage', 'public',
|
||||
'management', 'private'],
|
||||
@ -319,15 +319,15 @@ class TestNodeHandlers(BaseIntegrationTest):
|
||||
jsonutils.dumps([{'id': node['id'], 'cluster_id': None}]),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
|
||||
resp = self.app.get(
|
||||
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
|
||||
headers=self.default_headers)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
response = jsonutils.loads(resp.body)
|
||||
for resp_nic in response:
|
||||
self.assertEquals(resp_nic['assigned_networks'], [])
|
||||
self.assertEqual(resp_nic['assigned_networks'], [])
|
||||
|
||||
def test_getting_default_nic_information_for_node(self):
|
||||
cluster = self.env.create_cluster(api=True)
|
||||
@ -348,7 +348,7 @@ class TestNodeHandlers(BaseIntegrationTest):
|
||||
lambda interface: interface["mac"],
|
||||
jsonutils.loads(resp.body)
|
||||
)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
self.assertItemsEqual(macs, resp_macs)
|
||||
|
||||
def test_try_add_node_with_same_mac(self):
|
||||
@ -424,8 +424,8 @@ class TestNodeNICAdminAssigning(BaseIntegrationTest):
|
||||
self.env.create_node(api=True, meta=meta, mac=mac1,
|
||||
cluster_id=cluster['id'])
|
||||
node_db = self.env.nodes[0]
|
||||
self.assertEquals(node_db.admin_interface.mac, mac2)
|
||||
self.assertEquals(node_db.admin_interface.ip_addr, admin_ip)
|
||||
self.assertEqual(node_db.admin_interface.mac, mac2)
|
||||
self.assertEqual(node_db.admin_interface.ip_addr, admin_ip)
|
||||
|
||||
meta = deepcopy(node_db.meta)
|
||||
for interface in meta['interfaces']:
|
||||
@ -442,11 +442,11 @@ class TestNodeNICAdminAssigning(BaseIntegrationTest):
|
||||
'meta': meta}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
|
||||
self.db.refresh(node_db)
|
||||
self.assertEquals(node_db.admin_interface.mac, mac2)
|
||||
self.assertEquals(node_db.admin_interface.ip_addr, None)
|
||||
self.assertEqual(node_db.admin_interface.mac, mac2)
|
||||
self.assertEqual(node_db.admin_interface.ip_addr, None)
|
||||
|
||||
resp = self.app.put(
|
||||
reverse('NodeCollectionHandler'),
|
||||
@ -454,11 +454,11 @@ class TestNodeNICAdminAssigning(BaseIntegrationTest):
|
||||
'cluster_id': None}]),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
|
||||
self.db.refresh(node_db)
|
||||
self.assertEquals(node_db.admin_interface.mac, mac1)
|
||||
self.assertEquals(node_db.admin_interface.ip_addr, admin_ip)
|
||||
self.assertEqual(node_db.admin_interface.mac, mac1)
|
||||
self.assertEqual(node_db.admin_interface.ip_addr, admin_ip)
|
||||
|
||||
|
||||
class TestNodePublicNetworkToNICAssignment(BaseIntegrationTest):
|
||||
@ -480,7 +480,7 @@ class TestNodePublicNetworkToNICAssignment(BaseIntegrationTest):
|
||||
resp = self.app.get(
|
||||
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
|
||||
headers=self.default_headers)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
data = jsonutils.loads(resp.body)
|
||||
eth1 = [nic for nic in data if nic['name'] == 'eth1']
|
||||
self.assertEqual(len(eth1), 1)
|
||||
@ -523,7 +523,7 @@ class TestNodeNICsHandlersValidation(BaseIntegrationTest):
|
||||
reverse("NodeNICsHandler",
|
||||
kwargs={"node_id": self.env.nodes[0]["id"]}),
|
||||
headers=self.default_headers)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
self.data = jsonutils.loads(resp.body)
|
||||
self.nics_w_nets = filter(lambda nic: nic.get("assigned_networks"),
|
||||
self.data)
|
||||
@ -542,8 +542,8 @@ class TestNodeNICsHandlersValidation(BaseIntegrationTest):
|
||||
def node_nics_put_check_error(self, message):
|
||||
for put_func in (self.put_single, self.put_collection):
|
||||
resp = put_func()
|
||||
self.assertEquals(resp.status_code, 400)
|
||||
self.assertEquals(resp.body, message)
|
||||
self.assertEqual(resp.status_code, 400)
|
||||
self.assertEqual(resp.body, message)
|
||||
|
||||
def test_assignment_change_failed_assigned_network_wo_id(self):
|
||||
self.nics_w_nets[0]["assigned_networks"] = [{}]
|
||||
|
@ -34,7 +34,7 @@ class TestHandlers(BaseIntegrationTest):
|
||||
resp = self.app.get(
|
||||
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
|
||||
headers=self.default_headers)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
response = jsonutils.loads(resp.body)
|
||||
a_nets = filter(lambda nic: nic['mac'] == mac,
|
||||
response)[0]['assigned_networks']
|
||||
@ -50,6 +50,6 @@ class TestHandlers(BaseIntegrationTest):
|
||||
reverse('NodeCollectionNICsHandler'),
|
||||
jsonutils.dumps(nodes_list),
|
||||
headers=self.default_headers)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
new_response = jsonutils.loads(resp.body)
|
||||
self.assertEquals(new_response, nodes_list)
|
||||
self.assertEqual(new_response, nodes_list)
|
||||
|
@ -48,7 +48,7 @@ class TestNodeNICsBonding(BaseIntegrationTest):
|
||||
reverse("NodeNICsHandler",
|
||||
kwargs={"node_id": self.env.nodes[0]["id"]}),
|
||||
headers=self.default_headers)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
self.data = jsonutils.loads(resp.body)
|
||||
self.admin_nic, self.other_nic, self.empty_nic = None, None, None
|
||||
for nic in self.data:
|
||||
@ -74,8 +74,8 @@ class TestNodeNICsBonding(BaseIntegrationTest):
|
||||
def node_nics_put_check_error(self, message):
|
||||
for put_func in (self.put_single, self.put_collection):
|
||||
resp = put_func()
|
||||
self.assertEquals(resp.status_code, 400)
|
||||
self.assertEquals(resp.body, message)
|
||||
self.assertEqual(resp.status_code, 400)
|
||||
self.assertEqual(resp.body, message)
|
||||
|
||||
def nics_bond_create(self, put_func):
|
||||
self.data.append({
|
||||
@ -90,20 +90,20 @@ class TestNodeNICsBonding(BaseIntegrationTest):
|
||||
self.other_nic["assigned_networks"] = []
|
||||
|
||||
resp = put_func()
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
|
||||
resp = self.env.node_nics_get(self.env.nodes[0]["id"])
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
data = jsonutils.loads(resp.body)
|
||||
bonds = filter(
|
||||
lambda iface: iface["type"] == NETWORK_INTERFACE_TYPES.bond,
|
||||
data)
|
||||
self.assertEquals(len(bonds), 1)
|
||||
self.assertEqual(len(bonds), 1)
|
||||
self.assertEqual(bonds[0]["name"], 'ovs-bond0')
|
||||
|
||||
def nics_bond_remove(self, put_func):
|
||||
resp = self.env.node_nics_get(self.env.nodes[0]["id"])
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
self.data = jsonutils.loads(resp.body)
|
||||
for nic in self.data:
|
||||
if nic["type"] == NETWORK_INTERFACE_TYPES.bond:
|
||||
@ -120,7 +120,7 @@ class TestNodeNICsBonding(BaseIntegrationTest):
|
||||
self.data.remove(bond)
|
||||
|
||||
resp = put_func()
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
|
||||
def test_nics_bond_delete(self):
|
||||
for put_func in (self.put_single, self.put_collection):
|
||||
@ -129,7 +129,7 @@ class TestNodeNICsBonding(BaseIntegrationTest):
|
||||
self.nics_bond_remove(put_func)
|
||||
|
||||
resp = self.env.node_nics_get(self.env.nodes[0]["id"])
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
data = jsonutils.loads(resp.body)
|
||||
for nic in data:
|
||||
self.assertNotEqual(nic["type"], NETWORK_INTERFACE_TYPES.bond)
|
||||
|
@ -95,7 +95,7 @@ class TestNotification(BaseIntegrationTest):
|
||||
'meta': self.env.default_metadata(),
|
||||
'status': 'discover'}),
|
||||
headers=self.default_headers)
|
||||
self.assertEquals(resp.status_code, 201)
|
||||
self.assertEqual(resp.status_code, 201)
|
||||
|
||||
notifications = self.db.query(Notification).all()
|
||||
self.assertEqual(len(notifications), 1)
|
||||
|
@ -44,8 +44,8 @@ class TestOrchestratorInfoHandlers(BaseIntegrationTest):
|
||||
jsonutils.dumps(orchestrator_data),
|
||||
headers=self.default_headers)
|
||||
|
||||
self.assertEquals(put_resp.status_code, 200)
|
||||
self.assertEquals(get_info(), orchestrator_data)
|
||||
self.assertEqual(put_resp.status_code, 200)
|
||||
self.assertEqual(get_info(), orchestrator_data)
|
||||
|
||||
# getting provisioning info
|
||||
get_resp = self.app.get(
|
||||
@ -53,7 +53,7 @@ class TestOrchestratorInfoHandlers(BaseIntegrationTest):
|
||||
kwargs={'cluster_id': self.cluster.id}),
|
||||
headers=self.default_headers)
|
||||
|
||||
self.assertEquals(get_resp.status_code, 200)
|
||||
self.assertEqual(get_resp.status_code, 200)
|
||||
self.datadiff(orchestrator_data, jsonutils.loads(get_resp.body))
|
||||
|
||||
# deleting provisioning info
|
||||
@ -62,7 +62,7 @@ class TestOrchestratorInfoHandlers(BaseIntegrationTest):
|
||||
kwargs={'cluster_id': self.cluster.id}),
|
||||
headers=self.default_headers)
|
||||
|
||||
self.assertEquals(delete_resp.status_code, 202)
|
||||
self.assertEqual(delete_resp.status_code, 202)
|
||||
self.assertEqual(get_info(), {})
|
||||
|
||||
def test_cluster_provisioning_info(self):
|
||||
|
@ -46,7 +46,7 @@ class OrchestratorSerializerTestBase(BaseIntegrationTest):
|
||||
return filter(lambda node: node['uid'] == uid, nodes)
|
||||
|
||||
def assert_nodes_with_role(self, nodes, role, count):
|
||||
self.assertEquals(len(self.filter_by_role(nodes, role)), count)
|
||||
self.assertEqual(len(self.filter_by_role(nodes, role)), count)
|
||||
|
||||
def get_controllers(self, cluster_id):
|
||||
return self.db.query(Node).\
|
||||
@ -90,7 +90,7 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
return cluster_db
|
||||
|
||||
def assert_roles_flattened(self, nodes):
|
||||
self.assertEquals(len(nodes), 7)
|
||||
self.assertEqual(len(nodes), 7)
|
||||
self.assert_nodes_with_role(nodes, 'controller', 1)
|
||||
self.assert_nodes_with_role(nodes, 'compute', 2)
|
||||
self.assert_nodes_with_role(nodes, 'cinder', 3)
|
||||
@ -107,7 +107,7 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
|
||||
expected_node = self.serializer.serialize_node(
|
||||
node_db, serialized_node['role'])
|
||||
self.assertEquals(serialized_node, expected_node)
|
||||
self.assertEqual(serialized_node, expected_node)
|
||||
|
||||
def test_serialize_node(self):
|
||||
node = self.env.create_node(
|
||||
@ -117,13 +117,13 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
node_db = self.db.query(Node).get(node['id'])
|
||||
serialized_data = self.serializer.serialize_node(node_db, 'controller')
|
||||
|
||||
self.assertEquals(serialized_data['role'], 'controller')
|
||||
self.assertEquals(serialized_data['uid'], str(node_db.id))
|
||||
self.assertEquals(serialized_data['status'], node_db.status)
|
||||
self.assertEquals(serialized_data['online'], node_db.online)
|
||||
self.assertEquals(serialized_data['fqdn'],
|
||||
'node-%d.%s' % (node_db.id, settings.DNS_DOMAIN))
|
||||
self.assertEquals(
|
||||
self.assertEqual(serialized_data['role'], 'controller')
|
||||
self.assertEqual(serialized_data['uid'], str(node_db.id))
|
||||
self.assertEqual(serialized_data['status'], node_db.status)
|
||||
self.assertEqual(serialized_data['online'], node_db.online)
|
||||
self.assertEqual(serialized_data['fqdn'],
|
||||
'node-%d.%s' % (node_db.id, settings.DNS_DOMAIN))
|
||||
self.assertEqual(
|
||||
serialized_data['glance'],
|
||||
{'image_cache_max_size': manager.calc_glance_cache_size(
|
||||
node_db.attributes.volumes)})
|
||||
@ -137,13 +137,13 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
# Check common attrs
|
||||
for node in node_list:
|
||||
node_db = self.db.query(Node).get(int(node['uid']))
|
||||
self.assertEquals(node['public_netmask'], '255.255.255.0')
|
||||
self.assertEquals(node['internal_netmask'], '255.255.255.0')
|
||||
self.assertEquals(node['storage_netmask'], '255.255.255.0')
|
||||
self.assertEquals(node['uid'], str(node_db.id))
|
||||
self.assertEquals(node['name'], 'node-%d' % node_db.id)
|
||||
self.assertEquals(node['fqdn'], 'node-%d.%s' %
|
||||
(node_db.id, settings.DNS_DOMAIN))
|
||||
self.assertEqual(node['public_netmask'], '255.255.255.0')
|
||||
self.assertEqual(node['internal_netmask'], '255.255.255.0')
|
||||
self.assertEqual(node['storage_netmask'], '255.255.255.0')
|
||||
self.assertEqual(node['uid'], str(node_db.id))
|
||||
self.assertEqual(node['name'], 'node-%d' % node_db.id)
|
||||
self.assertEqual(node['fqdn'], 'node-%d.%s' %
|
||||
(node_db.id, settings.DNS_DOMAIN))
|
||||
|
||||
# Check uncommon attrs
|
||||
node_uids = sorted(set([n['uid'] for n in node_list]))
|
||||
@ -169,12 +169,12 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
nodes = self.filter_by_role(node_list, role)
|
||||
node = self.filter_by_uid(nodes, attrs['uid'])[0]
|
||||
|
||||
self.assertEquals(attrs['internal_address'],
|
||||
node['internal_address'])
|
||||
self.assertEquals(attrs['public_address'],
|
||||
node['public_address'])
|
||||
self.assertEquals(attrs['storage_address'],
|
||||
node['storage_address'])
|
||||
self.assertEqual(attrs['internal_address'],
|
||||
node['internal_address'])
|
||||
self.assertEqual(attrs['public_address'],
|
||||
node['public_address'])
|
||||
self.assertEqual(attrs['storage_address'],
|
||||
node['storage_address'])
|
||||
|
||||
def test_vlan_manager(self):
|
||||
cluster = self.create_env('ha_compact')
|
||||
@ -187,16 +187,16 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
facts = self.serializer.serialize(cluster, cluster.nodes)
|
||||
|
||||
for fact in facts:
|
||||
self.assertEquals(fact['vlan_interface'], 'eth0')
|
||||
self.assertEquals(fact['fixed_interface'], 'eth0')
|
||||
self.assertEquals(
|
||||
self.assertEqual(fact['vlan_interface'], 'eth0')
|
||||
self.assertEqual(fact['fixed_interface'], 'eth0')
|
||||
self.assertEqual(
|
||||
fact['novanetwork_parameters']['network_manager'],
|
||||
'VlanManager')
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
fact['novanetwork_parameters']['num_networks'], 1)
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
fact['novanetwork_parameters']['vlan_start'], 103)
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
fact['novanetwork_parameters']['network_size'], 256)
|
||||
|
||||
def test_floating_ranges_generation(self):
|
||||
@ -210,7 +210,7 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
|
||||
facts = self.serializer.serialize(self.cluster, self.cluster.nodes)
|
||||
for fact in facts:
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
fact['floating_network_range'],
|
||||
['172.16.0.2-172.16.0.4',
|
||||
'172.16.0.3-172.16.0.5',
|
||||
@ -265,7 +265,7 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
{'role': 'ceph-osd', 'priority': 500},
|
||||
{'role': 'other', 'priority': 500}
|
||||
]
|
||||
self.assertEquals(expected_priorities, nodes)
|
||||
self.assertEqual(expected_priorities, nodes)
|
||||
|
||||
|
||||
class TestNovaOrchestratorHASerializer(OrchestratorSerializerTestBase):
|
||||
@ -322,7 +322,7 @@ class TestNovaOrchestratorHASerializer(OrchestratorSerializerTestBase):
|
||||
{'role': 'ceph-osd', 'priority': 800},
|
||||
{'role': 'other', 'priority': 800}
|
||||
]
|
||||
self.assertEquals(expected_priorities, nodes)
|
||||
self.assertEqual(expected_priorities, nodes)
|
||||
|
||||
def test_set_primary_controller_priority_not_depend_on_nodes_order(self):
|
||||
controllers = filter(lambda n: 'controller' in n.roles, self.env.nodes)
|
||||
@ -335,8 +335,8 @@ class TestNovaOrchestratorHASerializer(OrchestratorSerializerTestBase):
|
||||
self.cluster, reverse_sorted_controllers)
|
||||
|
||||
high_priority = sorted(result_nodes, key=itemgetter('priority'))[0]
|
||||
self.assertEquals(high_priority['role'], 'primary-controller')
|
||||
self.assertEquals(
|
||||
self.assertEqual(high_priority['role'], 'primary-controller')
|
||||
self.assertEqual(
|
||||
int(high_priority['uid']),
|
||||
expected_primary_controller.id)
|
||||
|
||||
@ -345,29 +345,29 @@ class TestNovaOrchestratorHASerializer(OrchestratorSerializerTestBase):
|
||||
|
||||
for node in serialized_nodes:
|
||||
# Each node has swift_zone
|
||||
self.assertEquals(node['swift_zone'], node['uid'])
|
||||
self.assertEqual(node['swift_zone'], node['uid'])
|
||||
|
||||
def test_get_common_attrs(self):
|
||||
attrs = self.serializer.get_common_attrs(self.cluster)
|
||||
# vips
|
||||
self.assertEquals(attrs['management_vip'], '192.168.0.8')
|
||||
self.assertEquals(attrs['public_vip'], '172.16.0.9')
|
||||
self.assertEqual(attrs['management_vip'], '192.168.0.8')
|
||||
self.assertEqual(attrs['public_vip'], '172.16.0.9')
|
||||
|
||||
# last_contrller
|
||||
controllers = self.get_controllers(self.cluster.id)
|
||||
self.assertEquals(attrs['last_controller'],
|
||||
'node-%d' % controllers[-1].id)
|
||||
self.assertEqual(attrs['last_controller'],
|
||||
'node-%d' % controllers[-1].id)
|
||||
|
||||
# primary_controller
|
||||
controllers = self.filter_by_role(attrs['nodes'], 'primary-controller')
|
||||
self.assertEquals(controllers[0]['role'], 'primary-controller')
|
||||
self.assertEqual(controllers[0]['role'], 'primary-controller')
|
||||
|
||||
# primary_mongo
|
||||
mongo_nodes = self.filter_by_role(attrs['nodes'], 'primary-mongo')
|
||||
self.assertEquals(mongo_nodes[-1]['role'], 'primary-mongo')
|
||||
self.assertEqual(mongo_nodes[-1]['role'], 'primary-mongo')
|
||||
|
||||
# mountpoints and mp attrs
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
attrs['mp'],
|
||||
[{'point': '1', 'weight': '1'},
|
||||
{'point': '2', 'weight': '2'}])
|
||||
@ -399,7 +399,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
return cluster_db
|
||||
|
||||
def assert_roles_flattened(self, nodes):
|
||||
self.assertEquals(len(nodes), 6)
|
||||
self.assertEqual(len(nodes), 6)
|
||||
self.assert_nodes_with_role(nodes, 'controller', 1)
|
||||
self.assert_nodes_with_role(nodes, 'compute', 2)
|
||||
self.assert_nodes_with_role(nodes, 'cinder', 3)
|
||||
@ -415,7 +415,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
|
||||
expected_node = self.serializer.serialize_node(
|
||||
node_db, serialized_node['role'])
|
||||
self.assertEquals(serialized_node, expected_node)
|
||||
self.assertEqual(serialized_node, expected_node)
|
||||
|
||||
def test_serialize_node(self):
|
||||
node = self.env.create_node(
|
||||
@ -425,12 +425,12 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
node_db = self.db.query(Node).get(node['id'])
|
||||
serialized_data = self.serializer.serialize_node(node_db, 'controller')
|
||||
|
||||
self.assertEquals(serialized_data['role'], 'controller')
|
||||
self.assertEquals(serialized_data['uid'], str(node_db.id))
|
||||
self.assertEquals(serialized_data['status'], node_db.status)
|
||||
self.assertEquals(serialized_data['online'], node_db.online)
|
||||
self.assertEquals(serialized_data['fqdn'],
|
||||
'node-%d.%s' % (node_db.id, settings.DNS_DOMAIN))
|
||||
self.assertEqual(serialized_data['role'], 'controller')
|
||||
self.assertEqual(serialized_data['uid'], str(node_db.id))
|
||||
self.assertEqual(serialized_data['status'], node_db.status)
|
||||
self.assertEqual(serialized_data['online'], node_db.online)
|
||||
self.assertEqual(serialized_data['fqdn'],
|
||||
'node-%d.%s' % (node_db.id, settings.DNS_DOMAIN))
|
||||
|
||||
def test_node_list(self):
|
||||
node_list = self.serializer.get_common_attrs(self.cluster)['nodes']
|
||||
@ -441,13 +441,13 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
# Check common attrs
|
||||
for node in node_list:
|
||||
node_db = self.db.query(Node).get(int(node['uid']))
|
||||
self.assertEquals(node['public_netmask'], '255.255.255.0')
|
||||
self.assertEquals(node['internal_netmask'], '255.255.255.0')
|
||||
self.assertEquals(node['storage_netmask'], '255.255.255.0')
|
||||
self.assertEquals(node['uid'], str(node_db.id))
|
||||
self.assertEquals(node['name'], 'node-%d' % node_db.id)
|
||||
self.assertEquals(node['fqdn'], 'node-%d.%s' %
|
||||
(node_db.id, settings.DNS_DOMAIN))
|
||||
self.assertEqual(node['public_netmask'], '255.255.255.0')
|
||||
self.assertEqual(node['internal_netmask'], '255.255.255.0')
|
||||
self.assertEqual(node['storage_netmask'], '255.255.255.0')
|
||||
self.assertEqual(node['uid'], str(node_db.id))
|
||||
self.assertEqual(node['name'], 'node-%d' % node_db.id)
|
||||
self.assertEqual(node['fqdn'], 'node-%d.%s' %
|
||||
(node_db.id, settings.DNS_DOMAIN))
|
||||
|
||||
# Check uncommon attrs
|
||||
node_uids = sorted(set([n['uid'] for n in node_list]))
|
||||
@ -472,12 +472,12 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
nodes = self.filter_by_role(node_list, role)
|
||||
node = self.filter_by_uid(nodes, attrs['uid'])[0]
|
||||
|
||||
self.assertEquals(attrs['internal_address'],
|
||||
node['internal_address'])
|
||||
self.assertEquals(attrs['public_address'],
|
||||
node['public_address'])
|
||||
self.assertEquals(attrs['storage_address'],
|
||||
node['storage_address'])
|
||||
self.assertEqual(attrs['internal_address'],
|
||||
node['internal_address'])
|
||||
self.assertEqual(attrs['public_address'],
|
||||
node['public_address'])
|
||||
self.assertEqual(attrs['storage_address'],
|
||||
node['storage_address'])
|
||||
|
||||
def test_neutron_l3_gateway(self):
|
||||
cluster = self.create_env('ha_compact', 'gre')
|
||||
@ -494,7 +494,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
facts = self.serializer.serialize(cluster, cluster.nodes)
|
||||
|
||||
pd_nets = facts[0]["quantum_settings"]["predefined_networks"]
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
pd_nets["net04_ext"]["L3"]["gateway"],
|
||||
test_gateway
|
||||
)
|
||||
@ -504,11 +504,11 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
facts = self.serializer.serialize(cluster, cluster.nodes)
|
||||
|
||||
for fact in facts:
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
fact['quantum_settings']['L2']['segmentation_type'], 'gre')
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
'br-prv' in fact['network_scheme']['endpoints'], False)
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
'private' in (fact['network_scheme']['roles']), False)
|
||||
|
||||
def _create_cluster_for_vlan_splinters(self, segment_type='gre'):
|
||||
@ -577,8 +577,8 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
|
||||
cluster = self.db.query(Cluster).get(cluster_id)
|
||||
editable_attrs = cluster.attributes.editable
|
||||
self.assertEquals(editable_attrs['vlan_splinters']['vswitch']['value'],
|
||||
'some_text')
|
||||
self.assertEqual(editable_attrs['vlan_splinters']['vswitch']['value'],
|
||||
'some_text')
|
||||
|
||||
node = self.serializer.serialize(cluster, cluster.nodes)[0]
|
||||
interfaces = node['network_scheme']['interfaces']
|
||||
@ -596,7 +596,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
|
||||
cluster = self.db.query(Cluster).get(cluster_id)
|
||||
editable_attrs = cluster.attributes.editable
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
editable_attrs['vlan_splinters']['metadata']['enabled'],
|
||||
False
|
||||
)
|
||||
@ -607,7 +607,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
self.assertIn('L2', iface_attrs)
|
||||
L2_attrs = iface_attrs['L2']
|
||||
self.assertIn('vlan_splinters', L2_attrs)
|
||||
self.assertEquals(L2_attrs['vlan_splinters'], 'off')
|
||||
self.assertEqual(L2_attrs['vlan_splinters'], 'off')
|
||||
self.assertNotIn('trunks', L2_attrs)
|
||||
|
||||
def test_kernel_lt_vlan_splinters(self):
|
||||
@ -623,8 +623,8 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
|
||||
cluster = self.db.query(Cluster).get(cluster_id)
|
||||
editable_attrs = cluster.attributes.editable
|
||||
self.assertEquals(editable_attrs['vlan_splinters']['vswitch']['value'],
|
||||
'kernel_lt')
|
||||
self.assertEqual(editable_attrs['vlan_splinters']['vswitch']['value'],
|
||||
'kernel_lt')
|
||||
|
||||
node = self.serializer.serialize(cluster, cluster.nodes)[0]
|
||||
interfaces = node['network_scheme']['interfaces']
|
||||
@ -632,7 +632,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
self.assertIn('L2', iface_attrs)
|
||||
L2_attrs = iface_attrs['L2']
|
||||
self.assertIn('vlan_splinters', L2_attrs)
|
||||
self.assertEquals(L2_attrs['vlan_splinters'], 'off')
|
||||
self.assertEqual(L2_attrs['vlan_splinters'], 'off')
|
||||
self.assertNotIn('trunks', L2_attrs)
|
||||
|
||||
def test_hard_vlan_splinters_in_gre(self):
|
||||
@ -653,14 +653,14 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
self.assertIn('L2', iface_attrs)
|
||||
L2_attrs = iface_attrs['L2']
|
||||
self.assertIn('vlan_splinters', L2_attrs)
|
||||
self.assertEquals(L2_attrs['vlan_splinters'], 'auto')
|
||||
self.assertEqual(L2_attrs['vlan_splinters'], 'auto')
|
||||
self.assertIn('trunks', L2_attrs)
|
||||
self.assertIn(0, L2_attrs['trunks'])
|
||||
map(
|
||||
lambda n: vlan_set.remove(n) if n else None,
|
||||
L2_attrs['trunks']
|
||||
)
|
||||
self.assertEquals(len(vlan_set), 0)
|
||||
self.assertEqual(len(vlan_set), 0)
|
||||
|
||||
def test_hard_vlan_splinters_in_vlan(self):
|
||||
cluster = self._create_cluster_for_vlan_splinters('vlan')
|
||||
@ -684,14 +684,14 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
self.assertIn('L2', iface_attrs)
|
||||
L2_attrs = iface_attrs['L2']
|
||||
self.assertIn('vlan_splinters', L2_attrs)
|
||||
self.assertEquals(L2_attrs['vlan_splinters'], 'auto')
|
||||
self.assertEqual(L2_attrs['vlan_splinters'], 'auto')
|
||||
self.assertIn('trunks', L2_attrs)
|
||||
self.assertIn(0, L2_attrs['trunks'])
|
||||
map(
|
||||
lambda n: vlan_set.remove(n) if n else None,
|
||||
L2_attrs['trunks']
|
||||
)
|
||||
self.assertEquals(len(vlan_set), 0)
|
||||
self.assertEqual(len(vlan_set), 0)
|
||||
|
||||
def test_soft_vlan_splinters_in_vlan(self):
|
||||
cluster = self._create_cluster_for_vlan_splinters('vlan')
|
||||
@ -708,9 +708,9 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
self.assertIn('L2', iface_attrs)
|
||||
L2_attrs = iface_attrs['L2']
|
||||
self.assertIn('vlan_splinters', L2_attrs)
|
||||
self.assertEquals(L2_attrs['vlan_splinters'], 'auto')
|
||||
self.assertEqual(L2_attrs['vlan_splinters'], 'auto')
|
||||
self.assertIn('trunks', L2_attrs)
|
||||
self.assertEquals(L2_attrs['trunks'], [0])
|
||||
self.assertEqual(L2_attrs['trunks'], [0])
|
||||
|
||||
|
||||
class TestNeutronOrchestratorHASerializer(OrchestratorSerializerTestBase):
|
||||
@ -749,25 +749,25 @@ class TestNeutronOrchestratorHASerializer(OrchestratorSerializerTestBase):
|
||||
|
||||
for node in serialized_nodes:
|
||||
# Each node has swift_zone
|
||||
self.assertEquals(node['swift_zone'], node['uid'])
|
||||
self.assertEqual(node['swift_zone'], node['uid'])
|
||||
|
||||
def test_get_common_attrs(self):
|
||||
attrs = self.serializer.get_common_attrs(self.cluster)
|
||||
# vips
|
||||
self.assertEquals(attrs['management_vip'], '192.168.0.7')
|
||||
self.assertEquals(attrs['public_vip'], '172.16.0.8')
|
||||
self.assertEqual(attrs['management_vip'], '192.168.0.7')
|
||||
self.assertEqual(attrs['public_vip'], '172.16.0.8')
|
||||
|
||||
# last_contrller
|
||||
controllers = self.get_controllers(self.cluster.id)
|
||||
self.assertEquals(attrs['last_controller'],
|
||||
'node-%d' % controllers[-1].id)
|
||||
self.assertEqual(attrs['last_controller'],
|
||||
'node-%d' % controllers[-1].id)
|
||||
|
||||
# primary_controller
|
||||
controllers = self.filter_by_role(attrs['nodes'], 'primary-controller')
|
||||
self.assertEquals(controllers[0]['role'], 'primary-controller')
|
||||
self.assertEqual(controllers[0]['role'], 'primary-controller')
|
||||
|
||||
# mountpoints and mp attrs
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
attrs['mp'],
|
||||
[{'point': '1', 'weight': '1'},
|
||||
{'point': '2', 'weight': '2'}])
|
||||
@ -934,7 +934,7 @@ class TestMongoNodesSerialization(OrchestratorSerializerTestBase):
|
||||
cluster = self.create_env()
|
||||
ha_nodes = DeploymentHASerializer.serialize_nodes(cluster.nodes)
|
||||
mn_nodes = DeploymentMultinodeSerializer.serialize_nodes(cluster.nodes)
|
||||
self.assertEquals(mn_nodes, ha_nodes)
|
||||
self.assertEqual(mn_nodes, ha_nodes)
|
||||
|
||||
def test_primary_node_selected(self):
|
||||
cluster = self.create_env()
|
||||
@ -944,8 +944,8 @@ class TestMongoNodesSerialization(OrchestratorSerializerTestBase):
|
||||
def primary_nodes_count(nodes):
|
||||
return len(filter(lambda x: x['role'] == 'primary-mongo', nodes))
|
||||
|
||||
self.assertEquals(1, primary_nodes_count(ha_nodes))
|
||||
self.assertEquals(1, primary_nodes_count(mn_nodes))
|
||||
self.assertEqual(1, primary_nodes_count(ha_nodes))
|
||||
self.assertEqual(1, primary_nodes_count(mn_nodes))
|
||||
|
||||
|
||||
class TestRepoAndPuppetDataSerialization(OrchestratorSerializerTestBase):
|
||||
@ -973,7 +973,7 @@ class TestRepoAndPuppetDataSerialization(OrchestratorSerializerTestBase):
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
|
||||
cluster_id = self.env.create(
|
||||
cluster_kwargs={
|
||||
@ -988,20 +988,20 @@ class TestRepoAndPuppetDataSerialization(OrchestratorSerializerTestBase):
|
||||
TaskHelper.prepare_for_deployment(cluster.nodes)
|
||||
facts = self.serializer.serialize(cluster, cluster.nodes)
|
||||
|
||||
self.assertEquals(1, len(facts))
|
||||
self.assertEqual(1, len(facts))
|
||||
fact = facts[0]
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
fact['repo_metadata'],
|
||||
{
|
||||
'nailgun': 'http://10.20.0.2:8080'
|
||||
'/centos-5.0/centos/fuelweb/x86_64/'
|
||||
}
|
||||
)
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
fact['puppet_modules_source'],
|
||||
'rsync://10.20.0.2/puppet/release/5.0/modules'
|
||||
)
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
fact['puppet_manifests_source'],
|
||||
'rsync://10.20.0.2/puppet/release/5.0/manifests'
|
||||
)
|
||||
|
@ -75,9 +75,9 @@ class TestProvisioning(BaseIntegrationTest):
|
||||
self.env.launch_deployment()
|
||||
|
||||
self.env.refresh_nodes()
|
||||
self.assertEquals(self.env.nodes[0].status, 'ready')
|
||||
self.assertEquals(self.env.nodes[1].status, 'provisioning')
|
||||
self.assertEquals(self.env.nodes[2].status, 'provisioning')
|
||||
self.assertEquals(self.env.nodes[3].status, 'provisioning')
|
||||
self.assertEquals(self.env.nodes[4].status, 'error')
|
||||
self.assertEquals(self.env.nodes[5].status, 'provisioning')
|
||||
self.assertEqual(self.env.nodes[0].status, 'ready')
|
||||
self.assertEqual(self.env.nodes[1].status, 'provisioning')
|
||||
self.assertEqual(self.env.nodes[2].status, 'provisioning')
|
||||
self.assertEqual(self.env.nodes[3].status, 'provisioning')
|
||||
self.assertEqual(self.env.nodes[4].status, 'error')
|
||||
self.assertEqual(self.env.nodes[5].status, 'provisioning')
|
||||
|
@ -41,6 +41,6 @@ class TestProvisioningSerializer(BaseIntegrationTest):
|
||||
node_db = self.db.query(Node).filter_by(
|
||||
fqdn=node['hostname']
|
||||
).first()
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
node['kernel_options']['netcfg/choose_interface'],
|
||||
node_db.admin_interface.mac)
|
||||
|
@ -34,7 +34,7 @@ class TestHandlers(BaseIntegrationTest):
|
||||
'release_id': self.release.id}),
|
||||
headers=self.default_headers,
|
||||
expect_errors=True)
|
||||
self.assertEquals(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
|
||||
supertask = self.db.query(Task).filter_by(
|
||||
name="redhat_check_credentials"
|
||||
|
@ -50,20 +50,20 @@ class TestResetEnvironment(BaseIntegrationTest):
|
||||
self.env.wait_ready(supertask, 60)
|
||||
|
||||
for n in cluster_db.nodes:
|
||||
self.assertEquals(n.status, "ready")
|
||||
self.assertEquals(n.pending_addition, False)
|
||||
self.assertEqual(n.status, "ready")
|
||||
self.assertEqual(n.pending_addition, False)
|
||||
|
||||
reset_task = self.env.reset_environment()
|
||||
self.env.wait_ready(reset_task, 60)
|
||||
|
||||
self.assertEquals(cluster_db.status, "new")
|
||||
self.assertEqual(cluster_db.status, "new")
|
||||
|
||||
for n in cluster_db.nodes:
|
||||
self.assertEquals(n.online, False)
|
||||
self.assertEquals(n.status, "discover")
|
||||
self.assertEquals(n.pending_addition, True)
|
||||
self.assertEquals(n.roles, [])
|
||||
self.assertNotEquals(n.pending_roles, [])
|
||||
self.assertEqual(n.online, False)
|
||||
self.assertEqual(n.status, "discover")
|
||||
self.assertEqual(n.pending_addition, True)
|
||||
self.assertEqual(n.roles, [])
|
||||
self.assertNotEqual(n.pending_roles, [])
|
||||
|
||||
msg = (
|
||||
u"Fuel couldn't reach these nodes during "
|
||||
@ -71,7 +71,7 @@ class TestResetEnvironment(BaseIntegrationTest):
|
||||
u"check may be needed."
|
||||
)
|
||||
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
self.db.query(Notification).filter(
|
||||
Notification.topic == "warning"
|
||||
).filter(
|
||||
|
@ -151,7 +151,7 @@ class TestVerifyNetworks(BaseIntegrationTest):
|
||||
reverse('TaskHandler', kwargs={'obj_id': task.id}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
task = jsonutils.loads(resp.body)
|
||||
self.assertEqual(task['status'], "error")
|
||||
error_nodes = [{'uid': node1.id, 'interface': 'eth0',
|
||||
@ -230,8 +230,8 @@ class TestVerifyNetworks(BaseIntegrationTest):
|
||||
{'uid': node1.id, 'networks': nets_sent}]}
|
||||
self.receiver.verify_networks_resp(**kwargs)
|
||||
self.db.refresh(task)
|
||||
self.assertEquals(task.status, "ready")
|
||||
self.assertEquals(task.message, '')
|
||||
self.assertEqual(task.status, "ready")
|
||||
self.assertEqual(task.message, '')
|
||||
|
||||
def test_verify_networks_with_dhcp_subtask(self):
|
||||
"""Test verifies that when dhcp subtask is ready and
|
||||
@ -981,7 +981,7 @@ class TestConsumer(BaseIntegrationTest):
|
||||
self.db.refresh(task)
|
||||
self.assertEqual(task.status, "ready")
|
||||
nodes_db = self.db.query(Node).all()
|
||||
self.assertEquals(len(nodes_db), 0)
|
||||
self.assertEqual(len(nodes_db), 0)
|
||||
|
||||
def test_remove_nodes_resp_failure(self):
|
||||
self.env.create(
|
||||
@ -1015,8 +1015,8 @@ class TestConsumer(BaseIntegrationTest):
|
||||
nodes_db = self.db.query(Node).all()
|
||||
error_node = self.db.query(Node).get(node1.id)
|
||||
self.db.refresh(error_node)
|
||||
self.assertEquals(len(nodes_db), 2)
|
||||
self.assertEquals(error_node.status, "error")
|
||||
self.assertEqual(len(nodes_db), 2)
|
||||
self.assertEqual(error_node.status, "error")
|
||||
|
||||
def test_remove_cluster_resp(self):
|
||||
self.env.create(
|
||||
@ -1059,27 +1059,27 @@ class TestConsumer(BaseIntegrationTest):
|
||||
|
||||
nodes_db = self.db.query(Node)\
|
||||
.filter_by(cluster_id=cluster_id).all()
|
||||
self.assertEquals(len(nodes_db), 0)
|
||||
self.assertEqual(len(nodes_db), 0)
|
||||
|
||||
ip_db = self.db.query(IPAddr)\
|
||||
.filter(IPAddr.node.in_([node1_id, node2_id])).all()
|
||||
self.assertEquals(len(ip_db), 0)
|
||||
self.assertEqual(len(ip_db), 0)
|
||||
|
||||
attrs_db = self.db.query(Attributes)\
|
||||
.filter_by(cluster_id=cluster_id).all()
|
||||
self.assertEquals(len(attrs_db), 0)
|
||||
self.assertEqual(len(attrs_db), 0)
|
||||
|
||||
nots_db = self.db.query(Notification)\
|
||||
.filter_by(cluster_id=cluster_id).all()
|
||||
self.assertEquals(len(nots_db), 0)
|
||||
self.assertEqual(len(nots_db), 0)
|
||||
|
||||
nets_db = self.db.query(NetworkGroup).\
|
||||
filter(NetworkGroup.cluster_id == cluster_id).all()
|
||||
self.assertEquals(len(nets_db), 0)
|
||||
self.assertEqual(len(nets_db), 0)
|
||||
|
||||
task_db = self.db.query(Task)\
|
||||
.filter_by(cluster_id=cluster_id).all()
|
||||
self.assertEquals(len(task_db), 0)
|
||||
self.assertEqual(len(task_db), 0)
|
||||
|
||||
cluster_db = self.db.query(Cluster).get(cluster_id)
|
||||
self.assertIsNone(cluster_db)
|
||||
|
@ -59,13 +59,13 @@ class TestStopDeployment(BaseIntegrationTest):
|
||||
uuid=deploy_task_uuid
|
||||
).first()
|
||||
)
|
||||
self.assertEquals(self.cluster.status, "stopped")
|
||||
self.assertEquals(stop_task.progress, 100)
|
||||
self.assertEqual(self.cluster.status, "stopped")
|
||||
self.assertEqual(stop_task.progress, 100)
|
||||
|
||||
for n in self.cluster.nodes:
|
||||
self.assertEquals(n.online, False)
|
||||
self.assertEquals(n.roles, [])
|
||||
self.assertNotEquals(n.pending_roles, [])
|
||||
self.assertEqual(n.online, False)
|
||||
self.assertEqual(n.roles, [])
|
||||
self.assertNotEqual(n.pending_roles, [])
|
||||
|
||||
@fake_tasks(fake_rpc=False, mock_rpc=False)
|
||||
@patch('nailgun.rpc.cast')
|
||||
@ -76,7 +76,7 @@ class TestStopDeployment(BaseIntegrationTest):
|
||||
for n in args[1]["args"]["nodes"]:
|
||||
self.assertIn("admin_ip", n)
|
||||
n_db = objects.Node.get_by_uid(n["uid"])
|
||||
self.assertEquals(
|
||||
self.assertEqual(
|
||||
n["admin_ip"],
|
||||
objects.Node.get_network_manager(
|
||||
n_db
|
||||
@ -96,5 +96,5 @@ class TestStopDeployment(BaseIntegrationTest):
|
||||
uuid=provision_task_uuid
|
||||
).first()
|
||||
)
|
||||
self.assertEquals(self.cluster.status, "stopped")
|
||||
self.assertEquals(stop_task.progress, 100)
|
||||
self.assertEqual(self.cluster.status, "stopped")
|
||||
self.assertEqual(stop_task.progress, 100)
|
||||
|
@ -52,17 +52,17 @@ class TestTaskManagers(BaseIntegrationTest):
|
||||
]
|
||||
)
|
||||
supertask = self.env.launch_deployment()
|
||||
self.assertEquals(supertask.name, 'deploy')
|
||||
self.assertEqual(supertask.name, 'deploy')
|
||||
self.assertIn(supertask.status, ('running', 'ready'))
|
||||
# we have three subtasks here
|
||||
# deletion
|
||||
# provision
|
||||
# deployment
|
||||
self.assertEquals(len(supertask.subtasks), 3)
|
||||
self.assertEqual(len(supertask.subtasks), 3)
|
||||
# provisioning task has less weight then deployment
|
||||
provision_task = filter(
|
||||
lambda t: t.name == 'provision', supertask.subtasks)[0]
|
||||
self.assertEquals(provision_task.weight, 0.4)
|
||||
self.assertEqual(provision_task.weight, 0.4)
|
||||
|
||||
self.env.wait_for_nodes_status([self.env.nodes[0]], 'provisioning')
|
||||
self.env.wait_ready(
|
||||
@ -78,8 +78,8 @@ class TestTaskManagers(BaseIntegrationTest):
|
||||
lambda n: n.cluster_id == self.env.clusters[0].id,
|
||||
self.env.nodes
|
||||
):
|
||||
self.assertEquals(n.status, 'ready')
|
||||
self.assertEquals(n.progress, 100)
|
||||
self.assertEqual(n.status, 'ready')
|
||||
self.assertEqual(n.progress, 100)
|
||||
|
||||
@fake_tasks(fake_rpc=False, mock_rpc=False)
|
||||
@patch('nailgun.rpc.cast')
|
||||
@ -93,11 +93,11 @@ class TestTaskManagers(BaseIntegrationTest):
|
||||
self.env.launch_deployment()
|
||||
|
||||
args, kwargs = nailgun.task.manager.rpc.cast.call_args
|
||||
self.assertEquals(len(args[1]['args']['nodes']), 0)
|
||||
self.assertEqual(len(args[1]['args']['nodes']), 0)
|
||||
|
||||
self.env.refresh_nodes()
|
||||
for n in self.env.nodes:
|
||||
self.assertEquals(len(self.env.nodes), 0)
|
||||
self.assertEqual(len(self.env.nodes), 0)
|
||||
|
||||
@fake_tasks(fake_rpc=False, mock_rpc=False)
|
||||
@patch('nailgun.rpc.cast')
|
||||
@ -111,7 +111,7 @@ class TestTaskManagers(BaseIntegrationTest):
|
||||
self.env.launch_deployment()
|
||||
|
||||
args, kwargs = nailgun.task.manager.rpc.cast.call_args
|
||||
self.assertEquals(len(args[1]['args']['nodes']), 1)
|
||||
self.assertEqual(len(args[1]['args']['nodes']), 1)
|
||||
|
||||
@fake_tasks()
|
||||
def test_do_not_redeploy_nodes_in_ready_status(self):
|
||||
@ -130,17 +130,17 @@ class TestTaskManagers(BaseIntegrationTest):
|
||||
objects.Cluster.clear_pending_changes(cluster_db)
|
||||
|
||||
supertask = self.env.launch_deployment()
|
||||
self.assertEquals(supertask.name, 'deploy')
|
||||
self.assertEqual(supertask.name, 'deploy')
|
||||
self.assertIn(supertask.status, ('running', 'ready'))
|
||||
|
||||
self.assertEquals(self.env.nodes[0].status, 'ready')
|
||||
self.assertEqual(self.env.nodes[0].status, 'ready')
|
||||
self.env.wait_for_nodes_status([self.env.nodes[1]], 'provisioning')
|
||||
self.env.wait_ready(supertask)
|
||||
|
||||
self.env.refresh_nodes()
|
||||
|
||||
self.assertEquals(self.env.nodes[1].status, 'ready')
|
||||
self.assertEquals(self.env.nodes[1].progress, 100)
|
||||
self.assertEqual(self.env.nodes[1].status, 'ready')
|
||||
self.assertEqual(self.env.nodes[1].progress, 100)
|
||||
|
||||
@fake_tasks()
|
||||
def test_deployment_fails_if_node_offline(self):
|
||||
@ -195,8 +195,8 @@ class TestTaskManagers(BaseIntegrationTest):
|
||||
self.env.wait_ready(supertask, 60)
|
||||
self.env.refresh_nodes()
|
||||
for n in self.env.nodes:
|
||||
self.assertEquals(n.status, 'ready')
|
||||
self.assertEquals(n.progress, 100)
|
||||
self.assertEqual(n.status, 'ready')
|
||||
self.assertEqual(n.progress, 100)
|
||||
|
||||
def test_deletion_empty_cluster_task_manager(self):
|
||||
cluster = self.env.create_cluster(api=True)
|
||||
@ -206,7 +206,7 @@ class TestTaskManagers(BaseIntegrationTest):
|
||||
kwargs={'obj_id': self.env.clusters[0].id}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(202, resp.status_code)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
|
||||
timer = time.time()
|
||||
timeout = 15
|
||||
@ -246,7 +246,7 @@ class TestTaskManagers(BaseIntegrationTest):
|
||||
kwargs={'obj_id': cluster_id}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(202, resp.status_code)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
|
||||
timer = time.time()
|
||||
timeout = 15
|
||||
@ -329,7 +329,7 @@ class TestTaskManagers(BaseIntegrationTest):
|
||||
kwargs={'obj_id': cluster_id}),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(202, resp.status_code)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
|
||||
timer = time.time()
|
||||
timeout = 15
|
||||
@ -364,7 +364,7 @@ class TestTaskManagers(BaseIntegrationTest):
|
||||
self.env.refresh_nodes()
|
||||
for node in self.env.nodes:
|
||||
fqdn = "node-%s.%s" % (node.id, settings.DNS_DOMAIN)
|
||||
self.assertEquals(fqdn, node.fqdn)
|
||||
self.assertEqual(fqdn, node.fqdn)
|
||||
|
||||
@fake_tasks()
|
||||
def test_no_node_no_cry(self):
|
||||
@ -402,7 +402,7 @@ class TestTaskManagers(BaseIntegrationTest):
|
||||
|
||||
supertask = self.env.launch_deployment()
|
||||
self.env.wait_ready(supertask, timeout=5)
|
||||
self.assertEquals(self.env.db.query(Node).count(), 1)
|
||||
self.assertEqual(self.env.db.query(Node).count(), 1)
|
||||
|
||||
@fake_tasks()
|
||||
def test_deletion_three_offline_nodes_and_one_online(self):
|
||||
@ -418,10 +418,10 @@ class TestTaskManagers(BaseIntegrationTest):
|
||||
supertask = self.env.launch_deployment()
|
||||
self.env.wait_ready(supertask, timeout=5)
|
||||
|
||||
self.assertEquals(self.env.db.query(Node).count(), 1)
|
||||
self.assertEqual(self.env.db.query(Node).count(), 1)
|
||||
node = self.db.query(Node).first()
|
||||
self.assertEquals(node.status, 'discover')
|
||||
self.assertEquals(node.cluster_id, None)
|
||||
self.assertEqual(node.status, 'discover')
|
||||
self.assertEqual(node.cluster_id, None)
|
||||
|
||||
@fake_tasks()
|
||||
def test_deletion_offline_node_when_cluster_has_only_one_node(self):
|
||||
@ -437,4 +437,4 @@ class TestTaskManagers(BaseIntegrationTest):
|
||||
|
||||
supertask = self.env.launch_deployment()
|
||||
self.env.wait_ready(supertask, timeout=5)
|
||||
self.assertEquals(self.env.db.query(Node).count(), 0)
|
||||
self.assertEqual(self.env.db.query(Node).count(), 0)
|
||||
|
@ -59,7 +59,7 @@ class TestVerifyNetworkTaskManagers(BaseIntegrationTest):
|
||||
),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
nets = jsonutils.loads(resp.body)
|
||||
|
||||
nets['networks'][-1]["vlan_start"] = 500
|
||||
@ -76,7 +76,7 @@ class TestVerifyNetworkTaskManagers(BaseIntegrationTest):
|
||||
),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
nets = jsonutils.loads(resp.body)
|
||||
|
||||
admin_ng = self.env.network_manager.get_admin_network_group()
|
||||
@ -90,7 +90,7 @@ class TestVerifyNetworkTaskManagers(BaseIntegrationTest):
|
||||
task.message)
|
||||
self.assertIn("admin (PXE)", task.message)
|
||||
self.assertIn("fixed", task.message)
|
||||
self.assertEquals(mocked_rpc.called, False)
|
||||
self.assertEqual(mocked_rpc.called, False)
|
||||
|
||||
@fake_tasks(fake_rpc=False)
|
||||
def test_network_verify_fails_if_untagged_intersection(self, mocked_rpc):
|
||||
@ -102,7 +102,7 @@ class TestVerifyNetworkTaskManagers(BaseIntegrationTest):
|
||||
),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
nets = jsonutils.loads(resp.body)
|
||||
|
||||
for net in nets['networks']:
|
||||
@ -119,7 +119,7 @@ class TestVerifyNetworkTaskManagers(BaseIntegrationTest):
|
||||
)
|
||||
for n in self.env.nodes:
|
||||
self.assertIn('"storage"', task.message)
|
||||
self.assertEquals(mocked_rpc.called, False)
|
||||
self.assertEqual(mocked_rpc.called, False)
|
||||
|
||||
@fake_tasks()
|
||||
def test_verify_networks_less_than_2_nodes_error(self):
|
||||
@ -168,7 +168,7 @@ class TestVerifyNetworkTaskManagers(BaseIntegrationTest):
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.assertEquals(400, resp.status_code)
|
||||
self.assertEqual(400, resp.status_code)
|
||||
|
||||
|
||||
class TestVerifyNetworksDisabled(BaseIntegrationTest):
|
||||
@ -257,7 +257,7 @@ class TestNetworkVerificationWithBonds(BaseIntegrationTest):
|
||||
reverse('NodeNICsHandler',
|
||||
kwargs={'node_id': node['id']}),
|
||||
headers=self.default_headers)
|
||||
self.assertEquals(resp.status_code, 200)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
data = jsonutils.loads(resp.body)
|
||||
admin_nic, other_nic, empty_nic = None, None, None
|
||||
for nic in data:
|
||||
@ -314,7 +314,7 @@ class TestNetworkVerificationWithBonds(BaseIntegrationTest):
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.assertEquals(202, resp.status_code)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
data = jsonutils.loads(resp.body)
|
||||
self.assertEqual(
|
||||
data['result'],
|
||||
@ -365,7 +365,7 @@ class TestVerifyNeutronVlan(BaseIntegrationTest):
|
||||
self.env.launch_deployment()
|
||||
stop_task = self.env.stop_deployment()
|
||||
self.env.wait_ready(stop_task, 60)
|
||||
self.assertEquals(self.cluster.status, "stopped")
|
||||
self.assertEqual(self.cluster.status, "stopped")
|
||||
verify_task = self.env.launch_verify_networks()
|
||||
self.env.wait_ready(verify_task, 60)
|
||||
|
||||
@ -379,7 +379,7 @@ class TestVerifyNeutronVlan(BaseIntegrationTest):
|
||||
# get nodes NICs for private network
|
||||
resp = self.app.get(reverse('NodeCollectionHandler'),
|
||||
headers=self.default_headers)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
priv_nics = {}
|
||||
for node in jsonutils.loads(resp.body):
|
||||
for net in node['network_data']:
|
||||
|
@ -28,13 +28,13 @@ class TestAssignmentValidator(BaseUnitTest):
|
||||
pattern = 'parent.child.value'
|
||||
result = NodeAssignmentValidator._search_in_settings(self.settings,
|
||||
pattern)
|
||||
self.assertEquals(result, 1)
|
||||
self.assertEqual(result, 1)
|
||||
|
||||
def test_search_in_settings_non_exisxt(self):
|
||||
pattern = 'parent.fake.value'
|
||||
result = NodeAssignmentValidator._search_in_settings(self.settings,
|
||||
pattern)
|
||||
self.assertEquals(result, None)
|
||||
self.assertEqual(result, None)
|
||||
|
||||
def test_check_roles_requirement(self):
|
||||
roles = ['test']
|
||||
|
@ -76,9 +76,9 @@ class TestLogs(BaseIntegrationTest):
|
||||
reverse('LogSourceCollectionHandler'),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEquals(200, resp.status_code)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
response = jsonutils.loads(resp.body)
|
||||
self.assertEquals(response, settings.LOGS)
self.assertEqual(response, settings.LOGS)

def test_log_source_by_node_collection_handler(self):
node_ip = '40.30.20.10'
@ -89,9 +89,9 @@ class TestLogs(BaseIntegrationTest):
kwargs={'node_id': node.id}),
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals(response, [])
self.assertEqual(response, [])

log_entry = ['date111', 'level222', 'text333']
self._create_logfile_for_node(settings.LOGS[1], [log_entry], node)
@ -100,9 +100,9 @@ class TestLogs(BaseIntegrationTest):
kwargs={'node_id': node.id}),
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals(response, [settings.LOGS[1]])
self.assertEqual(response, [settings.LOGS[1]])

def test_log_entry_collection_handler(self):
node_ip = '10.20.30.40'
@ -128,20 +128,20 @@ class TestLogs(BaseIntegrationTest):
params={'source': settings.LOGS[0]['id']},
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
response['entries'].reverse()
self.assertEquals(response['entries'], log_entries)
self.assertEqual(response['entries'], log_entries)

resp = self.app.get(
reverse('LogEntryCollectionHandler'),
params={'node': node.id, 'source': settings.LOGS[1]['id']},
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
response['entries'].reverse()
self.assertEquals(response['entries'], log_entries)
self.assertEqual(response['entries'], log_entries)

def test_multiline_log_entry(self):
settings.LOGS[0]['multiline'] = True
@ -170,10 +170,10 @@ class TestLogs(BaseIntegrationTest):
params={'source': settings.LOGS[0]['id']},
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
response['entries'].reverse()
self.assertEquals(response['entries'], log_entries)
self.assertEqual(response['entries'], log_entries)
settings.LOGS[0]['multiline'] = False

def test_backward_reader(self):
@ -185,7 +185,7 @@ class TestLogs(BaseIntegrationTest):
forward_lines = list(f)
backward_lines = list(read_backwards(f))
backward_lines.reverse()
self.assertEquals(forward_lines, backward_lines)
self.assertEqual(forward_lines, backward_lines)

# filling file with content
contents = [
@ -209,7 +209,7 @@ class TestLogs(BaseIntegrationTest):
forward_lines = list(f)
backward_lines = list(read_backwards(f, bufsize))
backward_lines.reverse()
self.assertEquals(forward_lines, backward_lines)
self.assertEqual(forward_lines, backward_lines)

# test partial file reading from middle to beginning
forward_lines = []
@ -217,7 +217,7 @@ class TestLogs(BaseIntegrationTest):
forward_lines.append(f.readline())
backward_lines = list(read_backwards(f, bufsize))
backward_lines.reverse()
self.assertEquals(forward_lines, backward_lines)
self.assertEqual(forward_lines, backward_lines)

f.close()

@ -308,7 +308,7 @@ class TestLogs(BaseIntegrationTest):
}
}
args, kwargs = nailgun.task.task.rpc.cast.call_args
self.assertEquals(len(args), 2)
self.assertEqual(len(args), 2)
self.datadiff(args[1], message)

def test_snapshot_task_manager(self):
@ -344,8 +344,8 @@ class TestLogs(BaseIntegrationTest):
)
tm_patcher.stop()
th_patcher.stop()
self.assertEquals(task, resp.body)
self.assertEquals(resp.status_code, 202)
self.assertEqual(task, resp.body)
self.assertEqual(resp.status_code, 202)

def test_log_package_handler_failed(self):
tm_patcher = patch('nailgun.api.v1.handlers.logs.DumpTaskManager')
@ -362,7 +362,7 @@ class TestLogs(BaseIntegrationTest):
expect_errors=True
)
tm_patcher.stop()
self.assertEquals(resp.status_code, 400)
self.assertEqual(resp.status_code, 400)

def test_log_entry_collection_handler_sensitive(self):
account = RedHatAccount()
@ -392,10 +392,10 @@ class TestLogs(BaseIntegrationTest):
params={'source': settings.LOGS[0]['id']},
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
response['entries'].reverse()
self.assertEquals(response['entries'], response_log_entries)
self.assertEqual(response['entries'], response_log_entries)

@patch('nailgun.api.v1.handlers.logs.DumpTaskManager')
def test_log_package_handler_with_dump_task_manager_error(self,

@ -254,7 +254,7 @@ class TestNetworkCheck(BaseIntegrationTest):
self.assertRaises(
errors.NetworkCheckError,
checker.neutron_check_l3_addresses_not_match_subnet_and_broadcast)
self.assertEquals(len(checker.err_msgs), 2)
self.assertEqual(len(checker.err_msgs), 2)

def test_check_network_classes_exclude_loopback(self):
checker = NetworkCheck(self.task, {})
@ -270,7 +270,7 @@ class TestNetworkCheck(BaseIntegrationTest):
checker.networks = [{'id': 1, 'cidr': network, 'name': 'fake'}]
self.assertRaises(errors.NetworkCheckError,
checker.check_network_classes_exclude_loopback)
self.assertEquals(mocked_db.call_count, 4)
self.assertEqual(mocked_db.call_count, 4)

@patch.object(helpers, 'db')
def test_check_network_addresses_not_match_subnet_and_broadcast(self,
@ -328,7 +328,7 @@ class TestNetworkCheck(BaseIntegrationTest):
checker = NetworkCheck(FakeTask(cluster_db), {})
checker.check_bond_slaves_speeds()

self.assertEquals(checker.err_msgs, [])
self.assertEqual(checker.err_msgs, [])
bond_if1 = node.NodeBondInterface()
bond_if2 = node.NodeBondInterface()

@ -343,7 +343,7 @@ class TestNetworkCheck(BaseIntegrationTest):
checker.cluster.nodes[0].bond_interfaces = [bond_if1, bond_if2]

checker.check_bond_slaves_speeds()
self.assertEquals(len(checker.err_msgs), 2)
self.assertEqual(len(checker.err_msgs), 2)

def test_check_configuration_neutron(self):
checker = NetworkCheck(self.task, {})

@ -43,7 +43,7 @@ class TestAssignmentHandlers(BaseIntegrationTest):
jsonutils.dumps(assignment_data),
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
self.assertEqual(node.cluster, cluster)
self.datadiff(
node.pending_roles,
@ -59,7 +59,7 @@ class TestAssignmentHandlers(BaseIntegrationTest):
headers=self.default_headers,
expect_errors=True
)
self.assertEquals(400, resp.status_code)
self.assertEqual(400, resp.status_code)

def test_unassignment(self):
cluster = self.env.create(

@ -50,12 +50,12 @@ class TestNodeDeletion(BaseIntegrationTest):
kwargs={'obj_id': node.id}),
headers=self.default_headers
)
self.assertEquals(204, resp.status_code)
self.assertEqual(204, resp.status_code)

node_try = self.db.query(Node).filter_by(
cluster_id=cluster.id
).first()
self.assertEquals(node_try, None)
self.assertEqual(node_try, None)

management_net = self.db.query(NetworkGroup).\
filter(NetworkGroup.cluster_id == cluster.id).filter_by(
@ -64,5 +64,5 @@ class TestNodeDeletion(BaseIntegrationTest):
ipaddrs = self.db.query(IPAddr).\
filter_by(node=node.id).all()

self.assertEquals(list(management_net.nodes), [])
self.assertEquals(list(ipaddrs), [])
self.assertEqual(list(management_net.nodes), [])
self.assertEqual(list(ipaddrs), [])

@ -49,7 +49,7 @@ class TestNodeDisksHandlers(BaseIntegrationTest):
reverse('NodeDisksHandler', kwargs={'node_id': node_id}),
headers=self.default_headers)

self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
return jsonutils.loads(resp.body)

def put(self, node_id, data, expect_errors=False):
@ -60,7 +60,7 @@ class TestNodeDisksHandlers(BaseIntegrationTest):
expect_errors=expect_errors)

if not expect_errors:
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
return jsonutils.loads(resp.body)
else:
return resp
@ -95,35 +95,35 @@ class TestNodeDisksHandlers(BaseIntegrationTest):
reverse('NodeCollectionHandler'),
jsonutils.dumps([{'id': node_db.id, 'pending_roles': roles}]),
headers=self.default_headers)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)

# adding role
update_node_roles(['compute', 'cinder'])
modified_roles_response = self.get(node_db.id)
self.assertNotEquals(get_vgs(original_roles_response),
get_vgs(modified_roles_response))
self.assertNotEqual(get_vgs(original_roles_response),
get_vgs(modified_roles_response))
original_roles_response = modified_roles_response

# replacing role
update_node_roles(['compute', 'ceph-osd'])
modified_roles_response = self.get(node_db.id)
self.assertNotEquals(get_vgs(original_roles_response),
get_vgs(modified_roles_response))
self.assertNotEqual(get_vgs(original_roles_response),
get_vgs(modified_roles_response))
original_roles_response = modified_roles_response

# removing role
update_node_roles(['compute'])
modified_roles_response = self.get(node_db.id)
self.assertNotEquals(get_vgs(original_roles_response),
get_vgs(modified_roles_response))
self.assertNotEqual(get_vgs(original_roles_response),
get_vgs(modified_roles_response))
original_roles_response = modified_roles_response

# replacing role to itself
update_node_roles(['controller'])
update_node_roles(['compute'])
modified_roles_response = self.get(node_db.id)
self.assertEquals(get_vgs(original_roles_response),
get_vgs(modified_roles_response))
self.assertEqual(get_vgs(original_roles_response),
get_vgs(modified_roles_response))

def test_disks_volumes_size_update(self):
node_db = self.create_node()
@ -135,10 +135,10 @@ class TestNodeDisksHandlers(BaseIntegrationTest):
expect_disks = deepcopy(disks)

response = self.put(node_db.id, disks)
self.assertEquals(response, expect_disks)
self.assertEqual(response, expect_disks)

response = self.get(node_db.id)
self.assertEquals(response, expect_disks)
self.assertEqual(response, expect_disks)

def test_recalculates_vg_sizes_when_disks_volumes_size_update(self):
node_db = self.create_node()
@ -168,10 +168,10 @@ class TestNodeDisksHandlers(BaseIntegrationTest):
size_volumes_after = sum([
volume.get('size', 0) for volume in vg_after['volumes']])

self.assertNotEquals(size_volumes_before, size_volumes_after)
self.assertNotEqual(size_volumes_before, size_volumes_after)

volume_group_size = new_volume_size * updated_disks_count
self.assertEquals(size_volumes_after, volume_group_size)
self.assertEqual(size_volumes_after, volume_group_size)

def test_update_ceph_partition(self):
node = self.create_node(roles=['ceph-osd'])
@ -189,12 +189,12 @@ class TestNodeDisksHandlers(BaseIntegrationTest):
node.attributes.volumes)

for partition_after in partitions_after_update:
self.assertEquals(partition_after['size'], new_volume_size)
self.assertEqual(partition_after['size'], new_volume_size)

def test_validator_at_least_one_disk_exists(self):
node = self.create_node()
response = self.put(node.id, [], True)
self.assertEquals(response.status_code, 400)
self.assertEqual(response.status_code, 400)
self.assertRegexpMatches(response.body,
'^Node seems not to have disks')

@ -208,7 +208,7 @@ class TestNodeDisksHandlers(BaseIntegrationTest):
volume['size'] = disk['size'] + 1

response = self.put(node.id, disks, True)
self.assertEquals(response.status_code, 400)
self.assertEqual(response.status_code, 400)
self.assertRegexpMatches(
response.body, '^Not enough free space on disk: .+')

@ -221,7 +221,7 @@ class TestNodeDisksHandlers(BaseIntegrationTest):
del volume['size']

response = self.put(node.id, disks, True)
self.assertEquals(response.status_code, 400)
self.assertEqual(response.status_code, 400)
self.assertRegexpMatches(
response.body, "'size' is a required property")

@ -233,7 +233,7 @@ class TestNodeDefaultsDisksHandler(BaseIntegrationTest):
reverse('NodeDefaultsDisksHandler', kwargs={'node_id': node_id}),
headers=self.default_headers)

self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
return jsonutils.loads(resp.body)

def test_node_disk_amount_regenerates_volumes_info_if_new_disk_added(self):
@ -244,7 +244,7 @@ class TestNodeDefaultsDisksHandler(BaseIntegrationTest):
cluster_id=cluster['id'])
node_db = self.env.nodes[0]
response = self.get(node_db.id)
self.assertEquals(len(response), 6)
self.assertEqual(len(response), 6)

new_meta = node_db.meta.copy()
new_meta['disks'].append({
@ -263,12 +263,12 @@ class TestNodeDefaultsDisksHandler(BaseIntegrationTest):
self.env.refresh_nodes()

response = self.get(node_db.id)
self.assertEquals(len(response), 7)
self.assertEqual(len(response), 7)

# check all groups on all disks
vgs = ['os', 'vm']
for disk in response:
self.assertEquals(len(disk['volumes']), len(vgs))
self.assertEqual(len(disk['volumes']), len(vgs))

def test_get_default_attrs(self):
self.env.create_node(api=True)
@ -278,7 +278,7 @@ class TestNodeDefaultsDisksHandler(BaseIntegrationTest):
default_volumes = node_db.volume_manager.gen_volumes_info()
disks = only_disks(default_volumes)

self.assertEquals(len(disks), len(volumes_from_api))
self.assertEqual(len(disks), len(volumes_from_api))


class TestNodeVolumesInformationHandler(BaseIntegrationTest):
@ -289,7 +289,7 @@ class TestNodeVolumesInformationHandler(BaseIntegrationTest):
kwargs={'node_id': node_id}),
headers=self.default_headers)

self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
return jsonutils.loads(resp.body)

def create_node(self, role):
@ -299,7 +299,7 @@ class TestNodeVolumesInformationHandler(BaseIntegrationTest):
return self.env.nodes[0]

def check_volumes(self, volumes, volumes_ids):
self.assertEquals(len(volumes), len(volumes_ids))
self.assertEqual(len(volumes), len(volumes_ids))
for volume_id in volumes_ids:
# Volume has name
volume = filter(
@ -380,7 +380,7 @@ class TestVolumeManager(BaseIntegrationTest):
return reserved_size

def should_contain_os_with_minimal_size(self, volume_manager):
self.assertEquals(
self.assertEqual(
self.os_size(volume_manager.volumes, with_lvm_meta=False),
volume_manager.call_generator('calc_min_os_size'))

@ -398,7 +398,7 @@ class TestVolumeManager(BaseIntegrationTest):
vg_size -= volume.get('lvm_meta_size', 0)
sum_lvm_meta += volume.get('lvm_meta_size', 0)

self.assertEquals(
self.assertEqual(
vg_size, disk_sum_size - os_size - reserved_size - sum_lvm_meta)

def all_free_space_except_os_disks_for_volume(self, volume_manager,
@ -422,7 +422,7 @@ class TestVolumeManager(BaseIntegrationTest):
vg_size -= volume.get('lvm_meta_size', 0)
sum_lvm_meta += volume.get('lvm_meta_size', 0)

self.assertEquals(
self.assertEqual(
vg_size, disk_sum_size - reserved_size - sum_lvm_meta)

def logical_volume_sizes_should_equal_all_phisical_volumes(self, spaces):
@ -450,14 +450,14 @@ class TestVolumeManager(BaseIntegrationTest):
pv_sizes[vg_name] += volume['size']
pv_sizes[vg_name] -= volume['lvm_meta_size']

self.assertEquals(vg_sizes, pv_sizes)
self.assertEqual(vg_sizes, pv_sizes)

def check_disk_size_equal_sum_of_all_volumes(self, spaces):
for disk in only_disks(spaces):
volumes_size = sum(
[volume.get('size', 0) for volume in disk['volumes']])

self.assertEquals(volumes_size, disk['size'])
self.assertEqual(volumes_size, disk['size'])

def test_volume_request_without_cluster(self):
self.env.create_node(api=True)
@ -468,7 +468,7 @@ class TestVolumeManager(BaseIntegrationTest):
headers=self.default_headers,
expect_errors=True
)
self.assertEquals(404, resp.status_code)
self.assertEqual(404, resp.status_code)

def test_allocates_all_free_space_for_os_for_controller_role(self):
node = self.create_node('controller')
@ -478,8 +478,8 @@ class TestVolumeManager(BaseIntegrationTest):
glance_sum_size = self.glance_size(disks)
reserved_size = self.reserved_size(disks)

self.assertEquals(disks_size_sum - reserved_size,
os_sum_size + glance_sum_size)
self.assertEqual(disks_size_sum - reserved_size,
os_sum_size + glance_sum_size)
self.logical_volume_sizes_should_equal_all_phisical_volumes(
node.attributes.volumes)
self.check_disk_size_equal_sum_of_all_volumes(node.attributes.volumes)
@ -642,7 +642,7 @@ class TestVolumeManager(BaseIntegrationTest):
for role, space_info in volumes_roles_mapping.iteritems():
node = self.create_node(role)
vm = node.volume_manager
self.assertEquals(
self.assertEqual(
vm._VolumeManager__calc_minimal_installation_size(),
self.__calc_minimal_installation_size(vm)
)
@ -680,7 +680,7 @@ class TestVolumeManager(BaseIntegrationTest):
new_meta['memory']['total'] = (1024 ** 2) * size
node.meta = new_meta
self.env.db.commit()
self.assertEquals(node.volume_manager._calc_swap_size(), swap_size)
self.assertEqual(node.volume_manager._calc_swap_size(), swap_size)

def test_root_size_calculation(self):
node = self.create_node('controller')
@ -714,12 +714,12 @@ class TestDisks(BaseIntegrationTest):
def test_create_mbr_as_raid_if_disks_count_greater_than_zero(self):
disk = self.create_disk(boot_is_raid=True)
boot_partition = self.get_boot(disk.volumes)
self.assertEquals(boot_partition['type'], 'raid')
self.assertEqual(boot_partition['type'], 'raid')

def test_create_mbr_as_partition_if_disks_count_less_than_zero(self):
disk = self.create_disk()
boot_partition = self.get_boot(disk.volumes)
self.assertEquals(boot_partition['type'], 'partition')
self.assertEqual(boot_partition['type'], 'partition')

def test_remove_pv(self):
disk = self.create_disk(possible_pvs_count=1)
@ -727,13 +727,13 @@ class TestDisks(BaseIntegrationTest):
disk.create_pv({'id': 'pv_name'}, 100)
disk.remove_pv('pv_name')

self.assertEquals(disk_without_pv.render(), disk.render())
self.assertEqual(disk_without_pv.render(), disk.render())

def test_boot_partition_has_file_system(self):
disk = self.create_disk(possible_pvs_count=1)
boot_record = filter(
lambda volume: volume.get('mount') == '/boot', disk.volumes)[0]
self.assertEquals(boot_record['file_system'], 'ext2')
self.assertEqual(boot_record['file_system'], 'ext2')


class TestFixtures(BaseIntegrationTest):

@ -26,7 +26,7 @@ class TestHandlers(BaseIntegrationTest):
reverse('NodeNICsHandler', kwargs={'node_id': 1}),
expect_errors=True,
headers=self.default_headers)
self.assertEquals(resp.status_code, 404)
self.assertEqual(resp.status_code, 404)

def test_get_handler_with_invalid_data(self):
meta = self.env.default_metadata()
@ -46,14 +46,14 @@ class TestHandlers(BaseIntegrationTest):
expect_errors=True,
headers=self.default_headers
)
self.assertEquals(resp.status_code, 400)
self.assertEqual(resp.status_code, 400)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertEquals(response, [])
self.assertEqual(response, [])

def test_get_handler_with_incompleted_iface_data(self):
meta = self.env.default_metadata()
@ -76,13 +76,13 @@ class TestHandlers(BaseIntegrationTest):
expect_errors=True,
headers=self.default_headers
)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers
)
ifaces = jsonutils.loads(resp.body)
self.assertEquals(ifaces, [])
self.assertEqual(ifaces, [])

def test_get_handler_with_invalid_speed_data(self):
meta = self.env.default_metadata()
@ -113,13 +113,13 @@ class TestHandlers(BaseIntegrationTest):
expect_errors=True,
headers=self.default_headers
)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeHandler', kwargs={'obj_id': node['id']}),
headers=self.default_headers
)
ifaces = jsonutils.loads(resp.body)['meta']['interfaces']
self.assertEquals(
self.assertEqual(
ifaces,
[
{'name': 'eth0', 'mac': '00:00:00',
@ -134,9 +134,9 @@ class TestHandlers(BaseIntegrationTest):
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertEquals(response, [])
self.assertEqual(response, [])

def test_get_handler_with_NICs(self):
meta = self.env.default_metadata()
@ -151,7 +151,7 @@ class TestHandlers(BaseIntegrationTest):
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node_db.id}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertItemsEqual(
map(lambda i: i['id'], response),
@ -163,11 +163,11 @@ class TestHandlers(BaseIntegrationTest):
response
)
resp_nic = filtered_nics[0]
self.assertEquals(resp_nic['mac'], nic['mac'])
self.assertEquals(resp_nic['current_speed'], nic['current_speed'])
self.assertEquals(resp_nic['max_speed'], nic['max_speed'])
self.assertEqual(resp_nic['mac'], nic['mac'])
self.assertEqual(resp_nic['current_speed'], nic['current_speed'])
self.assertEqual(resp_nic['max_speed'], nic['max_speed'])
for conn in ('assigned_networks', ):
self.assertEquals(resp_nic[conn], [])
self.assertEqual(resp_nic[conn], [])

def test_NIC_updates_by_agent(self):
meta = self.env.default_metadata()
@ -184,22 +184,22 @@ class TestHandlers(BaseIntegrationTest):
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertEquals(len(response), 1)
self.assertEqual(len(response), 1)
resp_nic = response[0]
nic = new_meta['interfaces'][0]
self.assertEquals(resp_nic['mac'], nic['mac'])
self.assertEquals(resp_nic['current_speed'], nic['current_speed'])
self.assertEquals(resp_nic['max_speed'], nic['max_speed'])
self.assertEquals(resp_nic['state'], nic['state'])
self.assertEqual(resp_nic['mac'], nic['mac'])
self.assertEqual(resp_nic['current_speed'], nic['current_speed'])
self.assertEqual(resp_nic['max_speed'], nic['max_speed'])
self.assertEqual(resp_nic['state'], nic['state'])
for conn in ('assigned_networks', ):
self.assertEquals(resp_nic[conn], [])
self.assertEqual(resp_nic[conn], [])

def test_NIC_adds_by_agent(self):
meta = self.env.default_metadata()
@ -214,27 +214,27 @@ class TestHandlers(BaseIntegrationTest):
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertEquals(len(response), len(meta['interfaces']))
self.assertEqual(len(response), len(meta['interfaces']))
for nic in meta['interfaces']:
filtered_nics = filter(
lambda i: i['mac'] == nic['mac'],
response
)
resp_nic = filtered_nics[0]
self.assertEquals(resp_nic['mac'], nic['mac'])
self.assertEquals(resp_nic['current_speed'],
nic.get('current_speed'))
self.assertEquals(resp_nic['max_speed'], nic.get('max_speed'))
self.assertEquals(resp_nic['state'], nic.get('state'))
self.assertEqual(resp_nic['mac'], nic['mac'])
self.assertEqual(resp_nic['current_speed'],
nic.get('current_speed'))
self.assertEqual(resp_nic['max_speed'], nic.get('max_speed'))
self.assertEqual(resp_nic['state'], nic.get('state'))
for conn in ('assigned_networks', ):
self.assertEquals(resp_nic[conn], [])
self.assertEqual(resp_nic[conn], [])

def test_ignore_NIC_id_in_meta(self):
fake_id = 'some_data'
@ -245,7 +245,7 @@ class TestHandlers(BaseIntegrationTest):
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertNotEquals(response[0]['id'], fake_id)

@ -258,7 +258,7 @@ class TestHandlers(BaseIntegrationTest):
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertNotEquals(response[0]['mac'], new_mac.lower())

@ -26,9 +26,9 @@ class TestHandlers(BaseIntegrationTest):
reverse('NotificationCollectionHandler'),
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals([], response)
self.assertEqual([], response)

def test_not_empty(self):
c = self.env.create_cluster(api=False)
@ -38,16 +38,16 @@ class TestHandlers(BaseIntegrationTest):
reverse('NotificationCollectionHandler'),
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals(len(response), 2)
self.assertEqual(len(response), 2)
if response[0]['id'] == n0.id:
rn0 = response[0]
rn1 = response[1]
else:
rn0 = response[1]
rn1 = response[0]
self.assertEquals(rn1['cluster'], n1.cluster_id)
self.assertEqual(rn1['cluster'], n1.cluster_id)
self.assertIsNone(rn0.get('cluster', None))

def test_update(self):
@ -69,16 +69,16 @@ class TestHandlers(BaseIntegrationTest):
jsonutils.dumps(notification_update),
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals(len(response), 2)
self.assertEqual(len(response), 2)
if response[0]['id'] == n0.id:
rn0 = response[0]
rn1 = response[1]
else:
rn0 = response[1]
rn1 = response[0]
self.assertEquals(rn1['cluster'], n1.cluster_id)
self.assertEquals(rn1['status'], 'read')
self.assertEqual(rn1['cluster'], n1.cluster_id)
self.assertEqual(rn1['status'], 'read')
self.assertIsNone(rn0.get('cluster', None))
self.assertEquals(rn0['status'], 'read')
self.assertEqual(rn0['status'], 'read')

@ -30,14 +30,14 @@ class TestHandlers(BaseIntegrationTest):
),
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertIsNone(response.get('cluster'))
self.assertEquals(notification.status, 'unread')
self.assertEquals(notification.id, response['id'])
self.assertEquals(notification.status, response['status'])
self.assertEquals(notification.topic, response['topic'])
self.assertEquals(notification.message, response['message'])
self.assertEqual(notification.status, 'unread')
self.assertEqual(notification.id, response['id'])
self.assertEqual(notification.status, response['status'])
self.assertEqual(notification.topic, response['topic'])
self.assertEqual(notification.message, response['message'])

def test_notification_datetime(self):
self.env.create_node(
@ -64,14 +64,14 @@ class TestHandlers(BaseIntegrationTest):
),
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals(response.get('cluster'), cluster.id)
self.assertEquals(notification.status, 'unread')
self.assertEquals(notification.id, response['id'])
self.assertEquals(notification.status, response['status'])
self.assertEquals(notification.topic, response['topic'])
self.assertEquals(notification.message, response['message'])
self.assertEqual(response.get('cluster'), cluster.id)
self.assertEqual(notification.status, 'unread')
self.assertEqual(notification.id, response['id'])
self.assertEqual(notification.status, response['status'])
self.assertEqual(notification.topic, response['topic'])
self.assertEqual(notification.message, response['message'])

def test_notification_update(self):
notification = self.env.create_notification()
@ -87,10 +87,10 @@ class TestHandlers(BaseIntegrationTest):
headers=self.default_headers
)
response = jsonutils.loads(resp.body)
self.assertEquals(notification.id, response['id'])
self.assertEquals('read', response['status'])
self.assertEquals(notification.topic, response['topic'])
self.assertEquals(notification.message, response['message'])
self.assertEqual(notification.id, response['id'])
self.assertEqual('read', response['status'])
self.assertEqual(notification.topic, response['topic'])
self.assertEqual(notification.message, response['message'])

def test_notification_not_found(self):
notification = self.env.create_notification()
@ -102,4 +102,4 @@ class TestHandlers(BaseIntegrationTest):
headers=self.default_headers,
expect_errors=True
)
self.assertEquals(404, resp.status_code)
self.assertEqual(404, resp.status_code)

@ -53,8 +53,8 @@ class TestHandlers(BaseIntegrationTest):
'check_redhat_licenses',
'download_release'
)):
self.assertEquals(rpc_message[i]['method'], name)
self.assertEquals(
self.assertEqual(rpc_message[i]['method'], name)
self.assertEqual(
rpc_message[i]['args']['release_info'],
test_release_data
)
@ -67,7 +67,7 @@ class TestHandlers(BaseIntegrationTest):
'password': 'password'}),
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status_code, 400)
self.assertEqual(resp.status_code, 400)

@fake_tasks()
def test_redhat_account_validation_success(self):
@ -84,7 +84,7 @@ class TestHandlers(BaseIntegrationTest):
'password': 'password',
'release_id': self.release.id}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)

@fake_tasks()
def test_redhat_account_validation_failure(self):
@ -96,7 +96,7 @@ class TestHandlers(BaseIntegrationTest):
'release_id': self.release.id}),
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status_code, 202)
self.assertEqual(resp.status_code, 202)

supertask = self.db.query(Task).filter_by(
name="redhat_check_credentials"
@ -108,7 +108,7 @@ class TestHandlers(BaseIntegrationTest):
resp = self.app.get(
reverse('RedHatAccountHandler'),
expect_errors=True)
self.assertEquals(resp.status_code, 404)
self.assertEqual(resp.status_code, 404)

resp = self.app.post(
reverse('RedHatAccountHandler'),
@ -117,12 +117,12 @@ class TestHandlers(BaseIntegrationTest):
'password': 'password',
'release_id': self.release.id}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

resp = self.app.get(
reverse('RedHatAccountHandler'),
expect_errors=True)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)

response = jsonutils.loads(resp.body)

@ -139,7 +139,7 @@ class TestHandlers(BaseIntegrationTest):
'password': 'password',
'release_id': self.release.id}),
headers=self.default_headers)
self.assertEquals(resp.status_code, 200)
self.assertEqual(resp.status_code, 200)
query = self.env.db.query(RedHatAccount)
self.assertEquals(query.count(), 1)
self.assertEquals(query.filter_by(username='rheltest').count(), 1)
self.assertEqual(query.count(), 1)
self.assertEqual(query.filter_by(username='rheltest').count(), 1)

@ -26,9 +26,9 @@ class TestHandlers(BaseIntegrationTest):
reverse('ReleaseCollectionHandler'),
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals([], response)
self.assertEqual([], response)

def test_release_creation(self):
resp = self.app.post(
@ -40,7 +40,7 @@ class TestHandlers(BaseIntegrationTest):
}),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 201)
self.assertEqual(resp.status_code, 201)

def test_release_create(self):
release_name = "OpenStack"
@ -84,7 +84,7 @@ class TestHandlers(BaseIntegrationTest):
}),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 201)
self.assertEqual(resp.status_code, 201)

resp = self.app.post(
reverse('ReleaseCollectionHandler'),
@ -114,14 +114,14 @@ class TestHandlers(BaseIntegrationTest):
headers=self.default_headers,
expect_errors=True
)
self.assertEquals(resp.status_code, 409)
self.assertEqual(resp.status_code, 409)

release_from_db = self.db.query(Release).filter_by(
name=release_name,
version=release_version,
description=release_description
).all()
self.assertEquals(len(release_from_db), 1)
self.assertEqual(len(release_from_db), 1)

def test_release_create_already_exist(self):
release_name = "OpenStack"
@ -165,7 +165,7 @@ class TestHandlers(BaseIntegrationTest):
}),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 201)
self.assertEqual(resp.status_code, 201)

resp = self.app.post(
reverse('ReleaseCollectionHandler'),
@ -195,7 +195,7 @@ class TestHandlers(BaseIntegrationTest):
headers=self.default_headers,
expect_errors=True
)
self.assertEquals(resp.status_code, 409)
self.assertEqual(resp.status_code, 409)

def test_release_w_orch_data_create(self):
release_name = "OpenStack"
@ -249,13 +249,13 @@ class TestHandlers(BaseIntegrationTest):
}),
headers=self.default_headers
)
self.assertEquals(resp.status_code, 201)
self.assertEqual(resp.status_code, 201)

resp = self.app.get(
reverse("ReleaseCollectionHandler"),
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
self.assertEquals(1, len(response))
self.assertEquals(orch_data, response[0]["orchestrator_data"])
self.assertEqual(1, len(response))
self.assertEqual(orch_data, response[0]["orchestrator_data"])

@ -31,13 +31,13 @@ class TestHandlers(BaseIntegrationTest):
}),
headers=self.default_headers,
expect_errors=True)
self.assertEquals(200, resp.status_code)
self.assertEqual(200, resp.status_code)
response = jsonutils.loads(resp.body)
release_from_db = self.db.query(Release).one()
self.db.refresh(release_from_db)
self.assertEquals('5.1', release_from_db.version)
self.assertEquals('5.1', response['version'])
self.assertEquals('modified release', response['name'])
self.assertEqual('5.1', release_from_db.version)
self.assertEqual('5.1', response['version'])
self.assertEqual('modified release', response['name'])

def test_release_put_returns_400_if_no_body(self):
release = self.env.create_release(api=False)
@ -46,7 +46,7 @@ class TestHandlers(BaseIntegrationTest):
"",
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status_code, 400)
self.assertEqual(resp.status_code, 400)

def test_release_delete_returns_400_if_clusters(self):
cluster = self.env.create_cluster(api=False)
@ -56,8 +56,8 @@ class TestHandlers(BaseIntegrationTest):
headers=self.default_headers,
expect_errors=True
)
self.assertEquals(resp.status_code, 400)
self.assertEquals(
self.assertEqual(resp.status_code, 400)
self.assertEqual(
resp.body,
"Can't delete release with "
"clusters assigned"

@ -27,7 +27,7 @@ class TestUtils(BaseTestCase):
self.db.add(task)
self.db.commit()
task_by_uuid = TaskHelper.get_task_by_uuid(task.uuid)
self.assertEquals(task.uuid, task_by_uuid.uuid)
self.assertEqual(task.uuid, task_by_uuid.uuid)

def test_get_task_by_uuid_raises_error(self):
self.assertRaises(errors.CannotFindTask,

@ -34,13 +34,13 @@ class TestHelperUpdateClusterStatus(BaseTestCase):
{'roles': ['cinder']}])

def node_should_be_error_with_type(self, node, error_type):
self.assertEquals(node.status, 'error')
self.assertEquals(node.error_type, error_type)
self.assertEquals(node.progress, 0)
self.assertEqual(node.status, 'error')
self.assertEqual(node.error_type, error_type)
self.assertEqual(node.progress, 0)

def nodes_should_not_be_error(self, nodes):
for node in nodes:
self.assertEquals(node.status, 'discover')
self.assertEqual(node.status, 'discover')

@property
def cluster(self):
@ -55,7 +55,7 @@ class TestHelperUpdateClusterStatus(BaseTestCase):

TaskHelper.update_cluster_status(task.uuid)

self.assertEquals(self.cluster.status, 'error')
self.assertEqual(self.cluster.status, 'error')
self.node_should_be_error_with_type(self.cluster.nodes[0], 'deploy')
self.nodes_should_not_be_error(self.cluster.nodes[1:])

@ -66,7 +66,7 @@ class TestHelperUpdateClusterStatus(BaseTestCase):

TaskHelper.update_cluster_status(task.uuid)

self.assertEquals(self.cluster.status, 'error')
self.assertEqual(self.cluster.status, 'error')

def test_update_nodes_to_error_if_provision_task_failed(self):
self.cluster.nodes[0].status = 'provisioning'
@ -77,7 +77,7 @@ class TestHelperUpdateClusterStatus(BaseTestCase):

TaskHelper.update_cluster_status(task.uuid)

self.assertEquals(self.cluster.status, 'error')
self.assertEqual(self.cluster.status, 'error')
self.node_should_be_error_with_type(self.cluster.nodes[0], 'provision')
self.nodes_should_not_be_error(self.cluster.nodes[1:])

@ -88,7 +88,7 @@ class TestHelperUpdateClusterStatus(BaseTestCase):

TaskHelper.update_cluster_status(task.uuid)

self.assertEquals(self.cluster.status, 'operational')
self.assertEqual(self.cluster.status, 'operational')

def test_update_if_parent_task_is_ready_all_nodes_should_be_ready(self):
for node in self.cluster.nodes:
@ -104,11 +104,11 @@ class TestHelperUpdateClusterStatus(BaseTestCase):

TaskHelper.update_cluster_status(task.uuid)

self.assertEquals(self.cluster.status, 'operational')
self.assertEqual(self.cluster.status, 'operational')

for node in self.cluster.nodes:
self.assertEquals(node.status, 'ready')
self.assertEquals(node.progress, 100)
self.assertEqual(node.status, 'ready')
self.assertEqual(node.progress, 100)

def test_update_cluster_status_if_task_was_already_in_error_status(self):
for node in self.cluster.nodes:
@ -121,12 +121,12 @@ class TestHelperUpdateClusterStatus(BaseTestCase):

TaskHelper.update_task_status(task.uuid, 'error', 100)

self.assertEquals(self.cluster.status, 'error')
self.assertEquals(task.status, 'error')
self.assertEqual(self.cluster.status, 'error')
self.assertEqual(task.status, 'error')

for node in self.cluster.nodes:
self.assertEquals(node.status, 'error')
self.assertEquals(node.progress, 0)
self.assertEqual(node.status, 'error')
self.assertEqual(node.progress, 0)

def test_do_not_set_cluster_to_error_if_validation_failed(self):
for task_name in ['check_before_deployment', 'check_networks']:
@ -145,7 +145,7 @@ class TestHelperUpdateClusterStatus(BaseTestCase):
self.db.commit()

TaskHelper.update_cluster_status(supertask.uuid)
self.assertEquals(self.cluster.status, 'new')
self.assertEqual(self.cluster.status, 'new')


class TestCheckBeforeDeploymentTask(BaseTestCase):
@ -165,12 +165,12 @@ class TestCheckBeforeDeploymentTask(BaseTestCase):
def set_node_status(self, status):
self.node.status = status
self.env.db.commit()
self.assertEquals(self.node.status, status)
self.assertEqual(self.node.status, status)

def set_node_error_type(self, error_type):
self.node.error_type = error_type
self.env.db.commit()
self.assertEquals(self.node.error_type, error_type)
self.assertEqual(self.node.error_type, error_type)

def is_checking_required(self):
return CheckBeforeDeploymentTask._is_disk_checking_required(self.node)
@ -216,14 +216,14 @@ class TestCheckBeforeDeploymentTask(BaseTestCase):
'check_disk_space_for_deployment') as check_mock:
CheckBeforeDeploymentTask._check_disks(self.task)

self.assertEquals(check_mock.call_count, 1)
self.assertEqual(check_mock.call_count, 1)

with patch.object(
VolumeManager,
'check_volume_sizes_for_deployment') as check_mock:
CheckBeforeDeploymentTask._check_volumes(self.task)

self.assertEquals(check_mock.call_count, 1)
self.assertEqual(check_mock.call_count, 1)

def test_check_nodes_online_raises_exception(self):
self.node.online = False

@ -80,4 +80,4 @@ class TestTaskHandlers(BaseTestCase):
) + "?force=1",
headers=self.default_headers
)
self.assertEquals(resp.status_code, 204)
self.assertEqual(resp.status_code, 204)

@ -50,10 +50,10 @@ class TestTaskHelpers(BaseTestCase):
{'roles': ['cinder']}])

nodes = TaskHelper.nodes_to_deploy(cluster)
self.assertEquals(len(nodes), 3)
self.assertEqual(len(nodes), 3)

controllers = self.filter_by_role(nodes, 'controller')
self.assertEquals(len(controllers), 3)
self.assertEqual(len(controllers), 3)

def test_redeploy_only_compute_cinder(self):
cluster = self.create_env([
@ -65,13 +65,13 @@ class TestTaskHelpers(BaseTestCase):
{'roles': ['cinder'], 'status': 'error'}])

nodes = TaskHelper.nodes_to_deploy(cluster)
self.assertEquals(len(nodes), 2)
self.assertEqual(len(nodes), 2)

cinders = self.filter_by_role(nodes, 'cinder')
self.assertEquals(len(cinders), 1)
self.assertEqual(len(cinders), 1)

computes = self.filter_by_role(nodes, 'compute')
self.assertEquals(len(computes), 1)
self.assertEqual(len(computes), 1)

def test_redeploy_all_controller_and_compute_cinder(self):
cluster = self.create_env([
@ -83,16 +83,16 @@ class TestTaskHelpers(BaseTestCase):
{'roles': ['cinder'], 'status': 'error'}])

nodes = TaskHelper.nodes_to_deploy(cluster)
self.assertEquals(len(nodes), 5)
self.assertEqual(len(nodes), 5)

controllers = self.filter_by_role(nodes, 'controller')
self.assertEquals(len(controllers), 3)
self.assertEqual(len(controllers), 3)

cinders = self.filter_by_role(nodes, 'cinder')
self.assertEquals(len(cinders), 2)
self.assertEqual(len(cinders), 2)

computes = self.filter_by_role(nodes, 'compute')
self.assertEquals(len(computes), 1)
self.assertEqual(len(computes), 1)

def test_recalculate_deployment_task_progress(self):
cluster = self.create_env([
@ -114,7 +114,7 @@ class TestTaskHelpers(BaseTestCase):
self.db.commit()

progress = TaskHelper.recalculate_deployment_task_progress(task)
self.assertEquals(progress, 25)
self.assertEqual(progress, 25)

def test_recalculate_provisioning_task_progress(self):
cluster = self.create_env([
@ -130,4 +130,4 @@ class TestTaskHelpers(BaseTestCase):
self.db.commit()

progress = TaskHelper.recalculate_provisioning_task_progress(task)
self.assertEquals(progress, 50)
self.assertEqual(progress, 50)

@ -33,7 +33,7 @@ class TestConfig(TestCase):
MockedTime.strftime.side_effect = time.strftime
conf = Config({})
stamped = conf._timestamp("sample")
self.assertEquals(
self.assertEqual(
stamped,
"sample-{0}".format(time.strftime('%Y-%m-%d_%H-%M-%S', t))
)

@ -74,7 +74,7 @@ class TestDriver(TestCase):
{"host": {"address": "remote_host"}}, None)
result = driver.command(command)
shotgun.driver.fabric.api.run.assert_called_with(command, pty=True)
self.assertEquals(result, out)
self.assertEqual(result, out)
shotgun.driver.fabric.api.settings.assert_called_with(
host_string="remote_host", timeout=2, command_timeout=10,
warn_only=True, key_filename=None)
@ -82,7 +82,7 @@ class TestDriver(TestCase):
driver = shotgun.driver.Driver({}, None)
result = driver.command(command)
shotgun.driver.execute.assert_called_with(command)
self.assertEquals(result, out)
self.assertEqual(result, out)

@patch('shotgun.driver.execute')
@patch('shotgun.driver.fabric.api.settings')