Merge V2 and V2.1 hypervisor functional tests

Currently v2 and v2.1 have separate functional tests and corresponding
sample files. Since v2 and v2.1 are supposed to be identical, maintaining
two sets of functional tests and sample files adds unnecessary overhead.
A single set of tests can be run against both v2 and v2.1, as sketched
below.
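
A minimal sketch of that approach, following the new v2.1 test module in
the diff below (class and attribute names are taken from the diff; the
base class's handling of '_api_version' is assumed from context):

    # Sketch only: the shared v2.1 sample test class temporarily
    # overrides '_api_version' so the same tests also run against the
    # v2 API, until the base class itself defaults to 'v2'.
    from nova.tests.functional.v3 import api_sample_base

    class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
        ADMIN_API = True
        extension_name = "os-hypervisors"
        _api_version = 'v2'  # run the v2.1 tests against v2 as well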

This commit merges the hypervisor functional tests and also adds a test
for the hypervisor servers API (listing the servers running on a
hypervisor).

In v2, the hypervisor API has the following extensions:
- os-extended-hypervisors
- os-hypervisor-status
In v2.1, these extensions have been merged into the single os-hypervisors
plugin.
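
When the shared tests run against v2, the attributes that v2.1 serves from
that single plugin still come from the separate extensions above, so the
v2 run has to enable them explicitly. A hedged sketch of that setup,
mirroring the _get_flags() override in the diff below (the helper name is
illustrative):

    # Sketch, mirroring the _get_flags() override below: the v2 run
    # enables the extra extensions whose attributes v2.1 already exposes
    # through the os-hypervisors plugin.
    from oslo_config import cfg

    CONF = cfg.CONF
    CONF.import_opt('osapi_compute_extension',
                    'nova.api.openstack.compute.extensions')

    def v2_hypervisor_extensions():  # illustrative helper name
        exts = CONF.osapi_compute_extension[:]
        exts.append(
            'nova.api.openstack.compute.contrib.hypervisors.Hypervisors')
        exts.append(
            'nova.api.openstack.compute.contrib.extended_hypervisors.'
            'Extended_hypervisors')
        exts.append(
            'nova.api.openstack.compute.contrib.hypervisor_status.'
            'Hypervisor_status')
        return exts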

Change-Id: I5ddf3c54dd80a67f71762769d6130db41e772b01
ghanshyam
2015-04-16 16:54:04 +09:00
parent 4e2229e2d3
commit 7b3a32492c
24 changed files with 142 additions and 428 deletions


@@ -1,25 +0,0 @@
{
"hypervisor": {
"cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}",
"current_workload": 0,
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "5641188ab2964f88a21042b493585ff8",
"id": 2
},
"vcpus": 1,
"vcpus_used": 0
}
}


@@ -1,27 +0,0 @@
{
"hypervisor": {
"cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}",
"current_workload": 0,
"disk_available_least": 0,
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": 1,
"status": "enabled",
"state": "up",
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "5641188ab2964f88a21042b493585ff8",
"id": 2,
"disabled_reason": null
},
"vcpus": 1,
"vcpus_used": 0
}
}


@@ -1,26 +0,0 @@
{
"hypervisors": [
{
"cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}",
"current_workload": 0,
"disk_available_least": null,
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "1e0d7892083548cfb347e782d3b20342",
"id": 2
},
"vcpus": 1,
"vcpus_used": 0
}
]
}


@@ -1,8 +0,0 @@
{
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
"id": 1
}
]
}


@@ -1,8 +0,0 @@
{
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
"id": 1
}
]
}


@@ -1,8 +0,0 @@
{
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
"id": 1
}
]
}


@@ -1,24 +0,0 @@
{
"hypervisor": {
"cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}",
"current_workload": 0,
"disk_available_least": 0,
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "5641188ab2964f88a21042b493585ff8",
"id": 2
},
"vcpus": 1,
"vcpus_used": 0
}
}


@@ -1,16 +0,0 @@
{
"hypervisor_statistics": {
"count": 1,
"current_workload": 0,
"disk_available_least": 0,
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"vcpus": 1,
"vcpus_used": 0
}
}


@@ -1,7 +0,0 @@
{
"hypervisor": {
"hypervisor_hostname": "fake-mini",
"id": 1,
"uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
}
}


@@ -0,0 +1,20 @@
{
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
"id": 1,
"state": "up",
"status": "enabled",
"servers": [
{
"name": "test_server1",
"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
},
{
"name": "test_server2",
"uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
}
]
}
]
}


@@ -1,25 +0,0 @@
{
"hypervisor": {
"cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}",
"current_workload": 0,
"disk_available_least": 0,
"host_ip": "%(ip)s",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": %(hypervisor_id)s,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "%(host_name)s",
"id": 2
},
"vcpus": 1,
"vcpus_used": 0
}
}


@@ -1,27 +0,0 @@
{
"hypervisor": {
"cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}",
"current_workload": 0,
"disk_available_least": 0,
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": %(hypervisor_id)s,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"state": "up",
"status": "enabled",
"service": {
"host": "%(host_name)s",
"id": 2,
"disabled_reason": null
},
"vcpus": 1,
"vcpus_used": 0
}
}


@@ -1,27 +0,0 @@
{
"hypervisors": [
{
"cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}",
"current_workload": 0,
"disk_available_least": null,
"host_ip": "%(ip)s",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "%(host_name)s",
"id": 2
},
"vcpus": 1,
"vcpus_used": 0
}
]
}


@@ -1,8 +0,0 @@
{
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
"id": 1
}
]
}


@@ -1,8 +0,0 @@
{
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
"id": 1
}
]
}


@@ -1,8 +0,0 @@
{
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
"id": 1
}
]
}


@@ -1,24 +0,0 @@
{
"hypervisor": {
"cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}",
"current_workload": 0,
"disk_available_least": 0,
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": %(hypervisor_id)s,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "%(host_name)s",
"id": 2
},
"vcpus": 1,
"vcpus_used": 0
}
}


@@ -1,16 +0,0 @@
{
"hypervisor_statistics": {
"count": 1,
"current_workload": 0,
"disk_available_least": 0,
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"vcpus": 1,
"vcpus_used": 0
}
}


@@ -1,7 +0,0 @@
{
"hypervisor": {
"hypervisor_hostname": "fake-mini",
"id": %(hypervisor_id)s,
"uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
}
}


@@ -18,7 +18,6 @@ import inspect
import os
import uuid as uuid_lib
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
@@ -27,14 +26,10 @@ import testtools
from nova.api.metadata import password
from nova.api.openstack.compute import extensions
from nova.cells import utils as cells_utils
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.compute import api as compute_api
from nova.compute import cells_api as cells_api
from nova.console import manager as console_manager # noqa - only for cfg
from nova.network.neutronv2 import api as neutron_api # noqa - only for cfg
from nova import objects
from nova.servicegroup import api as service_group_api
from nova import test
from nova.tests.functional import api_samples_test_base
from nova.tests.functional import integrated_helpers
@@ -814,128 +809,6 @@ class ConfigDriveSampleJsonTest(ServersSampleBase):
subs, response, 200)
@mock.patch.object(service_group_api.API, "service_is_up", lambda _: True)
class HypervisorsSampleJsonTests(ApiSampleTestBaseV2):
ADMIN_API = True
extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
"Hypervisors")
def test_hypervisors_list(self):
response = self._do_get('os-hypervisors')
self._verify_response('hypervisors-list-resp', {}, response, 200)
def test_hypervisors_search(self):
response = self._do_get('os-hypervisors/fake/search')
self._verify_response('hypervisors-search-resp', {}, response, 200)
def test_hypervisors_servers(self):
response = self._do_get('os-hypervisors/fake/servers')
self._verify_response('hypervisors-servers-resp', {}, response, 200)
def test_hypervisors_show(self):
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
}
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs.update(self._get_regexes())
self._verify_response('hypervisors-show-resp', subs, response, 200)
def test_hypervisors_statistics(self):
response = self._do_get('os-hypervisors/statistics')
self._verify_response('hypervisors-statistics-resp', {}, response, 200)
def test_hypervisors_uptime(self):
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
self.stubs.Set(compute_api.HostAPI,
'get_host_uptime', fake_get_host_uptime)
hypervisor_id = 1
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
class ExtendedHypervisorsJsonTest(ApiSampleTestBaseV2):
ADMIN_API = True
extends_name = ("nova.api.openstack.compute.contrib."
"hypervisors.Hypervisors")
extension_name = ("nova.api.openstack.compute.contrib."
"extended_hypervisors.Extended_hypervisors")
def test_hypervisors_show_with_ip(self):
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
}
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs.update(self._get_regexes())
self._verify_response('hypervisors-show-with-ip-resp',
subs, response, 200)
class HypervisorStatusJsonTest(ApiSampleTestBaseV2):
ADMIN_API = True
extends_name = ("nova.api.openstack.compute.contrib."
"hypervisors.Hypervisors")
extension_name = ("nova.api.openstack.compute.contrib."
"hypervisor_status.Hypervisor_status")
def test_hypervisors_show_with_status(self):
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
}
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs.update(self._get_regexes())
self._verify_response('hypervisors-show-with-status-resp',
subs, response, 200)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
class HypervisorsCellsSampleJsonTests(ApiSampleTestBaseV2):
ADMIN_API = True
extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
"Hypervisors")
def setUp(self):
self.flags(enable=True, cell_type='api', group='cells')
super(HypervisorsCellsSampleJsonTests, self).setUp()
def test_hypervisor_uptime(self, mocks):
fake_hypervisor = objects.ComputeNode(id=1, host='fake-mini',
hypervisor_hostname='fake-mini')
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
def fake_compute_node_get(self, context, hyp):
return fake_hypervisor
def fake_service_get_by_compute_host(self, context, host):
return cells_utils.ServiceProxy(
objects.Service(id=1, host='fake-mini', disabled=False,
disabled_reason=None),
'cell1')
self.stubs.Set(cells_api.HostAPI, 'compute_node_get',
fake_compute_node_get)
self.stubs.Set(cells_api.HostAPI, 'service_get_by_compute_host',
fake_service_get_by_compute_host)
self.stubs.Set(cells_api.HostAPI,
'get_host_uptime', fake_get_host_uptime)
hypervisor_id = fake_hypervisor['id']
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {'hypervisor_id': hypervisor_id}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
class PreserveEphemeralOnRebuildJsonTest(ServersSampleBase):
extension_name = ('nova.api.openstack.compute.contrib.'
'preserve_ephemeral_rebuild.'


@@ -0,0 +1,20 @@
{
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
"id": 1,
"state": "up",
"status": "enabled",
"servers": [
{
"name": "test_server1",
"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
},
{
"name": "test_server2",
"uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
}
]
}
]
}


@@ -13,13 +13,40 @@
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from nova.cells import utils as cells_utils
from nova.compute import api as compute_api
from nova.compute import cells_api as cells_api
from nova import objects
from nova.tests.functional.v3 import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-hypervisors"
# TODO(gmann): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
def _get_flags(self):
f = super(HypervisorsSampleJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.hypervisors.Hypervisors')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.extended_hypervisors.'
'Extended_hypervisors')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.hypervisor_status.'
'Hypervisor_status')
return f
def test_hypervisors_list(self):
response = self._do_get('os-hypervisors')
@@ -29,9 +56,29 @@ class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
response = self._do_get('os-hypervisors/fake/search')
self._verify_response('hypervisors-search-resp', {}, response, 200)
def test_hypervisors_servers(self):
def test_hypervisors_without_servers(self):
response = self._do_get('os-hypervisors/fake/servers')
self._verify_response('hypervisors-servers-resp', {}, response, 200)
self._verify_response('hypervisors-without-servers-resp',
{}, response, 200)
@mock.patch("nova.compute.api.HostAPI.instance_get_all_by_host")
def test_hypervisors_with_servers(self, mock_instance_get):
instance = [
{
"deleted": None,
"name": "test_server1",
"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
},
{
"deleted": None,
"name": "test_server2",
"uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
}]
mock_instance_get.return_value = instance
response = self._do_get('os-hypervisors/fake/servers')
self._verify_response('hypervisors-with-servers-resp', {},
response, 200)
def test_hypervisors_detail(self):
hypervisor_id = 1
@@ -68,3 +115,56 @@ class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
'hypervisor_id': hypervisor_id,
}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
class HypervisorsCellsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-hypervisors"
# TODO(gmann): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
def _get_flags(self):
f = super(HypervisorsCellsSampleJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.hypervisors.Hypervisors')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.hypervisor_status.'
'Hypervisor_status')
return f
def setUp(self):
self.flags(enable=True, cell_type='api', group='cells')
super(HypervisorsCellsSampleJsonTests, self).setUp()
def test_hypervisor_uptime(self, mocks):
fake_hypervisor = objects.ComputeNode(id=1, host='fake-mini',
hypervisor_hostname='fake-mini')
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
def fake_compute_node_get(self, context, hyp):
return fake_hypervisor
def fake_service_get_by_compute_host(self, context, host):
return cells_utils.ServiceProxy(
objects.Service(id=1, host='fake-mini', disabled=False,
disabled_reason=None),
'cell1')
self.stubs.Set(cells_api.HostAPI, 'compute_node_get',
fake_compute_node_get)
self.stubs.Set(cells_api.HostAPI, 'service_get_by_compute_host',
fake_service_get_by_compute_host)
self.stubs.Set(cells_api.HostAPI,
'get_host_uptime', fake_get_host_uptime)
hypervisor_id = fake_hypervisor['id']
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {'hypervisor_id': hypervisor_id}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)