prior to nova-1336 merge
nova/scheduler/vsa.py
@@ -65,40 +65,29 @@ class VsaScheduler(simple.SimpleScheduler):
                          {"method": "notification",
                           "args": {"event": event}})
 
-    def _compare_names(self, str1, str2):
-        result = str1.lower() == str2.lower()
-        # LOG.debug(_("Comparing %(str1)s and %(str2)s. "\
-        #             "Result %(result)s"), locals())
-        return result
-
-    def _compare_sizes_exact_match(self, cap_capacity, size_gb):
-        cap_capacity = BYTES_TO_GB(int(cap_capacity))
-        size_gb = int(size_gb)
-        result = cap_capacity == size_gb
-        # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. "\
-        #             "Result %(result)s"), locals())
-        return result
-
-    def _compare_sizes_approxim(self, cap_capacity, size_gb):
-        cap_capacity = BYTES_TO_GB(int(cap_capacity))
-        size_gb = int(size_gb)
-        size_perc = size_gb * FLAGS.drive_type_approx_capacity_percent / 100
-
-        result = cap_capacity >= size_gb - size_perc and \
-                 cap_capacity <= size_gb + size_perc
-        # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. "\
-        #             "Result %(result)s"), locals())
-        return result
-
     def _qosgrp_match(self, drive_type, qos_values):
 
+        def _compare_names(str1, str2):
+            result = str1.lower() == str2.lower()
+            return result
+
+        def _compare_sizes_approxim(cap_capacity, size_gb):
+            cap_capacity = BYTES_TO_GB(int(cap_capacity))
+            size_gb = int(size_gb)
+            size_perc = size_gb * \
+                        FLAGS.drive_type_approx_capacity_percent / 100
+
+            result = cap_capacity >= size_gb - size_perc and \
+                     cap_capacity <= size_gb + size_perc
+            return result
+
         # Add more entries for additional comparisons
         compare_list = [{'cap1': 'DriveType',
                          'cap2': 'type',
-                         'cmp_func': self._compare_names},
+                         'cmp_func': _compare_names},
                         {'cap1': 'DriveCapacity',
                          'cap2': 'size_gb',
-                         'cmp_func': self._compare_sizes_approxim}]
+                         'cmp_func': _compare_sizes_approxim}]
 
         for cap in compare_list:
             if cap['cap1'] in qos_values.keys() and \
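The approximate size match accepts any drive whose capacity lands within a configurable percentage window of the requested size. A minimal standalone sketch of that check — the power-of-two converters and the default of 10 percent are illustrative assumptions, not values taken from this commit:

    GB_TO_BYTES = lambda gb: gb * 1024 ** 3
    BYTES_TO_GB = lambda b: b / (1024 ** 3)

    def matches_approx(cap_capacity, size_gb, percent=10):
        # Accept capacities within +/- percent of the requested size.
        cap_gb = BYTES_TO_GB(int(cap_capacity))
        tolerance = size_gb * percent / 100
        return size_gb - tolerance <= cap_gb <= size_gb + tolerance

    assert matches_approx(GB_TO_BYTES(95), 100)       # inside the 10% window
    assert not matches_approx(GB_TO_BYTES(80), 100)   # outside the window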
@@ -106,20 +95,23 @@ class VsaScheduler(simple.SimpleScheduler):
                cap['cmp_func'] is not None and \
                cap['cmp_func'](qos_values[cap['cap1']],
                                drive_type[cap['cap2']]):
                 # LOG.debug(("One of required capabilities found: %s:%s"),
                 #             cap['cap1'], drive_type[cap['cap2']])
                 pass
             else:
                 return False
         return True
 
+    def _get_service_states(self):
+        return self.zone_manager.service_states
+
     def _filter_hosts(self, topic, request_spec, host_list=None):
 
         LOG.debug(_("_filter_hosts: %(request_spec)s"), locals())
 
         drive_type = request_spec['drive_type']
         LOG.debug(_("Filter hosts for drive type %s"), drive_type['name'])
 
         if host_list is None:
-            host_list = self.zone_manager.service_states.iteritems()
+            host_list = self._get_service_states().iteritems()
 
         filtered_hosts = []  # returns list of (hostname, capability_dict)
         for host, host_dict in host_list:
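Reading service state through the new _get_service_states seam, rather than touching zone_manager directly, gives tests one place to inject synthetic data. A condensed sketch of how the new test module exploits it, using stubout as the tests themselves do (sched is a placeholder for a VsaScheduler instance created elsewhere):

    import stubout

    fake_states = {'host_0': {'volume': {'timestamp': None,
                                         'drive_qos_info': {}}}}
    stubs = stubout.StubOutForTesting()
    # After this, everything the scheduler reads through
    # _get_service_states() comes from the synthetic dictionary.
    stubs.Set(sched, '_get_service_states', lambda: fake_states)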
@@ -131,7 +123,6 @@ class VsaScheduler(simple.SimpleScheduler):
             for qosgrp, qos_values in gos_info.iteritems():
                 if self._qosgrp_match(drive_type, qos_values):
                     if qos_values['AvailableCapacity'] > 0:
-                        # LOG.debug(_("Adding host %s to the list"), host)
                         filtered_hosts.append((host, gos_info))
                     else:
                         LOG.debug(_("Host %s has no free capacity. Skip"),
@@ -226,7 +217,7 @@ class VsaScheduler(simple.SimpleScheduler):
                     "args": {"volume_id": volume_ref['id'],
                              "snapshot_id": None}})
 
-    def _check_host_enforcement(self, availability_zone):
+    def _check_host_enforcement(self, context, availability_zone):
         if (availability_zone
             and ':' in availability_zone
             and context.is_admin):
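_check_host_enforcement now takes the request context because forcing a specific host through a "zone:host" availability zone is treated as an admin-only operation. A minimal sketch of the check it implies, assuming the "nova:host_5" form used in the new tests:

    def forced_host(context, availability_zone):
        # "nova:host_5" -> force placement onto host_5, admins only.
        if availability_zone and ':' in availability_zone \
                and context.is_admin:
            zone, _sep, host = availability_zone.partition(':')
            return host
        return None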
@@ -273,16 +264,10 @@ class VsaScheduler(simple.SimpleScheduler):
             vol['capabilities'] = qos_cap
             self._consume_resource(qos_cap, vol['size'], -1)
 
-            # LOG.debug(_("Volume %(name)s assigned to host %(host)s"),
-            #             locals())
-
     def schedule_create_volumes(self, context, request_spec,
                                 availability_zone, *_args, **_kwargs):
         """Picks hosts for hosting multiple volumes."""
 
-        LOG.debug(_("Service states BEFORE %s"),
-                  self.zone_manager.service_states)
-
         num_volumes = request_spec.get('num_volumes')
         LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") %
                   locals())
@@ -290,16 +275,13 @@ class VsaScheduler(simple.SimpleScheduler):
         vsa_id = request_spec.get('vsa_id')
         volume_params = request_spec.get('volumes')
 
-        host = self._check_host_enforcement(availability_zone)
+        host = self._check_host_enforcement(context, availability_zone)
 
         try:
             self._assign_hosts_to_volumes(context, volume_params, host)
 
             for vol in volume_params:
                 self._provision_volume(context, vol, vsa_id, availability_zone)
-
-            LOG.debug(_("Service states AFTER %s"),
-                      self.zone_manager.service_states)
         except:
             if vsa_id:
                 db.vsa_update(context, vsa_id,
@@ -309,8 +291,6 @@ class VsaScheduler(simple.SimpleScheduler):
                 if 'capabilities' in vol:
                     self._consume_resource(vol['capabilities'],
                                            vol['size'], 1)
-            LOG.debug(_("Service states AFTER %s"),
-                      self.zone_manager.service_states)
             raise
 
         return None
@@ -319,7 +299,8 @@ class VsaScheduler(simple.SimpleScheduler):
         """Picks the best host based on requested drive type capability."""
         volume_ref = db.volume_get(context, volume_id)
 
-        host = self._check_host_enforcement(volume_ref['availability_zone'])
+        host = self._check_host_enforcement(context,
+                                            volume_ref['availability_zone'])
         if host:
             now = utils.utcnow()
             db.volume_update(context, volume_id, {'host': host,
@@ -333,9 +314,6 @@ class VsaScheduler(simple.SimpleScheduler):
                                 volume_id, *_args, **_kwargs)
         drive_type = dict(drive_type)
 
-        LOG.debug(_("Service states BEFORE %s"),
-                  self.zone_manager.service_states)
-
         LOG.debug(_("Spawning volume %(volume_id)s with drive type "\
                     "%(drive_type)s"), locals())
 
@@ -358,9 +336,6 @@ class VsaScheduler(simple.SimpleScheduler):
         db.volume_update(context, volume_id, {'host': host,
                                               'scheduled_at': now})
         self._consume_resource(qos_cap, volume_ref['size'], -1)
-
-        LOG.debug(_("Service states AFTER %s"),
-                  self.zone_manager.service_states)
         return host
 
     def _consume_full_drive(self, qos_values, direction):
nova/tests/scheduler/test_vsa_scheduler.py (new file, 616 lines)
@@ -0,0 +1,616 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import stubout

import nova
from nova import exception
from nova import flags
from nova import db
from nova import context
from nova import test
from nova import utils
from nova import log as logging

from nova.scheduler import vsa as vsa_sched
from nova.scheduler import driver

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.scheduler.vsa')

scheduled_volumes = []
scheduled_volume = {}
global_volume = {}
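
# The stubbed fakes below record scheduler decisions in these module-level
# globals, so individual tests can assert on where each volume was placed.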


class FakeVsaLeastUsedScheduler(
                vsa_sched.VsaSchedulerLeastUsedHost):
    # No need to stub anything at the moment
    pass


class FakeVsaMostAvailCapacityScheduler(
                vsa_sched.VsaSchedulerMostAvailCapacity):
    # No need to stub anything at the moment
    pass


class VsaSchedulerTestCase(test.TestCase):

    def _get_vol_creation_request(self, num_vols, drive_ix, size=0):
        volume_params = []
        for i in range(num_vols):
            drive_type = {'id': i,
                          'name': 'name_' + str(drive_ix),
                          'type': 'type_' + str(drive_ix),
                          'size_gb': 1 + 100 * (drive_ix)}
            volume = {'size': size,
                      'snapshot_id': None,
                      'name': 'vol_' + str(i),
                      'description': None,
                      'drive_ref': drive_type}
            volume_params.append(volume)

        return {'num_volumes': len(volume_params),
                'vsa_id': 123,
                'volumes': volume_params}

    def _generate_default_service_states(self):
        service_states = {}
        for i in range(self.host_num):
            host = {}
            hostname = 'host_' + str(i)
            if hostname in self.exclude_host_list:
                continue

            host['volume'] = {'timestamp': utils.utcnow(),
                              'drive_qos_info': {}}

            for j in range(self.drive_type_start_ix,
                           self.drive_type_start_ix + self.drive_type_num):
                dtype = {}
                dtype['Name'] = 'name_' + str(j)
                dtype['DriveType'] = 'type_' + str(j)
                dtype['TotalDrives'] = 2 * (self.init_num_drives + i)
                dtype['DriveCapacity'] = vsa_sched.GB_TO_BYTES(1 + 100 * j)
                dtype['TotalCapacity'] = dtype['TotalDrives'] * \
                                         dtype['DriveCapacity']
                dtype['AvailableCapacity'] = (dtype['TotalDrives'] - i) * \
                                             dtype['DriveCapacity']
                dtype['DriveRpm'] = 7200
                dtype['DifCapable'] = 0
                dtype['SedCapable'] = 0
                dtype['PartitionDrive'] = {
                            'PartitionSize': 0,
                            'NumOccupiedPartitions': 0,
                            'NumFreePartitions': 0}
                dtype['FullDrive'] = {
                            'NumFreeDrives': dtype['TotalDrives'] - i,
                            'NumOccupiedDrives': i}
                host['volume']['drive_qos_info'][dtype['Name']] = dtype

            service_states[hostname] = host

        return service_states

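    # Worked example of the model generated above: with init_num_drives=10,
    # host i=0 and drive type j=2 come out as
    #     TotalDrives       = 2 * (10 + 0) = 20
    #     DriveCapacity     = GB_TO_BYTES(1 + 100 * 2) = 201 GB in bytes
    #     TotalCapacity     = 20 * DriveCapacity
    #     AvailableCapacity = (20 - 0) * DriveCapacity
    #     FullDrive         = {'NumFreeDrives': 20, 'NumOccupiedDrives': 0}
    # Higher host indices start out with more drives occupied but also
    # more total capacity, which the placement tests below rely on.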
    def _print_service_states(self):
        for host, host_val in self.service_states.iteritems():
            LOG.info(_("Host %s"), host)
            total_used = 0
            total_available = 0
            qos = host_val['volume']['drive_qos_info']

            for k, d in qos.iteritems():
                LOG.info("\t%s: type %s: drives (used %2d, total %2d) "\
                         "size %3d, total %4d, used %4d, avail %d",
                         k, d['DriveType'],
                         d['FullDrive']['NumOccupiedDrives'], d['TotalDrives'],
                         vsa_sched.BYTES_TO_GB(d['DriveCapacity']),
                         vsa_sched.BYTES_TO_GB(d['TotalCapacity']),
                         vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
                                               d['AvailableCapacity']),
                         vsa_sched.BYTES_TO_GB(d['AvailableCapacity']))

                total_used += vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
                                                    d['AvailableCapacity'])
                total_available += vsa_sched.BYTES_TO_GB(
                                                    d['AvailableCapacity'])
            LOG.info("Host %s: used %d, avail %d",
                     host, total_used, total_available)

    def _set_service_states(self, host_num,
                            drive_type_start_ix, drive_type_num,
                            init_num_drives=10,
                            exclude_host_list=[]):
        self.host_num = host_num
        self.drive_type_start_ix = drive_type_start_ix
        self.drive_type_num = drive_type_num
        self.exclude_host_list = exclude_host_list
        self.init_num_drives = init_num_drives
        self.service_states = self._generate_default_service_states()

    def _get_service_states(self):
        return self.service_states

    def _fake_get_service_states(self):
        return self._get_service_states()

    def _fake_provision_volume(self, context, vol, vsa_id, availability_zone):
        global scheduled_volumes
        scheduled_volumes.append(dict(vol=vol,
                                      vsa_id=vsa_id,
                                      az=availability_zone))
        name = vol['name']
        host = vol['host']
        LOG.debug(_("Test: provision vol %(name)s on host %(host)s"),
                  locals())
        LOG.debug(_("\t vol=%(vol)s"), locals())
        pass

    def _fake_vsa_update(self, context, vsa_id, values):
        LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\
                    "values=%(values)s"), locals())
        pass

    def _fake_volume_create(self, context, options):
        LOG.debug(_("Test: Volume create: %s"), options)
        options['id'] = 123
        global global_volume
        global_volume = options
        return options

    def _fake_volume_get(self, context, volume_id):
        LOG.debug(_("Test: Volume get request: id=%(volume_id)s"), locals())
        global global_volume
        global_volume['id'] = volume_id
        global_volume['availability_zone'] = None
        return global_volume

    def _fake_volume_update(self, context, volume_id, values):
        LOG.debug(_("Test: Volume update request: id=%(volume_id)s "\
                    "values=%(values)s"), locals())
        global scheduled_volume
        scheduled_volume = {'id': volume_id, 'host': values['host']}
        pass

    def _fake_service_get_by_args(self, context, host, binary):
        return "service"

    def _fake_service_is_up_True(self, service):
        return True

    def _fake_service_is_up_False(self, service):
        return False

    def setUp(self, sched_class=None):
        super(VsaSchedulerTestCase, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        self.context_non_admin = context.RequestContext(None, None)
        self.context = context.get_admin_context()

        if sched_class is None:
            self.sched = FakeVsaLeastUsedScheduler()
        else:
            self.sched = sched_class

        self.host_num = 10
        self.drive_type_num = 5

        self.stubs.Set(self.sched,
                       '_get_service_states', self._fake_get_service_states)
        self.stubs.Set(self.sched,
                       '_provision_volume', self._fake_provision_volume)
        self.stubs.Set(nova.db, 'vsa_update', self._fake_vsa_update)

        self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get)
        self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)

    def tearDown(self):
        self.stubs.UnsetAll()
        super(VsaSchedulerTestCase, self).tearDown()

    def test_vsa_sched_create_volumes_simple(self):
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10,
                                 exclude_host_list=['host_1', 'host_3'])
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)

        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)

        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_0')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_2')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')

        cur = self._get_service_states()
        for host in ['host_0', 'host_2', 'host_4']:
            cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
            prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
            self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
            self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
                             prev_dtype['FullDrive']['NumFreeDrives'] - 1)
            self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
                             prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)

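    # The expected placement above follows from the generated model: host_1
    # and host_3 are excluded, and the three volumes are spread across the
    # least-used eligible hosts in occupancy order (host_0, host_2, host_4).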
    def test_vsa_sched_no_drive_type(self):
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=1)
        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6)
        self.assertRaises(driver.WillNotSchedule,
                          self.sched.schedule_create_volumes,
                          self.context,
                          request_spec,
                          availability_zone=None)

    def test_vsa_sched_no_enough_drives(self):
        global scheduled_volumes
        scheduled_volumes = []

        self._set_service_states(host_num=3,
                                 drive_type_start_ix=0,
                                 drive_type_num=1,
                                 init_num_drives=0)
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0)

        self.assertRaises(driver.WillNotSchedule,
                          self.sched.schedule_create_volumes,
                          self.context,
                          request_spec,
                          availability_zone=None)

        # check that everything was returned back
        cur = self._get_service_states()
        for k, v in prev.iteritems():
            self.assertEqual(prev[k]['volume']['drive_qos_info'],
                             cur[k]['volume']['drive_qos_info'])

    def test_vsa_sched_wrong_topic(self):
        self._set_service_states(host_num=1,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=1)
        states = self._get_service_states()
        new_states = {}
        new_states['host_0'] = {'compute': states['host_0']['volume']}
        self.service_states = new_states
        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)

        self.assertRaises(driver.WillNotSchedule,
                          self.sched.schedule_create_volumes,
                          self.context,
                          request_spec,
                          availability_zone=None)

    def test_vsa_sched_provision_volume(self):
        global global_volume
        global_volume = {}
        self._set_service_states(host_num=1,
                                 drive_type_start_ix=0,
                                 drive_type_num=1,
                                 init_num_drives=1)
        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)

        self.stubs.UnsetAll()
        self.stubs.Set(self.sched,
                       '_get_service_states', self._fake_get_service_states)
        self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)

        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)

        self.assertEqual(request_spec['volumes'][0]['name'],
                         global_volume['display_name'])

    def test_vsa_sched_no_free_drives(self):
        self._set_service_states(host_num=1,
                                 drive_type_start_ix=0,
                                 drive_type_num=1,
                                 init_num_drives=1)
        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)

        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)

        cur = self._get_service_states()
        cur_dtype = cur['host_0']['volume']['drive_qos_info']['name_0']
        self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], 1)

        new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0)

        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)
        self._print_service_states()

        self.assertRaises(driver.WillNotSchedule,
                          self.sched.schedule_create_volumes,
                          self.context,
                          new_request,
                          availability_zone=None)

    def test_vsa_sched_forced_host(self):
        global scheduled_volumes
        scheduled_volumes = []

        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10)

        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)

        self.assertRaises(exception.HostBinaryNotFound,
                          self.sched.schedule_create_volumes,
                          self.context,
                          request_spec,
                          availability_zone="nova:host_5")

        self.stubs.Set(nova.db,
                       'service_get_by_args', self._fake_service_get_by_args)
        self.stubs.Set(self.sched,
                       'service_is_up', self._fake_service_is_up_False)

        self.assertRaises(driver.WillNotSchedule,
                          self.sched.schedule_create_volumes,
                          self.context,
                          request_spec,
                          availability_zone="nova:host_5")

        self.stubs.Set(self.sched,
                       'service_is_up', self._fake_service_is_up_True)

        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone="nova:host_5")

        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_5')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_5')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_5')

    def test_vsa_sched_create_volumes_partition(self):
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=5,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=1,
                                 exclude_host_list=['host_0', 'host_2'])
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3,
                                                      drive_ix=3,
                                                      size=50)
        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)

        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_1')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')

        cur = self._get_service_states()
        for host in ['host_1', 'host_3', 'host_4']:
            cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
            prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']

            self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
            self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
                             prev_dtype['FullDrive']['NumFreeDrives'] - 1)
            self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
                             prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)

            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['NumOccupiedPartitions'], 0)
            self.assertEqual(cur_dtype['PartitionDrive']
                                      ['NumOccupiedPartitions'], 1)
            self.assertEqual(cur_dtype['PartitionDrive']
                                      ['NumFreePartitions'], 5)

            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['NumFreePartitions'], 0)
            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['PartitionSize'], 0)

    def test_vsa_sched_create_single_volume_az(self):
        global scheduled_volume
        scheduled_volume = {}

        def _fake_volume_get_az(context, volume_id):
            LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals())
            return {'id': volume_id, 'availability_zone': 'nova:host_3'}

        self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az)
        self.stubs.Set(nova.db,
                       'service_get_by_args', self._fake_service_get_by_args)
        self.stubs.Set(self.sched,
                       'service_is_up', self._fake_service_is_up_True)

        host = self.sched.schedule_create_volume(self.context,
                                                 123, availability_zone=None)

        self.assertEqual(host, 'host_3')
        self.assertEqual(scheduled_volume['id'], 123)
        self.assertEqual(scheduled_volume['host'], 'host_3')

    def test_vsa_sched_create_single_non_vsa_volume(self):
        global scheduled_volume
        scheduled_volume = {}

        global global_volume
        global_volume = {}
        global_volume['drive_type'] = None

        self.assertRaises(driver.NoValidHost,
                          self.sched.schedule_create_volume,
                          self.context,
                          123,
                          availability_zone=None)

    def test_vsa_sched_create_single_volume(self):
        global scheduled_volume
        scheduled_volume = {}
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10,
                                 exclude_host_list=['host_0', 'host_1'])
        prev = self._generate_default_service_states()

        global global_volume
        global_volume = {}

        drive_ix = 2
        drive_type = {'id': drive_ix,
                      'name': 'name_' + str(drive_ix),
                      'type': 'type_' + str(drive_ix),
                      'size_gb': 1 + 100 * (drive_ix)}

        global_volume['drive_type'] = drive_type
        global_volume['size'] = 0

        host = self.sched.schedule_create_volume(self.context,
                                                 123, availability_zone=None)

        self.assertEqual(host, 'host_2')
        self.assertEqual(scheduled_volume['id'], 123)
        self.assertEqual(scheduled_volume['host'], 'host_2')


class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):

    def setUp(self):
        super(VsaSchedulerTestCaseMostAvail, self).setUp(
                FakeVsaMostAvailCapacityScheduler())

    def tearDown(self):
        self.stubs.UnsetAll()
        super(VsaSchedulerTestCaseMostAvail, self).tearDown()

    def test_vsa_sched_create_single_volume(self):
        global scheduled_volume
        scheduled_volume = {}
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10,
                                 exclude_host_list=['host_0', 'host_1'])
        prev = self._generate_default_service_states()

        global global_volume
        global_volume = {}

        drive_ix = 2
        drive_type = {'id': drive_ix,
                      'name': 'name_' + str(drive_ix),
                      'type': 'type_' + str(drive_ix),
                      'size_gb': 1 + 100 * (drive_ix)}

        global_volume['drive_type'] = drive_type
        global_volume['size'] = 0

        host = self.sched.schedule_create_volume(self.context,
                                                 123, availability_zone=None)

        self.assertEqual(host, 'host_9')
        self.assertEqual(scheduled_volume['id'], 123)
        self.assertEqual(scheduled_volume['host'], 'host_9')

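    # In the model above, AvailableCapacity grows with the host index
    # ((20 + i) drives' worth for init_num_drives=10), so the
    # most-available-capacity scheduler favours the highest-numbered host.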
    def test_vsa_sched_create_volumes_simple(self):
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10,
                                 exclude_host_list=['host_1', 'host_3'])
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)

        self._print_service_states()

        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)

        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_9')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_8')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_7')

        cur = self._get_service_states()
        for host in ['host_9', 'host_8', 'host_7']:
            cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
            prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
            self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
            self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
                             prev_dtype['FullDrive']['NumFreeDrives'] - 1)
            self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
                             prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)

    def test_vsa_sched_create_volumes_partition(self):
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=5,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=1,
                                 exclude_host_list=['host_0', 'host_2'])
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3,
                                                      drive_ix=3,
                                                      size=50)
        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)

        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_4')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_1')

        cur = self._get_service_states()
        for host in ['host_1', 'host_3', 'host_4']:
            cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
            prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']

            self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
            self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
                             prev_dtype['FullDrive']['NumFreeDrives'] - 1)
            self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
                             prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)

            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['NumOccupiedPartitions'], 0)
            self.assertEqual(cur_dtype['PartitionDrive']
                                      ['NumOccupiedPartitions'], 1)
            self.assertEqual(cur_dtype['PartitionDrive']
                                      ['NumFreePartitions'], 5)
            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['NumFreePartitions'], 0)
            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['PartitionSize'], 0)
@@ -22,6 +22,7 @@ from xml.etree.ElementTree import Element, SubElement
 from nova import exception
 from nova import flags
 from nova import vsa
+from nova import volume
 from nova import db
 from nova import context
 from nova import test
@@ -50,6 +51,7 @@ class VsaTestCase(test.TestCase):
         super(VsaTestCase, self).setUp()
         self.stubs = stubout.StubOutForTesting()
         self.vsa_api = vsa.API()
+        self.volume_api = volume.API()
 
         self.context_non_admin = context.RequestContext(None, None)
         self.context = context.get_admin_context()
@@ -61,7 +61,8 @@ class VsaVolumesTestCase(test.TestCase):
         self.vsa_id = vsa_ref['id']
 
     def tearDown(self):
-        self.vsa_api.delete(self.context, self.vsa_id)
+        if self.vsa_id:
+            self.vsa_api.delete(self.context, self.vsa_id)
         self.stubs.UnsetAll()
         super(VsaVolumesTestCase, self).tearDown()
@@ -106,3 +107,23 @@
         self.volume_api.update(self.context,
                                volume_ref['id'], {'status': 'error'})
         self.volume_api.delete(self.context, volume_ref['id'])
+
+    def test_vsa_volume_delete_vsa_with_volumes(self):
+        """Check volume deletion in different states."""
+
+        vols1 = self.volume_api.get_all_by_vsa(self.context,
+                                               self.vsa_id, "from")
+        for i in range(3):
+            volume_param = _default_volume_param()
+            volume_param['from_vsa_id'] = self.vsa_id
+            volume_ref = self.volume_api.create(self.context, **volume_param)
+
+        vols2 = self.volume_api.get_all_by_vsa(self.context,
+                                               self.vsa_id, "from")
+        self.assertEqual(len(vols1) + 3, len(vols2))
+
+        self.vsa_api.delete(self.context, self.vsa_id)
+
+        vols3 = self.volume_api.get_all_by_vsa(self.context,
+                                               self.vsa_id, "from")
+        self.assertEqual(len(vols1), len(vols3))
@@ -312,9 +312,8 @@ class API(base.Base):
     def _force_volume_delete(self, ctxt, volume):
         """Delete a volume, bypassing the check that it must be available."""
         host = volume['host']
-
-        if not host:
-            # Volume not yet assigned to host
+        if not host or volume['from_vsa_id']:
+            # Volume not yet assigned to host OR FE volume
             # Deleting volume from database and skipping rpc.
             self.db.volume_destroy(ctxt, volume['id'])
             return
@@ -324,41 +323,33 @@ class API(base.Base):
                     {"method": "delete_volume",
                      "args": {"volume_id": volume['id']}})
 
-    def delete_be_volumes(self, context, vsa_id, force_delete=True):
+    def delete_vsa_volumes(self, context, vsa_id, direction,
+                           force_delete=True):
+        if direction == "FE":
+            volumes = self.db.volume_get_all_assigned_from_vsa(context, vsa_id)
+        else:
+            volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id)
 
-        be_volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id)
-        for volume in be_volumes:
+        for volume in volumes:
             try:
                 vol_name = volume['name']
-                LOG.info(_("VSA ID %(vsa_id)s: Deleting BE volume "\
-                           "%(vol_name)s"), locals())
+                LOG.info(_("VSA ID %(vsa_id)s: Deleting %(direction)s "\
+                           "volume %(vol_name)s"), locals())
                 self.volume_api.delete(context, volume['id'])
             except exception.ApiError:
                 LOG.info(_("Unable to delete volume %s"), volume['name'])
                 if force_delete:
-                    LOG.info(_("VSA ID %(vsa_id)s: Forced delete. BE volume "\
-                               "%(vol_name)s"), locals())
+                    LOG.info(_("VSA ID %(vsa_id)s: Forced delete. "\
+                               "%(direction)s volume %(vol_name)s"), locals())
                     self._force_volume_delete(context, volume)
 
     def delete(self, context, vsa_id):
         """Terminate a VSA instance."""
         LOG.info(_("Going to try to terminate VSA ID %s"), vsa_id)
 
         # allow deletion of volumes in "abnormal" state
 
-        # Delete all FE volumes
-        fe_volumes = self.db.volume_get_all_assigned_from_vsa(context, vsa_id)
-        for volume in fe_volumes:
-            try:
-                vol_name = volume['name']
-                LOG.info(_("VSA ID %(vsa_id)s: Deleting FE volume "\
-                           "%(vol_name)s"), locals())
-                self.volume_api.delete(context, volume['id'])
-            except exception.ApiError:
-                LOG.info(_("Unable to delete volume %s"), volume['name'])
-
-        # Delete all BE volumes
-        self.delete_be_volumes(context, vsa_id, force_delete=True)
+        # Delete all FrontEnd and BackEnd volumes
+        self.delete_vsa_volumes(context, vsa_id, "FE", force_delete=True)
+        self.delete_vsa_volumes(context, vsa_id, "BE", force_delete=True)
 
         # Delete all VC instances
         instances = self.db.instance_get_all_by_vsa(context, vsa_id)
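With the front-end/back-end split folded into a single method, callers now pass the direction explicitly. A usage sketch (vsa_api standing in for a nova.vsa.API instance):

    # Delete FrontEnd volumes first, then BackEnd ones, forcing both.
    vsa_api.delete_vsa_volumes(context, vsa_id, "FE", force_delete=True)
    vsa_api.delete_vsa_volumes(context, vsa_id, "BE", force_delete=True)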
@@ -145,7 +145,7 @@ class VsaManager(manager.SchedulerDependentManager):
 
         if has_failed_volumes:
             LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals())
-            self.vsa_api.delete_be_volumes(context, vsa_id, force_delete=True)
+            self.vsa_api.delete_vsa_volumes(context, vsa_id, "BE", True)
             self.vsa_api.update_vsa_status(context, vsa_id, VsaState.FAILED)
             return