Enhance Storage models to support RAID config

The Storage, Volume, and Drive resource models are enhanced to enable
the implementation of RAID configuration management.
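
As a rough usage sketch (illustrative only, not part of the change), the
enhanced models can be read roughly as follows; the service URL, credentials,
and resource paths are placeholders borrowed from the test JSON samples, and
the ``system.storage`` access path is assumed from the existing System model:

    import sushy

    root = sushy.Sushy('https://bmc.example.com/redfish/v1',
                       username='admin', password='password')
    system = root.get_system('/redfish/v1/Systems/437XR1138R2')
    storage = system.storage.get_member(
        '/redfish/v1/Systems/437XR1138R2/Storage/1')

    # New in this change: storage controller and drive details
    for controller in storage.storage_controllers:
        print(controller.name, controller.speed_gbps,
              controller.device_protocols)
    for drive_id in storage.drives_identities:
        drv = storage.get_drive(drive_id)
        print(drv.media_type, drv.protocol, drv.capacity_bytes)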

Change-Id: Ia1ba7f08c6574c7497ca7464a52f46a5b72bc0cd
Story: 2003514
Task: 30593
Bill Dodd 2019-05-07 15:50:45 -05:00
parent 5a5550d164
commit 343f7a6c63
17 changed files with 393 additions and 9 deletions

View File

@ -0,0 +1,5 @@
---
features:
- |
Update the Storage, Volume, and Drive models to support RAID
configuration management.

View File

@ -97,10 +97,12 @@ class HTTPError(SushyError):
self.status_code})
error = 'unknown error'
else:
# TODO(dtantsur): parse @Message.ExtendedInfo
self.body = body.get('error', {})
self.code = self.body.get('code', 'Base.1.0.GeneralError')
self.detail = self.body.get('message')
ext_info = self.body.get('@Message.ExtendedInfo', [{}])
index = self._get_most_severe_msg_index(ext_info)
self.detail = ext_info[index].get('Message', self.detail)
error = '%s: %s' % (self.code, self.detail or 'unknown error')
kwargs = {'method': method, 'url': url, 'code': self.status_code,
@ -109,6 +111,15 @@ class HTTPError(SushyError):
'status code: %(code)s, error: %(error)s', kwargs)
super(HTTPError, self).__init__(**kwargs)
@staticmethod
def _get_most_severe_msg_index(extended_info):
if len(extended_info) > 0:
for sev in ['Critical', 'Warning']:
for i, m in enumerate(extended_info):
if m.get('Severity') == sev:
return i
return 0
class BadRequestError(HTTPError):
pass
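
A small standalone sketch (not from the patch) of the selection rule the new
``_get_most_severe_msg_index`` helper applies to ``@Message.ExtendedInfo``:
Critical messages win over Warning messages, and the first entry is the
fallback. ``most_severe_index`` below is a hypothetical mirror of that helper,
for illustration only:

    def most_severe_index(extended_info):
        # Prefer a Critical message, then a Warning one, else the first entry.
        if len(extended_info) > 0:
            for sev in ['Critical', 'Warning']:
                for i, msg in enumerate(extended_info):
                    if msg.get('Severity') == sev:
                        return i
        return 0

    ext_info = [
        {'Severity': 'Warning', 'Message': 'Property is read-only.'},
        {'Severity': 'Critical',
         'Message': 'The request body submitted was malformed JSON.'},
    ]
    assert most_severe_index(ext_info) == 1
    # HTTPError.detail therefore reports the malformed-JSON message.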

View File

@ -56,6 +56,11 @@ class ResetActionField(ActionField):
adapter=list)
class InitializeActionField(ActionField):
allowed_values = base.Field('InitializeType@Redfish.AllowableValues',
adapter=list)
class StatusField(base.CompositeField):
"""This Field describes the status of a resource and its children.
@ -69,3 +74,13 @@ class StatusField(base.CompositeField):
state = base.MappedField('State', res_maps.STATE_VALUE_MAP)
"""Indicates the known state of the resource, such as if it is enabled."""
class IdentifiersListField(base.ListField):
"""This type describes any additional identifiers for a resource."""
durable_name = base.Field('DurableName')
"""This indicates the world wide, persistent name of the resource."""
durable_name_format = base.Field('DurableNameFormat')
"""This represents the format of the DurableName property."""

View File

@ -0,0 +1,41 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Volume Initialization Types
VOLUME_INIT_TYPE_FAST = 'fast'
"""The volume is prepared for use quickly, typically by erasing just the
beginning and end of the space so that partitioning can be performed."""
VOLUME_INIT_TYPE_SLOW = 'slow'
"""The volume is prepared for use slowly, typically by completely erasing
the volume."""
# VolumeType Types
VOLUME_TYPE_RAW_DEVICE = 'rawdevice'
"""The volume is a raw physical device without any RAID or other
virtualization applied."""
VOLUME_TYPE_NON_REDUNDANT = 'nonredundant'
"""The volume is a non-redundant storage device."""
VOLUME_TYPE_MIRRORED = 'mirrored'
"""The volume is a mirrored device."""
VOLUME_TYPE_STRIPED_WITH_PARITY = 'stripedwithparity'
"""The volume is a device which uses parity to retain redundant information."""
VOLUME_TYPE_SPANNED_MIRRORS = 'spannedmirrors'
"""The volume is a spanned set of mirrored devices."""
VOLUME_TYPE_SPANNED_STRIPES_WITH_PARITY = 'spannedstripeswithparity'
"""The volume is a spanned set of devices which uses parity to retain
redundant information."""

View File

@ -27,9 +27,15 @@ LOG = logging.getLogger(__name__)
class Drive(base.ResourceBase):
"""This class represents a disk drive or other physical storage medium."""
block_size_bytes = base.Field('BlockSizeBytes', adapter=utils.int_or_none)
"""The size of the smallest addressable unit of this drive in bytes"""
capacity_bytes = base.Field('CapacityBytes', adapter=utils.int_or_none)
"""The size in bytes of this Drive"""
identifiers = common.IdentifiersListField('Identifiers', default=[])
"""The Durable names for the drive"""
identity = base.Field('Id', required=True)
"""The Drive identity string"""
@ -40,6 +46,9 @@ class Drive(base.ResourceBase):
manufacturer = base.Field('Manufacturer')
"""This is the manufacturer of this drive"""
media_type = base.Field('MediaType')
"""The type of media contained in this drive"""
model = base.Field('Model')
"""This is the model number for the drive"""
@ -49,6 +58,9 @@ class Drive(base.ResourceBase):
part_number = base.Field('PartNumber')
"""The part number for this drive"""
protocol = base.Field('Protocol')
"""Protocol this drive is using to communicate to the storage controller"""
serial_number = base.Field('SerialNumber')
"""The serial number for this drive"""

View File

@ -0,0 +1,33 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sushy.resources.system.storage import constants as store_cons
from sushy import utils
VOLUME_INIT_TYPE_MAP = {
'Fast': store_cons.VOLUME_INIT_TYPE_FAST,
'Slow': store_cons.VOLUME_INIT_TYPE_SLOW
}
VOLUME_INIT_TYPE_MAP_REV = (
utils.revert_dictionary(VOLUME_INIT_TYPE_MAP)
)
VOLUME_TYPE_TYPE_MAP = {
'RawDevice': store_cons.VOLUME_TYPE_RAW_DEVICE,
'NonRedundant': store_cons.VOLUME_TYPE_NON_REDUNDANT,
'Mirrored': store_cons.VOLUME_TYPE_MIRRORED,
'StripedWithParity': store_cons.VOLUME_TYPE_STRIPED_WITH_PARITY,
'SpannedMirrors': store_cons.VOLUME_TYPE_SPANNED_MIRRORS,
'SpannedStripesWithParity':
store_cons.VOLUME_TYPE_SPANNED_STRIPES_WITH_PARITY
}

View File

@ -16,6 +16,7 @@
import logging
from sushy.resources import base
from sushy.resources import common
from sushy.resources.system.storage import drive
from sushy.resources.system.storage import volume
from sushy import utils
@ -24,6 +25,33 @@ from sushy import utils
LOG = logging.getLogger(__name__)
class StorageControllersListField(base.ListField):
"""The set of storage controllers represented by this resource."""
member_id = base.Field('MemberId', required=True)
"""Uniquely identifies the member within the collection."""
name = base.Field('Name', required=True)
"""The name of the storage controller"""
status = common.StatusField('Status')
"""Describes the status and health of the resource and its children."""
identifiers = common.IdentifiersListField('Identifiers', default=[])
"""The Durable names for the storage controller."""
speed_gbps = base.Field('SpeedGbps')
"""The maximum speed of the storage controller's device interface."""
controller_protocols = base.Field('SupportedControllerProtocols',
adapter=list)
"""The protocols by which this storage controller can be communicated to"""
device_protocols = base.Field('SupportedDeviceProtocols',
adapter=list)
"""The protocols which the controller can use tocommunicate with devices"""
class Storage(base.ResourceBase):
"""This class represents the storage subsystem resources.
@ -42,10 +70,13 @@ class Storage(base.ResourceBase):
adapter=utils.get_members_identities)
"""A tuple with the drive identities"""
status = common.StatusField('Status')
"""Describes the status and health of the resource and its children."""
def get_drive(self, drive_identity):
"""Given the drive identity return a ``Drive`` object
:param identity: The identity of the ``Drive``
:param drive_identity: The identity of the ``Drive``
:returns: The ``Drive`` object
:raises: ResourceNotFoundError
"""
@ -95,6 +126,10 @@ class Storage(base.ResourceBase):
self._conn, utils.get_sub_resource_path_by(self, 'Volumes'),
redfish_version=self.redfish_version)
storage_controllers = StorageControllersListField('StorageControllers',
default=[])
"""The storage devices associated with this resource."""
class StorageCollection(base.ResourceCollectionBase):
"""This class represents the collection of Storage resources"""

View File

@ -15,13 +15,19 @@
import logging
from sushy import exceptions
from sushy.resources import base
from sushy.resources import common
from sushy.resources.system.storage import mappings as store_maps
from sushy import utils
LOG = logging.getLogger(__name__)
class ActionsField(base.CompositeField):
initialize = common.InitializeActionField('#Volume.Initialize')
class Volume(base.ResourceBase):
"""This class adds the Storage Volume resource"""
@ -34,6 +40,73 @@ class Volume(base.ResourceBase):
capacity_bytes = base.Field('CapacityBytes', adapter=utils.int_or_none)
"""The size in bytes of this Volume."""
volume_type = base.MappedField('VolumeType',
store_maps.VOLUME_TYPE_TYPE_MAP)
"""The type of this volume."""
encrypted = base.Field('Encrypted', adapter=bool)
"""Is this Volume encrypted."""
identifiers = common.IdentifiersListField('Identifiers', default=[])
"""The Durable names for the volume."""
block_size_bytes = base.Field('BlockSizeBytes', adapter=int)
"""The size of the smallest addressable unit of this volume in bytes."""
operation_apply_time_support = common.OperationApplyTimeSupportField()
"""Indicates if a client is allowed to request for a specific apply
time of a create, delete, or action operation of a given resource"""
_actions = ActionsField('Actions', required=True)
def _get_initialize_action_element(self):
initialize_action = self._actions.initialize
if not initialize_action:
raise exceptions.MissingActionError(action='#Volume.Initialize',
resource=self._path)
return initialize_action
def get_allowed_initialize_volume_values(self):
"""Get the allowed values for initializing the volume.
:returns: A set with the allowed values.
"""
action = self._get_initialize_action_element()
if not action.allowed_values:
LOG.warning('Could not figure out the allowed values for the '
'initialize volume action for Volume %s',
self.identity)
return set(store_maps.VOLUME_INIT_TYPE_MAP_REV)
return set([store_maps.VOLUME_INIT_TYPE_MAP[v] for v in
set(store_maps.VOLUME_INIT_TYPE_MAP).
intersection(action.allowed_values)])
def initialize_volume(self, value):
"""Initialize the volume.
:param value: The InitializeType value.
:raises: InvalidParameterValueError, if the target value is not
allowed.
"""
valid_values = self.get_allowed_initialize_volume_values()
if value not in valid_values:
raise exceptions.InvalidParameterValueError(
parameter='value', value=value, valid_values=valid_values)
value = store_maps.VOLUME_INIT_TYPE_MAP_REV[value]
target_uri = self._get_initialize_action_element().target_uri
self._conn.post(target_uri, data={'InitializeType': value})
def delete_volume(self, payload=None):
"""Delete the volume.
:param payload: May contain @Redfish.OperationApplyTime property
:raises: ConnectionError
:raises: HTTPError
"""
self._conn.delete(self._path, data=payload)
class VolumeCollection(base.ResourceCollectionBase):
"""This class represents the Storage Volume collection"""
@ -67,3 +140,22 @@ class VolumeCollection(base.ResourceCollectionBase):
operation_apply_time_support = common.OperationApplyTimeSupportField()
"""Indicates if a client is allowed to request for a specific apply
time of a create, delete, or action operation of a given resource"""
def create_volume(self, payload):
"""Create a volume.
:param payload: The payload representing the new volume to create.
:raises: ConnectionError
:raises: HTTPError
"""
r = self._conn.post(self._path, data=payload)
location = r.headers.get('Location')
new_volume = None
if r.status_code == 201:
if location:
self.refresh()
new_volume = self.get_member(location)
elif r.status_code == 202:
# TODO(billdodd): TaskMonitor support to be added in subsequent PR
pass
return new_volume
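
Continuing the sketch from the commit message (again illustrative, with
``storage`` assumed to be an already-fetched Storage resource and the payload
echoing the volume4.json sample), the new volume operations could be driven
like this:

    payload = {
        'Name': 'My Volume 4',
        'VolumeType': 'Mirrored',
        'CapacityBytes': 107374182400,
    }
    new_volume = storage.volumes.create_volume(payload)
    if new_volume is not None:
        # 'fast' and 'slow' are the sushy constants mapped to the Redfish
        # InitializeType values 'Fast' and 'Slow'.
        if 'fast' in new_volume.get_allowed_initialize_volume_values():
            new_volume.initialize_volume('fast')
        # The apply-time payload is optional; delete_volume() sends no body.
        new_volume.delete_volume(
            payload={'@Redfish.OperationApplyTime': 'OnReset'})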

View File

@ -29,6 +29,13 @@
],
"Severity": "Warning",
"Resolution": "Remove the property from the request body and resubmit the request if the operation failed"
},
{
"@odata.type": "/redfish/v1/$metadata#Message.1.0.0.Message",
"MessageId": "Base.1.0.MalformedJSON",
"Message": "The request body submitted was malformed JSON and could not be parsed by the receiving service.",
"Severity": "Critical",
"Resolution": "Ensure that the request body is valid JSON and resubmit the request."
}
]
}

View File

@ -0,0 +1,41 @@
{
"@odata.type": "#Volume.v1_0_3.Volume",
"Id": "4",
"Name": "My Volume 4",
"Status": {
"@odata.type": "#Resource.Status",
"State": "Enabled",
"Health": "OK"
},
"Encrypted": false,
"VolumeType": "Mirrored",
"CapacityBytes": 107374182400,
"Identifiers": [
{
"@odata.type": "#Resource.v1_1_0.Identifier",
"DurableNameFormat": "UUID",
"DurableName": "eb179a30-6f87-4fdb-8f92-639eb7aaabcb"
}
],
"Links": {
"@odata.type": "#Volume.v1_0_0.Links",
"Drives": [
{
"@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1/Drives/3D58ECBC375FD9F2"
}
]
},
"Actions": {
"@odata.type": "#Volume.v1_0_0.Actions",
"#Volume.Initialize": {
"target": "/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/4/Actions/Volume.Initialize",
"InitializeType@Redfish.AllowableValues": [
"Fast",
"Slow"
]
}
},
"@odata.context": "/redfish/v1/$metadata#Volume.Volume",
"@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/4",
"@Redfish.Copyright": "Copyright 2014-2017 Distributed Management Task Force, Inc. (DMTF). For the full DMTF copyright policy, see http://www.dmtf.org/about/policies/copyright."
}

View File

@ -89,7 +89,7 @@ class VirtualMediaTestCase(base.TestCase):
target_uri = ("/redfish/v1/Managers/BMC/VirtualMedia/Floppy1/Actions"
"/VirtualMedia.EjectMedia")
self.conn.post.side_effect = [exceptions.HTTPError(
method='POST', url=target_uri, response=mock.Mock(
method='POST', url=target_uri, response=mock.MagicMock(
status_code=http_client.UNSUPPORTED_MEDIA_TYPE)), '200']
self.sys_virtual_media.eject_media()
post_calls = [

View File

@ -27,7 +27,7 @@ class SessionServiceTestCase(base.TestCase):
def setUp(self):
super(SessionServiceTestCase, self).setUp()
self.conn = mock.Mock()
self.conn = mock.MagicMock()
with open('sushy/tests/unit/json_samples/session_service.json') as f:
self.conn.get.return_value.json.return_value = json.load(f)

View File

@ -38,10 +38,19 @@ class DriveTestCase(base.TestCase):
self.assertEqual('1.0.2', self.stor_drive.redfish_version)
self.assertEqual('32ADF365C6C1B7BD', self.stor_drive.identity)
self.assertEqual('Drive Sample', self.stor_drive.name)
self.assertEqual(512, self.stor_drive.block_size_bytes)
self.assertEqual(899527000000, self.stor_drive.capacity_bytes)
identifiers = self.stor_drive.identifiers
self.assertIsInstance(identifiers, list)
self.assertEqual(1, len(identifiers))
identifier = identifiers[0]
self.assertEqual('NAA', identifier.durable_name_format)
self.assertEqual('32ADF365C6C1B7BD', identifier.durable_name)
self.assertEqual('Contoso', self.stor_drive.manufacturer)
self.assertEqual('HDD', self.stor_drive.media_type)
self.assertEqual('C123', self.stor_drive.model)
self.assertEqual('C123-1111', self.stor_drive.part_number)
self.assertEqual('SAS', self.stor_drive.protocol)
self.assertEqual('1234570', self.stor_drive.serial_number)
self.assertEqual(sushy.STATE_ENABLED, self.stor_drive.status.state)
self.assertEqual(sushy.HEALTH_OK, self.stor_drive.status.health)

View File

@ -51,6 +51,9 @@ class StorageTestCase(base.TestCase):
self.assertEqual('1.0.2', self.storage.redfish_version)
self.assertEqual('1', self.storage.identity)
self.assertEqual('Local Storage Controller', self.storage.name)
self.assertEqual('ok', self.storage.status.health)
self.assertEqual('ok', self.storage.status.health_rollup)
self.assertEqual('enabled', self.storage.status.state)
self.assertEqual(
('/redfish/v1/Systems/437XR1138R2/Storage/1/Drives/35D38F11ACEF7BD3', # noqa
'/redfish/v1/Systems/437XR1138R2/Storage/1/Drives/3F5A8C54207B7233', # noqa
@ -101,6 +104,25 @@ class StorageTestCase(base.TestCase):
self.assertEqual(4, len(all_drives))
self.assertIsInstance(all_drives[0], drive.Drive.__class__)
def test_storage_controllers(self):
controllers = self.storage.storage_controllers
self.assertIsInstance(controllers, list)
self.assertEqual(1, len(controllers))
controller = controllers[0]
self.assertEqual('0', controller.member_id)
self.assertEqual('Contoso Integrated RAID', controller.name)
self.assertEqual('ok', controller.status.health)
self.assertEqual('enabled', controller.status.state)
identifiers = controller.identifiers
self.assertIsInstance(identifiers, list)
self.assertEqual(1, len(identifiers))
identifier = identifiers[0]
self.assertEqual('NAA', identifier.durable_name_format)
self.assertEqual('345C59DBD970859C', identifier.durable_name)
self.assertEqual(12, controller.speed_gbps)
self.assertEqual(["PCIe"], controller.controller_protocols)
self.assertEqual(["SAS", "SATA"], controller.device_protocols)
def test_drives_after_refresh(self):
self.storage.refresh()
self.conn.get.return_value.json.reset_mock()

View File

@ -15,6 +15,8 @@ import mock
from dateutil import parser
from sushy import exceptions
from sushy.resources.system.storage import constants as store_cons
from sushy.resources.system.storage import volume
from sushy.tests.unit import base
@ -37,6 +39,41 @@ class VolumeTestCase(base.TestCase):
self.assertEqual('1', self.stor_volume.identity)
self.assertEqual('Virtual Disk 1', self.stor_volume.name)
self.assertEqual(899527000000, self.stor_volume.capacity_bytes)
self.assertEqual(store_cons.VOLUME_TYPE_MIRRORED,
self.stor_volume.volume_type)
self.assertFalse(self.stor_volume.encrypted)
identifiers = self.stor_volume.identifiers
self.assertIsInstance(identifiers, list)
self.assertEqual(1, len(identifiers))
identifier = identifiers[0]
self.assertEqual('UUID', identifier.durable_name_format)
self.assertEqual('38f1818b-111e-463a-aa19-fa54f792e468',
identifier.durable_name)
self.assertIsNone(self.stor_volume.block_size_bytes)
def test_initialize_volume(self):
target_uri = '/redfish/v1/Systems/3/Storage/RAIDIntegrated/' \
'Volumes/1/Actions/Volume.Initialize'
self.stor_volume.initialize_volume('fast')
self.stor_volume._conn.post.assert_called_once_with(
target_uri, data={'InitializeType': 'Fast'})
def test_initialize_volume_bad_value(self):
self.assertRaisesRegex(
exceptions.InvalidParameterValueError,
'The parameter.*lazy.*invalid',
self.stor_volume.initialize_volume, 'lazy')
def test_delete_volume(self):
self.stor_volume.delete_volume()
self.stor_volume._conn.delete.assert_called_once_with(
self.stor_volume._path, data=None)
def test_delete_volume_with_payload(self):
payload = {'@Redfish.OperationApplyTime': 'OnReset'}
self.stor_volume.delete_volume(payload=payload)
self.stor_volume._conn.delete.assert_called_once_with(
self.stor_volume._path, data=payload)
class VolumeCollectionTestCase(base.TestCase):
@ -50,6 +87,7 @@ class VolumeCollectionTestCase(base.TestCase):
self.stor_vol_col = volume.VolumeCollection(
self.conn, '/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes',
redfish_version='1.0.2')
self.stor_vol_col.refresh = mock.Mock()
def test__parse_attributes(self):
self.stor_vol_col._parse_attributes()
@ -130,3 +168,26 @@ class VolumeCollectionTestCase(base.TestCase):
self.conn.get.return_value.json.side_effect = successive_return_values
self.assertEqual(1073741824000, self.stor_vol_col.max_size_bytes)
def test_create_volume(self):
payload = {
'Name': 'My Volume 4',
'VolumeType': 'Mirrored',
'CapacityBytes': 107374182400
}
with open('sushy/tests/unit/json_samples/volume4.json') as f:
self.conn.get.return_value.json.return_value = json.load(f)
self.conn.post.return_value.status_code = 201
self.conn.post.return_value.headers.return_value = {
'Location': '/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes/4'
}
new_vol = self.stor_vol_col.create_volume(payload)
self.stor_vol_col._conn.post.assert_called_once_with(
'/redfish/v1/Systems/437XR1138R2/Storage/1/Volumes',
data=payload)
self.stor_vol_col.refresh.assert_called_once()
self.assertIsNotNone(new_vol)
self.assertEqual('4', new_vol.identity)
self.assertEqual('My Volume 4', new_vol.name)
self.assertEqual(107374182400, new_vol.capacity_bytes)
self.assertEqual(store_cons.VOLUME_TYPE_MIRRORED, new_vol.volume_type)

View File

@ -227,7 +227,7 @@ class ResourceCollectionBaseTestCase(base.TestCase):
self.test_resource_collection.members_identities = ('1',)
self.conn.get.side_effect = exceptions.ResourceNotFoundError(
method='GET', url='http://foo.bar:8000/redfish/v1/Fakes/2',
response=mock.Mock(status_code=http_client.NOT_FOUND))
response=mock.MagicMock(status_code=http_client.NOT_FOUND))
# | WHEN & THEN |
self.assertRaises(exceptions.ResourceNotFoundError,
self.test_resource_collection.get_member, '2')

View File

@ -183,9 +183,9 @@ class ConnectorOpTestCase(base.TestCase):
self.session = mock.Mock(spec=requests.Session)
self.conn._session = self.session
self.request = self.session.request
first_response = mock.Mock()
first_response = mock.MagicMock()
first_response.status_code = http_client.FORBIDDEN
second_response = mock.Mock()
second_response = mock.MagicMock()
second_response.status_code = http_client.OK
second_response.json = {'Test': 'Testing'}
self.request.side_effect = [first_response, second_response]
@ -216,12 +216,12 @@ class ConnectorOpTestCase(base.TestCase):
self.request.return_value.json.return_value = json.load(f)
with self.assertRaisesRegex(exceptions.BadRequestError,
'A general error has occurred') as cm:
'body submitted was malformed JSON') as cm:
self.conn._op('GET', 'http://foo.bar')
exc = cm.exception
self.assertEqual(http_client.BAD_REQUEST, exc.status_code)
self.assertIsNotNone(exc.body)
self.assertIn('A general error has occurred', exc.detail)
self.assertIn('body submitted was malformed JSON', exc.detail)
def test_not_found_error(self):
self.request.return_value.status_code = http_client.NOT_FOUND