Merge "Validate metadata JSON file"
@@ -25,6 +25,7 @@ import os
 import stat
 import urlparse
 
+import jsonschema
 from oslo.serialization import jsonutils
 from oslo_config import cfg
 from oslo_utils import excutils
@@ -54,7 +55,9 @@ _FILESYSTEM_CONFIGS = [
                help=_("The path to a file which contains the "
                       "metadata to be returned with any location "
                       "associated with this store. The file must "
-                      "contain a valid JSON dict.")),
+                      "contain a valid JSON object. The object should contain "
+                      "the keys 'id' and 'mountpoint'. The value for both "
+                      "keys should be 'string'.")),
     cfg.IntOpt('filesystem_store_file_perm',
                default=0,
                help=_("The required permission for created image file. "
@@ -65,6 +68,18 @@ _FILESYSTEM_CONFIGS = [
                       "default permission of the file. This value will be "
                       "decoded as an octal digit."))]
 
+MULTI_FILESYSTEM_METADATA_SCHEMA = {
+    "type": "array",
+    "items": {
+        "type": "object",
+        "properties": {
+            "id": {"type": "string"},
+            "mountpoint": {"type": "string"}
+        },
+        "required": ["id", "mountpoint"],
+    }
+}
+
 
 class StoreLocation(glance_store.location.StoreLocation):
     """Class describing a Filesystem URI."""
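Note: as the updated help text and MULTI_FILESYSTEM_METADATA_SCHEMA above describe, the metadata file may hold either a single JSON object or a list of objects, each with string values for 'id' and 'mountpoint'. A minimal illustrative example (the ids and paths below are invented for the sketch, not part of this change):

    [
        {"id": "nfs1", "mountpoint": "/mnt/nfs1/glance/images"},
        {"id": "nfs2", "mountpoint": "/mnt/nfs2/glance/images"}
    ]

A single object such as {"id": "nfs1", "mountpoint": "/mnt/nfs1/glance/images"} is also accepted; the validation code added below normalizes it to a one-element list.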
@@ -136,6 +151,7 @@ class Store(glance_store.driver.Store):
     OPTIONS = _FILESYSTEM_CONFIGS
     READ_CHUNKSIZE = 64 * units.Ki
     WRITE_CHUNKSIZE = READ_CHUNKSIZE
+    FILESYSTEM_STORE_METADATA = None
 
     def get_schemes(self):
         return ('file', 'filesystem')
@@ -217,6 +233,47 @@ class Store(glance_store.driver.Store):
             raise exceptions.BadStoreConfiguration(
                 store_name="filesystem", reason=reason)
 
+    def _validate_metadata(self, metadata_file):
+        """Validate metadata against json schema.
+
+        If metadata is valid then cache metadata and use it when
+        creating new image.
+
+        :param metadata_file: JSON metadata file path
+        :raises: BadStoreConfiguration exception if metadata is not valid.
+        """
+        try:
+            with open(metadata_file, 'r') as fptr:
+                metadata = jsonutils.load(fptr)
+
+            if isinstance(metadata, dict):
+                # If metadata is of type dictionary
+                # i.e. - it contains only one mountpoint
+                # then convert it to list of dictionary.
+                metadata = [metadata]
+
+            # Validate metadata against json schema
+            jsonschema.validate(metadata, MULTI_FILESYSTEM_METADATA_SCHEMA)
+            glance_store.check_location_metadata(metadata)
+            self.FILESYSTEM_STORE_METADATA = metadata
+        except (jsonschema.exceptions.ValidationError,
+                exceptions.BackendException, ValueError) as vee:
+            reason = _('The JSON in the metadata file %(file)s is '
+                       'not valid and it can not be used: '
+                       '%(vee)s.') % dict(file=metadata_file,
+                                          vee=utils.exception_to_str(vee))
+            LOG.error(reason)
+            raise exceptions.BadStoreConfiguration(
+                store_name="filesystem", reason=reason)
+        except IOError as ioe:
+            reason = _('The path for the metadata file %(file)s could '
+                       'not be accessed: '
+                       '%(ioe)s.') % dict(file=metadata_file,
+                                          ioe=utils.exception_to_str(ioe))
+            LOG.error(reason)
+            raise exceptions.BadStoreConfiguration(
+                store_name="filesystem", reason=reason)
+
     def configure_add(self):
         """
         Configure the Store to use the stored configuration options
@@ -275,6 +332,10 @@ class Store(glance_store.driver.Store):
 
         self._create_image_directories(directory_paths)
+
+        metadata_file = self.conf.glance_store.filesystem_store_metadata_file
+        if metadata_file:
+            self._validate_metadata(metadata_file)
 
     def _check_directory_paths(self, datadir_path, directory_paths):
         """
         Checks if directory_path is already present in directory_paths.
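For reference, a minimal standalone sketch of the check that configure_add now triggers via _validate_metadata, assuming the jsonschema package added to the requirements below; the file path and contents are illustrative, not part of this change:

    import json

    import jsonschema

    MULTI_FILESYSTEM_METADATA_SCHEMA = {
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "id": {"type": "string"},
                "mountpoint": {"type": "string"}
            },
            "required": ["id", "mountpoint"],
        }
    }

    # Illustrative path; the real path comes from the
    # filesystem_store_metadata_file config option.
    with open('/etc/glance/filesystem_store_metadata.json') as fptr:
        metadata = json.load(fptr)

    # A single JSON object is allowed in the file; normalize it to a
    # one-element list, as _validate_metadata does.
    if isinstance(metadata, dict):
        metadata = [metadata]

    # Raises jsonschema.exceptions.ValidationError when 'id' or 'mountpoint'
    # is missing or not a string; the store turns that into
    # BadStoreConfiguration.
    jsonschema.validate(metadata, MULTI_FILESYSTEM_METADATA_SCHEMA)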
@@ -334,35 +395,41 @@ class Store(glance_store.driver.Store):
         filesize = os.path.getsize(filepath)
         return filepath, filesize
 
-    def _get_metadata(self):
-        metadata_file = self.conf.glance_store.filesystem_store_metadata_file
-
-        if metadata_file is None:
-            return {}
-
-        try:
-            with open(metadata_file, 'r') as fptr:
-                metadata = jsonutils.load(fptr)
-
-            glance_store.check_location_metadata(metadata)
-            return metadata
-        except exceptions.BackendException as bee:
-            LOG.error(_('The JSON in the metadata file %(file)s could not '
-                        'be used: %(bee)s An empty dictionary will be '
-                        'returned to the client.')
-                      % dict(file=metadata_file, bee=str(bee)))
-            return {}
-        except IOError as ioe:
-            LOG.error(_('The path for the metadata file %(file)s could not be '
-                        'opened: %(io)s An empty dictionary will be returned '
-                        'to the client.')
-                      % dict(file=metadata_file, io=ioe))
-            return {}
-        except Exception as ex:
-            LOG.exception(_('An error occurred processing the storage systems '
-                            'meta data file: %s. An empty dictionary will be '
-                            'returned to the client.') % str(ex))
-            return {}
+    def _get_metadata(self, filepath):
+        """Return metadata dictionary.
+
+        If metadata is provided as list of dictionaries then return
+        metadata as dictionary containing 'id' and 'mountpoint'.
+
+        If there are multiple nfs directories (mountpoints) configured
+        for glance, then we need to create metadata JSON file as list
+        of dictionaries containing all mountpoints with unique id.
+        But Nova will not be able to find in which directory (mountpoint)
+        image is present if we store list of dictionary(containing mountpoints)
+        in glance image metadata. So if there are multiple mountpoints then
+        we will return dict containing exact mountpoint where image is stored.
+
+        If image path does not start with any of the 'mountpoint' provided
+        in metadata JSON file then error is logged and empty
+        dictionary is returned.
+
+        :param filepath: Path of image on store
+        :returns: metadata dictionary
+        """
+        if self.FILESYSTEM_STORE_METADATA:
+            for image_meta in self.FILESYSTEM_STORE_METADATA:
+                if filepath.startswith(image_meta['mountpoint']):
+                    return image_meta
+
+            reason = (_LE("The image path %(path)s does not match with "
+                          "any of the mountpoint defined in "
+                          "metadata: %(metadata)s. An empty dictionary "
+                          "will be returned to the client.")
+                      % dict(path=filepath,
+                             metadata=self.FILESYSTEM_STORE_METADATA))
+            LOG.error(reason)
+
+        return {}
 
     def get(self, location, offset=0, chunk_size=None, context=None):
         """
@@ -516,7 +583,7 @@ class Store(glance_store.driver.Store):
                 self._delete_partial(filepath, image_id)
 
         checksum_hex = checksum.hexdigest()
-        metadata = self._get_metadata()
+        metadata = self._get_metadata(filepath)
 
         LOG.debug(_("Wrote %(bytes_written)d bytes to %(filepath)s with "
                     "checksum %(checksum_hex)s"),
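To make the new lookup concrete, a small sketch of the prefix match that _get_metadata now performs against the cached metadata; the entries and paths are illustrative:

    FILESYSTEM_STORE_METADATA = [
        {'id': 'nfs1', 'mountpoint': '/mnt/nfs1'},
        {'id': 'nfs2', 'mountpoint': '/mnt/nfs2'},
    ]

    def lookup(filepath):
        # Return the entry whose mountpoint is a prefix of the image path,
        # otherwise an empty dict (the real method also logs an error).
        for image_meta in FILESYSTEM_STORE_METADATA:
            if filepath.startswith(image_meta['mountpoint']):
                return image_meta
        return {}

    print(lookup('/mnt/nfs2/images/some-image-id'))  # -> {'id': 'nfs2', ...}
    print(lookup('/opt/images/some-image-id'))       # -> {}

This is also why add() now passes the image filepath into _get_metadata(filepath) instead of re-reading the metadata file.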
@@ -10,5 +10,7 @@ python-cinderclient>=1.0.6
 eventlet>=0.13.0 #s3
 six>=1.4.1
 
+jsonschema>=2.0.0,<3.0.0
+
 # py2.6 compat
 ordereddict
@@ -56,6 +56,25 @@ class TestStore(base.StoreBaseTest):
         super(TestStore, self).tearDown()
         ChunkedFile.CHUNKSIZE = self.orig_chunksize
 
+    def _create_metadata_json_file(self, metadata):
+        expected_image_id = str(uuid.uuid4())
+        jsonfilename = os.path.join(self.test_dir,
+                                    "storage_metadata.%s" % expected_image_id)
+
+        self.config(filesystem_store_metadata_file=jsonfilename,
+                    group="glance_store")
+        with open(jsonfilename, 'w') as fptr:
+            json.dump(metadata, fptr)
+
+    def _store_image(self, in_metadata):
+        expected_image_id = str(uuid.uuid4())
+        expected_file_size = 10
+        expected_file_contents = "*" * expected_file_size
+        image_file = StringIO.StringIO(expected_file_contents)
+        self.store.FILESYSTEM_STORE_METADATA = in_metadata
+        return self.store.add(expected_image_id, image_file,
+                              expected_file_size)
+
     def test_get(self):
         """Test a "normal" retrieval of an image in chunks."""
         # First add an image...
@@ -162,45 +181,23 @@ class TestStore(base.StoreBaseTest):
         self.assertEqual(expected_file_contents, new_image_contents)
         self.assertEqual(expected_file_size, new_image_file_size)
 
-    def test_add_check_metadata_success(self):
-        expected_image_id = str(uuid.uuid4())
-        in_metadata = {'akey': u'some value', 'list': [u'1', u'2', u'3']}
-        jsonfilename = os.path.join(self.test_dir,
-                                    "storage_metadata.%s" % expected_image_id)
-
-        self.config(filesystem_store_metadata_file=jsonfilename,
-                    group="glance_store")
-        with open(jsonfilename, 'w') as fptr:
-            json.dump(in_metadata, fptr)
-        expected_file_size = 10
-        expected_file_contents = "*" * expected_file_size
-        image_file = StringIO.StringIO(expected_file_contents)
-
-        location, size, checksum, metadata = self.store.add(expected_image_id,
-                                                             image_file,
-                                                             expected_file_size)
-        self.assertEqual(metadata, in_metadata)
-
-    def test_add_check_metadata_bad_data(self):
-        expected_image_id = str(uuid.uuid4())
-        in_metadata = {'akey': 10}  # only unicode is allowed
-        jsonfilename = os.path.join(self.test_dir,
-                                    "storage_metadata.%s" % expected_image_id)
-
-        self.config(filesystem_store_metadata_file=jsonfilename,
-                    group="glance_store")
-        with open(jsonfilename, 'w') as fptr:
-            json.dump(in_metadata, fptr)
-        expected_file_size = 10
-        expected_file_contents = "*" * expected_file_size
-        image_file = StringIO.StringIO(expected_file_contents)
-
-        location, size, checksum, metadata = self.store.add(expected_image_id,
-                                                             image_file,
-                                                             expected_file_size)
-
-        self.assertEqual(metadata, {})
-
+    def test_add_check_metadata_with_invalid_mountpoint_location(self):
+        in_metadata = [{'id': 'abcdefg',
+                        'mountpoint': '/xyz/images'}]
+        location, size, checksum, metadata = self._store_image(in_metadata)
+        self.assertEqual({}, metadata)
+
+    def test_add_check_metadata_list_with_invalid_mountpoint_locations(self):
+        in_metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
+                       {'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
+        location, size, checksum, metadata = self._store_image(in_metadata)
+        self.assertEqual({}, metadata)
+
+    def test_add_check_metadata_list_with_valid_mountpoint_locations(self):
+        in_metadata = [{'id': 'abcdefg', 'mountpoint': '/tmp'},
+                       {'id': 'xyz1234', 'mountpoint': '/xyz'}]
+        location, size, checksum, metadata = self._store_image(in_metadata)
+        self.assertEqual(in_metadata[0], metadata)
+
     def test_add_check_metadata_bad_nosuch_file(self):
         expected_image_id = str(uuid.uuid4())
@@ -360,6 +357,82 @@ class TestStore(base.StoreBaseTest):
         self.assertEqual(self.store.priority_data_map, expected_priority_map)
         self.assertEqual(self.store.priority_list, expected_priority_list)
 
+    def test_configure_add_with_metadata_file_success(self):
+        metadata = {'id': 'asdf1234',
+                    'mountpoint': '/tmp'}
+        self._create_metadata_json_file(metadata)
+        self.store.configure_add()
+        self.assertEqual([metadata], self.store.FILESYSTEM_STORE_METADATA)
+
+    def test_configure_add_check_metadata_list_of_dicts_success(self):
+        metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
+                    {'id': 'xyz1234', 'mountpoint': '/tmp/'}]
+        self._create_metadata_json_file(metadata)
+        self.store.configure_add()
+        self.assertEqual(metadata, self.store.FILESYSTEM_STORE_METADATA)
+
+    def test_configure_add_check_metadata_success_list_val_for_some_key(self):
+        metadata = {'akey': ['value1', 'value2'], 'id': 'asdf1234',
+                    'mountpoint': '/tmp'}
+        self._create_metadata_json_file(metadata)
+        self.store.configure_add()
+        self.assertEqual([metadata], self.store.FILESYSTEM_STORE_METADATA)
+
+    def test_configure_add_check_metadata_bad_data(self):
+        metadata = {'akey': 10, 'id': 'asdf1234',
+                    'mountpoint': '/tmp'}  # only unicode is allowed
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+    def test_configure_add_check_metadata_with_no_id_or_mountpoint(self):
+        metadata = {'mountpoint': '/tmp'}
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+        metadata = {'id': 'asdfg1234'}
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+    def test_configure_add_check_metadata_id_or_mountpoint_is_not_string(self):
+        metadata = {'id': 10, 'mountpoint': '/tmp'}
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+        metadata = {'id': 'asdf1234', 'mountpoint': 12345}
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+    def test_configure_add_check_metadata_list_with_no_id_or_mountpoint(self):
+        metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
+                    {'mountpoint': '/pqr/images'}]
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+        metadata = [{'id': 'abcdefg'},
+                    {'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+    def test_add_check_metadata_list_id_or_mountpoint_is_not_string(self):
+        metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
+                    {'id': 1234, 'mountpoint': '/pqr/images'}]
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+        metadata = [{'id': 'abcdefg', 'mountpoint': 1234},
+                    {'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
     def test_configure_add_same_dir_multiple_times(self):
         """
         Tests BadStoreConfiguration exception is raised if same directory