Merge "Validate metadata JSON file"

Jenkins 2015-02-03 16:38:45 +00:00 committed by Gerrit Code Review
commit 0185bcbbc0
3 changed files with 207 additions and 65 deletions

View File

@@ -25,6 +25,7 @@ import os
import stat
import urlparse
import jsonschema
from oslo.serialization import jsonutils
from oslo_config import cfg
from oslo_utils import excutils
@@ -54,7 +55,9 @@ _FILESYSTEM_CONFIGS = [
help=_("The path to a file which contains the "
"metadata to be returned with any location "
"associated with this store. The file must "
"contain a valid JSON dict.")),
"contain a valid JSON object. The object should contain "
"the keys 'id' and 'mountpoint'. The value for both "
"keys should be 'string'.")),
cfg.IntOpt('filesystem_store_file_perm',
default=0,
help=_("The required permission for created image file. "
@@ -65,6 +68,18 @@ _FILESYSTEM_CONFIGS = [
"default permission of the file. This value will be "
"decoded as an octal digit."))]
MULTI_FILESYSTEM_METADATA_SCHEMA = {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "string"},
"mountpoint": {"type": "string"}
},
"required": ["id", "mountpoint"],
}
}
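For illustration, metadata such as the following (hypothetical ids and mountpoints, not part of this commit) satisfies the schema defined above, while an entry that omits 'mountpoint' is rejected; a minimal standalone sketch:

import jsonschema

# Hypothetical contents of the filesystem_store_metadata_file: a list of
# objects, each carrying string 'id' and 'mountpoint' values.
metadata = [
    {"id": "fs-store-1", "mountpoint": "/mnt/nfs1/images"},
    {"id": "fs-store-2", "mountpoint": "/mnt/nfs2/images"},
]

# Passes: every item has the required string 'id' and 'mountpoint' keys.
jsonschema.validate(metadata, MULTI_FILESYSTEM_METADATA_SCHEMA)

try:
    # Rejected: 'mountpoint' is listed as required by the schema.
    jsonschema.validate([{"id": "fs-store-1"}],
                        MULTI_FILESYSTEM_METADATA_SCHEMA)
except jsonschema.exceptions.ValidationError:
    pass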
class StoreLocation(glance_store.location.StoreLocation):
"""Class describing a Filesystem URI."""
@@ -136,6 +151,7 @@ class Store(glance_store.driver.Store):
OPTIONS = _FILESYSTEM_CONFIGS
READ_CHUNKSIZE = 64 * units.Ki
WRITE_CHUNKSIZE = READ_CHUNKSIZE
FILESYSTEM_STORE_METADATA = None
def get_schemes(self):
return ('file', 'filesystem')
@@ -217,6 +233,47 @@ class Store(glance_store.driver.Store):
raise exceptions.BadStoreConfiguration(
store_name="filesystem", reason=reason)
def _validate_metadata(self, metadata_file):
"""Validate metadata against json schema.
If metadata is valid then cache metadata and use it when
creating new image.
:param metadata_file: JSON metadata file path
:raises: BadStoreConfiguration exception if metadata is not valid.
"""
try:
with open(metadata_file, 'r') as fptr:
metadata = jsonutils.load(fptr)
if isinstance(metadata, dict):
# If the metadata is a dictionary (i.e. it contains only
# one mountpoint), convert it to a list of dictionaries.
metadata = [metadata]
# Validate metadata against json schema
jsonschema.validate(metadata, MULTI_FILESYSTEM_METADATA_SCHEMA)
glance_store.check_location_metadata(metadata)
self.FILESYSTEM_STORE_METADATA = metadata
except (jsonschema.exceptions.ValidationError,
exceptions.BackendException, ValueError) as vee:
reason = _('The JSON in the metadata file %(file)s is '
'not valid and it cannot be used: '
'%(vee)s.') % dict(file=metadata_file,
vee=utils.exception_to_str(vee))
LOG.error(reason)
raise exceptions.BadStoreConfiguration(
store_name="filesystem", reason=reason)
except IOError as ioe:
reason = _('The path for the metadata file %(file)s could '
'not be accessed: '
'%(ioe)s.') % dict(file=metadata_file,
ioe=utils.exception_to_str(ioe))
LOG.error(reason)
raise exceptions.BadStoreConfiguration(
store_name="filesystem", reason=reason)
def configure_add(self):
"""
Configure the Store to use the stored configuration options
@@ -275,6 +332,10 @@ class Store(glance_store.driver.Store):
self._create_image_directories(directory_paths)
metadata_file = self.conf.glance_store.filesystem_store_metadata_file
if metadata_file:
self._validate_metadata(metadata_file)
def _check_directory_paths(self, datadir_path, directory_paths):
"""
Checks if directory_path is already present in directory_paths.
@@ -334,35 +395,41 @@ class Store(glance_store.driver.Store):
filesize = os.path.getsize(filepath)
return filepath, filesize
def _get_metadata(self):
metadata_file = self.conf.glance_store.filesystem_store_metadata_file
def _get_metadata(self, filepath):
"""Return metadata dictionary.
if metadata_file is None:
return {}
If metadata is provided as a list of dictionaries, return the
metadata as a single dictionary containing 'id' and 'mountpoint'.
try:
with open(metadata_file, 'r') as fptr:
metadata = jsonutils.load(fptr)
If there are multiple NFS directories (mountpoints) configured
for glance, the metadata JSON file must be a list of dictionaries
covering all mountpoints, each with a unique id. However, Nova
cannot tell which directory (mountpoint) an image resides in if
the whole list of mountpoints is stored in the glance image
metadata. So when there are multiple mountpoints, we return the
dictionary for the exact mountpoint where the image is stored.
glance_store.check_location_metadata(metadata)
return metadata
except exceptions.BackendException as bee:
LOG.error(_('The JSON in the metadata file %(file)s could not '
'be used: %(bee)s An empty dictionary will be '
'returned to the client.')
% dict(file=metadata_file, bee=str(bee)))
return {}
except IOError as ioe:
LOG.error(_('The path for the metadata file %(file)s could not be '
'opened: %(io)s An empty dictionary will be returned '
'to the client.')
% dict(file=metadata_file, io=ioe))
return {}
except Exception as ex:
LOG.exception(_('An error occurred processing the storage systems '
'meta data file: %s. An empty dictionary will be '
'returned to the client.') % str(ex))
return {}
If the image path does not start with any of the mountpoints
provided in the metadata JSON file, an error is logged and an
empty dictionary is returned.
:param filepath: Path of image on store
:returns: metadata dictionary
"""
if self.FILESYSTEM_STORE_METADATA:
for image_meta in self.FILESYSTEM_STORE_METADATA:
if filepath.startswith(image_meta['mountpoint']):
return image_meta
reason = (_LE("The image path %(path)s does not match with "
"any of the mountpoint defined in "
"metadata: %(metadata)s. An empty dictionary "
"will be returned to the client.")
% dict(path=filepath,
metadata=self.FILESYSTEM_STORE_METADATA))
LOG.error(reason)
return {}
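The mountpoint lookup above reduces to a prefix match on the image path; a small standalone sketch with hypothetical mountpoints (the real method reads self.FILESYSTEM_STORE_METADATA and logs an error before returning the empty dictionary):

def match_mountpoint(filepath, store_metadata):
    # Return the first entry whose 'mountpoint' is a prefix of the
    # image path, or an empty dict when nothing matches.
    for image_meta in store_metadata or []:
        if filepath.startswith(image_meta['mountpoint']):
            return image_meta
    return {}

store_metadata = [{'id': 'fs1', 'mountpoint': '/mnt/nfs1/images'},
                  {'id': 'fs2', 'mountpoint': '/mnt/nfs2/images'}]
# -> {'id': 'fs2', 'mountpoint': '/mnt/nfs2/images'}
match_mountpoint('/mnt/nfs2/images/1234-abcd', store_metadata)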
def get(self, location, offset=0, chunk_size=None, context=None):
"""
@@ -516,7 +583,7 @@ class Store(glance_store.driver.Store):
self._delete_partial(filepath, image_id)
checksum_hex = checksum.hexdigest()
metadata = self._get_metadata()
metadata = self._get_metadata(filepath)
LOG.debug(_("Wrote %(bytes_written)d bytes to %(filepath)s with "
"checksum %(checksum_hex)s"),

View File

@@ -10,5 +10,7 @@ python-cinderclient>=1.0.6
eventlet>=0.13.0 #s3
six>=1.4.1
jsonschema>=2.0.0,<3.0.0
# py2.6 compat
ordereddict

View File

@@ -56,6 +56,25 @@ class TestStore(base.StoreBaseTest):
super(TestStore, self).tearDown()
ChunkedFile.CHUNKSIZE = self.orig_chunksize
def _create_metadata_json_file(self, metadata):
expected_image_id = str(uuid.uuid4())
jsonfilename = os.path.join(self.test_dir,
"storage_metadata.%s" % expected_image_id)
self.config(filesystem_store_metadata_file=jsonfilename,
group="glance_store")
with open(jsonfilename, 'w') as fptr:
json.dump(metadata, fptr)
def _store_image(self, in_metadata):
expected_image_id = str(uuid.uuid4())
expected_file_size = 10
expected_file_contents = "*" * expected_file_size
image_file = StringIO.StringIO(expected_file_contents)
self.store.FILESYSTEM_STORE_METADATA = in_metadata
return self.store.add(expected_image_id, image_file,
expected_file_size)
def test_get(self):
"""Test a "normal" retrieval of an image in chunks."""
# First add an image...
@@ -162,45 +181,23 @@ class TestStore(base.StoreBaseTest):
self.assertEqual(expected_file_contents, new_image_contents)
self.assertEqual(expected_file_size, new_image_file_size)
def test_add_check_metadata_success(self):
expected_image_id = str(uuid.uuid4())
in_metadata = {'akey': u'some value', 'list': [u'1', u'2', u'3']}
jsonfilename = os.path.join(self.test_dir,
"storage_metadata.%s" % expected_image_id)
def test_add_check_metadata_with_invalid_mountpoint_location(self):
in_metadata = [{'id': 'abcdefg',
'mountpoint': '/xyz/images'}]
location, size, checksum, metadata = self._store_image(in_metadata)
self.assertEqual({}, metadata)
self.config(filesystem_store_metadata_file=jsonfilename,
group="glance_store")
with open(jsonfilename, 'w') as fptr:
json.dump(in_metadata, fptr)
expected_file_size = 10
expected_file_contents = "*" * expected_file_size
image_file = StringIO.StringIO(expected_file_contents)
def test_add_check_metadata_list_with_invalid_mountpoint_locations(self):
in_metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
{'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
location, size, checksum, metadata = self._store_image(in_metadata)
self.assertEqual({}, metadata)
location, size, checksum, metadata = self.store.add(expected_image_id,
image_file,
expected_file_size)
self.assertEqual(metadata, in_metadata)
def test_add_check_metadata_bad_data(self):
expected_image_id = str(uuid.uuid4())
in_metadata = {'akey': 10} # only unicode is allowed
jsonfilename = os.path.join(self.test_dir,
"storage_metadata.%s" % expected_image_id)
self.config(filesystem_store_metadata_file=jsonfilename,
group="glance_store")
with open(jsonfilename, 'w') as fptr:
json.dump(in_metadata, fptr)
expected_file_size = 10
expected_file_contents = "*" * expected_file_size
image_file = StringIO.StringIO(expected_file_contents)
location, size, checksum, metadata = self.store.add(expected_image_id,
image_file,
expected_file_size)
self.assertEqual(metadata, {})
def test_add_check_metadata_list_with_valid_mountpoint_locations(self):
in_metadata = [{'id': 'abcdefg', 'mountpoint': '/tmp'},
{'id': 'xyz1234', 'mountpoint': '/xyz'}]
location, size, checksum, metadata = self._store_image(in_metadata)
self.assertEqual(in_metadata[0], metadata)
def test_add_check_metadata_bad_nosuch_file(self):
expected_image_id = str(uuid.uuid4())
@@ -360,6 +357,82 @@ class TestStore(base.StoreBaseTest):
self.assertEqual(self.store.priority_data_map, expected_priority_map)
self.assertEqual(self.store.priority_list, expected_priority_list)
def test_configure_add_with_metadata_file_success(self):
metadata = {'id': 'asdf1234',
'mountpoint': '/tmp'}
self._create_metadata_json_file(metadata)
self.store.configure_add()
self.assertEqual([metadata], self.store.FILESYSTEM_STORE_METADATA)
def test_configure_add_check_metadata_list_of_dicts_success(self):
metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
{'id': 'xyz1234', 'mountpoint': '/tmp/'}]
self._create_metadata_json_file(metadata)
self.store.configure_add()
self.assertEqual(metadata, self.store.FILESYSTEM_STORE_METADATA)
def test_configure_add_check_metadata_success_list_val_for_some_key(self):
metadata = {'akey': ['value1', 'value2'], 'id': 'asdf1234',
'mountpoint': '/tmp'}
self._create_metadata_json_file(metadata)
self.store.configure_add()
self.assertEqual([metadata], self.store.FILESYSTEM_STORE_METADATA)
def test_configure_add_check_metadata_bad_data(self):
metadata = {'akey': 10, 'id': 'asdf1234',
'mountpoint': '/tmp'} # only unicode is allowed
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_check_metadata_with_no_id_or_mountpoint(self):
metadata = {'mountpoint': '/tmp'}
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
metadata = {'id': 'asdfg1234'}
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_check_metadata_id_or_mountpoint_is_not_string(self):
metadata = {'id': 10, 'mountpoint': '/tmp'}
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
metadata = {'id': 'asdf1234', 'mountpoint': 12345}
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_check_metadata_list_with_no_id_or_mountpoint(self):
metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
{'mountpoint': '/pqr/images'}]
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
metadata = [{'id': 'abcdefg'},
{'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_add_check_metadata_list_id_or_mountpoint_is_not_string(self):
metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
{'id': 1234, 'mountpoint': '/pqr/images'}]
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
metadata = [{'id': 'abcdefg', 'mountpoint': 1234},
{'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_same_dir_multiple_times(self):
"""
Tests BadStoreConfiguration exception is raised if same directory