343 lines
14 KiB
Python
Raw Normal View History

# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from eventlet import Timeout
from swift.common.swob import Request
from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_container_info
from test.unit import fake_http_connect, FakeRing, FakeMemcache
Add Storage Policy support to Containers Containers now have a storage policy index associated with them, stored in the container_stat table. This index is only settable at container creation time (PUT request), and cannot be changed without deleting and recreating the container. This is because a container's policy index will apply to all its objects, so changing a container's policy index would require moving large amounts of object data around. If a user wants to change the policy for data in a container, they must create a new container with the desired policy and move the data over. Keep status_changed_at up-to-date with status changes. In particular during container recreation and replication. When a container-server receives a PUT for a deleted database an extra UPDATE is issued against the container_stat table to notate the x-timestamp of the request. During replication if merge_timestamps causes a container's status to change (from DELETED to ACTIVE or vice-versa) the status_changed_at field is set to the current time. Accurate reporting of status_changed_at is useful for container replication forensics and allows resolution of "set on create" attributes like the upcoming storage_policy_index. Expose Backend container info on deleted containers. Include basic container info in backend headers on 404 responses from the container server. Default empty values are used as placeholders if the database does not exist. Specifically the X-Backend-Status-Changed-At, X-Backend-DELETE-Timestamp and the X-Backend-Storage-Policy-Index value will be needed by the reconciler to deal with reconciling out of order object writes in the face of recently deleted containers. * Add "status_changed_at" key to the response from ContainerBroker.get_info. * Add "Status Timestamp" field to swift.cli.info.print_db_info_metadata. * Add "status_changed_at" key to the response from AccountBroker.get_info. 
DocImpact Implements: blueprint storage-policies Change-Id: Ie6d388f067f5b096b0f96faef151120ba23c8748
2014-05-27 16:57:25 -07:00
from swift.common.storage_policy import StoragePolicy
Generic means for persisting system metadata. Middleware or core features may need to store metadata against accounts or containers. This patch adds a generic mechanism for system metadata to be persisted in backend databases, without polluting the user metadata namespace, by using the reserved header namespace x-<server_type>-sysmeta-*. Modifications are firstly that backend servers persist system metadata headers alongside user metadata and other system state. For accounts and containers, system metadata in PUT and POST requests is treated in a similar way to user metadata. System metadata is not yet supported for object requests. Secondly, changes in the proxy controllers ensure that headers in the system metadata namespace will pass through in requests to backend servers. Thirdly, system metadata returned from backend servers in GET or HEAD responses is added to the cached info dict, which middleware can access. Finally, a gatekeeper middleware module is provided which filters all system metadata headers from requests and responses by removing headers with names starting x-account-sysmeta-, x-container-sysmeta-. The gatekeeper also removes headers starting x-object-sysmeta- in anticipation of future support for system metadata being set for objects. This prevents clients from writing or reading system metadata. The required_filters list in swift/proxy/server.py is modified to include the gatekeeper middleware so that if the gatekeeper has not been configured in the pipeline then it will be automatically inserted close to the start of the pipeline. blueprint cluster-federation Change-Id: I80b8b14243cc59505f8c584920f8f527646b5f45
2013-12-03 22:02:39 +00:00
from swift.common.request_helpers import get_sys_meta_prefix
from test.unit import patch_policies, mocked_http_conn, debug_logger
Add Storage Policy support to Containers Containers now have a storage policy index associated with them, stored in the container_stat table. This index is only settable at container creation time (PUT request), and cannot be changed without deleting and recreating the container. This is because a container's policy index will apply to all its objects, so changing a container's policy index would require moving large amounts of object data around. If a user wants to change the policy for data in a container, they must create a new container with the desired policy and move the data over. Keep status_changed_at up-to-date with status changes. In particular during container recreation and replication. When a container-server receives a PUT for a deleted database an extra UPDATE is issued against the container_stat table to notate the x-timestamp of the request. During replication if merge_timestamps causes a container's status to change (from DELETED to ACTIVE or vice-versa) the status_changed_at field is set to the current time. Accurate reporting of status_changed_at is useful for container replication forensics and allows resolution of "set on create" attributes like the upcoming storage_policy_index. Expose Backend container info on deleted containers. Include basic container info in backend headers on 404 responses from the container server. Default empty values are used as placeholders if the database does not exist. Specifically the X-Backend-Status-Changed-At, X-Backend-DELETE-Timestamp and the X-Backend-Storage-Policy-Index value will be needed by the reconciler to deal with reconciling out of order object writes in the face of recently deleted containers. * Add "status_changed_at" key to the response from ContainerBroker.get_info. * Add "Status Timestamp" field to swift.cli.info.print_db_info_metadata. * Add "status_changed_at" key to the response from AccountBroker.get_info. 
DocImpact Implements: blueprint storage-policies Change-Id: Ie6d388f067f5b096b0f96faef151120ba23c8748
2014-05-27 16:57:25 -07:00
from test.unit.common.ring.test_ring import TestRingBase
from test.unit.proxy.test_server import node_error_count
Add Storage Policy support to Containers Containers now have a storage policy index associated with them, stored in the container_stat table. This index is only settable at container creation time (PUT request), and cannot be changed without deleting and recreating the container. This is because a container's policy index will apply to all its objects, so changing a container's policy index would require moving large amounts of object data around. If a user wants to change the policy for data in a container, they must create a new container with the desired policy and move the data over. Keep status_changed_at up-to-date with status changes. In particular during container recreation and replication. When a container-server receives a PUT for a deleted database an extra UPDATE is issued against the container_stat table to notate the x-timestamp of the request. During replication if merge_timestamps causes a container's status to change (from DELETED to ACTIVE or vice-versa) the status_changed_at field is set to the current time. Accurate reporting of status_changed_at is useful for container replication forensics and allows resolution of "set on create" attributes like the upcoming storage_policy_index. Expose Backend container info on deleted containers. Include basic container info in backend headers on 404 responses from the container server. Default empty values are used as placeholders if the database does not exist. Specifically the X-Backend-Status-Changed-At, X-Backend-DELETE-Timestamp and the X-Backend-Storage-Policy-Index value will be needed by the reconciler to deal with reconciling out of order object writes in the face of recently deleted containers. * Add "status_changed_at" key to the response from ContainerBroker.get_info. * Add "Status Timestamp" field to swift.cli.info.print_db_info_metadata. * Add "status_changed_at" key to the response from AccountBroker.get_info. 
DocImpact Implements: blueprint storage-policies Change-Id: Ie6d388f067f5b096b0f96faef151120ba23c8748
2014-05-27 16:57:25 -07:00
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestContainerController(TestRingBase):
    # Replica count for the fake container ring; overridden by the
    # 4-replica subclass below to re-run the same status-code tests.
    CONTAINER_REPLICAS = 3
    def setUp(self):
        """Build a proxy app wired to fake rings, with its container
        controller patched so account lookups return canned info."""
        TestRingBase.setUp(self)
        self.logger = debug_logger()
        self.container_ring = FakeRing(replicas=self.CONTAINER_REPLICAS,
                                       max_more_nodes=9)
        self.app = proxy_server.Application(None, FakeMemcache(),
                                            logger=self.logger,
                                            account_ring=FakeRing(),
                                            container_ring=self.container_ring)

        # Canned account info handed to the controller under test, so no
        # real account HEAD round-trip is needed.
        self.account_info = {
            'status': 200,
            'container_count': '10',
            'total_object_count': '100',
            'bytes': '1000',
            'meta': {},
            'sysmeta': {},
        }

        class FakeAccountInfoContainerController(
                proxy_server.ContainerController):

            # NOTE: first arg is named 'controller' (not 'self') so that
            # 'self' inside the closure still refers to the test case,
            # letting us return a copy of self.account_info.
            def account_info(controller, *args, **kwargs):
                patch_path = 'swift.proxy.controllers.base.get_account_info'
                with mock.patch(patch_path) as mock_get_info:
                    mock_get_info.return_value = dict(self.account_info)
                    return super(FakeAccountInfoContainerController,
                                 controller).account_info(
                                     *args, **kwargs)

        # Wrap get_controller so requests routed through the app use the
        # fake controller class defined above.
        _orig_get_controller = self.app.get_controller

        def wrapped_get_controller(*args, **kwargs):
            with mock.patch('swift.proxy.server.ContainerController',
                            new=FakeAccountInfoContainerController):
                return _orig_get_controller(*args, **kwargs)
        self.app.get_controller = wrapped_get_controller
def _make_callback_func(self, context):
def callback(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
context['method'] = method
context['path'] = path
context['headers'] = headers or {}
return callback
def _assert_responses(self, method, test_cases):
controller = proxy_server.ContainerController(self.app, 'a', 'c')
for responses, expected in test_cases:
with mock.patch(
'swift.proxy.controllers.base.http_connect',
fake_http_connect(*responses)):
req = Request.blank('/v1/a/c')
resp = getattr(controller, method)(req)
self.assertEqual(expected,
resp.status_int,
'Expected %s but got %s. Failed case: %s' %
(expected, resp.status_int, str(responses)))
Fix up get_account_info and get_container_info get_account_info used to work like this: * make an account HEAD request * ignore the response * get the account info by digging around in the request environment, where it had been deposited by elves or something Not actually elves, but the proxy's GETorHEAD_base method would take the HEAD response and cache it in the response environment, which was the same object as the request environment, thus enabling get_account_info to find it. This was extraordinarily brittle. If a WSGI middleware were to shallow-copy the request environment, then any middlewares to its left could not use get_account_info, as the left middleware's request environment would no longer be identical to the response environment down in GETorHEAD_base. Now, get_account_info works like this: * make an account HEAD request. * if the account info is in the request environment, return it. This is an optimization to avoid a double-set in memcached. * else, compute the account info from the response headers, store it in caches, and return it. This is much easier to think about; get_account_info can get and cache account info all on its own; the cache check and cache set are right next to each other. All the above is true for get_container_info as well. get_info() is still around, but it's just a shim. It was trying to unify get_account_info and get_container_info to exploit the commonalities, but the number of times that "if container:" showed up in get_info and its helpers really indicated that something was wrong. I'd rather have two functions with some duplication than one function with no duplication but a bunch of "if container:" branches. Other things of note: * a HEAD request to a deleted account returns 410, but get_account_info would return 404 since the 410 came from the account controller *after* GETorHEAD_base ran. Now get_account_info returns 410 as well. 
* cache validity period (recheck_account_existence and recheck_container_existence) is now communicated to get_account_info via an X-Backend header. This way, get_account_info doesn't need a reference to the swift.proxy.server.Application object. * both logged swift_source values are now correct for get_container_info calls; before, on a cold cache, get_container_info would call get_account_info but not pass along swift_source, resulting in get_account_info logging "GET_INFO" as the source. Amusingly, there was a unit test asserting this bogus behavior. * callers that modify the return value of get_account_info or of get_container_info don't modify what's stored in swift.infocache. * get_account_info on an account that *can* be autocreated but has not been will return a 200, same as a HEAD request. The old behavior was a 404 from get_account_info but a 200 from HEAD. Callers can tell the difference by looking at info['account_really_exists'] if they need to know the difference (there is one call site that needs to know, in container PUT). Note: this is for all accounts when the proxy's "account_autocreate" setting is on. Change-Id: I5167714025ec7237f7e6dd4759c2c6eb959b3fca
2016-02-11 15:51:45 -08:00
def test_container_info_got_cached(self):
controller = proxy_server.ContainerController(self.app, 'a', 'c')
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, body='')):
req = Request.blank('/v1/a/c', {'PATH_INFO': '/v1/a/c'})
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
Fix up get_account_info and get_container_info get_account_info used to work like this: * make an account HEAD request * ignore the response * get the account info by digging around in the request environment, where it had been deposited by elves or something Not actually elves, but the proxy's GETorHEAD_base method would take the HEAD response and cache it in the response environment, which was the same object as the request environment, thus enabling get_account_info to find it. This was extraordinarily brittle. If a WSGI middleware were to shallow-copy the request environment, then any middlewares to its left could not use get_account_info, as the left middleware's request environment would no longer be identical to the response environment down in GETorHEAD_base. Now, get_account_info works like this: * make an account HEAD request. * if the account info is in the request environment, return it. This is an optimization to avoid a double-set in memcached. * else, compute the account info from the response headers, store it in caches, and return it. This is much easier to think about; get_account_info can get and cache account info all on its own; the cache check and cache set are right next to each other. All the above is true for get_container_info as well. get_info() is still around, but it's just a shim. It was trying to unify get_account_info and get_container_info to exploit the commonalities, but the number of times that "if container:" showed up in get_info and its helpers really indicated that something was wrong. I'd rather have two functions with some duplication than one function with no duplication but a bunch of "if container:" branches. Other things of note: * a HEAD request to a deleted account returns 410, but get_account_info would return 404 since the 410 came from the account controller *after* GETorHEAD_base ran. Now get_account_info returns 410 as well. 
* cache validity period (recheck_account_existence and recheck_container_existence) is now communicated to get_account_info via an X-Backend header. This way, get_account_info doesn't need a reference to the swift.proxy.server.Application object. * both logged swift_source values are now correct for get_container_info calls; before, on a cold cache, get_container_info would call get_account_info but not pass along swift_source, resulting in get_account_info logging "GET_INFO" as the source. Amusingly, there was a unit test asserting this bogus behavior. * callers that modify the return value of get_account_info or of get_container_info don't modify what's stored in swift.infocache. * get_account_info on an account that *can* be autocreated but has not been will return a 200, same as a HEAD request. The old behavior was a 404 from get_account_info but a 200 from HEAD. Callers can tell the difference by looking at info['account_really_exists'] if they need to know the difference (there is one call site that needs to know, in container PUT). Note: this is for all accounts when the proxy's "account_autocreate" setting is on. Change-Id: I5167714025ec7237f7e6dd4759c2c6eb959b3fca
2016-02-11 15:51:45 -08:00
# Make sure it's in both swift.infocache and memcache
self.assertIn("container/a/c", resp.environ['swift.infocache'])
self.assertEqual(
headers_to_container_info(resp.headers),
resp.environ['swift.infocache']['container/a/c'])
Fix up get_account_info and get_container_info get_account_info used to work like this: * make an account HEAD request * ignore the response * get the account info by digging around in the request environment, where it had been deposited by elves or something Not actually elves, but the proxy's GETorHEAD_base method would take the HEAD response and cache it in the response environment, which was the same object as the request environment, thus enabling get_account_info to find it. This was extraordinarily brittle. If a WSGI middleware were to shallow-copy the request environment, then any middlewares to its left could not use get_account_info, as the left middleware's request environment would no longer be identical to the response environment down in GETorHEAD_base. Now, get_account_info works like this: * make an account HEAD request. * if the account info is in the request environment, return it. This is an optimization to avoid a double-set in memcached. * else, compute the account info from the response headers, store it in caches, and return it. This is much easier to think about; get_account_info can get and cache account info all on its own; the cache check and cache set are right next to each other. All the above is true for get_container_info as well. get_info() is still around, but it's just a shim. It was trying to unify get_account_info and get_container_info to exploit the commonalities, but the number of times that "if container:" showed up in get_info and its helpers really indicated that something was wrong. I'd rather have two functions with some duplication than one function with no duplication but a bunch of "if container:" branches. Other things of note: * a HEAD request to a deleted account returns 410, but get_account_info would return 404 since the 410 came from the account controller *after* GETorHEAD_base ran. Now get_account_info returns 410 as well. 
* cache validity period (recheck_account_existence and recheck_container_existence) is now communicated to get_account_info via an X-Backend header. This way, get_account_info doesn't need a reference to the swift.proxy.server.Application object. * both logged swift_source values are now correct for get_container_info calls; before, on a cold cache, get_container_info would call get_account_info but not pass along swift_source, resulting in get_account_info logging "GET_INFO" as the source. Amusingly, there was a unit test asserting this bogus behavior. * callers that modify the return value of get_account_info or of get_container_info don't modify what's stored in swift.infocache. * get_account_info on an account that *can* be autocreated but has not been will return a 200, same as a HEAD request. The old behavior was a 404 from get_account_info but a 200 from HEAD. Callers can tell the difference by looking at info['account_really_exists'] if they need to know the difference (there is one call site that needs to know, in container PUT). Note: this is for all accounts when the proxy's "account_autocreate" setting is on. Change-Id: I5167714025ec7237f7e6dd4759c2c6eb959b3fca
2016-02-11 15:51:45 -08:00
from_memcache = self.app.memcache.get('container/a/c')
self.assertTrue(from_memcache)
def test_swift_owner(self):
owner_headers = {
'x-container-read': 'value', 'x-container-write': 'value',
'x-container-sync-key': 'value', 'x-container-sync-to': 'value'}
controller = proxy_server.ContainerController(self.app, 'a', 'c')
req = Request.blank('/v1/a/c')
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, headers=owner_headers)):
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
for key in owner_headers:
self.assertTrue(key not in resp.headers)
req = Request.blank('/v1/a/c', environ={'swift_owner': True})
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, headers=owner_headers)):
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
for key in owner_headers:
self.assertTrue(key in resp.headers)
Generic means for persisting system metadata. Middleware or core features may need to store metadata against accounts or containers. This patch adds a generic mechanism for system metadata to be persisted in backend databases, without polluting the user metadata namespace, by using the reserved header namespace x-<server_type>-sysmeta-*. Modifications are firstly that backend servers persist system metadata headers alongside user metadata and other system state. For accounts and containers, system metadata in PUT and POST requests is treated in a similar way to user metadata. System metadata is not yet supported for object requests. Secondly, changes in the proxy controllers ensure that headers in the system metadata namespace will pass through in requests to backend servers. Thirdly, system metadata returned from backend servers in GET or HEAD responses is added to the cached info dict, which middleware can access. Finally, a gatekeeper middleware module is provided which filters all system metadata headers from requests and responses by removing headers with names starting x-account-sysmeta-, x-container-sysmeta-. The gatekeeper also removes headers starting x-object-sysmeta- in anticipation of future support for system metadata being set for objects. This prevents clients from writing or reading system metadata. The required_filters list in swift/proxy/server.py is modified to include the gatekeeper middleware so that if the gatekeeper has not been configured in the pipeline then it will be automatically inserted close to the start of the pipeline. blueprint cluster-federation Change-Id: I80b8b14243cc59505f8c584920f8f527646b5f45
2013-12-03 22:02:39 +00:00
def test_sys_meta_headers_PUT(self):
# check that headers in sys meta namespace make it through
# the container controller
sys_meta_key = '%stest' % get_sys_meta_prefix('container')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Container-Meta-Test'
controller = proxy_server.ContainerController(self.app, 'a', 'c')
context = {}
callback = self._make_callback_func(context)
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a/c', headers=hdrs_in)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, give_connect=callback)):
controller.PUT(req)
self.assertEqual(context['method'], 'PUT')
self.assertTrue(sys_meta_key in context['headers'])
self.assertEqual(context['headers'][sys_meta_key], 'foo')
self.assertTrue(user_meta_key in context['headers'])
self.assertEqual(context['headers'][user_meta_key], 'bar')
self.assertNotEqual(context['headers']['x-timestamp'], '1.0')
def test_sys_meta_headers_POST(self):
# check that headers in sys meta namespace make it through
# the container controller
sys_meta_key = '%stest' % get_sys_meta_prefix('container')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Container-Meta-Test'
controller = proxy_server.ContainerController(self.app, 'a', 'c')
context = {}
callback = self._make_callback_func(context)
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a/c', headers=hdrs_in)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, give_connect=callback)):
controller.POST(req)
self.assertEqual(context['method'], 'POST')
self.assertTrue(sys_meta_key in context['headers'])
self.assertEqual(context['headers'][sys_meta_key], 'foo')
self.assertTrue(user_meta_key in context['headers'])
self.assertEqual(context['headers'][user_meta_key], 'bar')
self.assertNotEqual(context['headers']['x-timestamp'], '1.0')
    def test_node_errors(self):
        """Exercise proxy error-limiting accounting for PUT/DELETE/POST."""
        # Disable node shuffling so backend request order is deterministic.
        self.app.sort_nodes = lambda n: n

        for method in ('PUT', 'DELETE', 'POST'):
            def test_status_map(statuses, expected):
                # Reset error-limiting state so each case starts clean.
                self.app._error_limiting = {}
                req = Request.blank('/v1/a/c', method=method)
                with mocked_http_conn(*statuses) as fake_conn:
                    resp = req.get_response(self.app)
                self.assertEqual(resp.status_int, expected)
                for req in fake_conn.requests:
                    self.assertEqual(req['method'], method)
                    self.assertTrue(req['path'].endswith('/a/c'))

            base_status = [201] * 3
            # test happy path
            test_status_map(list(base_status), 201)
            for i in range(3):
                self.assertEqual(node_error_count(
                    self.app, self.container_ring.devs[i]), 0)

            # single node errors and test isolation
            for i in range(3):
                status_list = list(base_status)
                status_list[i] = 503
                # extra 201 for the handoff that covers the failed node
                status_list.append(201)
                test_status_map(status_list, 201)
                for j in range(3):
                    # only the failed node accumulates an error
                    expected = 1 if j == i else 0
                    self.assertEqual(node_error_count(
                        self.app, self.container_ring.devs[j]), expected)

            # timeout
            test_status_map((201, Timeout(), 201, 201), 201)
            self.assertEqual(node_error_count(
                self.app, self.container_ring.devs[1]), 1)

            # exception
            test_status_map((Exception('kaboom!'), 201, 201, 201), 201)
            self.assertEqual(node_error_count(
                self.app, self.container_ring.devs[0]), 1)

            # insufficient storage
            test_status_map((201, 201, 507, 201), 201)
            # 507 is punished harder: the node is pushed past the
            # error_suppression_limit in one step
            self.assertEqual(node_error_count(
                self.app, self.container_ring.devs[2]),
                self.app.error_suppression_limit + 1)
def test_response_code_for_PUT(self):
PUT_TEST_CASES = [
((201, 201, 201), 201),
((201, 201, 404), 201),
((201, 201, 503), 201),
((201, 404, 404), 404),
((201, 404, 503), 503),
((201, 503, 503), 503),
((404, 404, 404), 404),
((404, 404, 503), 404),
((404, 503, 503), 503),
((503, 503, 503), 503)
]
self._assert_responses('PUT', PUT_TEST_CASES)
def test_response_code_for_DELETE(self):
DELETE_TEST_CASES = [
((204, 204, 204), 204),
((204, 204, 404), 204),
((204, 204, 503), 204),
((204, 404, 404), 404),
((204, 404, 503), 503),
((204, 503, 503), 503),
((404, 404, 404), 404),
((404, 404, 503), 404),
((404, 503, 503), 503),
((503, 503, 503), 503)
]
self._assert_responses('DELETE', DELETE_TEST_CASES)
def test_response_code_for_POST(self):
POST_TEST_CASES = [
((204, 204, 204), 204),
((204, 204, 404), 204),
((204, 204, 503), 204),
((204, 404, 404), 404),
((204, 404, 503), 503),
((204, 503, 503), 503),
((404, 404, 404), 404),
((404, 404, 503), 404),
((404, 503, 503), 503),
((503, 503, 503), 503)
]
self._assert_responses('POST', POST_TEST_CASES)
@patch_policies(
    [StoragePolicy(0, 'zero', True, object_ring=FakeRing(replicas=4))])
class TestContainerController4Replicas(TestContainerController):
    """Re-run the container controller tests against a 4-replica ring.

    Only the status-code quorum tables change; everything else is
    inherited from TestContainerController.
    """

    CONTAINER_REPLICAS = 4

    def test_response_code_for_PUT(self):
        """4-replica quorum table for PUT."""
        cases = [
            ((201, 201, 201, 201), 201),
            ((201, 201, 201, 404), 201),
            ((201, 201, 201, 503), 201),
            ((201, 201, 404, 404), 201),
            ((201, 201, 404, 503), 201),
            ((201, 201, 503, 503), 201),
            ((201, 404, 404, 404), 404),
            ((201, 404, 404, 503), 404),
            ((201, 404, 503, 503), 503),
            ((201, 503, 503, 503), 503),
            ((404, 404, 404, 404), 404),
            ((404, 404, 404, 503), 404),
            ((404, 404, 503, 503), 404),
            ((404, 503, 503, 503), 503),
            ((503, 503, 503, 503), 503),
        ]
        self._assert_responses('PUT', cases)

    def test_response_code_for_DELETE(self):
        """4-replica quorum table for DELETE."""
        cases = [
            ((204, 204, 204, 204), 204),
            ((204, 204, 204, 404), 204),
            ((204, 204, 204, 503), 204),
            ((204, 204, 404, 404), 204),
            ((204, 204, 404, 503), 204),
            ((204, 204, 503, 503), 204),
            ((204, 404, 404, 404), 404),
            ((204, 404, 404, 503), 404),
            ((204, 404, 503, 503), 503),
            ((204, 503, 503, 503), 503),
            ((404, 404, 404, 404), 404),
            ((404, 404, 404, 503), 404),
            ((404, 404, 503, 503), 404),
            ((404, 503, 503, 503), 503),
            ((503, 503, 503, 503), 503),
        ]
        self._assert_responses('DELETE', cases)

    def test_response_code_for_POST(self):
        """4-replica quorum table for POST (mirrors DELETE)."""
        cases = [
            ((204, 204, 204, 204), 204),
            ((204, 204, 204, 404), 204),
            ((204, 204, 204, 503), 204),
            ((204, 204, 404, 404), 204),
            ((204, 204, 404, 503), 204),
            ((204, 204, 503, 503), 204),
            ((204, 404, 404, 404), 404),
            ((204, 404, 404, 503), 404),
            ((204, 404, 503, 503), 503),
            ((204, 503, 503, 503), 503),
            ((404, 404, 404, 404), 404),
            ((404, 404, 404, 503), 404),
            ((404, 404, 503, 503), 404),
            ((404, 503, 503, 503), 503),
            ((503, 503, 503, 503), 503),
        ]
        self._assert_responses('POST', cases)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()