Merge master to feature/ec
Change-Id: I8df5b4c26b3f8c0513119d8e82eca1302c8e2fc1
This commit is contained in: commit bcaa00f25f
@ -101,6 +101,10 @@ Usage: %%prog [options] [conf_file]
|
||||
parser.add_option('--insecure', action='store_true', default=False,
|
||||
help='Allow accessing insecure keystone server. '
|
||||
'The keystone\'s certificate will not be verified.')
|
||||
parser.add_option('--no-overlap', action='store_true', default=False,
|
||||
help='No overlap of partitions if running populate \
|
||||
more than once. Will increase coverage by amount shown \
|
||||
in dispersion.conf file')
|
||||
options, args = parser.parse_args()
|
||||
|
||||
if args:
|
||||
@ -144,6 +148,19 @@ Usage: %%prog [options] [conf_file]
|
||||
container_ring = Ring(swift_dir, ring_name='container')
|
||||
parts_left = dict((x, x)
|
||||
for x in xrange(container_ring.partition_count))
|
||||
|
||||
if options.no_overlap:
|
||||
with connpool.item() as conn:
|
||||
containers = [cont['name'] for cont in conn.get_account(
|
||||
prefix='dispersion_', full_listing=True)[1]]
|
||||
containers_listed = len(containers)
|
||||
if containers_listed > 0:
|
||||
for container in containers:
|
||||
partition, _junk = container_ring.get_nodes(account,
|
||||
container)
|
||||
if partition in parts_left:
|
||||
del parts_left[partition]
|
||||
|
||||
item_type = 'containers'
|
||||
created = 0
|
||||
retries_done = 0
|
||||
@ -152,9 +169,9 @@ Usage: %%prog [options] [conf_file]
|
||||
begun = next_report = time()
|
||||
next_report += 2
|
||||
suffix = 0
|
||||
while need_to_queue >= 1:
|
||||
while need_to_queue >= 1 and parts_left:
|
||||
container = 'dispersion_%d' % suffix
|
||||
part, _junk = container_ring.get_nodes(account, container)
|
||||
part = container_ring.get_part(account, container)
|
||||
if part in parts_left:
|
||||
if suffix >= options.container_suffix_start:
|
||||
coropool.spawn(put_container, connpool, container, report)
|
||||
@ -168,7 +185,13 @@ Usage: %%prog [options] [conf_file]
|
||||
elapsed, elapsed_unit = get_time_units(time() - begun)
|
||||
print '\r\x1B[KCreated %d containers for dispersion reporting, ' \
|
||||
'%d%s, %d retries' % \
|
||||
(need_to_create, round(elapsed), elapsed_unit, retries_done)
|
||||
((need_to_create - need_to_queue), round(elapsed), elapsed_unit,
|
||||
retries_done)
|
||||
if options.no_overlap:
|
||||
con_coverage = container_ring.partition_count - len(parts_left)
|
||||
print '\r\x1B[KTotal container coverage is now %.2f%%.' % \
|
||||
((float(con_coverage) / container_ring.partition_count
|
||||
* 100))
|
||||
stdout.flush()
|
||||
|
||||
if object_populate:
|
||||
@ -176,6 +199,23 @@ Usage: %%prog [options] [conf_file]
|
||||
put_container(connpool, container, None)
|
||||
object_ring = Ring(swift_dir, ring_name='object')
|
||||
parts_left = dict((x, x) for x in xrange(object_ring.partition_count))
|
||||
|
||||
if options.no_overlap:
|
||||
with connpool.item() as conn:
|
||||
obj_container = [cont_b['name'] for cont_b in conn.get_account(
|
||||
prefix=container, full_listing=True)[1]]
|
||||
if obj_container:
|
||||
with connpool.item() as conn:
|
||||
objects = [o['name'] for o in
|
||||
conn.get_container(container,
|
||||
prefix='dispersion_',
|
||||
full_listing=True)[1]]
|
||||
for my_object in objects:
|
||||
partition = object_ring.get_part(account, container,
|
||||
my_object)
|
||||
if partition in parts_left:
|
||||
del parts_left[partition]
|
||||
|
||||
item_type = 'objects'
|
||||
created = 0
|
||||
retries_done = 0
|
||||
@ -184,9 +224,9 @@ Usage: %%prog [options] [conf_file]
|
||||
begun = next_report = time()
|
||||
next_report += 2
|
||||
suffix = 0
|
||||
while need_to_queue >= 1:
|
||||
while need_to_queue >= 1 and parts_left:
|
||||
obj = 'dispersion_%d' % suffix
|
||||
part, _junk = object_ring.get_nodes(account, container, obj)
|
||||
part = object_ring.get_part(account, container, obj)
|
||||
if part in parts_left:
|
||||
if suffix >= options.object_suffix_start:
|
||||
coropool.spawn(
|
||||
@ -201,5 +241,10 @@ Usage: %%prog [options] [conf_file]
|
||||
elapsed, elapsed_unit = get_time_units(time() - begun)
|
||||
print '\r\x1B[KCreated %d objects for dispersion reporting, ' \
|
||||
'%d%s, %d retries' % \
|
||||
(need_to_create, round(elapsed), elapsed_unit, retries_done)
|
||||
((need_to_create - need_to_queue), round(elapsed), elapsed_unit,
|
||||
retries_done)
|
||||
if options.no_overlap:
|
||||
obj_coverage = object_ring.partition_count - len(parts_left)
|
||||
print '\r\x1B[KTotal object coverage is now %.2f%%.' % \
|
||||
((float(obj_coverage) / object_ring.partition_count * 100))
|
||||
stdout.flush()
|
||||
|
@ -72,6 +72,8 @@ Only run object population
Only run container population
.IP "\fB--object-only\fR"
Only run object population
.IP "\fB--no-overlap\fR"
Increase coverage by amount in dispersion_coverage option with no overlap of existing partitions (if run more than once)
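For example, the option can be used on repeated runs of the populate tool (an illustrative invocation; it assumes the standard swift-dispersion-populate binary and a dispersion.conf that sets dispersion_coverage):
.RS
.nf
swift-dispersion-populate --no-overlap
.fi
.RE
Each additional run adds roughly another dispersion_coverage's worth of partitions without touching those already populated.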
.SH CONFIGURATION
.PD 0
@ -199,7 +199,7 @@ Configure the Proxy node
export ZONE= # set the zone number for that storage device
export STORAGE_LOCAL_NET_IP= # and the IP address
export WEIGHT=100 # relative weight (higher for bigger/faster disks)
export DEVICE=sdb1
export DEVICE=<labelname> # <UUID> if a UUID is used
swift-ring-builder account.builder add z$ZONE-$STORAGE_LOCAL_NET_IP:6002/$DEVICE $WEIGHT
swift-ring-builder container.builder add z$ZONE-$STORAGE_LOCAL_NET_IP:6001/$DEVICE $WEIGHT
swift-ring-builder object.builder add z$ZONE-$STORAGE_LOCAL_NET_IP:6000/$DEVICE $WEIGHT
@ -252,14 +252,25 @@ Configure the Storage nodes

#. For every device on the node, set up the XFS volume (/dev/sdb is used
   as an example); add the mounting option inode64 when your disk is bigger than
   1TB to achieve better performance::
   1TB to achieve better performance. Since drives can get reordered after
   a reboot, create a label which acts as a static reference::

      fs_label="<labelname>"
      fdisk /dev/sdb (set up a single partition)
      mkfs.xfs -i size=512 /dev/sdb1
      echo "/dev/sdb1 /srv/node/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
      mkdir -p /srv/node/sdb1
      mount /srv/node/sdb1
      chown swift:swift /srv/node/sdb1
      mkfs.xfs -i size=512 -L $fs_label /dev/sdb1
      echo "LABEL=$fs_label /srv/node/$fs_label xfs noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
      mkdir -p /srv/node/$fs_label
      mount /srv/node/$fs_label
      chown swift:swift /srv/node/$fs_label

#. If no label was created while setting up the XFS volume, use the UUID. Get the
   UUID with the blkid command, edit the /etc/fstab entry and name the mount point
   accordingly (as done above for the label name)::

      $ blkid /dev/sdb
      /dev/sdb: UUID="<UUID>" TYPE="xfs"
      $ fs_uuid="<UUID>"
      # echo "UUID=$fs_uuid /srv/node/$fs_uuid xfs noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
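The remaining steps then mirror the label-based setup above (shown here for completeness; an illustrative sketch)::

      # mkdir -p /srv/node/$fs_uuid
      # mount /srv/node/$fs_uuid
      # chown swift:swift /srv/node/$fs_uuid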

#. Create /etc/rsyncd.conf::

@ -109,29 +109,33 @@ receive the auth token and a URL to the Swift system.
|
||||
Keystone Auth
|
||||
-------------
|
||||
|
||||
Swift is able to authenticate against OpenStack keystone via the
|
||||
:mod:`swift.common.middleware.keystoneauth` middleware.
|
||||
Swift is able to authenticate against OpenStack Keystone_ via the
|
||||
:ref:`keystoneauth` middleware.
|
||||
|
||||
In order to use the ``keystoneauth`` middleware the ``authtoken``
|
||||
middleware from keystonemiddleware will need to be configured.
|
||||
In order to use the ``keystoneauth`` middleware the ``auth_token``
|
||||
middleware from KeystoneMiddleware_ will need to be configured.
|
||||
|
||||
The ``authtoken`` middleware performs the authentication token
|
||||
validation and retrieves actual user authentication information. It
|
||||
can be found in the keystonemiddleware distribution.
|
||||
can be found in the KeystoneMiddleware_ distribution.
|
||||
|
||||
The ``keystoneauth`` middleware performs authorization and mapping the
|
||||
``keystone`` roles to Swift's ACLs.
|
||||
The :ref:`keystoneauth` middleware performs authorization and mapping the
|
||||
Keystone roles to Swift's ACLs.
|
||||
|
||||
.. _KeystoneMiddleware: http://docs.openstack.org/developer/keystonemiddleware/
|
||||
.. _Keystone: http://docs.openstack.org/developer/keystone/
|
||||
|
||||
Configuring Swift to use Keystone
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Configuring Swift to use Keystone is relatively straight
|
||||
forward. The first step is to ensure that you have the auth_token
|
||||
middleware installed, distributed with keystone it can either be
|
||||
dropped in your python path or installed via the keystone package.
|
||||
Configuring Swift to use Keystone_
|
||||
is relatively straightforward. The first
|
||||
step is to ensure that you have the ``auth_token`` middleware installed. It can
|
||||
either be dropped in your python path or installed via the KeystoneMiddleware_
|
||||
package.
|
||||
|
||||
You first need to make sure you have a service endpoint of type
|
||||
``object-store`` in keystone pointing to your Swift proxy. For example
|
||||
``object-store`` in Keystone pointing to your Swift proxy. For example
|
||||
having this in your ``/etc/keystone/default_catalog.templates`` ::
|
||||
|
||||
catalog.RegionOne.object_store.name = Swift Service
|
||||
@ -161,8 +165,10 @@ add the configuration for the authtoken middleware::
|
||||
include_service_catalog = False
|
||||
|
||||
The actual values for these variables will need to be set depending on
|
||||
your situation. For more information, please refer to the Keystone
|
||||
documentation on the ``auth_token`` middleware, but in short:
|
||||
your situation. For more information, please refer to the `Keystone
|
||||
auth_token middleware documentation
|
||||
<http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration>`_,
|
||||
but in short:
|
||||
|
||||
* Those variables beginning with ``auth_`` point to the Keystone
|
||||
Admin service. This information is used by the middleware to actually
|
||||
@ -171,20 +177,23 @@ documentation on the ``auth_token`` middleware, but in short:
|
||||
* The admin auth credentials (``admin_user``, ``admin_tenant_name``,
|
||||
``admin_password``) will be used to retrieve an admin token. That
|
||||
token will be used to authorize user tokens behind the scenes.
|
||||
* cache is set to ``swift.cache``. This means that the middleware
|
||||
* ``cache`` is set to ``swift.cache``. This means that the middleware
|
||||
will get the Swift memcache from the request environment.
|
||||
* include_service_catalog defaults to True if not set. This means
|
||||
* ``include_service_catalog`` defaults to ``True`` if not set. This means
|
||||
that when validating a token, the service catalog is retrieved
|
||||
and stored in the X-Service-Catalog header. Since Swift does not
|
||||
use the X-Service-Catalog header, there is no point in getting
|
||||
the service catalog. We recommend you set include_service_catalog
|
||||
to False.
|
||||
and stored in the ``X-Service-Catalog`` header. Since Swift does not
|
||||
use the ``X-Service-Catalog`` header, there is no point in getting
|
||||
the service catalog. We recommend you set ``include_service_catalog``
|
||||
to ``False``.
|
||||
* If you wish to authenticate using Keystone's v3 API you must set the
|
||||
``auth_version`` option to ``v3.0``.
|
||||
|
||||
|
||||
.. note::
|
||||
|
||||
If support is required for unvalidated users (as with anonymous
|
||||
access) or for tempurl/formpost middleware, authtoken will need
|
||||
access or making capabilities requests using :ref:`discoverability`) or
|
||||
for tempurl/formpost middleware, authtoken will need
|
||||
to be configured with delay_auth_decision set to 1.
|
||||
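As a concrete illustration of the options discussed above, an ``authtoken`` section might look roughly like this (a minimal sketch only; the Keystone endpoint, credentials and some option names are placeholders and depend on your keystonemiddleware version)::

    [filter:authtoken]
    paste.filter_factory = keystonemiddleware.auth_token:filter_factory
    auth_host = <keystone-admin-host>
    auth_port = 35357
    auth_protocol = http
    admin_user = swift
    admin_tenant_name = service
    admin_password = <password>
    cache = swift.cache
    include_service_catalog = False
    delay_auth_decision = 1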
|
||||
and you can finally add the keystoneauth configuration::
|
||||
@ -193,13 +202,40 @@ and you can finally add the keystoneauth configuration::
|
||||
use = egg:swift#keystoneauth
|
||||
operator_roles = admin, swiftoperator
|
||||
|
||||
By default the only users able to give ACL or to Create other
containers are the ones who has the Keystone role specified in the
``operator_roles`` setting.
Access control using keystoneauth
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This user who have one of those role will be able to give ACLs to
other users on containers, see the documentation on ACL here
:mod:`swift.common.middleware.acl`.
By default the only users able to perform operations (e.g. create a container)
on an account are those having a Keystone role for the corresponding Keystone
project that matches one of the roles specified in the ``operator_roles``
option.

Users who have one of the ``operator_roles`` will be able to set container ACLs
to grant other users permission to read and/or write objects in specific
containers, using ``X-Container-Read`` and ``X-Container-Write`` headers
respectively. In addition to the ACL formats described
:mod:`here <swift.common.middleware.acl>`, keystoneauth supports ACLs using the
format::

    other_project_id:other_user_id.

where ``other_project_id`` is the UUID of a Keystone project and
``other_user_id`` is the UUID of a Keystone user. This will allow the other
user to access a container provided their token is scoped on the other
project. Both ``other_project_id`` and ``other_user_id`` may be replaced with
the wildcard character ``*`` which will match any project or user respectively.
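For example, a user holding one of the ``operator_roles`` could grant read access on one of their containers to a specific user in another project with something like the following (an illustrative sketch using the python-swiftclient CLI; the container name and UUIDs are placeholders)::

    swift post -r '<other_project_id>:<other_user_id>' shared_container

This sets an ``X-Container-Read`` ACL on ``shared_container``; the other user can then read objects in it whenever their token is scoped to that other project.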

Be sure to use Keystone UUIDs rather than names in container ACLs.

.. note::

    For backwards compatibility, keystoneauth will by default grant container
    ACLs expressed as ``other_project_name:other_user_name`` (i.e. using
    Keystone names rather than UUIDs) in the special case when both the other
    project and the other user are in Keystone's default domain and the project
    being accessed is also in the default domain.

For further information see :ref:`keystoneauth`.

Users with the Keystone role defined in ``reseller_admin_role``
(``ResellerAdmin`` by default) can operate on any account. The auth system
@ -134,7 +134,7 @@ For SAIO replication
|
||||
read only = false
|
||||
lock file = /var/lock/object6080.lock
|
||||
|
||||
#. Restart rsync deamon::
|
||||
#. Restart rsync daemon::
|
||||
|
||||
service rsync restart
|
||||
|
||||
|
@ -157,7 +157,7 @@ use = egg:swift#recon
|
||||
# requested by delay_reaping.
|
||||
# reap_warn_after = 2592000
|
||||
|
||||
# Note: Put it at the beginning of the pipleline to profile all middleware. But
|
||||
# Note: Put it at the beginning of the pipeline to profile all middleware. But
|
||||
# it is safer to put this after healthcheck.
|
||||
[filter:xprofile]
|
||||
use = egg:swift#xprofile
|
||||
|
@ -168,7 +168,7 @@ use = egg:swift#recon
|
||||
# Maximum amount of time to spend syncing each container per pass
|
||||
# container_time = 60
|
||||
|
||||
# Note: Put it at the beginning of the pipleline to profile all middleware. But
|
||||
# Note: Put it at the beginning of the pipeline to profile all middleware. But
|
||||
# it is safer to put this after healthcheck.
|
||||
[filter:xprofile]
|
||||
use = egg:swift#xprofile
|
||||
|
@ -127,6 +127,13 @@ use = egg:swift#object
|
||||
# an abort to occur.
|
||||
# replication_failure_threshold = 100
|
||||
# replication_failure_ratio = 1.0
|
||||
#
|
||||
# Use splice() for zero-copy object GETs. This requires Linux kernel
|
||||
# version 3.0 or greater. If you set "splice = yes" but the kernel
|
||||
# does not support it, error messages will appear in the object server
|
||||
# logs at startup, but your object servers should continue to function.
|
||||
#
|
||||
# splice = no
|
||||
|
||||
[filter:healthcheck]
|
||||
use = egg:swift#healthcheck
|
||||
|
@ -20,6 +20,7 @@ from random import random
|
||||
|
||||
import swift.common.db
|
||||
from swift.account.backend import AccountBroker, DATADIR
|
||||
from swift.common.exceptions import InvalidAccountInfo
|
||||
from swift.common.utils import get_logger, audit_location_generator, \
|
||||
config_true_value, dump_recon_cache, ratelimit_sleep
|
||||
from swift.common.daemon import Daemon
|
||||
@ -30,9 +31,9 @@ from eventlet import Timeout
|
||||
class AccountAuditor(Daemon):
|
||||
"""Audit accounts."""
|
||||
|
||||
def __init__(self, conf):
|
||||
def __init__(self, conf, logger=None):
|
||||
self.conf = conf
|
||||
self.logger = get_logger(conf, log_route='account-auditor')
|
||||
self.logger = logger or get_logger(conf, log_route='account-auditor')
|
||||
self.devices = conf.get('devices', '/srv/node')
|
||||
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
|
||||
self.interval = int(conf.get('interval', 1800))
|
||||
@ -104,6 +105,29 @@ class AccountAuditor(Daemon):
|
||||
dump_recon_cache({'account_auditor_pass_completed': elapsed},
|
||||
self.rcache, self.logger)
|
||||
|
||||
def validate_per_policy_counts(self, broker):
|
||||
info = broker.get_info()
|
||||
policy_stats = broker.get_policy_stats(do_migrations=True)
|
||||
policy_totals = {
|
||||
'container_count': 0,
|
||||
'object_count': 0,
|
||||
'bytes_used': 0,
|
||||
}
|
||||
for policy_stat in policy_stats.values():
|
||||
for key in policy_totals:
|
||||
policy_totals[key] += policy_stat[key]
|
||||
|
||||
for key in policy_totals:
|
||||
if policy_totals[key] == info[key]:
|
||||
continue
|
||||
raise InvalidAccountInfo(_(
|
||||
'The total %(key)s for the container (%(total)s) does not '
|
||||
'match the sum of %(key)s across policies (%(sum)s)') % {
|
||||
'key': key,
|
||||
'total': info[key],
|
||||
'sum': policy_totals[key],
|
||||
})
|
||||
|
||||
def account_audit(self, path):
|
||||
"""
|
||||
Audits the given account path
|
||||
@ -114,10 +138,15 @@ class AccountAuditor(Daemon):
|
||||
try:
|
||||
broker = AccountBroker(path)
|
||||
if not broker.is_deleted():
|
||||
broker.get_info()
|
||||
self.validate_per_policy_counts(broker)
|
||||
self.logger.increment('passes')
|
||||
self.account_passes += 1
|
||||
self.logger.debug('Audit passed for %s' % broker)
|
||||
except InvalidAccountInfo as e:
|
||||
self.logger.increment('failures')
|
||||
self.account_failures += 1
|
||||
self.logger.error(
|
||||
_('Audit Failed for %s: %s'), path, str(e))
|
||||
except (Exception, Timeout):
|
||||
self.logger.increment('failures')
|
||||
self.account_failures += 1
|
||||
|
@ -32,17 +32,19 @@ POLICY_STAT_TRIGGER_SCRIPT = """
|
||||
CREATE TRIGGER container_insert_ps AFTER INSERT ON container
|
||||
BEGIN
|
||||
INSERT OR IGNORE INTO policy_stat
|
||||
(storage_policy_index, object_count, bytes_used)
|
||||
VALUES (new.storage_policy_index, 0, 0);
|
||||
(storage_policy_index, container_count, object_count, bytes_used)
|
||||
VALUES (new.storage_policy_index, 0, 0, 0);
|
||||
UPDATE policy_stat
|
||||
SET object_count = object_count + new.object_count,
|
||||
SET container_count = container_count + (1 - new.deleted),
|
||||
object_count = object_count + new.object_count,
|
||||
bytes_used = bytes_used + new.bytes_used
|
||||
WHERE storage_policy_index = new.storage_policy_index;
|
||||
END;
|
||||
CREATE TRIGGER container_delete_ps AFTER DELETE ON container
|
||||
BEGIN
|
||||
UPDATE policy_stat
|
||||
SET object_count = object_count - old.object_count,
|
||||
SET container_count = container_count - (1 - old.deleted),
|
||||
object_count = object_count - old.object_count,
|
||||
bytes_used = bytes_used - old.bytes_used
|
||||
WHERE storage_policy_index = old.storage_policy_index;
|
||||
END;
|
||||
@ -165,13 +167,15 @@ class AccountBroker(DatabaseBroker):
|
||||
conn.executescript("""
|
||||
CREATE TABLE policy_stat (
|
||||
storage_policy_index INTEGER PRIMARY KEY,
|
||||
container_count INTEGER DEFAULT 0,
|
||||
object_count INTEGER DEFAULT 0,
|
||||
bytes_used INTEGER DEFAULT 0
|
||||
);
|
||||
INSERT OR IGNORE INTO policy_stat (
|
||||
storage_policy_index, object_count, bytes_used
|
||||
storage_policy_index, container_count, object_count,
|
||||
bytes_used
|
||||
)
|
||||
SELECT 0, object_count, bytes_used
|
||||
SELECT 0, container_count, object_count, bytes_used
|
||||
FROM account_stat
|
||||
WHERE container_count > 0;
|
||||
""")
|
||||
@ -296,24 +300,45 @@ class AccountBroker(DatabaseBroker):
|
||||
return row['status'] == "DELETED" or (
|
||||
row['delete_timestamp'] > row['put_timestamp'])
|
||||
|
||||
def get_policy_stats(self):
|
||||
def get_policy_stats(self, do_migrations=False):
|
||||
"""
|
||||
Get global policy stats for the account.
|
||||
|
||||
:param do_migrations: boolean, if True the policy stat dicts will
|
||||
always include the 'container_count' key;
|
||||
otherwise it may be omitted on legacy databases
|
||||
until they are migrated.
|
||||
|
||||
:returns: dict of policy stats where the key is the policy index and
|
||||
the value is a dictionary like {'object_count': M,
|
||||
'bytes_used': N}
|
||||
'bytes_used': N, 'container_count': L}
|
||||
"""
|
||||
info = []
|
||||
columns = [
|
||||
'storage_policy_index',
|
||||
'container_count',
|
||||
'object_count',
|
||||
'bytes_used',
|
||||
]
|
||||
|
||||
def run_query():
|
||||
return (conn.execute('''
|
||||
SELECT %s
|
||||
FROM policy_stat
|
||||
''' % ', '.join(columns)).fetchall())
|
||||
|
||||
self._commit_puts_stale_ok()
|
||||
info = []
|
||||
with self.get() as conn:
|
||||
try:
|
||||
info = (conn.execute('''
|
||||
SELECT storage_policy_index, object_count, bytes_used
|
||||
FROM policy_stat
|
||||
''').fetchall())
|
||||
info = run_query()
|
||||
except sqlite3.OperationalError as err:
|
||||
if "no such table: policy_stat" not in str(err):
|
||||
if "no such column: container_count" in str(err):
|
||||
if do_migrations:
|
||||
self._migrate_add_container_count(conn)
|
||||
else:
|
||||
columns.remove('container_count')
|
||||
info = run_query()
|
||||
elif "no such table: policy_stat" not in str(err):
|
||||
raise
|
||||
|
||||
policy_stats = {}
|
||||
@ -501,10 +526,72 @@ class AccountBroker(DatabaseBroker):
|
||||
self._migrate_add_storage_policy_index(conn)
|
||||
_really_merge_items(conn)
|
||||
|
||||
def _migrate_add_container_count(self, conn):
|
||||
"""
|
||||
Add the container_count column to the 'policy_stat' table and
|
||||
update it
|
||||
|
||||
:param conn: DB connection object
|
||||
"""
|
||||
# add the container_count column
|
||||
curs = conn.cursor()
|
||||
curs.executescript('''
|
||||
DROP TRIGGER container_delete_ps;
|
||||
DROP TRIGGER container_insert_ps;
|
||||
ALTER TABLE policy_stat
|
||||
ADD COLUMN container_count INTEGER DEFAULT 0;
|
||||
''' + POLICY_STAT_TRIGGER_SCRIPT)
|
||||
|
||||
# keep the simple case simple, if there's only one entry in the
|
||||
# policy_stat table we just copy the total container count from the
|
||||
# account_stat table
|
||||
|
||||
# if that triggers an update then the where changes <> 0 *would* exist
|
||||
# and the insert or replace from the count subqueries won't execute
|
||||
|
||||
curs.executescript("""
|
||||
UPDATE policy_stat
|
||||
SET container_count = (
|
||||
SELECT container_count
|
||||
FROM account_stat)
|
||||
WHERE (
|
||||
SELECT COUNT(storage_policy_index)
|
||||
FROM policy_stat
|
||||
) <= 1;
|
||||
|
||||
INSERT OR REPLACE INTO policy_stat (
|
||||
storage_policy_index,
|
||||
container_count,
|
||||
object_count,
|
||||
bytes_used
|
||||
)
|
||||
SELECT p.storage_policy_index,
|
||||
c.count,
|
||||
p.object_count,
|
||||
p.bytes_used
|
||||
FROM (
|
||||
SELECT storage_policy_index,
|
||||
COUNT(*) as count
|
||||
FROM container
|
||||
WHERE deleted = 0
|
||||
GROUP BY storage_policy_index
|
||||
) c
|
||||
JOIN policy_stat p
|
||||
ON p.storage_policy_index = c.storage_policy_index
|
||||
WHERE NOT EXISTS(
|
||||
SELECT changes() as change
|
||||
FROM policy_stat
|
||||
WHERE change <> 0
|
||||
);
|
||||
""")
|
||||
conn.commit()
|
||||
|
||||
def _migrate_add_storage_policy_index(self, conn):
|
||||
"""
|
||||
Add the storage_policy_index column to the 'container' table and
|
||||
set up triggers, creating the policy_stat table if needed.
|
||||
|
||||
:param conn: DB connection object
|
||||
"""
|
||||
try:
|
||||
self.create_policy_stat_table(conn)
|
||||
|
@ -79,6 +79,10 @@ class DeviceUnavailable(SwiftException):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidAccountInfo(SwiftException):
|
||||
pass
|
||||
|
||||
|
||||
class PathNotDir(OSError):
|
||||
pass
|
||||
|
||||
@ -145,6 +149,10 @@ class ReplicationLockTimeout(LockTimeout):
|
||||
pass
|
||||
|
||||
|
||||
class MimeInvalid(SwiftException):
|
||||
pass
|
||||
|
||||
|
||||
class ClientException(Exception):
|
||||
|
||||
def __init__(self, msg, http_scheme='', http_host='', http_port='',
|
||||
|
@ -112,14 +112,15 @@ the file are simply ignored).
|
||||
__all__ = ['FormPost', 'filter_factory', 'READ_CHUNK_SIZE', 'MAX_VALUE_LENGTH']
|
||||
|
||||
import hmac
|
||||
import re
|
||||
import rfc822
|
||||
from hashlib import sha1
|
||||
from time import time
|
||||
from urllib import quote
|
||||
|
||||
from swift.common.exceptions import MimeInvalid
|
||||
from swift.common.middleware.tempurl import get_tempurl_keys_from_metadata
|
||||
from swift.common.utils import streq_const_time, register_swift_info
|
||||
from swift.common.utils import streq_const_time, register_swift_info, \
|
||||
parse_content_disposition, iter_multipart_mime_documents
|
||||
from swift.common.wsgi import make_pre_authed_env
|
||||
from swift.common.swob import HTTPUnauthorized
|
||||
from swift.proxy.controllers.base import get_account_info
|
||||
@ -132,9 +133,6 @@ READ_CHUNK_SIZE = 4096
|
||||
#: truncated.
|
||||
MAX_VALUE_LENGTH = 4096
|
||||
|
||||
#: Regular expression to match form attributes.
|
||||
ATTRIBUTES_RE = re.compile(r'(\w+)=(".*?"|[^";]+)(; ?|$)')
|
||||
|
||||
|
||||
class FormInvalid(Exception):
|
||||
pass
|
||||
@ -144,125 +142,6 @@ class FormUnauthorized(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def _parse_attrs(header):
|
||||
"""
|
||||
Given the value of a header like:
|
||||
Content-Disposition: form-data; name="somefile"; filename="test.html"
|
||||
|
||||
Return data like
|
||||
("form-data", {"name": "somefile", "filename": "test.html"})
|
||||
|
||||
:param header: Value of a header (the part after the ': ').
|
||||
:returns: (value name, dict) of the attribute data parsed (see above).
|
||||
"""
|
||||
attributes = {}
|
||||
attrs = ''
|
||||
if '; ' in header:
|
||||
header, attrs = header.split('; ', 1)
|
||||
m = True
|
||||
while m:
|
||||
m = ATTRIBUTES_RE.match(attrs)
|
||||
if m:
|
||||
attrs = attrs[len(m.group(0)):]
|
||||
attributes[m.group(1)] = m.group(2).strip('"')
|
||||
return header, attributes
|
||||
|
||||
|
||||
class _IterRequestsFileLikeObject(object):
|
||||
|
||||
def __init__(self, wsgi_input, boundary, input_buffer):
|
||||
self.no_more_data_for_this_file = False
|
||||
self.no_more_files = False
|
||||
self.wsgi_input = wsgi_input
|
||||
self.boundary = boundary
|
||||
self.input_buffer = input_buffer
|
||||
|
||||
def read(self, length=None):
|
||||
if not length:
|
||||
length = READ_CHUNK_SIZE
|
||||
if self.no_more_data_for_this_file:
|
||||
return ''
|
||||
|
||||
# read enough data to know whether we're going to run
|
||||
# into a boundary in next [length] bytes
|
||||
if len(self.input_buffer) < length + len(self.boundary) + 2:
|
||||
to_read = length + len(self.boundary) + 2
|
||||
while to_read > 0:
|
||||
chunk = self.wsgi_input.read(to_read)
|
||||
to_read -= len(chunk)
|
||||
self.input_buffer += chunk
|
||||
if not chunk:
|
||||
self.no_more_files = True
|
||||
break
|
||||
|
||||
boundary_pos = self.input_buffer.find(self.boundary)
|
||||
|
||||
# boundary does not exist in the next (length) bytes
|
||||
if boundary_pos == -1 or boundary_pos > length:
|
||||
ret = self.input_buffer[:length]
|
||||
self.input_buffer = self.input_buffer[length:]
|
||||
# if it does, just return data up to the boundary
|
||||
else:
|
||||
ret, self.input_buffer = self.input_buffer.split(self.boundary, 1)
|
||||
self.no_more_files = self.input_buffer.startswith('--')
|
||||
self.no_more_data_for_this_file = True
|
||||
self.input_buffer = self.input_buffer[2:]
|
||||
return ret
|
||||
|
||||
def readline(self):
|
||||
if self.no_more_data_for_this_file:
|
||||
return ''
|
||||
boundary_pos = newline_pos = -1
|
||||
while newline_pos < 0 and boundary_pos < 0:
|
||||
chunk = self.wsgi_input.read(READ_CHUNK_SIZE)
|
||||
self.input_buffer += chunk
|
||||
newline_pos = self.input_buffer.find('\r\n')
|
||||
boundary_pos = self.input_buffer.find(self.boundary)
|
||||
if not chunk:
|
||||
self.no_more_files = True
|
||||
break
|
||||
# found a newline
|
||||
if newline_pos >= 0 and \
|
||||
(boundary_pos < 0 or newline_pos < boundary_pos):
|
||||
# Use self.read to ensure any logic there happens...
|
||||
ret = ''
|
||||
to_read = newline_pos + 2
|
||||
while to_read > 0:
|
||||
chunk = self.read(to_read)
|
||||
# Should never happen since we're reading from input_buffer,
|
||||
# but just for completeness...
|
||||
if not chunk:
|
||||
break
|
||||
to_read -= len(chunk)
|
||||
ret += chunk
|
||||
return ret
|
||||
else: # no newlines, just return up to next boundary
|
||||
return self.read(len(self.input_buffer))
|
||||
|
||||
|
||||
def _iter_requests(wsgi_input, boundary):
|
||||
"""
|
||||
Given a multi-part mime encoded input file object and boundary,
|
||||
yield file-like objects for each part.
|
||||
|
||||
:param wsgi_input: The file-like object to read from.
|
||||
:param boundary: The mime boundary to separate new file-like
|
||||
objects on.
|
||||
:returns: A generator of file-like objects for each part.
|
||||
"""
|
||||
boundary = '--' + boundary
|
||||
if wsgi_input.readline(len(boundary + '\r\n')).strip() != boundary:
|
||||
raise FormInvalid('invalid starting boundary')
|
||||
boundary = '\r\n' + boundary
|
||||
input_buffer = ''
|
||||
done = False
|
||||
while not done:
|
||||
it = _IterRequestsFileLikeObject(wsgi_input, boundary, input_buffer)
|
||||
yield it
|
||||
done = it.no_more_files
|
||||
input_buffer = it.input_buffer
|
||||
|
||||
|
||||
class _CappedFileLikeObject(object):
|
||||
"""
|
||||
A file-like object wrapping another file-like object that raises
|
||||
@ -328,7 +207,7 @@ class FormPost(object):
|
||||
if env['REQUEST_METHOD'] == 'POST':
|
||||
try:
|
||||
content_type, attrs = \
|
||||
_parse_attrs(env.get('CONTENT_TYPE') or '')
|
||||
parse_content_disposition(env.get('CONTENT_TYPE') or '')
|
||||
if content_type == 'multipart/form-data' and \
|
||||
'boundary' in attrs:
|
||||
http_user_agent = "%s FormPost" % (
|
||||
@ -338,7 +217,7 @@ class FormPost(object):
|
||||
env, attrs['boundary'])
|
||||
start_response(status, headers)
|
||||
return [body]
|
||||
except (FormInvalid, EOFError) as err:
|
||||
except (FormInvalid, MimeInvalid, EOFError) as err:
|
||||
body = 'FormPost: %s' % err
|
||||
start_response(
|
||||
'400 Bad Request',
|
||||
@ -365,10 +244,11 @@ class FormPost(object):
|
||||
attributes = {}
|
||||
subheaders = []
|
||||
file_count = 0
|
||||
for fp in _iter_requests(env['wsgi.input'], boundary):
|
||||
for fp in iter_multipart_mime_documents(
|
||||
env['wsgi.input'], boundary, read_chunk_size=READ_CHUNK_SIZE):
|
||||
hdrs = rfc822.Message(fp, 0)
|
||||
disp, attrs = \
|
||||
_parse_attrs(hdrs.getheader('Content-Disposition', ''))
|
||||
disp, attrs = parse_content_disposition(
|
||||
hdrs.getheader('Content-Disposition', ''))
|
||||
if disp == 'form-data' and attrs.get('filename'):
|
||||
file_count += 1
|
||||
try:
|
||||
|
@ -79,20 +79,25 @@ class KeystoneAuth(object):
|
||||
reseller_prefix = NEWAUTH
|
||||
|
||||
The keystoneauth middleware supports cross-tenant access control using
|
||||
the syntax <tenant>:<user> in container Access Control Lists (ACLs). For
|
||||
a request to be granted by an ACL, <tenant> must match the UUID of the
|
||||
tenant to which the request token is scoped and <user> must match the
|
||||
UUID of the user authenticated by the request token.
|
||||
the syntax ``<tenant>:<user>`` to specify a grantee in container Access
|
||||
Control Lists (ACLs). For a request to be granted by an ACL, the grantee
|
||||
``<tenant>`` must match the UUID of the tenant to which the request
|
||||
token is scoped and the grantee ``<user>`` must match the UUID of the
|
||||
user authenticated by the request token.
|
||||
|
||||
Note that names must no longer be used in cross-tenant ACLs because with
|
||||
the introduction of domains in keystone names are no longer globally
|
||||
unique. For backwards compatibility, ACLs using names will be granted by
|
||||
keystoneauth when it can be established that both the grantee and the
|
||||
tenant being accessed are either not yet in a domain (e.g. the request
|
||||
token has been obtained via the keystone v2 API) or are both in the
|
||||
default domain to which legacy accounts would have been migrated. The id
|
||||
of the default domain is specified by the config option
|
||||
``default_domain_id``:
|
||||
unique.
|
||||
|
||||
For backwards compatibility, ACLs using names will be granted by
|
||||
keystoneauth when it can be established that the grantee tenant,
|
||||
the grantee user and the tenant being accessed are either not yet in a
|
||||
domain (e.g. the request token has been obtained via the keystone v2
|
||||
API) or are all in the default domain to which legacy accounts would
|
||||
have been migrated. The default domain is identified by its UUID,
|
||||
which by default has the value ``default``. This can be changed by
|
||||
setting the ``default_domain_id`` option in the keystoneauth
|
||||
configuration::
|
||||
|
||||
default_domain_id = default
|
||||
|
||||
|
@ -14,6 +14,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
import bisect
|
||||
import copy
|
||||
import itertools
|
||||
import math
|
||||
import random
|
||||
@ -330,6 +331,7 @@ class RingBuilder(object):
|
||||
|
||||
:returns: (number_of_partitions_altered, resulting_balance)
|
||||
"""
|
||||
old_replica2part2dev = copy.deepcopy(self._replica2part2dev)
|
||||
|
||||
if seed is not None:
|
||||
random.seed(seed)
|
||||
@ -339,29 +341,46 @@ class RingBuilder(object):
|
||||
self._initial_balance()
|
||||
self.devs_changed = False
|
||||
return self.parts, self.get_balance()
|
||||
retval = 0
|
||||
changed_parts = 0
|
||||
self._update_last_part_moves()
|
||||
last_balance = 0
|
||||
new_parts, removed_part_count = self._adjust_replica2part2dev_size()
|
||||
retval += removed_part_count
|
||||
changed_parts += removed_part_count
|
||||
if new_parts or removed_part_count:
|
||||
self._set_parts_wanted()
|
||||
self._reassign_parts(new_parts)
|
||||
retval += len(new_parts)
|
||||
changed_parts += len(new_parts)
|
||||
while True:
|
||||
reassign_parts = self._gather_reassign_parts()
|
||||
self._reassign_parts(reassign_parts)
|
||||
retval += len(reassign_parts)
|
||||
changed_parts += len(reassign_parts)
|
||||
while self._remove_devs:
|
||||
self.devs[self._remove_devs.pop()['id']] = None
|
||||
balance = self.get_balance()
|
||||
if balance < 1 or abs(last_balance - balance) < 1 or \
|
||||
retval == self.parts:
|
||||
changed_parts == self.parts:
|
||||
break
|
||||
last_balance = balance
|
||||
self.devs_changed = False
|
||||
self.version += 1
|
||||
return retval, balance
|
||||
|
||||
# Compare the partition allocation before and after the rebalance
|
||||
# Only changed device ids are taken into account; devices might be
|
||||
# "touched" during the rebalance, but actually not really moved
|
||||
changed_parts = 0
|
||||
for rep_id, _rep in enumerate(self._replica2part2dev):
|
||||
for part_id, new_device in enumerate(_rep):
|
||||
# IndexErrors will be raised if the replicas are increased or
|
||||
# decreased, and that actually means the partition has changed
|
||||
try:
|
||||
old_device = old_replica2part2dev[rep_id][part_id]
|
||||
except IndexError:
|
||||
changed_parts += 1
|
||||
continue
|
||||
|
||||
if old_device != new_device:
|
||||
changed_parts += 1
|
||||
return changed_parts, balance
|
||||
|
||||
def validate(self, stats=False):
|
||||
"""
|
||||
|
@ -476,7 +476,7 @@ def parse_storage_policies(conf):
|
||||
if not section.startswith('storage-policy:'):
|
||||
continue
|
||||
policy_index = section.split(':', 1)[1]
|
||||
# map config option name to StoragePolicy paramater name
|
||||
# map config option name to StoragePolicy parameter name
|
||||
config_to_policy_option_map = {
|
||||
'name': 'name',
|
||||
'default': 'is_default',
|
||||
|
@ -49,7 +49,7 @@ import random
|
||||
import functools
|
||||
import inspect
|
||||
|
||||
from swift.common.utils import reiterate, split_path, Timestamp
|
||||
from swift.common.utils import reiterate, split_path, Timestamp, pairs
|
||||
from swift.common.exceptions import InvalidTimestamp
|
||||
|
||||
|
||||
@ -110,6 +110,10 @@ RESPONSE_REASONS = {
|
||||
'resource. Drive: %(drive)s'),
|
||||
}
|
||||
|
||||
MAX_RANGE_OVERLAPS = 2
|
||||
MAX_NONASCENDING_RANGES = 8
|
||||
MAX_RANGES = 50
|
||||
|
||||
|
||||
class _UTC(tzinfo):
|
||||
"""
|
||||
@ -584,6 +588,43 @@ class Range(object):
|
||||
# the total length of the content
|
||||
all_ranges.append((begin, min(end + 1, length)))
|
||||
|
||||
# RFC 7233 section 6.1 ("Denial-of-Service Attacks Using Range") says:
|
||||
#
|
||||
# Unconstrained multiple range requests are susceptible to denial-of-
|
||||
# service attacks because the effort required to request many
|
||||
# overlapping ranges of the same data is tiny compared to the time,
|
||||
# memory, and bandwidth consumed by attempting to serve the requested
|
||||
# data in many parts. Servers ought to ignore, coalesce, or reject
|
||||
# egregious range requests, such as requests for more than two
|
||||
# overlapping ranges or for many small ranges in a single set,
|
||||
# particularly when the ranges are requested out of order for no
|
||||
# apparent reason. Multipart range requests are not designed to
|
||||
# support random access.
|
||||
#
|
||||
# We're defining "egregious" here as:
|
||||
#
|
||||
# * more than 100 requested ranges OR
|
||||
# * more than 2 overlapping ranges OR
|
||||
# * more than 8 non-ascending-order ranges
|
||||
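# For instance, a single header such as "Range: bytes=0-0,2-2,4-4,..." with
# more than 50 comma-separated ranges exceeds MAX_RANGES, so the whole set
# is rejected below (an empty list is returned).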
if len(all_ranges) > MAX_RANGES:
|
||||
return []
|
||||
|
||||
overlaps = 0
|
||||
for ((start1, end1), (start2, end2)) in pairs(all_ranges):
|
||||
if ((start1 < start2 < end1) or (start1 < end2 < end1) or
|
||||
(start2 < start1 < end2) or (start2 < end1 < end2)):
|
||||
overlaps += 1
|
||||
if overlaps > MAX_RANGE_OVERLAPS:
|
||||
return []
|
||||
|
||||
ascending = True
|
||||
for start1, start2 in zip(all_ranges, all_ranges[1:]):
|
||||
if start1 > start2:
|
||||
ascending = False
|
||||
break
|
||||
if not ascending and len(all_ranges) >= MAX_NONASCENDING_RANGES:
|
||||
return []
|
||||
|
||||
return all_ranges
|
||||
|
||||
|
||||
|
@ -84,6 +84,11 @@ SysLogHandler.priority_map['NOTICE'] = 'notice'
|
||||
# These are lazily pulled from libc elsewhere
|
||||
_sys_fallocate = None
|
||||
_posix_fadvise = None
|
||||
_libc_socket = None
|
||||
_libc_bind = None
|
||||
_libc_accept = None
|
||||
_libc_splice = None
|
||||
_libc_tee = None
|
||||
|
||||
# If set to non-zero, fallocate routines will fail based on free space
|
||||
# available being at or below this amount, in bytes.
|
||||
@ -97,6 +102,13 @@ HASH_PATH_PREFIX = ''
|
||||
|
||||
SWIFT_CONF_FILE = '/etc/swift/swift.conf'
|
||||
|
||||
# These constants are Linux-specific, and Python doesn't seem to know
|
||||
# about them. We ask anyway just in case that ever gets fixed.
|
||||
#
|
||||
# The values were copied from the Linux 3.0 kernel headers.
|
||||
AF_ALG = getattr(socket, 'AF_ALG', 38)
|
||||
F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031)
|
||||
|
||||
|
||||
class InvalidHashPathConfigError(ValueError):
|
||||
|
||||
@ -292,16 +304,22 @@ def validate_configuration():
|
||||
sys.exit("Error: %s" % e)
|
||||
|
||||
|
||||
def load_libc_function(func_name, log_error=True):
|
||||
def load_libc_function(func_name, log_error=True,
|
||||
fail_if_missing=False):
|
||||
"""
|
||||
Attempt to find the function in libc, otherwise return a no-op func.
|
||||
|
||||
:param func_name: name of the function to pull from libc.
|
||||
:param log_error: log an error when a function can't be found
|
||||
:param fail_if_missing: raise an exception when a function can't be found.
|
||||
Default behavior is to return a no-op function.
|
||||
"""
|
||||
try:
|
||||
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
|
||||
return getattr(libc, func_name)
|
||||
except AttributeError:
|
||||
if fail_if_missing:
|
||||
raise
|
||||
if log_error:
|
||||
logging.warn(_("Unable to locate %s in libc. Leaving as a "
|
||||
"no-op."), func_name)
|
||||
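# Example of the new flag, taken from this same patch (see system_has_splice()
# later in utils.py):
#     _libc_splice = load_libc_function('splice', fail_if_missing=True)
# raises AttributeError when libc does not provide splice(), rather than
# silently substituting a no-op function.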
@ -2424,6 +2442,17 @@ def streq_const_time(s1, s2):
|
||||
return result == 0
|
||||
|
||||
|
||||
def pairs(item_list):
|
||||
"""
|
||||
Returns an iterator of all pairs of elements from item_list.
|
||||
|
||||
:param item_list: items (no duplicates allowed)
|
||||
"""
|
||||
for i, item1 in enumerate(item_list):
|
||||
for item2 in item_list[(i + 1):]:
|
||||
yield (item1, item2)
|
||||
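# Illustrative usage: list(pairs([1, 2, 3])) == [(1, 2), (1, 3), (2, 3)].
# swob.Range (earlier in this patch) relies on this to count how many of the
# requested byte ranges overlap one another.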
|
||||
|
||||
def replication(func):
|
||||
"""
|
||||
Decorator to declare which methods are accessible for different
|
||||
@ -2990,3 +3019,272 @@ def get_expirer_container(x_delete_at, expirer_divisor, acc, cont, obj):
|
||||
shard_int = int(hash_path(acc, cont, obj), 16) % 100
|
||||
return normalize_delete_at_timestamp(
|
||||
int(x_delete_at) / expirer_divisor * expirer_divisor - shard_int)
|
||||
|
||||
|
||||
class _MultipartMimeFileLikeObject(object):
|
||||
|
||||
def __init__(self, wsgi_input, boundary, input_buffer, read_chunk_size):
|
||||
self.no_more_data_for_this_file = False
|
||||
self.no_more_files = False
|
||||
self.wsgi_input = wsgi_input
|
||||
self.boundary = boundary
|
||||
self.input_buffer = input_buffer
|
||||
self.read_chunk_size = read_chunk_size
|
||||
|
||||
def read(self, length=None):
|
||||
if not length:
|
||||
length = self.read_chunk_size
|
||||
if self.no_more_data_for_this_file:
|
||||
return ''
|
||||
|
||||
# read enough data to know whether we're going to run
|
||||
# into a boundary in next [length] bytes
|
||||
if len(self.input_buffer) < length + len(self.boundary) + 2:
|
||||
to_read = length + len(self.boundary) + 2
|
||||
while to_read > 0:
|
||||
chunk = self.wsgi_input.read(to_read)
|
||||
to_read -= len(chunk)
|
||||
self.input_buffer += chunk
|
||||
if not chunk:
|
||||
self.no_more_files = True
|
||||
break
|
||||
|
||||
boundary_pos = self.input_buffer.find(self.boundary)
|
||||
|
||||
# boundary does not exist in the next (length) bytes
|
||||
if boundary_pos == -1 or boundary_pos > length:
|
||||
ret = self.input_buffer[:length]
|
||||
self.input_buffer = self.input_buffer[length:]
|
||||
# if it does, just return data up to the boundary
|
||||
else:
|
||||
ret, self.input_buffer = self.input_buffer.split(self.boundary, 1)
|
||||
self.no_more_files = self.input_buffer.startswith('--')
|
||||
self.no_more_data_for_this_file = True
|
||||
self.input_buffer = self.input_buffer[2:]
|
||||
return ret
|
||||
|
||||
def readline(self):
|
||||
if self.no_more_data_for_this_file:
|
||||
return ''
|
||||
boundary_pos = newline_pos = -1
|
||||
while newline_pos < 0 and boundary_pos < 0:
|
||||
chunk = self.wsgi_input.read(self.read_chunk_size)
|
||||
self.input_buffer += chunk
|
||||
newline_pos = self.input_buffer.find('\r\n')
|
||||
boundary_pos = self.input_buffer.find(self.boundary)
|
||||
if not chunk:
|
||||
self.no_more_files = True
|
||||
break
|
||||
# found a newline
|
||||
if newline_pos >= 0 and \
|
||||
(boundary_pos < 0 or newline_pos < boundary_pos):
|
||||
# Use self.read to ensure any logic there happens...
|
||||
ret = ''
|
||||
to_read = newline_pos + 2
|
||||
while to_read > 0:
|
||||
chunk = self.read(to_read)
|
||||
# Should never happen since we're reading from input_buffer,
|
||||
# but just for completeness...
|
||||
if not chunk:
|
||||
break
|
||||
to_read -= len(chunk)
|
||||
ret += chunk
|
||||
return ret
|
||||
else: # no newlines, just return up to next boundary
|
||||
return self.read(len(self.input_buffer))
|
||||
|
||||
|
||||
def iter_multipart_mime_documents(wsgi_input, boundary, read_chunk_size=4096):
|
||||
"""
|
||||
Given a multi-part-mime-encoded input file object and boundary,
|
||||
yield file-like objects for each part.
|
||||
|
||||
:param wsgi_input: The file-like object to read from.
|
||||
:param boundary: The mime boundary to separate new file-like
|
||||
objects on.
|
||||
:returns: A generator of file-like objects for each part.
|
||||
:raises: MimeInvalid if the document is malformed
|
||||
"""
|
||||
boundary = '--' + boundary
|
||||
if wsgi_input.readline(len(boundary + '\r\n')).strip() != boundary:
|
||||
raise swift.common.exceptions.MimeInvalid('invalid starting boundary')
|
||||
boundary = '\r\n' + boundary
|
||||
input_buffer = ''
|
||||
done = False
|
||||
while not done:
|
||||
it = _MultipartMimeFileLikeObject(wsgi_input, boundary, input_buffer,
|
||||
read_chunk_size)
|
||||
yield it
|
||||
done = it.no_more_files
|
||||
input_buffer = it.input_buffer
|
||||
|
||||
|
||||
#: Regular expression to match form attributes.
|
||||
ATTRIBUTES_RE = re.compile(r'(\w+)=(".*?"|[^";]+)(; ?|$)')
|
||||
|
||||
|
||||
def parse_content_disposition(header):
|
||||
"""
|
||||
Given the value of a header like:
|
||||
Content-Disposition: form-data; name="somefile"; filename="test.html"
|
||||
|
||||
Return data like
|
||||
("form-data", {"name": "somefile", "filename": "test.html"})
|
||||
|
||||
:param header: Value of a header (the part after the ': ').
|
||||
:returns: (value name, dict) of the attribute data parsed (see above).
|
||||
"""
|
||||
attributes = {}
|
||||
attrs = ''
|
||||
if '; ' in header:
|
||||
header, attrs = header.split('; ', 1)
|
||||
m = True
|
||||
while m:
|
||||
m = ATTRIBUTES_RE.match(attrs)
|
||||
if m:
|
||||
attrs = attrs[len(m.group(0)):]
|
||||
attributes[m.group(1)] = m.group(2).strip('"')
|
||||
return header, attributes
|
||||
|
||||
|
||||
class sockaddr_alg(ctypes.Structure):
|
||||
_fields_ = [("salg_family", ctypes.c_ushort),
|
||||
("salg_type", ctypes.c_ubyte * 14),
|
||||
("salg_feat", ctypes.c_uint),
|
||||
("salg_mask", ctypes.c_uint),
|
||||
("salg_name", ctypes.c_ubyte * 64)]
|
||||
|
||||
|
||||
_bound_md5_sockfd = None
|
||||
|
||||
|
||||
def get_md5_socket():
|
||||
"""
|
||||
Get an MD5 socket file descriptor. One can MD5 data with it by writing it
|
||||
to the socket with os.write, then os.read the 16 bytes of the checksum out
|
||||
later.
|
||||
|
||||
NOTE: It is the caller's responsibility to ensure that os.close() is
|
||||
called on the returned file descriptor. This is a bare file descriptor,
|
||||
not a Python object. It doesn't close itself.
|
||||
"""
|
||||
|
||||
# Linux's AF_ALG sockets work like this:
|
||||
#
|
||||
# First, initialize a socket with socket() and bind(). This tells the
|
||||
# socket what algorithm to use, as well as setting up any necessary bits
|
||||
# like crypto keys. Of course, MD5 doesn't need any keys, so it's just the
|
||||
# algorithm name.
|
||||
#
|
||||
# Second, to hash some data, get a second socket by calling accept() on
|
||||
# the first socket. Write data to the socket, then when finished, read the
|
||||
# checksum from the socket and close it. This lets you checksum multiple
|
||||
# things without repeating all the setup code each time.
|
||||
#
|
||||
# Since we only need to bind() one socket, we do that here and save it for
|
||||
# future re-use. That way, we only use one file descriptor to get an MD5
|
||||
# socket instead of two, and we also get to save some syscalls.
|
||||
|
||||
global _bound_md5_sockfd
|
||||
global _libc_socket
|
||||
global _libc_bind
|
||||
global _libc_accept
|
||||
|
||||
if _libc_accept is None:
|
||||
_libc_accept = load_libc_function('accept', fail_if_missing=True)
|
||||
if _libc_socket is None:
|
||||
_libc_socket = load_libc_function('socket', fail_if_missing=True)
|
||||
if _libc_bind is None:
|
||||
_libc_bind = load_libc_function('bind', fail_if_missing=True)
|
||||
|
||||
# Do this at first call rather than at import time so that we don't use a
|
||||
# file descriptor on systems that aren't using any MD5 sockets.
|
||||
if _bound_md5_sockfd is None:
|
||||
sockaddr_setup = sockaddr_alg(
|
||||
AF_ALG,
|
||||
(ord('h'), ord('a'), ord('s'), ord('h'), 0),
|
||||
0, 0,
|
||||
(ord('m'), ord('d'), ord('5'), 0))
|
||||
hash_sockfd = _libc_socket(ctypes.c_int(AF_ALG),
|
||||
ctypes.c_int(socket.SOCK_SEQPACKET),
|
||||
ctypes.c_int(0))
|
||||
if hash_sockfd < 0:
|
||||
raise IOError(ctypes.get_errno(),
|
||||
"Failed to initialize MD5 socket")
|
||||
|
||||
bind_result = _libc_bind(ctypes.c_int(hash_sockfd),
|
||||
ctypes.pointer(sockaddr_setup),
|
||||
ctypes.c_int(ctypes.sizeof(sockaddr_alg)))
|
||||
if bind_result < 0:
|
||||
os.close(hash_sockfd)
|
||||
raise IOError(ctypes.get_errno(), "Failed to bind MD5 socket")
|
||||
|
||||
_bound_md5_sockfd = hash_sockfd
|
||||
|
||||
md5_sockfd = _libc_accept(ctypes.c_int(_bound_md5_sockfd), None, 0)
|
||||
if md5_sockfd < 0:
|
||||
raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket")
|
||||
|
||||
return md5_sockfd
|
||||
|
||||
|
||||
# Flags for splice() and tee()
|
||||
SPLICE_F_MOVE = 1
|
||||
SPLICE_F_NONBLOCK = 2
|
||||
SPLICE_F_MORE = 4
|
||||
SPLICE_F_GIFT = 8
|
||||
|
||||
|
||||
def splice(fd_in, off_in, fd_out, off_out, length, flags):
|
||||
"""
|
||||
Calls splice - a Linux-specific syscall for zero-copy data movement.
|
||||
|
||||
On success, returns the number of bytes moved.
|
||||
|
||||
On failure where errno is EWOULDBLOCK, returns None.
|
||||
|
||||
On all other failures, raises IOError.
|
||||
"""
|
||||
global _libc_splice
|
||||
if _libc_splice is None:
|
||||
_libc_splice = load_libc_function('splice', fail_if_missing=True)
|
||||
|
||||
ret = _libc_splice(ctypes.c_int(fd_in), ctypes.c_long(off_in),
|
||||
ctypes.c_int(fd_out), ctypes.c_long(off_out),
|
||||
ctypes.c_int(length), ctypes.c_int(flags))
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
if err == errno.EWOULDBLOCK:
|
||||
return None
|
||||
else:
|
||||
raise IOError(err, "splice() failed: %s" % os.strerror(err))
|
||||
return ret
|
||||
|
||||
|
||||
def tee(fd_in, fd_out, length, flags):
|
||||
"""
|
||||
Calls tee - a Linux-specific syscall to let pipes share data.
|
||||
|
||||
On success, returns the number of bytes "copied".
|
||||
|
||||
On failure, raises IOError.
|
||||
"""
|
||||
global _libc_tee
|
||||
if _libc_tee is None:
|
||||
_libc_tee = load_libc_function('tee', fail_if_missing=True)
|
||||
|
||||
ret = _libc_tee(ctypes.c_int(fd_in), ctypes.c_int(fd_out),
|
||||
ctypes.c_int(length), ctypes.c_int(flags))
|
||||
if ret < 0:
|
||||
err = ctypes.get_errno()
|
||||
raise IOError(err, "tee() failed: %s" % os.strerror(err))
|
||||
return ret
|
||||
|
||||
|
||||
def system_has_splice():
|
||||
global _libc_splice
|
||||
try:
|
||||
_libc_splice = load_libc_function('splice', fail_if_missing=True)
|
||||
return True
|
||||
except AttributeError:
|
||||
return False
|
||||
|
@ -682,6 +682,10 @@ class ContainerBroker(DatabaseBroker):
|
||||
'storage_policy_index'}
|
||||
:param source: if defined, update incoming_sync with the source
|
||||
"""
|
||||
for item in item_list:
|
||||
if isinstance(item['name'], unicode):
|
||||
item['name'] = item['name'].encode('utf-8')
|
||||
|
||||
def _really_merge_items(conn):
|
||||
curs = conn.cursor()
|
||||
if self.get_db_version(conn) >= 1:
|
||||
|
21
swift/locale/en_GB/LC_MESSAGES/swift-log-critical.po
Normal file
21
swift/locale/en_GB/LC_MESSAGES/swift-log-critical.po
Normal file
@ -0,0 +1,21 @@
|
||||
# Translations template for heat.
|
||||
# Copyright (C) 2014 ORGANIZATION
|
||||
# This file is distributed under the same license as the heat project.
|
||||
#
|
||||
# Translators:
|
||||
# Andi Chandler <andi@gowling.com>, 2014
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: Swift\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2014-09-22 06:07+0000\n"
|
||||
"PO-Revision-Date: 2014-07-25 15:03+0000\n"
|
||||
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
|
||||
"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/"
|
||||
"swift/language/en_GB/)\n"
|
||||
"Language: en_GB\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
|
swift/locale/en_GB/LC_MESSAGES/swift-log-error.po (new file, 21 lines)
@ -0,0 +1,21 @@
|
||||
# Translations template for heat.
|
||||
# Copyright (C) 2014 ORGANIZATION
|
||||
# This file is distributed under the same license as the heat project.
|
||||
#
|
||||
# Translators:
|
||||
# Andi Chandler <andi@gowling.com>, 2014
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: Swift\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2014-09-22 06:07+0000\n"
|
||||
"PO-Revision-Date: 2014-07-25 23:08+0000\n"
|
||||
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
|
||||
"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/"
|
||||
"swift/language/en_GB/)\n"
|
||||
"Language: en_GB\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
|
swift/locale/en_GB/LC_MESSAGES/swift-log-info.po (new file, 21 lines)
@ -0,0 +1,21 @@
|
||||
# Translations template for heat.
|
||||
# Copyright (C) 2014 ORGANIZATION
|
||||
# This file is distributed under the same license as the heat project.
|
||||
#
|
||||
# Translators:
|
||||
# Andi Chandler <andi@gowling.com>, 2014
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: Swift\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2014-09-22 06:07+0000\n"
|
||||
"PO-Revision-Date: 2014-07-25 15:03+0000\n"
|
||||
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
|
||||
"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/"
|
||||
"swift/language/en_GB/)\n"
|
||||
"Language: en_GB\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
|
swift/locale/en_GB/LC_MESSAGES/swift-log-warning.po (new file, 21 lines)
@ -0,0 +1,21 @@
|
||||
# Translations template for heat.
|
||||
# Copyright (C) 2014 ORGANIZATION
|
||||
# This file is distributed under the same license as the heat project.
|
||||
#
|
||||
# Translators:
|
||||
# Andi Chandler <andi@gowling.com>, 2014
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: Swift\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2014-09-22 06:07+0000\n"
|
||||
"PO-Revision-Date: 2014-07-25 15:02+0000\n"
|
||||
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
|
||||
"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/"
|
||||
"swift/language/en_GB/)\n"
|
||||
"Language: en_GB\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
|
swift/locale/ko_KR/LC_MESSAGES/swift-log-critical.po (new file, 21 lines)
@ -0,0 +1,21 @@
|
||||
# Translations template for heat.
|
||||
# Copyright (C) 2014 ORGANIZATION
|
||||
# This file is distributed under the same license as the heat project.
|
||||
#
|
||||
# Translators:
|
||||
# Mario Cho <hephaex@gmail.com>, 2014
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: Swift\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2014-09-22 06:07+0000\n"
|
||||
"PO-Revision-Date: 2014-09-18 02:40+0000\n"
|
||||
"Last-Translator: Mario Cho <hephaex@gmail.com>\n"
|
||||
"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/swift/"
|
||||
"language/ko_KR/)\n"
|
||||
"Language: ko_KR\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
"Plural-Forms: nplurals=1; plural=0;\n"
|
21
swift/locale/ko_KR/LC_MESSAGES/swift-log-error.po
Normal file
@ -0,0 +1,21 @@
|
||||
# Translations template for heat.
|
||||
# Copyright (C) 2014 ORGANIZATION
|
||||
# This file is distributed under the same license as the heat project.
|
||||
#
|
||||
# Translators:
|
||||
# Mario Cho <hephaex@gmail.com>, 2014
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: Swift\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2014-09-22 06:07+0000\n"
|
||||
"PO-Revision-Date: 2014-09-18 02:40+0000\n"
|
||||
"Last-Translator: Mario Cho <hephaex@gmail.com>\n"
|
||||
"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/swift/"
|
||||
"language/ko_KR/)\n"
|
||||
"Language: ko_KR\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
"Plural-Forms: nplurals=1; plural=0;\n"
|
21
swift/locale/ko_KR/LC_MESSAGES/swift-log-info.po
Normal file
@ -0,0 +1,21 @@
|
||||
# Translations template for heat.
|
||||
# Copyright (C) 2014 ORGANIZATION
|
||||
# This file is distributed under the same license as the heat project.
|
||||
#
|
||||
# Translators:
|
||||
# Mario Cho <hephaex@gmail.com>, 2014
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: Swift\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2014-09-22 06:07+0000\n"
|
||||
"PO-Revision-Date: 2014-09-18 02:40+0000\n"
|
||||
"Last-Translator: Mario Cho <hephaex@gmail.com>\n"
|
||||
"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/swift/"
|
||||
"language/ko_KR/)\n"
|
||||
"Language: ko_KR\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
"Plural-Forms: nplurals=1; plural=0;\n"
|
21
swift/locale/ko_KR/LC_MESSAGES/swift-log-warning.po
Normal file
@ -0,0 +1,21 @@
|
||||
# Translations template for heat.
|
||||
# Copyright (C) 2014 ORGANIZATION
|
||||
# This file is distributed under the same license as the heat project.
|
||||
#
|
||||
# Translators:
|
||||
# Mario Cho <hephaex@gmail.com>, 2014
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: Swift\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2014-09-22 06:07+0000\n"
|
||||
"PO-Revision-Date: 2014-09-18 02:40+0000\n"
|
||||
"Last-Translator: Mario Cho <hephaex@gmail.com>\n"
|
||||
"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/swift/"
|
||||
"language/ko_KR/)\n"
|
||||
"Language: ko_KR\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
"Plural-Forms: nplurals=1; plural=0;\n"
|
19
swift/locale/swift-log-critical.pot
Normal file
@ -0,0 +1,19 @@
|
||||
# Translations template for swift.
|
||||
# Copyright (C) 2014 ORGANIZATION
|
||||
# This file is distributed under the same license as the swift project.
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, 2014.
|
||||
#
|
||||
#, fuzzy
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: swift 2.1.0.77.g0d0c16d\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2014-09-22 06:07+0000\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=utf-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
|
19
swift/locale/swift-log-error.pot
Normal file
@ -0,0 +1,19 @@
|
||||
# Translations template for swift.
|
||||
# Copyright (C) 2014 ORGANIZATION
|
||||
# This file is distributed under the same license as the swift project.
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, 2014.
|
||||
#
|
||||
#, fuzzy
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: swift 2.1.0.77.g0d0c16d\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2014-09-22 06:07+0000\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=utf-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
|
19
swift/locale/swift-log-info.pot
Normal file
@ -0,0 +1,19 @@
|
||||
# Translations template for swift.
|
||||
# Copyright (C) 2014 ORGANIZATION
|
||||
# This file is distributed under the same license as the swift project.
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, 2014.
|
||||
#
|
||||
#, fuzzy
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: swift 2.1.0.77.g0d0c16d\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2014-09-22 06:07+0000\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=utf-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
|
19
swift/locale/swift-log-warning.pot
Normal file
@ -0,0 +1,19 @@
|
||||
# Translations template for swift.
|
||||
# Copyright (C) 2014 ORGANIZATION
|
||||
# This file is distributed under the same license as the swift project.
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, 2014.
|
||||
#
|
||||
#, fuzzy
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: swift 2.1.0.77.g0d0c16d\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2014-09-22 06:07+0000\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=utf-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
|
File diff suppressed because it is too large
@ -32,6 +32,7 @@ are also not considered part of the backend API.
|
||||
|
||||
import cPickle as pickle
|
||||
import errno
|
||||
import fcntl
|
||||
import os
|
||||
import time
|
||||
import uuid
|
||||
@ -46,6 +47,7 @@ from collections import defaultdict
|
||||
|
||||
from xattr import getxattr, setxattr
|
||||
from eventlet import Timeout
|
||||
from eventlet.hubs import trampoline
|
||||
|
||||
from swift import gettext_ as _
|
||||
from swift.common.constraints import check_mount
|
||||
@ -53,7 +55,9 @@ from swift.common.request_helpers import is_sys_meta
|
||||
from swift.common.utils import mkdirs, Timestamp, \
|
||||
storage_directory, hash_path, renamer, fallocate, fsync, \
|
||||
fdatasync, drop_buffer_cache, ThreadPool, lock_path, write_pickle, \
|
||||
config_true_value, listdir, split_path, ismount, remove_file
|
||||
config_true_value, listdir, split_path, ismount, remove_file, \
|
||||
get_md5_socket, system_has_splice, splice, tee, SPLICE_F_MORE, \
|
||||
F_SETPIPE_SZ
|
||||
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
|
||||
DiskFileCollision, DiskFileNoSpace, DiskFileDeviceUnavailable, \
|
||||
DiskFileDeleted, DiskFileError, DiskFileNotOpen, PathNotDir, \
|
||||
@ -62,10 +66,12 @@ from swift.common.swob import multi_range_iterator
|
||||
from swift.common.storage_policy import get_policy_string, POLICIES
|
||||
from functools import partial
|
||||
|
||||
|
||||
PICKLE_PROTOCOL = 2
|
||||
ONE_WEEK = 604800
|
||||
HASH_FILE = 'hashes.pkl'
|
||||
METADATA_KEY = 'user.swift.metadata'
|
||||
DROP_CACHE_WINDOW = 1024 * 1024
|
||||
# These are system-set metadata keys that cannot be changed with a POST.
|
||||
# They should be lowercase.
|
||||
DATAFILE_SYSTEM_META = set('content-length content-type deleted etag'.split())
|
||||
@ -75,6 +81,7 @@ TMP_BASE = 'tmp'
|
||||
get_data_dir = partial(get_policy_string, DATADIR_BASE)
|
||||
get_async_dir = partial(get_policy_string, ASYNCDIR_BASE)
|
||||
get_tmp_dir = partial(get_policy_string, TMP_BASE)
|
||||
MD5_OF_EMPTY_STRING = 'd41d8cd98f00b204e9800998ecf8427e'
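For reference, the new constant is simply the MD5 digest of zero bytes; a one-line sanity check:

    import hashlib
    # the well-known digest of an empty input
    assert hashlib.md5(b'').hexdigest() == 'd41d8cd98f00b204e9800998ecf8427e'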
|
||||
|
||||
|
||||
def read_metadata(fd):
|
||||
@ -498,6 +505,37 @@ class DiskFileManager(object):
|
||||
self.threadpools = defaultdict(
|
||||
lambda: ThreadPool(nthreads=threads_per_disk))
|
||||
|
||||
self.use_splice = False
|
||||
self.pipe_size = None
|
||||
|
||||
splice_available = system_has_splice()
|
||||
|
||||
conf_wants_splice = config_true_value(conf.get('splice', 'no'))
|
||||
# If the operator wants zero-copy with splice() but we don't have the
|
||||
# requisite kernel support, complain so they can go fix it.
|
||||
if conf_wants_splice and not splice_available:
|
||||
self.logger.warn(
|
||||
"Use of splice() requested (config says \"splice = %s\"), "
|
||||
"but the system does not support it. "
|
||||
"splice() will not be used." % conf.get('splice'))
|
||||
elif conf_wants_splice and splice_available:
|
||||
try:
|
||||
sockfd = get_md5_socket()
|
||||
os.close(sockfd)
|
||||
except IOError as err:
|
||||
# AF_ALG socket support was introduced in kernel 2.6.38; on
|
||||
# systems with older kernels (or custom-built kernels lacking
|
||||
# AF_ALG support), we can't use zero-copy.
|
||||
if err.errno != errno.EAFNOSUPPORT:
|
||||
raise
|
||||
self.logger.warn("MD5 sockets not supported. "
|
||||
"splice() will not be used.")
|
||||
else:
|
||||
self.use_splice = True
|
||||
with open('/proc/sys/fs/pipe-max-size') as f:
|
||||
max_pipe_size = int(f.read())
|
||||
self.pipe_size = min(max_pipe_size, self.disk_chunk_size)
|
||||
|
||||
def construct_dev_path(self, device):
|
||||
"""
|
||||
Construct the path to a device without checking if it is mounted.
|
||||
@ -564,7 +602,9 @@ class DiskFileManager(object):
|
||||
raise DiskFileDeviceUnavailable()
|
||||
return DiskFile(self, dev_path, self.threadpools[device],
|
||||
partition, account, container, obj,
|
||||
policy_idx=policy_idx, **kwargs)
|
||||
policy_idx=policy_idx,
|
||||
use_splice=self.use_splice, pipe_size=self.pipe_size,
|
||||
**kwargs)
|
||||
|
||||
def object_audit_location_generator(self, device_dirs=None):
|
||||
return object_audit_location_generator(self.devices, self.mount_check,
|
||||
@ -830,11 +870,13 @@ class DiskFileReader(object):
|
||||
:param device_path: on-disk device path, used when quarantining an obj
|
||||
:param logger: logger caller wants this object to use
|
||||
:param quarantine_hook: 1-arg callable called w/reason when quarantined
|
||||
:param use_splice: if true, use zero-copy splice() to send data
|
||||
:param pipe_size: size of pipe buffer used in zero-copy operations
|
||||
:param keep_cache: should resulting reads be kept in the buffer cache
|
||||
"""
|
||||
def __init__(self, fp, data_file, obj_size, etag, threadpool,
|
||||
disk_chunk_size, keep_cache_size, device_path, logger,
|
||||
quarantine_hook, keep_cache=False):
|
||||
quarantine_hook, use_splice, pipe_size, keep_cache=False):
|
||||
# Parameter tracking
|
||||
self._fp = fp
|
||||
self._data_file = data_file
|
||||
@ -845,6 +887,8 @@ class DiskFileReader(object):
|
||||
self._device_path = device_path
|
||||
self._logger = logger
|
||||
self._quarantine_hook = quarantine_hook
|
||||
self._use_splice = use_splice
|
||||
self._pipe_size = pipe_size
|
||||
if keep_cache:
|
||||
# Caller suggests we keep this in cache, only do it if the
|
||||
# object's size is less than the maximum.
|
||||
@ -857,6 +901,7 @@ class DiskFileReader(object):
|
||||
self._bytes_read = 0
|
||||
self._started_at_0 = False
|
||||
self._read_to_eof = False
|
||||
self._md5_of_sent_bytes = None
|
||||
self._suppress_file_closing = False
|
||||
self._quarantined_dir = None
|
||||
|
||||
@ -877,7 +922,7 @@ class DiskFileReader(object):
|
||||
if self._iter_etag:
|
||||
self._iter_etag.update(chunk)
|
||||
self._bytes_read += len(chunk)
|
||||
if self._bytes_read - dropped_cache > (1024 * 1024):
|
||||
if self._bytes_read - dropped_cache > DROP_CACHE_WINDOW:
|
||||
self._drop_cache(self._fp.fileno(), dropped_cache,
|
||||
self._bytes_read - dropped_cache)
|
||||
dropped_cache = self._bytes_read
|
||||
@ -891,6 +936,109 @@ class DiskFileReader(object):
|
||||
if not self._suppress_file_closing:
|
||||
self.close()
|
||||
|
||||
def can_zero_copy_send(self):
|
||||
return self._use_splice
|
||||
|
||||
def zero_copy_send(self, wsockfd):
|
||||
"""
|
||||
Does some magic with splice() and tee() to move stuff from disk to
|
||||
network without ever touching userspace.
|
||||
|
||||
:param wsockfd: file descriptor (integer) of the socket out which to
|
||||
send data
|
||||
"""
|
||||
# Note: if we ever add support for zero-copy ranged GET responses,
|
||||
# we'll have to make this conditional.
|
||||
self._started_at_0 = True
|
||||
|
||||
rfd = self._fp.fileno()
|
||||
client_rpipe, client_wpipe = os.pipe()
|
||||
hash_rpipe, hash_wpipe = os.pipe()
|
||||
md5_sockfd = get_md5_socket()
|
||||
|
||||
# The actual amount allocated to the pipe may be rounded up to the
|
||||
# nearest multiple of the page size. If we have the memory allocated,
|
||||
# we may as well use it.
|
||||
#
|
||||
# Note: this will raise IOError on failure, so we don't bother
|
||||
# checking the return value.
|
||||
pipe_size = fcntl.fcntl(client_rpipe, F_SETPIPE_SZ, self._pipe_size)
|
||||
fcntl.fcntl(hash_rpipe, F_SETPIPE_SZ, pipe_size)
|
||||
|
||||
dropped_cache = 0
|
||||
self._bytes_read = 0
|
||||
try:
|
||||
while True:
|
||||
# Read data from disk to pipe
|
||||
bytes_in_pipe = self._threadpool.run_in_thread(
|
||||
splice, rfd, 0, client_wpipe, 0, pipe_size, 0)
|
||||
if bytes_in_pipe == 0:
|
||||
self._read_to_eof = True
|
||||
self._drop_cache(rfd, dropped_cache,
|
||||
self._bytes_read - dropped_cache)
|
||||
break
|
||||
self._bytes_read += bytes_in_pipe
|
||||
|
||||
# "Copy" data from pipe A to pipe B (really just some pointer
|
||||
# manipulation in the kernel, not actual copying).
|
||||
bytes_copied = tee(client_rpipe, hash_wpipe, bytes_in_pipe, 0)
|
||||
if bytes_copied != bytes_in_pipe:
|
||||
# We teed data between two pipes of equal size, and the
|
||||
# destination pipe was empty. If, somehow, the destination
|
||||
# pipe was full before all the data was teed, we should
|
||||
# fail here. If we don't raise an exception, then we will
|
||||
# have the incorrect MD5 hash once the object has been
|
||||
# sent out, causing a false-positive quarantine.
|
||||
raise Exception("tee() failed: tried to move %d bytes, "
|
||||
"but only moved %d" %
|
||||
(bytes_in_pipe, bytes_copied))
|
||||
# Take the data and feed it into an in-kernel MD5 socket. The
|
||||
# MD5 socket hashes data that is written to it. Reading from
|
||||
# it yields the MD5 checksum of the written data.
|
||||
#
|
||||
# Note that we don't have to worry about splice() returning
|
||||
# None here (which happens on EWOULDBLOCK); we're splicing
|
||||
# $bytes_in_pipe bytes from a pipe with exactly that many
|
||||
# bytes in it, so read won't block, and we're splicing it into
|
||||
# an MD5 socket, which synchronously hashes any data sent to
|
||||
# it, so writing won't block either.
|
||||
hashed = splice(hash_rpipe, 0, md5_sockfd, 0,
|
||||
bytes_in_pipe, SPLICE_F_MORE)
|
||||
if hashed != bytes_in_pipe:
|
||||
raise Exception("md5 socket didn't take all the data? "
|
||||
"(tried to write %d, but wrote %d)" %
|
||||
(bytes_in_pipe, hashed))
|
||||
|
||||
while bytes_in_pipe > 0:
|
||||
sent = splice(client_rpipe, 0, wsockfd, 0,
|
||||
bytes_in_pipe, 0)
|
||||
if sent is None: # would have blocked
|
||||
trampoline(wsockfd, write=True)
|
||||
else:
|
||||
bytes_in_pipe -= sent
|
||||
|
||||
if self._bytes_read - dropped_cache > DROP_CACHE_WINDOW:
|
||||
self._drop_cache(rfd, dropped_cache,
|
||||
self._bytes_read - dropped_cache)
|
||||
dropped_cache = self._bytes_read
|
||||
finally:
|
||||
# Linux MD5 sockets return '00000000000000000000000000000000' for
|
||||
# the checksum if you didn't write any bytes to them, instead of
|
||||
# returning the correct value.
|
||||
if self._bytes_read > 0:
|
||||
bin_checksum = os.read(md5_sockfd, 16)
|
||||
hex_checksum = ''.join("%02x" % ord(c) for c in bin_checksum)
|
||||
else:
|
||||
hex_checksum = MD5_OF_EMPTY_STRING
|
||||
self._md5_of_sent_bytes = hex_checksum
|
||||
|
||||
os.close(client_rpipe)
|
||||
os.close(client_wpipe)
|
||||
os.close(hash_rpipe)
|
||||
os.close(hash_wpipe)
|
||||
os.close(md5_sockfd)
|
||||
self.close()
|
||||
|
||||
def app_iter_range(self, start, stop):
|
||||
"""Returns an iterator over the data file for range (start, stop)"""
|
||||
if start or start == 0:
|
||||
@ -942,15 +1090,18 @@ class DiskFileReader(object):
|
||||
|
||||
def _handle_close_quarantine(self):
|
||||
"""Check if file needs to be quarantined"""
|
||||
if self._iter_etag and not self._md5_of_sent_bytes:
|
||||
self._md5_of_sent_bytes = self._iter_etag.hexdigest()
|
||||
|
||||
if self._bytes_read != self._obj_size:
|
||||
self._quarantine(
|
||||
"Bytes read: %s, does not match metadata: %s" % (
|
||||
self._bytes_read, self._obj_size))
|
||||
elif self._iter_etag and \
|
||||
self._etag != self._iter_etag.hexdigest():
|
||||
elif self._md5_of_sent_bytes and \
|
||||
self._etag != self._md5_of_sent_bytes:
|
||||
self._quarantine(
|
||||
"ETag %s and file's md5 %s do not match" % (
|
||||
self._etag, self._iter_etag.hexdigest()))
|
||||
self._etag, self._md5_of_sent_bytes))
|
||||
|
||||
def close(self):
|
||||
"""
|
||||
@ -998,17 +1149,21 @@ class DiskFile(object):
|
||||
:param obj: object name for the object
|
||||
:param _datadir: override the full datadir otherwise constructed here
|
||||
:param policy_idx: used to get the data dir when constructing it here
|
||||
:param use_splice: if true, use zero-copy splice() to send data
|
||||
:param pipe_size: size of pipe buffer used in zero-copy operations
|
||||
"""
|
||||
|
||||
def __init__(self, mgr, device_path, threadpool, partition,
|
||||
account=None, container=None, obj=None, _datadir=None,
|
||||
policy_idx=0):
|
||||
policy_idx=0, use_splice=False, pipe_size=None):
|
||||
self._mgr = mgr
|
||||
self._device_path = device_path
|
||||
self._threadpool = threadpool or ThreadPool(nthreads=0)
|
||||
self._logger = mgr.logger
|
||||
self._disk_chunk_size = mgr.disk_chunk_size
|
||||
self._bytes_per_sync = mgr.bytes_per_sync
|
||||
self._use_splice = use_splice
|
||||
self._pipe_size = pipe_size
|
||||
if account and container and obj:
|
||||
self._name = '/' + '/'.join((account, container, obj))
|
||||
self._account = account
|
||||
@ -1377,7 +1532,8 @@ class DiskFile(object):
|
||||
self._fp, self._data_file, int(self._metadata['Content-Length']),
|
||||
self._metadata['ETag'], self._threadpool, self._disk_chunk_size,
|
||||
self._mgr.keep_cache_size, self._device_path, self._logger,
|
||||
quarantine_hook=_quarantine_hook, keep_cache=keep_cache)
|
||||
use_splice=self._use_splice, quarantine_hook=_quarantine_hook,
|
||||
pipe_size=self._pipe_size, keep_cache=keep_cache)
|
||||
# At this point the reader object is now responsible for closing
|
||||
# the file pointer.
|
||||
self._fp = None
|
||||
|
@ -25,7 +25,7 @@ import math
|
||||
from swift import gettext_ as _
|
||||
from hashlib import md5
|
||||
|
||||
from eventlet import sleep, Timeout
|
||||
from eventlet import sleep, wsgi, Timeout
|
||||
|
||||
from swift.common.utils import public, get_logger, \
|
||||
config_true_value, timing_stats, replication, \
|
||||
@ -50,6 +50,19 @@ from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
|
||||
from swift.obj.diskfile import DATAFILE_SYSTEM_META, DiskFileManager
|
||||
|
||||
|
||||
class EventletPlungerString(str):
|
||||
"""
|
||||
Eventlet won't send headers until it's accumulated at least
|
||||
eventlet.wsgi.MINIMUM_CHUNK_SIZE bytes or the app iter is exhausted. If we
|
||||
want to send the response body behind Eventlet's back, perhaps with some
|
||||
zero-copy wizardry, then we have to unclog the plumbing in eventlet.wsgi
|
||||
to force the headers out, so we use an EventletPlungerString to empty out
|
||||
all of Eventlet's buffers.
|
||||
"""
|
||||
def __len__(self):
|
||||
return wsgi.MINIMUM_CHUNK_SIZE + 1
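In other words, the object is an empty string that lies about its length, so eventlet's wsgi layer believes it has already buffered more than MINIMUM_CHUNK_SIZE bytes and flushes the status line and headers at once. A tiny illustration of that property:

    plunger = EventletPlungerString()
    assert str.__len__(plunger) == 0                     # no actual payload
    assert len(plunger) == wsgi.MINIMUM_CHUNK_SIZE + 1   # but big enough to force a flush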
|
||||
|
||||
|
||||
class ObjectController(object):
|
||||
"""Implements the WSGI application for the Swift Object Server."""
|
||||
|
||||
@ -710,7 +723,57 @@ class ObjectController(object):
|
||||
slow = self.slow - trans_time
|
||||
if slow > 0:
|
||||
sleep(slow)
|
||||
return res(env, start_response)
|
||||
|
||||
# To be able to zero-copy send the object, we need a few things.
|
||||
# First, we have to be responding successfully to a GET, or else we're
|
||||
# not sending the object. Second, we have to be able to extract the
|
||||
# socket file descriptor from the WSGI input object. Third, the
|
||||
# diskfile has to support zero-copy send.
|
||||
#
|
||||
# There's a good chance that this could work for 206 responses too,
|
||||
# but the common case is sending the whole object, so we'll start
|
||||
# there.
|
||||
if req.method == 'GET' and res.status_int == 200 and \
|
||||
isinstance(env['wsgi.input'], wsgi.Input):
|
||||
app_iter = getattr(res, 'app_iter', None)
|
||||
checker = getattr(app_iter, 'can_zero_copy_send', None)
|
||||
if checker and checker():
|
||||
# For any kind of zero-copy thing like sendfile or splice, we
|
||||
# need the file descriptor. Eventlet doesn't provide a clean
|
||||
# way of getting that, so we resort to this.
|
||||
wsock = env['wsgi.input'].get_socket()
|
||||
wsockfd = wsock.fileno()
|
||||
|
||||
# Don't call zero_copy_send() until after we force the HTTP
|
||||
# headers out of Eventlet and into the socket.
|
||||
def zero_copy_iter():
|
||||
# If possible, set TCP_CORK so that headers don't
|
||||
# immediately go on the wire, but instead, wait for some
|
||||
# response body to make the TCP frames as large as
|
||||
# possible (and hence as few packets as possible).
|
||||
#
|
||||
# On non-Linux systems, we might consider TCP_NODELAY, but
|
||||
# since the only known zero-copy-capable diskfile uses
|
||||
# Linux-specific syscalls, we'll defer that work until
|
||||
# someone needs it.
|
||||
if hasattr(socket, 'TCP_CORK'):
|
||||
wsock.setsockopt(socket.IPPROTO_TCP,
|
||||
socket.TCP_CORK, 1)
|
||||
yield EventletPlungerString()
|
||||
try:
|
||||
app_iter.zero_copy_send(wsockfd)
|
||||
except Exception:
|
||||
self.logger.exception("zero_copy_send() blew up")
|
||||
raise
|
||||
yield ''
|
||||
|
||||
# Get headers ready to go out
|
||||
res(env, start_response)
|
||||
return zero_copy_iter()
|
||||
else:
|
||||
return res(env, start_response)
|
||||
else:
|
||||
return res(env, start_response)
|
||||
|
||||
|
||||
def global_conf_callback(preloaded_app_conf, global_conf):
|
||||
|
@ -388,7 +388,7 @@ def _set_info_cache(app, env, account, container, resp):
|
||||
else:
|
||||
cache_time = None
|
||||
|
||||
# Next actually set both memcache and the env chache
|
||||
# Next actually set both memcache and the env cache
|
||||
memcache = getattr(app, 'memcache', None) or env.get('swift.cache')
|
||||
if not cache_time:
|
||||
env.pop(env_key, None)
|
||||
|
@ -35,7 +35,7 @@ password3 = testing3
|
||||
|
||||
collate = C
|
||||
|
||||
# Only necessary if a pre-exising server uses self-signed certificate
|
||||
# Only necessary if a pre-existing server uses self-signed certificate
|
||||
insecure = no
|
||||
|
||||
[unit_test]
|
||||
|
@ -382,7 +382,8 @@ class FakeLogger(logging.Logger):
|
||||
|
||||
def _clear(self):
|
||||
self.log_dict = defaultdict(list)
|
||||
self.lines_dict = defaultdict(list)
|
||||
self.lines_dict = {'critical': [], 'error': [], 'info': [],
|
||||
'warning': [], 'debug': []}
|
||||
|
||||
def _store_in(store_name):
|
||||
def stub_fn(self, *args, **kwargs):
|
||||
@ -396,8 +397,17 @@ class FakeLogger(logging.Logger):
|
||||
return stub_fn
|
||||
|
||||
def get_lines_for_level(self, level):
|
||||
if level not in self.lines_dict:
|
||||
raise KeyError(
|
||||
"Invalid log level '%s'; valid levels are %s" %
|
||||
(level,
|
||||
', '.join("'%s'" % lvl for lvl in sorted(self.lines_dict))))
|
||||
return self.lines_dict[level]
|
||||
|
||||
def all_log_lines(self):
|
||||
return dict((level, msgs) for level, msgs in self.lines_dict.items()
|
||||
if len(msgs) > 0)
|
||||
|
||||
error = _store_and_log_in('error', logging.ERROR)
|
||||
info = _store_and_log_in('info', logging.INFO)
|
||||
warning = _store_and_log_in('warning', logging.WARNING)
|
||||
|
@ -13,6 +13,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from collections import defaultdict
|
||||
import itertools
|
||||
import unittest
|
||||
import mock
|
||||
import time
|
||||
@ -23,7 +25,11 @@ from shutil import rmtree
|
||||
from eventlet import Timeout
|
||||
|
||||
from swift.account import auditor
|
||||
from test.unit import FakeLogger
|
||||
from swift.common.storage_policy import POLICIES
|
||||
from swift.common.utils import Timestamp
|
||||
from test.unit import debug_logger, patch_policies, with_tempdir
|
||||
from test.unit.account.test_backend import (
|
||||
AccountBrokerPreTrackContainerCountSetup)
|
||||
|
||||
|
||||
class FakeAccountBroker(object):
|
||||
@ -37,16 +43,22 @@ class FakeAccountBroker(object):
|
||||
|
||||
def get_info(self):
|
||||
if self.file.startswith('fail'):
|
||||
raise ValueError
|
||||
raise ValueError()
|
||||
if self.file.startswith('true'):
|
||||
return 'ok'
|
||||
return defaultdict(int)
|
||||
|
||||
def get_policy_stats(self, **kwargs):
|
||||
if self.file.startswith('fail'):
|
||||
raise ValueError()
|
||||
if self.file.startswith('true'):
|
||||
return defaultdict(int)
|
||||
|
||||
|
||||
class TestAuditor(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.testdir = os.path.join(mkdtemp(), 'tmp_test_account_auditor')
|
||||
self.logger = FakeLogger()
|
||||
self.logger = debug_logger()
|
||||
rmtree(self.testdir, ignore_errors=1)
|
||||
os.mkdir(self.testdir)
|
||||
fnames = ['true1.db', 'true2.db', 'true3.db',
|
||||
@ -69,9 +81,7 @@ class TestAuditor(unittest.TestCase):
|
||||
|
||||
def sleep(self, sec):
|
||||
self.times += 1
|
||||
if self.times < sleep_times:
|
||||
time.sleep(0.1)
|
||||
else:
|
||||
if self.times >= sleep_times:
|
||||
# stop forever by an error
|
||||
raise ValueError()
|
||||
|
||||
@ -79,7 +89,7 @@ class TestAuditor(unittest.TestCase):
|
||||
return time.time()
|
||||
|
||||
conf = {}
|
||||
test_auditor = auditor.AccountAuditor(conf)
|
||||
test_auditor = auditor.AccountAuditor(conf, logger=self.logger)
|
||||
|
||||
with mock.patch('swift.account.auditor.time', FakeTime()):
|
||||
def fake_audit_location_generator(*args, **kwargs):
|
||||
@ -106,7 +116,7 @@ class TestAuditor(unittest.TestCase):
|
||||
@mock.patch('swift.account.auditor.AccountBroker', FakeAccountBroker)
|
||||
def test_run_once(self):
|
||||
conf = {}
|
||||
test_auditor = auditor.AccountAuditor(conf)
|
||||
test_auditor = auditor.AccountAuditor(conf, logger=self.logger)
|
||||
|
||||
def fake_audit_location_generator(*args, **kwargs):
|
||||
files = os.listdir(self.testdir)
|
||||
@ -121,7 +131,7 @@ class TestAuditor(unittest.TestCase):
|
||||
@mock.patch('swift.account.auditor.AccountBroker', FakeAccountBroker)
|
||||
def test_one_audit_pass(self):
|
||||
conf = {}
|
||||
test_auditor = auditor.AccountAuditor(conf)
|
||||
test_auditor = auditor.AccountAuditor(conf, logger=self.logger)
|
||||
|
||||
def fake_audit_location_generator(*args, **kwargs):
|
||||
files = os.listdir(self.testdir)
|
||||
@ -138,7 +148,7 @@ class TestAuditor(unittest.TestCase):
|
||||
@mock.patch('swift.account.auditor.AccountBroker', FakeAccountBroker)
|
||||
def test_account_auditor(self):
|
||||
conf = {}
|
||||
test_auditor = auditor.AccountAuditor(conf)
|
||||
test_auditor = auditor.AccountAuditor(conf, logger=self.logger)
|
||||
files = os.listdir(self.testdir)
|
||||
for f in files:
|
||||
path = os.path.join(self.testdir, f)
|
||||
@ -146,5 +156,108 @@ class TestAuditor(unittest.TestCase):
|
||||
self.assertEqual(test_auditor.account_failures, 2)
|
||||
self.assertEqual(test_auditor.account_passes, 3)
|
||||
|
||||
|
||||
@patch_policies
|
||||
class TestAuditorRealBrokerMigration(
|
||||
AccountBrokerPreTrackContainerCountSetup, unittest.TestCase):
|
||||
|
||||
def test_db_migration(self):
|
||||
# add a few containers
|
||||
policies = itertools.cycle(POLICIES)
|
||||
num_containers = len(POLICIES) * 3
|
||||
per_policy_container_counts = defaultdict(int)
|
||||
for i in range(num_containers):
|
||||
name = 'test-container-%02d' % i
|
||||
policy = next(policies)
|
||||
self.broker.put_container(name, next(self.ts),
|
||||
0, 0, 0, int(policy))
|
||||
per_policy_container_counts[int(policy)] += 1
|
||||
|
||||
self.broker._commit_puts()
|
||||
self.assertEqual(num_containers,
|
||||
self.broker.get_info()['container_count'])
|
||||
|
||||
# still un-migrated
|
||||
self.assertUnmigrated(self.broker)
|
||||
|
||||
# run auditor, and validate migration
|
||||
conf = {'devices': self.tempdir, 'mount_check': False,
|
||||
'recon_cache_path': self.tempdir}
|
||||
test_auditor = auditor.AccountAuditor(conf, logger=debug_logger())
|
||||
test_auditor.run_once()
|
||||
|
||||
self.restore_account_broker()
|
||||
|
||||
broker = auditor.AccountBroker(self.db_path)
|
||||
# go after rows directly to avoid unintentional migration
|
||||
with broker.get() as conn:
|
||||
rows = conn.execute('''
|
||||
SELECT storage_policy_index, container_count
|
||||
FROM policy_stat
|
||||
''').fetchall()
|
||||
for policy_index, container_count in rows:
|
||||
self.assertEqual(container_count,
|
||||
per_policy_container_counts[policy_index])
|
||||
|
||||
|
||||
class TestAuditorRealBroker(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.logger = debug_logger()
|
||||
|
||||
@with_tempdir
|
||||
def test_db_validate_fails(self, tempdir):
|
||||
ts = (Timestamp(t).internal for t in itertools.count(int(time.time())))
|
||||
db_path = os.path.join(tempdir, 'sda', 'accounts',
|
||||
'0', '0', '0', 'test.db')
|
||||
broker = auditor.AccountBroker(db_path, account='a')
|
||||
broker.initialize(next(ts))
|
||||
# add a few containers
|
||||
policies = itertools.cycle(POLICIES)
|
||||
num_containers = len(POLICIES) * 3
|
||||
per_policy_container_counts = defaultdict(int)
|
||||
for i in range(num_containers):
|
||||
name = 'test-container-%02d' % i
|
||||
policy = next(policies)
|
||||
broker.put_container(name, next(ts), 0, 0, 0, int(policy))
|
||||
per_policy_container_counts[int(policy)] += 1
|
||||
|
||||
broker._commit_puts()
|
||||
self.assertEqual(broker.get_info()['container_count'], num_containers)
|
||||
|
||||
messed_up_policy = random.choice(list(POLICIES))
|
||||
|
||||
# now mess up a policy_stats table count
|
||||
with broker.get() as conn:
|
||||
conn.executescript('''
|
||||
UPDATE policy_stat
|
||||
SET container_count = container_count - 1
|
||||
WHERE storage_policy_index = %d;
|
||||
''' % int(messed_up_policy))
|
||||
|
||||
# validate it's messed up
|
||||
policy_stats = broker.get_policy_stats()
|
||||
self.assertEqual(
|
||||
policy_stats[int(messed_up_policy)]['container_count'],
|
||||
per_policy_container_counts[int(messed_up_policy)] - 1)
|
||||
|
||||
# do an audit
|
||||
conf = {'devices': tempdir, 'mount_check': False,
|
||||
'recon_cache_path': tempdir}
|
||||
test_auditor = auditor.AccountAuditor(conf, logger=self.logger)
|
||||
test_auditor.run_once()
|
||||
|
||||
# validate errors
|
||||
self.assertEqual(test_auditor.account_failures, 1)
|
||||
error_lines = test_auditor.logger.get_lines_for_level('error')
|
||||
self.assertEqual(len(error_lines), 1)
|
||||
error_message = error_lines[0]
|
||||
self.assert_(broker.db_file in error_message)
|
||||
self.assert_('container_count' in error_message)
|
||||
self.assert_('does not match' in error_message)
|
||||
self.assertEqual(test_auditor.logger.get_increment_counts(),
|
||||
{'failures': 1})
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
@ -15,7 +15,9 @@
|
||||
|
||||
""" Tests for swift.account.backend """
|
||||
|
||||
from collections import defaultdict
|
||||
import hashlib
|
||||
import json
|
||||
import unittest
|
||||
import pickle
|
||||
import os
|
||||
@ -579,6 +581,34 @@ class TestAccountBroker(unittest.TestCase):
|
||||
self.assertEqual(['a', 'b', 'c'],
|
||||
sorted([rec['name'] for rec in items]))
|
||||
|
||||
def test_merge_items_overwrite_unicode(self):
|
||||
snowman = u'\N{SNOWMAN}'.encode('utf-8')
|
||||
broker1 = AccountBroker(':memory:', account='a')
|
||||
broker1.initialize(Timestamp('1').internal, 0)
|
||||
id1 = broker1.get_info()['id']
|
||||
broker2 = AccountBroker(':memory:', account='a')
|
||||
broker2.initialize(Timestamp('1').internal, 0)
|
||||
broker1.put_container(snowman, Timestamp(2).internal, 0, 1, 100,
|
||||
POLICIES.default.idx)
|
||||
broker1.put_container('b', Timestamp(3).internal, 0, 0, 0,
|
||||
POLICIES.default.idx)
|
||||
broker2.merge_items(json.loads(json.dumps(broker1.get_items_since(
|
||||
broker2.get_sync(id1), 1000))), id1)
|
||||
broker1.put_container(snowman, Timestamp(4).internal, 0, 2, 200,
|
||||
POLICIES.default.idx)
|
||||
broker2.merge_items(json.loads(json.dumps(broker1.get_items_since(
|
||||
broker2.get_sync(id1), 1000))), id1)
|
||||
items = broker2.get_items_since(-1, 1000)
|
||||
self.assertEquals(['b', snowman],
|
||||
sorted([rec['name'] for rec in items]))
|
||||
items_by_name = dict((rec['name'], rec) for rec in items)
|
||||
|
||||
self.assertEqual(items_by_name[snowman]['object_count'], 2)
|
||||
self.assertEqual(items_by_name[snowman]['bytes_used'], 200)
|
||||
|
||||
self.assertEqual(items_by_name['b']['object_count'], 0)
|
||||
self.assertEqual(items_by_name['b']['bytes_used'], 0)
|
||||
|
||||
def test_load_old_pending_puts(self):
|
||||
# pending puts from pre-storage-policy account brokers won't contain
|
||||
# the storage policy index
|
||||
@ -634,9 +664,10 @@ class TestAccountBroker(unittest.TestCase):
|
||||
put_timestamp, 0,
|
||||
0, 0,
|
||||
policy.idx)
|
||||
|
||||
policy_stats = broker.get_policy_stats()
|
||||
stats = policy_stats[policy.idx]
|
||||
if 'container_count' in stats:
|
||||
self.assertEqual(stats['container_count'], 1)
|
||||
self.assertEqual(stats['object_count'], 0)
|
||||
self.assertEqual(stats['bytes_used'], 0)
|
||||
|
||||
@ -652,6 +683,8 @@ class TestAccountBroker(unittest.TestCase):
|
||||
|
||||
policy_stats = broker.get_policy_stats()
|
||||
stats = policy_stats[policy.idx]
|
||||
if 'container_count' in stats:
|
||||
self.assertEqual(stats['container_count'], 1)
|
||||
self.assertEqual(stats['object_count'], count)
|
||||
self.assertEqual(stats['bytes_used'], count)
|
||||
|
||||
@ -659,6 +692,8 @@ class TestAccountBroker(unittest.TestCase):
|
||||
for policy_index, stats in policy_stats.items():
|
||||
policy = POLICIES[policy_index]
|
||||
count = policy.idx * 100 # coupled with policy for test
|
||||
if 'container_count' in stats:
|
||||
self.assertEqual(stats['container_count'], 1)
|
||||
self.assertEqual(stats['object_count'], count)
|
||||
self.assertEqual(stats['bytes_used'], count)
|
||||
|
||||
@ -673,6 +708,8 @@ class TestAccountBroker(unittest.TestCase):
|
||||
|
||||
policy_stats = broker.get_policy_stats()
|
||||
stats = policy_stats[policy.idx]
|
||||
if 'container_count' in stats:
|
||||
self.assertEqual(stats['container_count'], 0)
|
||||
self.assertEqual(stats['object_count'], 0)
|
||||
self.assertEqual(stats['bytes_used'], 0)
|
||||
|
||||
@ -696,8 +733,12 @@ class TestAccountBroker(unittest.TestCase):
|
||||
|
||||
stats = broker.get_policy_stats()
|
||||
self.assertEqual(len(stats), 2)
|
||||
if 'container_count' in stats[0]:
|
||||
self.assertEqual(stats[0]['container_count'], 1)
|
||||
self.assertEqual(stats[0]['object_count'], 13)
|
||||
self.assertEqual(stats[0]['bytes_used'], 8156441)
|
||||
if 'container_count' in stats[1]:
|
||||
self.assertEqual(stats[1]['container_count'], 1)
|
||||
self.assertEqual(stats[1]['object_count'], 8)
|
||||
self.assertEqual(stats[1]['bytes_used'], 6085379)
|
||||
|
||||
@ -1001,8 +1042,12 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker):
|
||||
# we should have stats for both containers
|
||||
stats = broker.get_policy_stats()
|
||||
self.assertEqual(len(stats), 2)
|
||||
if 'container_count' in stats[0]:
|
||||
self.assertEqual(stats[0]['container_count'], 1)
|
||||
self.assertEqual(stats[0]['object_count'], 1)
|
||||
self.assertEqual(stats[0]['bytes_used'], 2)
|
||||
if 'container_count' in stats[1]:
|
||||
self.assertEqual(stats[1]['container_count'], 1)
|
||||
self.assertEqual(stats[1]['object_count'], 3)
|
||||
self.assertEqual(stats[1]['bytes_used'], 4)
|
||||
|
||||
@ -1014,8 +1059,12 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker):
|
||||
conn.commit()
|
||||
stats = broker.get_policy_stats()
|
||||
self.assertEqual(len(stats), 2)
|
||||
if 'container_count' in stats[0]:
|
||||
self.assertEqual(stats[0]['container_count'], 0)
|
||||
self.assertEqual(stats[0]['object_count'], 0)
|
||||
self.assertEqual(stats[0]['bytes_used'], 0)
|
||||
if 'container_count' in stats[1]:
|
||||
self.assertEqual(stats[1]['container_count'], 1)
|
||||
self.assertEqual(stats[1]['object_count'], 3)
|
||||
self.assertEqual(stats[1]['bytes_used'], 4)
|
||||
|
||||
@ -1081,3 +1130,351 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker):
|
||||
with broker.get() as conn:
|
||||
conn.execute('SELECT * FROM policy_stat')
|
||||
conn.execute('SELECT storage_policy_index FROM container')
|
||||
|
||||
|
||||
def pre_track_containers_create_policy_stat(self, conn):
|
||||
"""
|
||||
Copied from AccountBroker before the container_count column was
|
||||
added.
|
||||
Create policy_stat table which is specific to the account DB.
|
||||
Not a part of Pluggable Back-ends, internal to the baseline code.
|
||||
|
||||
:param conn: DB connection object
|
||||
"""
|
||||
conn.executescript("""
|
||||
CREATE TABLE policy_stat (
|
||||
storage_policy_index INTEGER PRIMARY KEY,
|
||||
object_count INTEGER DEFAULT 0,
|
||||
bytes_used INTEGER DEFAULT 0
|
||||
);
|
||||
INSERT OR IGNORE INTO policy_stat (
|
||||
storage_policy_index, object_count, bytes_used
|
||||
)
|
||||
SELECT 0, object_count, bytes_used
|
||||
FROM account_stat
|
||||
WHERE container_count > 0;
|
||||
""")
|
||||
|
||||
|
||||
def pre_track_containers_create_container_table(self, conn):
|
||||
"""
|
||||
Copied from AccountBroker before the container_count column was
|
||||
added (using old stat trigger script)
|
||||
Create container table which is specific to the account DB.
|
||||
|
||||
:param conn: DB connection object
|
||||
"""
|
||||
# revert to old trigger script to support one of the tests
|
||||
OLD_POLICY_STAT_TRIGGER_SCRIPT = """
|
||||
CREATE TRIGGER container_insert_ps AFTER INSERT ON container
|
||||
BEGIN
|
||||
INSERT OR IGNORE INTO policy_stat
|
||||
(storage_policy_index, object_count, bytes_used)
|
||||
VALUES (new.storage_policy_index, 0, 0);
|
||||
UPDATE policy_stat
|
||||
SET object_count = object_count + new.object_count,
|
||||
bytes_used = bytes_used + new.bytes_used
|
||||
WHERE storage_policy_index = new.storage_policy_index;
|
||||
END;
|
||||
CREATE TRIGGER container_delete_ps AFTER DELETE ON container
|
||||
BEGIN
|
||||
UPDATE policy_stat
|
||||
SET object_count = object_count - old.object_count,
|
||||
bytes_used = bytes_used - old.bytes_used
|
||||
WHERE storage_policy_index = old.storage_policy_index;
|
||||
END;
|
||||
|
||||
"""
|
||||
conn.executescript("""
|
||||
CREATE TABLE container (
|
||||
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
name TEXT,
|
||||
put_timestamp TEXT,
|
||||
delete_timestamp TEXT,
|
||||
object_count INTEGER,
|
||||
bytes_used INTEGER,
|
||||
deleted INTEGER DEFAULT 0,
|
||||
storage_policy_index INTEGER DEFAULT 0
|
||||
);
|
||||
|
||||
CREATE INDEX ix_container_deleted_name ON
|
||||
container (deleted, name);
|
||||
|
||||
CREATE TRIGGER container_insert AFTER INSERT ON container
|
||||
BEGIN
|
||||
UPDATE account_stat
|
||||
SET container_count = container_count + (1 - new.deleted),
|
||||
object_count = object_count + new.object_count,
|
||||
bytes_used = bytes_used + new.bytes_used,
|
||||
hash = chexor(hash, new.name,
|
||||
new.put_timestamp || '-' ||
|
||||
new.delete_timestamp || '-' ||
|
||||
new.object_count || '-' || new.bytes_used);
|
||||
END;
|
||||
|
||||
CREATE TRIGGER container_update BEFORE UPDATE ON container
|
||||
BEGIN
|
||||
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
|
||||
END;
|
||||
|
||||
|
||||
CREATE TRIGGER container_delete AFTER DELETE ON container
|
||||
BEGIN
|
||||
UPDATE account_stat
|
||||
SET container_count = container_count - (1 - old.deleted),
|
||||
object_count = object_count - old.object_count,
|
||||
bytes_used = bytes_used - old.bytes_used,
|
||||
hash = chexor(hash, old.name,
|
||||
old.put_timestamp || '-' ||
|
||||
old.delete_timestamp || '-' ||
|
||||
old.object_count || '-' || old.bytes_used);
|
||||
END;
|
||||
""" + OLD_POLICY_STAT_TRIGGER_SCRIPT)
|
||||
|
||||
|
||||
class AccountBrokerPreTrackContainerCountSetup(object):
|
||||
def assertUnmigrated(self, broker):
|
||||
with broker.get() as conn:
|
||||
try:
|
||||
conn.execute('''
|
||||
SELECT container_count FROM policy_stat
|
||||
''').fetchone()[0]
|
||||
except sqlite3.OperationalError as err:
|
||||
# confirm that the column really isn't there
|
||||
self.assert_('no such column: container_count' in str(err))
|
||||
else:
|
||||
self.fail('broker did not raise sqlite3.OperationalError '
|
||||
'trying to select container_count from policy_stat!')
|
||||
|
||||
def setUp(self):
|
||||
# use old version of policy_stat
|
||||
self._imported_create_policy_stat_table = \
|
||||
AccountBroker.create_policy_stat_table
|
||||
AccountBroker.create_policy_stat_table = \
|
||||
pre_track_containers_create_policy_stat
|
||||
# use old container table so we use old trigger for
|
||||
# updating policy_stat
|
||||
self._imported_create_container_table = \
|
||||
AccountBroker.create_container_table
|
||||
AccountBroker.create_container_table = \
|
||||
pre_track_containers_create_container_table
|
||||
|
||||
broker = AccountBroker(':memory:', account='a')
|
||||
broker.initialize(Timestamp('1').internal)
|
||||
self.assertUnmigrated(broker)
|
||||
|
||||
self.tempdir = mkdtemp()
|
||||
self.ts = (Timestamp(t).internal for t in itertools.count(int(time())))
|
||||
|
||||
self.db_path = os.path.join(self.tempdir, 'sda', 'accounts',
|
||||
'0', '0', '0', 'test.db')
|
||||
self.broker = AccountBroker(self.db_path, account='a')
|
||||
self.broker.initialize(next(self.ts))
|
||||
|
||||
# Common sanity-check that our starting, pre-migration state correctly
|
||||
# does not have the container_count column.
|
||||
self.assertUnmigrated(self.broker)
|
||||
|
||||
def tearDown(self):
|
||||
rmtree(self.tempdir, ignore_errors=True)
|
||||
|
||||
self.restore_account_broker()
|
||||
|
||||
broker = AccountBroker(':memory:', account='a')
|
||||
broker.initialize(Timestamp('1').internal)
|
||||
with broker.get() as conn:
|
||||
conn.execute('SELECT container_count FROM policy_stat')
|
||||
|
||||
def restore_account_broker(self):
|
||||
AccountBroker.create_policy_stat_table = \
|
||||
self._imported_create_policy_stat_table
|
||||
AccountBroker.create_container_table = \
|
||||
self._imported_create_container_table
|
||||
|
||||
|
||||
@patch_policies([
|
||||
StoragePolicy.from_conf(
|
||||
REPL_POLICY, {'idx': 0, 'name': 'zero', 'is_default': True}),
|
||||
StoragePolicy.from_conf(
|
||||
REPL_POLICY, {'idx': 1, 'name': 'one'}),
|
||||
StoragePolicy.from_conf(
|
||||
REPL_POLICY, {'idx': 2, 'name': 'two'}),
|
||||
StoragePolicy.from_conf(
|
||||
REPL_POLICY, {'idx': 37, 'name': 'three'})
|
||||
])
|
||||
class TestAccountBrokerBeforePerPolicyContainerTrack(
|
||||
AccountBrokerPreTrackContainerCountSetup, TestAccountBroker):
|
||||
"""
|
||||
Tests for AccountBroker against databases created before
|
||||
the container_count column was added to the policy_stat table.
|
||||
"""
|
||||
|
||||
def test_policy_table_cont_count_do_migrations(self):
|
||||
# add a few containers
|
||||
num_containers = 8
|
||||
policies = itertools.cycle(POLICIES)
|
||||
per_policy_container_counts = defaultdict(int)
|
||||
|
||||
# add a few container entries
|
||||
for i in range(num_containers):
|
||||
name = 'test-container-%02d' % i
|
||||
policy = next(policies)
|
||||
self.broker.put_container(name, next(self.ts),
|
||||
0, 0, 0, int(policy))
|
||||
per_policy_container_counts[int(policy)] += 1
|
||||
|
||||
total_container_count = self.broker.get_info()['container_count']
|
||||
self.assertEqual(total_container_count, num_containers)
|
||||
|
||||
# still un-migrated
|
||||
self.assertUnmigrated(self.broker)
|
||||
|
||||
policy_stats = self.broker.get_policy_stats()
|
||||
self.assertEqual(len(policy_stats), len(per_policy_container_counts))
|
||||
for stats in policy_stats.values():
|
||||
self.assertEqual(stats['object_count'], 0)
|
||||
self.assertEqual(stats['bytes_used'], 0)
|
||||
# un-migrated dbs should not return container_count
|
||||
self.assertFalse('container_count' in stats)
|
||||
|
||||
# now force the migration
|
||||
policy_stats = self.broker.get_policy_stats(do_migrations=True)
|
||||
self.assertEqual(len(policy_stats), len(per_policy_container_counts))
|
||||
for policy_index, stats in policy_stats.items():
|
||||
self.assertEqual(stats['object_count'], 0)
|
||||
self.assertEqual(stats['bytes_used'], 0)
|
||||
self.assertEqual(stats['container_count'],
|
||||
per_policy_container_counts[policy_index])
|
||||
|
||||
def test_policy_table_cont_count_update_get_stats(self):
|
||||
# add a few container entries
|
||||
for policy in POLICIES:
|
||||
for i in range(0, policy.idx + 1):
|
||||
container_name = 'c%s_0' % policy.idx
|
||||
self.broker.put_container('c%s_%s' % (policy.idx, i),
|
||||
0, 0, 0, 0, policy.idx)
|
||||
# _commit_puts_stale_ok() called by get_policy_stats()
|
||||
|
||||
# calling get_policy_stats() with do_migrations will alter the table
|
||||
# and populate it based on what's in the container table now
|
||||
stats = self.broker.get_policy_stats(do_migrations=True)
|
||||
|
||||
# now confirm that the column was created
|
||||
with self.broker.get() as conn:
|
||||
conn.execute('SELECT container_count FROM policy_stat')
|
||||
|
||||
# confirm stats reporting back correctly
|
||||
self.assertEqual(len(stats), 4)
|
||||
for policy in POLICIES:
|
||||
self.assertEqual(stats[policy.idx]['container_count'],
|
||||
policy.idx + 1)
|
||||
|
||||
# now delete one from each policy and check the stats
|
||||
with self.broker.get() as conn:
|
||||
for policy in POLICIES:
|
||||
container_name = 'c%s_0' % policy.idx
|
||||
conn.execute('''
|
||||
DELETE FROM container
|
||||
WHERE name = ?
|
||||
''', (container_name,))
|
||||
conn.commit()
|
||||
stats = self.broker.get_policy_stats()
|
||||
self.assertEqual(len(stats), 4)
|
||||
for policy in POLICIES:
|
||||
self.assertEqual(stats[policy.idx]['container_count'],
|
||||
policy.idx)
|
||||
|
||||
# now put them back and make sure things are still cool
|
||||
for policy in POLICIES:
|
||||
container_name = 'c%s_0' % policy.idx
|
||||
self.broker.put_container(container_name, 0, 0, 0, 0, policy.idx)
|
||||
# _commit_puts_stale_ok() called by get_policy_stats()
|
||||
|
||||
# confirm stats reporting back correctly
|
||||
stats = self.broker.get_policy_stats()
|
||||
self.assertEqual(len(stats), 4)
|
||||
for policy in POLICIES:
|
||||
self.assertEqual(stats[policy.idx]['container_count'],
|
||||
policy.idx + 1)
|
||||
|
||||
def test_per_policy_cont_count_migration_with_deleted(self):
|
||||
num_containers = 15
|
||||
policies = itertools.cycle(POLICIES)
|
||||
container_policy_map = {}
|
||||
|
||||
# add a few container entries
|
||||
for i in range(num_containers):
|
||||
name = 'test-container-%02d' % i
|
||||
policy = next(policies)
|
||||
self.broker.put_container(name, next(self.ts),
|
||||
0, 0, 0, int(policy))
|
||||
# keep track of stub container policies
|
||||
container_policy_map[name] = policy
|
||||
|
||||
# delete about half of the containers
|
||||
for i in range(0, num_containers, 2):
|
||||
name = 'test-container-%02d' % i
|
||||
policy = container_policy_map[name]
|
||||
self.broker.put_container(name, 0, next(self.ts),
|
||||
0, 0, int(policy))
|
||||
|
||||
total_container_count = self.broker.get_info()['container_count']
|
||||
self.assertEqual(total_container_count, num_containers / 2)
|
||||
|
||||
# trigger migration
|
||||
policy_info = self.broker.get_policy_stats(do_migrations=True)
|
||||
self.assertEqual(len(policy_info), min(num_containers, len(POLICIES)))
|
||||
policy_container_count = sum(p['container_count'] for p in
|
||||
policy_info.values())
|
||||
self.assertEqual(total_container_count, policy_container_count)
|
||||
|
||||
def test_per_policy_cont_count_migration_with_single_policy(self):
|
||||
num_containers = 100
|
||||
|
||||
with patch_policies(legacy_only=True):
|
||||
policy = POLICIES[0]
|
||||
# add a few container entries
|
||||
for i in range(num_containers):
|
||||
name = 'test-container-%02d' % i
|
||||
self.broker.put_container(name, next(self.ts),
|
||||
0, 0, 0, int(policy))
|
||||
# delete about half of the containers
|
||||
for i in range(0, num_containers, 2):
|
||||
name = 'test-container-%02d' % i
|
||||
self.broker.put_container(name, 0, next(self.ts),
|
||||
0, 0, int(policy))
|
||||
|
||||
total_container_count = self.broker.get_info()['container_count']
|
||||
# trigger migration
|
||||
policy_info = self.broker.get_policy_stats(do_migrations=True)
|
||||
|
||||
self.assertEqual(total_container_count, num_containers / 2)
|
||||
|
||||
self.assertEqual(len(policy_info), 1)
|
||||
policy_container_count = sum(p['container_count'] for p in
|
||||
policy_info.values())
|
||||
self.assertEqual(total_container_count, policy_container_count)
|
||||
|
||||
def test_per_policy_cont_count_migration_impossible(self):
|
||||
with patch_policies(legacy_only=True):
|
||||
# add a container for the legacy policy
|
||||
policy = POLICIES[0]
|
||||
self.broker.put_container('test-legacy-container', next(self.ts),
|
||||
0, 0, 0, int(policy))
|
||||
|
||||
# now create an impossible situation by adding a container for a
|
||||
# policy index that doesn't exist
|
||||
non_existant_policy_index = int(policy) + 1
|
||||
self.broker.put_container('test-non-existant-policy',
|
||||
next(self.ts), 0, 0, 0,
|
||||
non_existant_policy_index)
|
||||
|
||||
total_container_count = self.broker.get_info()['container_count']
|
||||
|
||||
# trigger migration
|
||||
policy_info = self.broker.get_policy_stats(do_migrations=True)
|
||||
|
||||
self.assertEqual(total_container_count, 2)
|
||||
self.assertEqual(len(policy_info), 2)
|
||||
for policy_stat in policy_info.values():
|
||||
self.assertEqual(policy_stat['container_count'], 1)
|
||||
|
@ -1709,6 +1709,9 @@ class TestAccountController(unittest.TestCase):
|
||||
self.assertEquals(
|
||||
resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' %
|
||||
POLICIES[0].name], '4')
|
||||
self.assertEquals(
|
||||
resp.headers['X-Account-Storage-Policy-%s-Container-Count' %
|
||||
POLICIES[0].name], '1')
|
||||
|
||||
def test_policy_stats_non_default(self):
|
||||
ts = itertools.count()
|
||||
@ -1744,6 +1747,9 @@ class TestAccountController(unittest.TestCase):
|
||||
self.assertEquals(
|
||||
resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' %
|
||||
policy.name], '4')
|
||||
self.assertEquals(
|
||||
resp.headers['X-Account-Storage-Policy-%s-Container-Count' %
|
||||
policy.name], '1')
|
||||
|
||||
def test_empty_policy_stats(self):
|
||||
ts = itertools.count()
|
||||
|
@ -117,9 +117,70 @@ class TestAccountUtils(unittest.TestCase):
|
||||
})
|
||||
for policy in POLICIES:
|
||||
prefix = 'X-Account-Storage-Policy-%s-' % policy.name
|
||||
expected[prefix + 'Container-Count'] = 1
|
||||
expected[prefix + 'Object-Count'] = int(policy)
|
||||
expected[prefix + 'Bytes-Used'] = int(policy) * 10
|
||||
resp_headers = utils.get_response_headers(broker)
|
||||
per_policy_container_headers = [
|
||||
h for h in resp_headers if
|
||||
h.lower().startswith('x-account-storage-policy-') and
|
||||
h.lower().endswith('-container-count')]
|
||||
self.assertTrue(per_policy_container_headers)
|
||||
for key, value in resp_headers.items():
|
||||
expected_value = expected.pop(key)
|
||||
self.assertEqual(expected_value, str(value),
|
||||
'value for %r was %r not %r' % (
|
||||
key, value, expected_value))
|
||||
self.assertFalse(expected)
|
||||
|
||||
@patch_policies
|
||||
def test_get_response_headers_with_legacy_data(self):
|
||||
broker = backend.AccountBroker(':memory:', account='a')
|
||||
now = time.time()
|
||||
with mock.patch('time.time', new=lambda: now):
|
||||
broker.initialize(Timestamp(now).internal)
|
||||
# add some container data
|
||||
ts = (Timestamp(t).internal for t in itertools.count(int(now)))
|
||||
total_containers = 0
|
||||
total_objects = 0
|
||||
total_bytes = 0
|
||||
for policy in POLICIES:
|
||||
delete_timestamp = ts.next()
|
||||
put_timestamp = ts.next()
|
||||
object_count = int(policy)
|
||||
bytes_used = int(policy) * 10
|
||||
broker.put_container('c-%s' % policy.name, put_timestamp,
|
||||
delete_timestamp, object_count, bytes_used,
|
||||
int(policy))
|
||||
total_containers += 1
|
||||
total_objects += object_count
|
||||
total_bytes += bytes_used
|
||||
expected = HeaderKeyDict({
|
||||
'X-Account-Container-Count': total_containers,
|
||||
'X-Account-Object-Count': total_objects,
|
||||
'X-Account-Bytes-Used': total_bytes,
|
||||
'X-Timestamp': Timestamp(now).normal,
|
||||
'X-PUT-Timestamp': Timestamp(now).normal,
|
||||
})
|
||||
for policy in POLICIES:
|
||||
prefix = 'X-Account-Storage-Policy-%s-' % policy.name
|
||||
expected[prefix + 'Object-Count'] = int(policy)
|
||||
expected[prefix + 'Bytes-Used'] = int(policy) * 10
|
||||
orig_policy_stats = broker.get_policy_stats
|
||||
|
||||
def stub_policy_stats(*args, **kwargs):
|
||||
policy_stats = orig_policy_stats(*args, **kwargs)
|
||||
for stats in policy_stats.values():
|
||||
# legacy db's won't return container_count
|
||||
del stats['container_count']
|
||||
return policy_stats
|
||||
broker.get_policy_stats = stub_policy_stats
|
||||
resp_headers = utils.get_response_headers(broker)
|
||||
per_policy_container_headers = [
|
||||
h for h in resp_headers if
|
||||
h.lower().startswith('x-account-storage-policy-') and
|
||||
h.lower().endswith('-container-count')]
|
||||
self.assertFalse(per_policy_container_headers)
|
||||
for key, value in resp_headers.items():
|
||||
expected_value = expected.pop(key)
|
||||
self.assertEqual(expected_value, str(value),
|
||||
|
@ -68,167 +68,6 @@ class FakeApp(object):
        return ['Client Disconnect\n']


class TestParseAttrs(unittest.TestCase):

    def test_basic_content_type(self):
        name, attrs = formpost._parse_attrs('text/plain')
        self.assertEquals(name, 'text/plain')
        self.assertEquals(attrs, {})

    def test_content_type_with_charset(self):
        name, attrs = formpost._parse_attrs('text/plain; charset=UTF8')
        self.assertEquals(name, 'text/plain')
        self.assertEquals(attrs, {'charset': 'UTF8'})

    def test_content_disposition(self):
        name, attrs = formpost._parse_attrs(
            'form-data; name="somefile"; filename="test.html"')
        self.assertEquals(name, 'form-data')
        self.assertEquals(attrs, {'name': 'somefile', 'filename': 'test.html'})


class TestIterRequests(unittest.TestCase):

    def test_bad_start(self):
        it = formpost._iter_requests(StringIO('blah'), 'unique')
        exc = None
        try:
            it.next()
        except formpost.FormInvalid as err:
            exc = err
        self.assertEquals(str(exc), 'invalid starting boundary')

    def test_empty(self):
        it = formpost._iter_requests(StringIO('--unique'), 'unique')
        fp = it.next()
        self.assertEquals(fp.read(), '')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_basic(self):
        it = formpost._iter_requests(
            StringIO('--unique\r\nabcdefg\r\n--unique--'), 'unique')
        fp = it.next()
        self.assertEquals(fp.read(), 'abcdefg')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_basic2(self):
        it = formpost._iter_requests(
            StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            'unique')
        fp = it.next()
        self.assertEquals(fp.read(), 'abcdefg')
        fp = it.next()
        self.assertEquals(fp.read(), 'hijkl')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_tiny_reads(self):
        it = formpost._iter_requests(
            StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            'unique')
        fp = it.next()
        self.assertEquals(fp.read(2), 'ab')
        self.assertEquals(fp.read(2), 'cd')
        self.assertEquals(fp.read(2), 'ef')
        self.assertEquals(fp.read(2), 'g')
        self.assertEquals(fp.read(2), '')
        fp = it.next()
        self.assertEquals(fp.read(), 'hijkl')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_big_reads(self):
        it = formpost._iter_requests(
            StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            'unique')
        fp = it.next()
        self.assertEquals(fp.read(65536), 'abcdefg')
        self.assertEquals(fp.read(), '')
        fp = it.next()
        self.assertEquals(fp.read(), 'hijkl')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_broken_mid_stream(self):
        # We go ahead and accept whatever is sent instead of rejecting the
        # whole request, in case the partial form is still useful.
        it = formpost._iter_requests(
            StringIO('--unique\r\nabc'), 'unique')
        fp = it.next()
        self.assertEquals(fp.read(), 'abc')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_readline(self):
        it = formpost._iter_requests(
            StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
                     'jkl\r\n\r\n--unique--'), 'unique')
        fp = it.next()
        self.assertEquals(fp.readline(), 'ab\r\n')
        self.assertEquals(fp.readline(), 'cd\ref\ng')
        self.assertEquals(fp.readline(), '')
        fp = it.next()
        self.assertEquals(fp.readline(), 'hi\r\n')
        self.assertEquals(fp.readline(), '\r\n')
        self.assertEquals(fp.readline(), 'jkl\r\n')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_readline_with_tiny_chunks(self):
        orig_read_chunk_size = formpost.READ_CHUNK_SIZE
        try:
            formpost.READ_CHUNK_SIZE = 2
            it = formpost._iter_requests(
                StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n'
                         '\r\njkl\r\n\r\n--unique--'), 'unique')
            fp = it.next()
            self.assertEquals(fp.readline(), 'ab\r\n')
            self.assertEquals(fp.readline(), 'cd\ref\ng')
            self.assertEquals(fp.readline(), '')
            fp = it.next()
            self.assertEquals(fp.readline(), 'hi\r\n')
            self.assertEquals(fp.readline(), '\r\n')
            self.assertEquals(fp.readline(), 'jkl\r\n')
            exc = None
            try:
                it.next()
            except StopIteration as err:
                exc = err
            self.assertTrue(exc is not None)
        finally:
            formpost.READ_CHUNK_SIZE = orig_read_chunk_size


class TestCappedFileLikeObject(unittest.TestCase):

    def test_whole(self):
@ -195,7 +195,8 @@ class Test_profile_log(unittest.TestCase):
    def setUp(self):
        if xprofile is None:
            raise SkipTest
        self.log_filename_prefix1 = tempfile.mkdtemp() + '/unittest.profile'
        self.tempdirs = [tempfile.mkdtemp(), tempfile.mkdtemp()]
        self.log_filename_prefix1 = self.tempdirs[0] + '/unittest.profile'
        self.profile_log1 = ProfileLog(self.log_filename_prefix1, False)
        self.pids1 = ['123', '456', str(os.getpid())]
        profiler1 = xprofile.get_profiler('eventlet.green.profile')
@ -203,7 +204,7 @@ class Test_profile_log(unittest.TestCase):
            profiler1.runctx('import os;os.getcwd();', globals(), locals())
            self.profile_log1.dump_profile(profiler1, pid)

        self.log_filename_prefix2 = tempfile.mkdtemp() + '/unittest.profile'
        self.log_filename_prefix2 = self.tempdirs[1] + '/unittest.profile'
        self.profile_log2 = ProfileLog(self.log_filename_prefix2, True)
        self.pids2 = ['321', '654', str(os.getpid())]
        profiler2 = xprofile.get_profiler('eventlet.green.profile')
@ -214,6 +215,8 @@ class Test_profile_log(unittest.TestCase):
    def tearDown(self):
        self.profile_log1.clear('all')
        self.profile_log2.clear('all')
        for tempdir in self.tempdirs:
            shutil.rmtree(tempdir, ignore_errors=True)

    def test_get_all_pids(self):
        self.assertEquals(self.profile_log1.get_all_pids(),
@ -703,14 +703,14 @@ class TestRingBuilder(unittest.TestCase):
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 2,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.rebalance()
        rb.rebalance(seed=2)

        rb.add_dev({'id': 2, 'region': 1, 'zone': 0, 'weight': 0.25,
                    'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
        rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 0.25,
                    'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=2)
        changed_parts, _balance = rb.rebalance(seed=2)

        # there's not enough room in r1 for every partition to have a replica
        # in it, so only 86 assignments occur in r1 (that's ~1/5 of the total,
@ -718,6 +718,10 @@ class TestRingBuilder(unittest.TestCase):
        population_by_region = self._get_population_by_region(rb)
        self.assertEquals(population_by_region, {0: 682, 1: 86})

        # Rebalancing will reassign 143 of the partitions, which is ~1/5
        # of the total amount of partitions (3*256)
        self.assertEqual(143, changed_parts)

        # and since there's not enough room, subsequent rebalances will not
        # cause additional assignments to r1
        rb.pretend_min_part_hours_passed()
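A side note on the `changed_parts, _balance = rb.rebalance(seed=2)` unpacking introduced above: at this point in the series rebalance() returns the number of reassigned partition replicas along with the resulting balance. A minimal standalone sketch of the same call pattern, with device dicts modelled on the ones in this hunk; the return shape is assumed from this test rather than from documentation:

    from swift.common.ring import RingBuilder

    rb = RingBuilder(8, 3, 1)   # 2**8 partitions, 3 replicas, min_part_hours=1
    for dev_id, zone in enumerate((0, 1, 2)):
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': zone, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000 + dev_id, 'device': 'sda1'})
    changed_parts, _balance = rb.rebalance(seed=2)
    # changed_parts counts the partition-replica assignments moved in this pass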
@ -28,7 +28,7 @@ class TestUtils(unittest.TestCase):
        logger = FakeLogger()
        csr = ContainerSyncRealms(unique, logger)
        self.assertEqual(
            logger.lines_dict,
            logger.all_log_lines(),
            {'debug': [
                "Could not load '%s': [Errno 2] No such file or directory: "
                "'%s'" % (unique, unique)]})
@ -45,7 +45,7 @@ class TestUtils(unittest.TestCase):
            csr = ContainerSyncRealms(fpath, logger)
            try:
                self.assertEqual(
                    logger.lines_dict,
                    logger.all_log_lines(),
                    {'error': [
                        "Could not load '%s': [Errno 13] Permission denied: "
                        "'%s'" % (fpath, fpath)]})
@ -61,7 +61,7 @@ class TestUtils(unittest.TestCase):
            logger = FakeLogger()
            fpath = os.path.join(tempdir, fname)
            csr = ContainerSyncRealms(fpath, logger)
            self.assertEqual(logger.lines_dict, {})
            self.assertEqual(logger.all_log_lines(), {})
            self.assertEqual(csr.mtime_check_interval, 300)
            self.assertEqual(csr.realms(), [])

@ -73,7 +73,7 @@ class TestUtils(unittest.TestCase):
            fpath = os.path.join(tempdir, fname)
            csr = ContainerSyncRealms(fpath, logger)
            self.assertEqual(
                logger.lines_dict,
                logger.all_log_lines(),
                {'error': [
                    "Could not load '%s': File contains no section headers.\n"
                    "file: %s, line: 1\n"
@ -92,7 +92,7 @@ cluster_dfw1 = http://dfw1.host/v1/
            logger = FakeLogger()
            fpath = os.path.join(tempdir, fname)
            csr = ContainerSyncRealms(fpath, logger)
            self.assertEqual(logger.lines_dict, {})
            self.assertEqual(logger.all_log_lines(), {})
            self.assertEqual(csr.mtime_check_interval, 300)
            self.assertEqual(csr.realms(), ['US'])
            self.assertEqual(csr.key('US'), '9ff3b71c849749dbaec4ccdd3cbab62b')
@ -120,7 +120,7 @@ cluster_lon3 = http://lon3.host/v1/
            logger = FakeLogger()
            fpath = os.path.join(tempdir, fname)
            csr = ContainerSyncRealms(fpath, logger)
            self.assertEqual(logger.lines_dict, {})
            self.assertEqual(logger.all_log_lines(), {})
            self.assertEqual(csr.mtime_check_interval, 60)
            self.assertEqual(sorted(csr.realms()), ['UK', 'US'])
            self.assertEqual(csr.key('US'), '9ff3b71c849749dbaec4ccdd3cbab62b')
@ -144,7 +144,7 @@ cluster_lon3 = http://lon3.host/v1/
            logger = FakeLogger()
            fpath = os.path.join(tempdir, fname)
            csr = ContainerSyncRealms(fpath, logger)
            self.assertEqual(logger.lines_dict, {})
            self.assertEqual(logger.all_log_lines(), {})
            self.assertEqual(csr.mtime_check_interval, 300)
            self.assertEqual(csr.realms(), ['US'])
            self.assertEqual(csr.key('US'), None)
@ -163,7 +163,7 @@ mtime_check_interval = invalid
            fpath = os.path.join(tempdir, fname)
            csr = ContainerSyncRealms(fpath, logger)
            self.assertEqual(
                logger.lines_dict,
                logger.all_log_lines(),
                {'error': [
                    "Error in '%s' with mtime_check_interval: invalid literal "
                    "for int() with base 10: 'invalid'" % fpath]})
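The fixture fragments visible in the hunk headers above (`cluster_dfw1 = http://dfw1.host/v1/`, `cluster_lon3 = http://lon3.host/v1/`, `mtime_check_interval = invalid`, a `US` realm keyed with `9ff3b71c849749dbaec4ccdd3cbab62b`) imply a container-sync-realms file of roughly the following shape. The section and option layout is inferred from the test data only; the shipped sample config is the authoritative reference:

    [DEFAULT]
    mtime_check_interval = 60

    [US]
    key = 9ff3b71c849749dbaec4ccdd3cbab62b
    cluster_dfw1 = http://dfw1.host/v1/

Each `cluster_<name>` option maps a cluster name to its Swift endpoint, and ContainerSyncRealms(fpath, logger) rechecks the file's mtime at most every `mtime_check_interval` seconds before reloading it.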
@ -179,17 +179,21 @@ class TestRange(unittest.TestCase):
        self.assertEquals(range.ranges_for_length(5), [(4, 5), (0, 5)])

    def test_ranges_for_length_multi(self):
        range = swift.common.swob.Range('bytes=-20,4-,30-150,-10')
        # the length of the ranges should be 4
        self.assertEquals(len(range.ranges_for_length(200)), 4)
        range = swift.common.swob.Range('bytes=-20,4-')
        self.assertEquals(len(range.ranges_for_length(200)), 2)

        # the actual length less than any of the range
        self.assertEquals(range.ranges_for_length(90),
                          [(70, 90), (4, 90), (30, 90), (80, 90)])
        # the actual length greater than each range element
        self.assertEquals(range.ranges_for_length(200), [(180, 200), (4, 200)])

        range = swift.common.swob.Range('bytes=30-150,-10')
        self.assertEquals(len(range.ranges_for_length(200)), 2)

        # the actual length lands in the middle of a range
        self.assertEquals(range.ranges_for_length(90), [(30, 90), (80, 90)])

        # the actual length greater than any of the range
        self.assertEquals(range.ranges_for_length(200),
                          [(180, 200), (4, 200), (30, 151), (190, 200)])
                          [(30, 151), (190, 200)])

        self.assertEquals(range.ranges_for_length(None), None)

@ -206,6 +210,56 @@ class TestRange(unittest.TestCase):
        self.assertEquals(range.ranges_for_length(5),
                          [(0, 5), (0, 2)])

    def test_ranges_for_length_overlapping(self):
        # Fewer than 3 overlaps is okay
        range = swift.common.swob.Range('bytes=10-19,15-24')
        self.assertEquals(range.ranges_for_length(100),
                          [(10, 20), (15, 25)])
        range = swift.common.swob.Range('bytes=10-19,15-24,20-29')
        self.assertEquals(range.ranges_for_length(100),
                          [(10, 20), (15, 25), (20, 30)])

        # Adjacent ranges, though suboptimal, don't overlap
        range = swift.common.swob.Range('bytes=10-19,20-29,30-39')
        self.assertEquals(range.ranges_for_length(100),
                          [(10, 20), (20, 30), (30, 40)])

        # Ranges that share a byte do overlap
        range = swift.common.swob.Range('bytes=10-20,20-30,30-40,40-50')
        self.assertEquals(range.ranges_for_length(100), [])

        # With suffix byte range specs (e.g. bytes=-2), make sure that we
        # correctly determine overlapping-ness based on the entity length
        range = swift.common.swob.Range('bytes=10-15,15-20,30-39,-9')
        self.assertEquals(range.ranges_for_length(100),
                          [(10, 16), (15, 21), (30, 40), (91, 100)])
        self.assertEquals(range.ranges_for_length(20), [])

    def test_ranges_for_length_nonascending(self):
        few_ranges = ("bytes=100-109,200-209,300-309,500-509,"
                      "400-409,600-609,700-709")
        many_ranges = few_ranges + ",800-809"

        range = swift.common.swob.Range(few_ranges)
        self.assertEquals(range.ranges_for_length(100000),
                          [(100, 110), (200, 210), (300, 310), (500, 510),
                           (400, 410), (600, 610), (700, 710)])

        range = swift.common.swob.Range(many_ranges)
        self.assertEquals(range.ranges_for_length(100000), [])

    def test_ranges_for_length_too_many(self):
        at_the_limit_ranges = (
            "bytes=" + ",".join("%d-%d" % (x * 1000, x * 1000 + 10)
                                for x in range(50)))
        too_many_ranges = at_the_limit_ranges + ",10000000-10000009"

        rng = swift.common.swob.Range(at_the_limit_ranges)
        self.assertEquals(len(rng.ranges_for_length(1000000000)), 50)

        rng = swift.common.swob.Range(too_many_ranges)
        self.assertEquals(rng.ranges_for_length(1000000000), [])

    def test_range_invalid_syntax(self):

        def _check_invalid_range(range_value):
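The comments in test_ranges_for_length_overlapping spell out the rule: once ranges_for_length() resolves each spec to a half-open (start, end) pair, adjacent pairs such as (10, 20) and (20, 30) do not overlap, while pairs that share a byte do, and a request with too many overlapping, non-ascending, or simply too many ranges is rejected wholesale as []. A minimal standalone sketch of that overlap check, separate from Swift's actual implementation:

    def ranges_overlap(a, b):
        # Half-open ranges overlap iff each one starts before the other ends.
        (a_start, a_end), (b_start, b_end) = a, b
        return a_start < b_end and b_start < a_end


    def count_overlapping_pairs(resolved_ranges):
        return sum(1 for i, r1 in enumerate(resolved_ranges)
                   for r2 in resolved_ranges[i + 1:] if ranges_overlap(r1, r2))


    # Adjacent ranges from the test: no overlapping pairs.
    print(count_overlapping_pairs([(10, 20), (20, 30), (30, 40)]))            # 0
    # 'bytes=10-20,20-30,30-40,40-50' resolves to ranges that share bytes.
    print(count_overlapping_pairs([(10, 21), (20, 31), (30, 41), (40, 51)]))  # 3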
@ -54,7 +54,8 @@ from mock import MagicMock, patch

from swift.common.exceptions import (Timeout, MessageTimeout,
                                     ConnectionTimeout, LockTimeout,
                                     ReplicationLockTimeout)
                                     ReplicationLockTimeout,
                                     MimeInvalid)
from swift.common import utils
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.swob import Request, Response
@ -1502,6 +1503,9 @@ class TestUtils(unittest.TestCase):
            utils.load_libc_function('printf')))
        self.assert_(callable(
            utils.load_libc_function('some_not_real_function')))
        self.assertRaises(AttributeError,
                          utils.load_libc_function, 'some_not_real_function',
                          fail_if_missing=True)

    def test_readconf(self):
        conf = '''[section1]
@ -4169,5 +4173,177 @@ class TestLRUCache(unittest.TestCase):
        self.assertEqual(f.size(), 4)


class TestParseContentDisposition(unittest.TestCase):

    def test_basic_content_type(self):
        name, attrs = utils.parse_content_disposition('text/plain')
        self.assertEquals(name, 'text/plain')
        self.assertEquals(attrs, {})

    def test_content_type_with_charset(self):
        name, attrs = utils.parse_content_disposition(
            'text/plain; charset=UTF8')
        self.assertEquals(name, 'text/plain')
        self.assertEquals(attrs, {'charset': 'UTF8'})

    def test_content_disposition(self):
        name, attrs = utils.parse_content_disposition(
            'form-data; name="somefile"; filename="test.html"')
        self.assertEquals(name, 'form-data')
        self.assertEquals(attrs, {'name': 'somefile', 'filename': 'test.html'})


class TestIterMultipartMimeDocuments(unittest.TestCase):

    def test_bad_start(self):
        it = utils.iter_multipart_mime_documents(StringIO('blah'), 'unique')
        exc = None
        try:
            it.next()
        except MimeInvalid as err:
            exc = err
        self.assertEquals(str(exc), 'invalid starting boundary')

    def test_empty(self):
        it = utils.iter_multipart_mime_documents(StringIO('--unique'),
                                                 'unique')
        fp = it.next()
        self.assertEquals(fp.read(), '')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_basic(self):
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nabcdefg\r\n--unique--'), 'unique')
        fp = it.next()
        self.assertEquals(fp.read(), 'abcdefg')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_basic2(self):
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            'unique')
        fp = it.next()
        self.assertEquals(fp.read(), 'abcdefg')
        fp = it.next()
        self.assertEquals(fp.read(), 'hijkl')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_tiny_reads(self):
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            'unique')
        fp = it.next()
        self.assertEquals(fp.read(2), 'ab')
        self.assertEquals(fp.read(2), 'cd')
        self.assertEquals(fp.read(2), 'ef')
        self.assertEquals(fp.read(2), 'g')
        self.assertEquals(fp.read(2), '')
        fp = it.next()
        self.assertEquals(fp.read(), 'hijkl')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_big_reads(self):
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            'unique')
        fp = it.next()
        self.assertEquals(fp.read(65536), 'abcdefg')
        self.assertEquals(fp.read(), '')
        fp = it.next()
        self.assertEquals(fp.read(), 'hijkl')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_broken_mid_stream(self):
        # We go ahead and accept whatever is sent instead of rejecting the
        # whole request, in case the partial form is still useful.
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nabc'), 'unique')
        fp = it.next()
        self.assertEquals(fp.read(), 'abc')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_readline(self):
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
                     'jkl\r\n\r\n--unique--'), 'unique')
        fp = it.next()
        self.assertEquals(fp.readline(), 'ab\r\n')
        self.assertEquals(fp.readline(), 'cd\ref\ng')
        self.assertEquals(fp.readline(), '')
        fp = it.next()
        self.assertEquals(fp.readline(), 'hi\r\n')
        self.assertEquals(fp.readline(), '\r\n')
        self.assertEquals(fp.readline(), 'jkl\r\n')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_readline_with_tiny_chunks(self):
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n'
                     '\r\njkl\r\n\r\n--unique--'),
            'unique',
            read_chunk_size=2)
        fp = it.next()
        self.assertEquals(fp.readline(), 'ab\r\n')
        self.assertEquals(fp.readline(), 'cd\ref\ng')
        self.assertEquals(fp.readline(), '')
        fp = it.next()
        self.assertEquals(fp.readline(), 'hi\r\n')
        self.assertEquals(fp.readline(), '\r\n')
        self.assertEquals(fp.readline(), 'jkl\r\n')
        exc = None
        try:
            it.next()
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)
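Outside the tests, the same iterator splits a multipart body into per-part file-like objects: each yielded object covers exactly one MIME document and supports the read()/readline() calls exercised above. A minimal sketch under those same assumptions (the body string and boundary here are made up):

    from StringIO import StringIO

    from swift.common import utils

    body = '--XyZ\r\nfirst part\r\n--XyZ\r\nsecond part\r\n--XyZ--'
    for fp in utils.iter_multipart_mime_documents(StringIO(body), 'XyZ'):
        print(fp.read())  # 'first part', then 'second part'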
class TestPairs(unittest.TestCase):
    def test_pairs(self):
        items = [10, 20, 30, 40, 50, 60]
        got_pairs = set(utils.pairs(items))
        self.assertEqual(got_pairs,
                         set([(10, 20), (10, 30), (10, 40), (10, 50), (10, 60),
                              (20, 30), (20, 40), (20, 50), (20, 60),
                              (30, 40), (30, 50), (30, 60),
                              (40, 50), (40, 60),
                              (50, 60)]))


if __name__ == '__main__':
    unittest.main()
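The expected set in test_pairs is every unordered pair of distinct items, so a functionally equivalent stand-in (not necessarily how swift.common.utils.pairs is written) is itertools.combinations:

    import itertools


    def pairs_sketch(items):
        # Every unordered pair of distinct items, matching the TestPairs expectation.
        return itertools.combinations(items, 2)

    print(list(pairs_sketch([10, 20, 30])))  # [(10, 20), (10, 30), (20, 30)]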
@ -26,6 +26,7 @@ from collections import defaultdict
from contextlib import contextmanager
import sqlite3
import pickle
import json

from swift.container.backend import ContainerBroker
from swift.common.utils import Timestamp
|
        self.assertEquals(['a', 'b', 'c'],
                          sorted([rec['name'] for rec in items]))

    def test_merge_items_overwrite_unicode(self):
        # test DatabaseBroker.merge_items
        snowman = u'\N{SNOWMAN}'.encode('utf-8')
        broker1 = ContainerBroker(':memory:', account='a', container='c')
        broker1.initialize(Timestamp('1').internal, 0)
        id = broker1.get_info()['id']
        broker2 = ContainerBroker(':memory:', account='a', container='c')
        broker2.initialize(Timestamp('1').internal, 0)
        broker1.put_object(snowman, Timestamp(2).internal, 0,
                           'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
        broker1.put_object('b', Timestamp(3).internal, 0,
                           'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
        broker2.merge_items(json.loads(json.dumps(broker1.get_items_since(
            broker2.get_sync(id), 1000))), id)
        broker1.put_object(snowman, Timestamp(4).internal, 0, 'text/plain',
                           'd41d8cd98f00b204e9800998ecf8427e')
        broker2.merge_items(json.loads(json.dumps(broker1.get_items_since(
            broker2.get_sync(id), 1000))), id)
        items = broker2.get_items_since(-1, 1000)
        self.assertEquals(['b', snowman],
                          sorted([rec['name'] for rec in items]))
        for rec in items:
            if rec['name'] == snowman:
                self.assertEquals(rec['created_at'], Timestamp(4).internal)
            if rec['name'] == 'b':
                self.assertEquals(rec['created_at'], Timestamp(3).internal)

    def test_merge_items_overwrite(self):
        # test DatabaseBroker.merge_items
        broker1 = ContainerBroker(':memory:', account='a', container='c')
@ -36,6 +36,7 @@ from eventlet import tpool
from test.unit import (FakeLogger, mock as unit_mock, temptree,
                       patch_policies, debug_logger)

from nose import SkipTest
from swift.obj import diskfile
from swift.common import utils
from swift.common.utils import hash_path, mkdirs, Timestamp
@ -951,6 +952,18 @@ class TestDiskFileManager(unittest.TestCase):
            lock_exc = err
        self.assertTrue(lock_exc is None)

    def test_missing_splice_warning(self):
        logger = FakeLogger()
        with mock.patch('swift.obj.diskfile.system_has_splice',
                        lambda: False):
            self.conf['splice'] = 'yes'
            mgr = diskfile.DiskFileManager(self.conf, logger)

        warnings = logger.get_lines_for_level('warning')
        self.assertTrue(len(warnings) > 0)
        self.assertTrue('splice()' in warnings[-1])
        self.assertFalse(mgr.use_splice)


@patch_policies
class TestDiskFile(unittest.TestCase):
@ -2183,6 +2196,50 @@ class TestDiskFile(unittest.TestCase):
        self.assertEquals(len(dl), 2)
        self.assertTrue(exp_name in set(dl))

    def _system_can_zero_copy(self):
        if not utils.system_has_splice():
            return False

        try:
            utils.get_md5_socket()
        except IOError:
            return False

        return True

    def test_zero_copy_cache_dropping(self):
        if not self._system_can_zero_copy():
            raise SkipTest("zero-copy support is missing")

        self.conf['splice'] = 'on'
        self.conf['keep_cache_size'] = 16384
        self.conf['disk_chunk_size'] = 4096
        self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())

        df = self._get_open_disk_file(fsize=16385)
        reader = df.reader()
        self.assertTrue(reader.can_zero_copy_send())
        with mock.patch("swift.obj.diskfile.drop_buffer_cache") as dbc:
            with mock.patch("swift.obj.diskfile.DROP_CACHE_WINDOW", 4095):
                with open('/dev/null', 'w') as devnull:
                    reader.zero_copy_send(devnull.fileno())
                self.assertEqual(len(dbc.mock_calls), 5)

    def test_zero_copy_turns_off_when_md5_sockets_not_supported(self):
        if not self._system_can_zero_copy():
            raise SkipTest("zero-copy support is missing")

        self.conf['splice'] = 'on'
        with mock.patch('swift.obj.diskfile.get_md5_socket') as mock_md5sock:
            mock_md5sock.side_effect = IOError(
                errno.EAFNOSUPPORT, "MD5 socket busted")
            df = self._get_open_disk_file(fsize=128)
            reader = df.reader()
            self.assertFalse(reader.can_zero_copy_send())

            log_lines = self.df_mgr.logger.get_lines_for_level('warning')
            self.assert_('MD5 sockets' in log_lines[-1])


if __name__ == '__main__':
    unittest.main()
@ -566,7 +566,7 @@ class TestObjectReplicator(unittest.TestCase):
        try:
            object_replicator.http_connect = mock_http_connect(200)
            # Write some files into '1' and run replicate- they should be moved
            # to the other partitoins and then node should get deleted.
            # to the other partitions and then node should get deleted.
            cur_part = '1'
            df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o')
            mkdirs(df._datadir)
@ -756,7 +756,7 @@ class TestObjectReplicator(unittest.TestCase):
        resp.read.return_value = pickle.dumps({})
        for job in jobs:
            set_default(self)
            # limit local job to policy 0 for simplicty
            # limit local job to policy 0 for simplicity
            if job['partition'] == '0' and job['policy_idx'] == 0:
                local_job = job.copy()
                continue
@ -795,7 +795,7 @@ class TestObjectReplicator(unittest.TestCase):
            node['replication_ip'] = '127.0.0.11'
            node['replication_port'] = '6011'
        set_default(self)
        # with only one set of headers make sure we speicy index 0 here
        # with only one set of headers make sure we specify index 0 here
        # as otherwise it may be different from earlier tests
        self.headers['X-Backend-Storage-Policy-Index'] = 0
        self.replicator.update(repl_job)
@ -33,6 +33,7 @@ import itertools
import tempfile

from eventlet import sleep, spawn, wsgi, listen, Timeout, tpool
from eventlet.green import httplib

from nose import SkipTest

@ -521,7 +522,7 @@ class TestObjectController(unittest.TestCase):
        self.assertEquals(resp.status_int, 412)

    def test_PUT_if_none_match(self):
        # PUT with if-none-match set and nothing there should succede
        # PUT with if-none-match set and nothing there should succeed
        timestamp = normalize_timestamp(time())
        req = Request.blank(
            '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
@ -4388,5 +4389,136 @@ class TestObjectServer(unittest.TestCase):
        resp.close()


class TestZeroCopy(unittest.TestCase):
    """Test the object server's zero-copy functionality"""

    def _system_can_zero_copy(self):
        if not utils.system_has_splice():
            return False

        try:
            utils.get_md5_socket()
        except IOError:
            return False

        return True

    def setUp(self):
        if not self._system_can_zero_copy():
            raise SkipTest("zero-copy support is missing")

        self.testdir = mkdtemp(suffix="obj_server_zero_copy")
        mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))

        conf = {'devices': self.testdir,
                'mount_check': 'false',
                'splice': 'yes',
                'disk_chunk_size': '4096'}
        self.object_controller = object_server.ObjectController(
            conf, logger=debug_logger())
        self.df_mgr = diskfile.DiskFileManager(
            conf, self.object_controller.logger)

        listener = listen(('localhost', 0))
        port = listener.getsockname()[1]
        self.wsgi_greenlet = spawn(
            wsgi.server, listener, self.object_controller, NullLogger())

        self.http_conn = httplib.HTTPConnection('localhost', port)
        self.http_conn.connect()

    def tearDown(self):
        """Tear down for testing swift.object.server.ObjectController"""
        self.wsgi_greenlet.kill()
        rmtree(self.testdir)

    def test_GET(self):
        url_path = '/sda1/2100/a/c/o'

        self.http_conn.request('PUT', url_path, 'obj contents',
                               {'X-Timestamp': '127082564.24709'})
        response = self.http_conn.getresponse()
        self.assertEqual(response.status, 201)
        response.read()

        self.http_conn.request('GET', url_path)
        response = self.http_conn.getresponse()
        self.assertEqual(response.status, 200)
        contents = response.read()
        self.assertEqual(contents, 'obj contents')

    def test_GET_big(self):
        # Test with a large-ish object to make sure we handle full socket
        # buffers correctly.
        obj_contents = 'A' * 4 * 1024 * 1024  # 4 MiB
        url_path = '/sda1/2100/a/c/o'

        self.http_conn.request('PUT', url_path, obj_contents,
                               {'X-Timestamp': '1402600322.52126'})
        response = self.http_conn.getresponse()
        self.assertEqual(response.status, 201)
        response.read()

        self.http_conn.request('GET', url_path)
        response = self.http_conn.getresponse()
        self.assertEqual(response.status, 200)
        contents = response.read()
        self.assertEqual(contents, obj_contents)

    def test_quarantine(self):
        obj_hash = hash_path('a', 'c', 'o')
        url_path = '/sda1/2100/a/c/o'
        ts = '1402601849.47475'

        self.http_conn.request('PUT', url_path, 'obj contents',
                               {'X-Timestamp': ts})
        response = self.http_conn.getresponse()
        self.assertEqual(response.status, 201)
        response.read()

        # go goof up the file on disk
        fname = os.path.join(self.testdir, 'sda1', 'objects', '2100',
                             obj_hash[-3:], obj_hash, ts + '.data')

        with open(fname, 'rb+') as fh:
            fh.write('XYZ')

        self.http_conn.request('GET', url_path)
        response = self.http_conn.getresponse()
        self.assertEqual(response.status, 200)
        contents = response.read()
        self.assertEqual(contents, 'XYZ contents')

        self.http_conn.request('GET', url_path)
        response = self.http_conn.getresponse()
        # it was quarantined by the previous request
        self.assertEqual(response.status, 404)
        response.read()

    def test_quarantine_on_well_formed_zero_byte_file(self):
        # Make sure we work around an oddity in Linux's hash sockets
        url_path = '/sda1/2100/a/c/o'
        ts = '1402700497.71333'

        self.http_conn.request(
            'PUT', url_path, '',
            {'X-Timestamp': ts, 'Content-Length': '0'})
        response = self.http_conn.getresponse()
        self.assertEqual(response.status, 201)
        response.read()

        self.http_conn.request('GET', url_path)
        response = self.http_conn.getresponse()
        self.assertEqual(response.status, 200)
        contents = response.read()
        self.assertEqual(contents, '')

        self.http_conn.request('GET', url_path)
        response = self.http_conn.getresponse()
        self.assertEqual(response.status, 200)  # still there
        contents = response.read()
        self.assertEqual(contents, '')


if __name__ == '__main__':
    unittest.main()
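TestZeroCopy drives the object server with a plain conf dict ({'splice': 'yes', 'disk_chunk_size': '4096', ...}). Expressed as object-server configuration, that setup corresponds roughly to the snippet below; the section placement is approximated from the option names used in setUp, and the device path is a placeholder:

    [DEFAULT]
    devices = /srv/node
    mount_check = false

    [app:object-server]
    use = egg:swift#object
    splice = yes
    disk_chunk_size = 4096

When splice support or MD5 sockets are unavailable at runtime, the diskfile tests earlier in this change show the server logging a warning and falling back to the regular send path rather than failing.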