Merge from trunk

gholt committed on 2011-01-25 15:24:09 -08:00
13 changed files with 77 additions and 59 deletions

View File

@@ -24,9 +24,13 @@ Paul Jimenez
 Brian K. Jones
 Ed Leafe
 Stephen Milton
+Russ Nelson
+Colin Nicholson
 Andrew Clay Shafer
 Monty Taylor
 Caleb Tennis
+FUJITA Tomonori
 Kapil Thangavelu
 Conrad Weidenkeller
+Chris Wedgwood
 Cory Wright

View File

@@ -1,5 +1,5 @@
 import gettext
-__version__ = '1.1.0'
+__version__ = '1.2.0'
 gettext.install('swift')

View File

@@ -149,31 +149,32 @@ class AuthController(object):
 previous_prefix = ''
 if '_' in row[0]:
 previous_prefix = row[0].split('_', 1)[0]
-msg = _(('''
+msg = (_('''
 THERE ARE ACCOUNTS IN YOUR auth.db THAT DO NOT BEGIN WITH YOUR NEW RESELLER
-PREFIX OF "%s".
+PREFIX OF "%(reseller)s".
 YOU HAVE A FEW OPTIONS:
-1) RUN "swift-auth-update-reseller-prefixes %s %s",
+1. RUN "swift-auth-update-reseller-prefixes %(db_file)s %(reseller)s",
 "swift-init auth-server restart", AND
 "swift-auth-recreate-accounts -K ..." TO CREATE FRESH ACCOUNTS.
 OR
-2) REMOVE %s, RUN "swift-init auth-server restart", AND RUN
+2. REMOVE %(db_file)s, RUN "swift-init auth-server restart", AND RUN
 "swift-auth-add-user ..." TO CREATE BRAND NEW ACCOUNTS THAT WAY.
 OR
-3) ADD "reseller_prefix = %s" (WITHOUT THE QUOTES) TO YOUR
+3. ADD "reseller_prefix = %(previous)s" (WITHOUT THE QUOTES) TO YOUR
 proxy-server.conf IN THE [filter:auth] SECTION AND TO YOUR
 auth-server.conf IN THE [app:auth-server] SECTION AND RUN
 "swift-init proxy-server restart" AND "swift-init auth-server restart"
 TO REVERT BACK TO YOUR PREVIOUS RESELLER PREFIX.
-%s
-''') % (self.reseller_prefix.rstrip('_'), self.db_file,
-self.reseller_prefix.rstrip('_'), self.db_file,
-previous_prefix, previous_prefix and ' ' or _('''
+%(note)s
+''') % {'reseller': self.reseller_prefix.rstrip('_'),
+'db_file': self.db_file,
+'previous': previous_prefix,
+'note': previous_prefix and ' ' or _('''
 SINCE YOUR PREVIOUS RESELLER PREFIX WAS AN EMPTY STRING, IT IS NOT
 RECOMMENDED TO PERFORM OPTION 3 AS THAT WOULD MAKE SUPPORTING MULTIPLE
 RESELLERS MORE DIFFICULT.
-''').strip())).strip()
+''').strip()}).strip()
 self.logger.critical(_('CRITICAL: ') + ' '.join(msg.split()))
 raise Exception('\n' + msg)
@@ -243,7 +244,8 @@ YOU HAVE A FEW OPTIONS:
 raise err

 def validate_s3_sign(self, request, token):
-account, user, sign = request.headers['Authorization'].split(' ')[-1].split(':')
+account, user, sign = \
+request.headers['Authorization'].split(' ')[-1].split(':')
 msg = base64.urlsafe_b64decode(unquote(token))
 rv = False
 with self.get_conn() as conn:
@@ -253,7 +255,8 @@ YOU HAVE A FEW OPTIONS:
 (account, user)).fetchone()
 rv = (84000, account, user, row[1])
 if rv:
-s = base64.encodestring(hmac.new(row[0], msg, sha1).digest()).strip()
+s = base64.encodestring(hmac.new(row[0], msg,
+sha1).digest()).strip()
 self.logger.info("orig %s, calc %s" % (sign, s))
 if sign != s:
 rv = False
@@ -340,10 +343,14 @@ YOU HAVE A FEW OPTIONS:
 'SELECT url FROM account WHERE account = ? AND user = ?',
 (account, user)).fetchone()
 if row:
-self.logger.info(
-_('ALREADY EXISTS create_user(%s, %s, _, %s, %s) [%.02f]') %
-(repr(account), repr(user), repr(admin),
-repr(reseller_admin), time() - begin))
+self.logger.info(_('ALREADY EXISTS create_user(%(account)s, '
+'%(user)s, _, %(admin)s, %(reseller_admin)s) '
+'[%(elapsed).02f]') %
+{'account': repr(account),
+'user': repr(user),
+'admin': repr(admin),
+'reseller_admin': repr(reseller_admin),
+'elapsed': time() - begin})
 return 'already exists'
 row = conn.execute(
 'SELECT url, cfaccount FROM account WHERE account = ?',
@@ -354,10 +361,14 @@ YOU HAVE A FEW OPTIONS:
 else:
 account_hash = self.add_storage_account()
 if not account_hash:
-self.logger.info(
-_('FAILED create_user(%s, %s, _, %s, %s) [%.02f]') %
-(repr(account), repr(user), repr(admin),
-repr(reseller_admin), time() - begin))
+self.logger.info(_('FAILED create_user(%(account)s, '
+'%(user)s, _, %(admin)s, %(reseller_admin)s) '
+'[%(elapsed).02f]') %
+{'account': repr(account),
+'user': repr(user),
+'admin': repr(admin),
+'reseller_admin': repr(reseller_admin),
+'elapsed': time() - begin})
 return False
 url = self.default_cluster_url.rstrip('/') + '/' + account_hash
 conn.execute('''INSERT INTO account
@@ -367,10 +378,11 @@ YOU HAVE A FEW OPTIONS:
 (account, url, account_hash, user, password,
 admin and 't' or '', reseller_admin and 't' or ''))
 conn.commit()
-self.logger.info(
-_('SUCCESS create_user(%s, %s, _, %s, %s) = %s [%.02f]') %
-(repr(account), repr(user), repr(admin), repr(reseller_admin),
-repr(url), time() - begin))
+self.logger.info(_('SUCCESS create_user(%(account)s, %(user)s, _, '
+'%(admin)s, %(reseller_admin)s) = %(url)s [%(elapsed).02f]') %
+{'account': repr(account), 'user': repr(user),
+'admin': repr(admin), 'reseller_admin': repr(reseller_admin),
+'url': repr(url), 'elapsed': time() - begin})
 return url

 def recreate_accounts(self):
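The recurring change in this file swaps positional %s placeholders for named %(name)s placeholders inside _()-translated strings, so translations can reorder or reuse values without breaking the interpolation. A minimal sketch of the pattern with illustrative values (the reseller and db_file names below are examples, not code from this commit):

    import gettext

    # gettext.install() binds _() as the translation lookup, as swift/__init__.py does.
    gettext.install('swift')

    reseller = 'AUTH'
    db_file = '/etc/swift/auth.db'

    # Positional placeholders lock every translation into one argument order:
    old_msg = _('RUN "swift-auth-update-reseller-prefixes %s %s"') % (db_file, reseller)

    # Named placeholders let a translated string reorder or repeat values freely:
    new_msg = _('RUN "swift-auth-update-reseller-prefixes %(db_file)s %(reseller)s"') % {
        'db_file': db_file, 'reseller': reseller}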

View File

@@ -59,8 +59,8 @@ class DevAuth(object):
 if s3 or (token and token.startswith(self.reseller_prefix)):
 # Note: Empty reseller_prefix will match all tokens.
 # Attempt to auth my token with my auth server
-groups = \
-self.get_groups(env, token, memcache_client=cache_from_env(env))
+groups = self.get_groups(env, token,
+memcache_client=cache_from_env(env))
 if groups:
 env['REMOTE_USER'] = groups
 user = groups and groups.split(',', 1)[0] or ''
@@ -154,10 +154,12 @@ class DevAuth(object):
 timeout=expiration)
 if env.get('HTTP_AUTHORIZATION'):
-account, user, sign = env['HTTP_AUTHORIZATION'].split(' ')[-1].split(':')
+account, user, sign = \
+env['HTTP_AUTHORIZATION'].split(' ')[-1].split(':')
 cfaccount = resp.getheader('x-auth-account-suffix')
 path = env['PATH_INFO']
-env['PATH_INFO'] = path.replace("%s:%s" % (account, user), cfaccount, 1)
+env['PATH_INFO'] = \
+path.replace("%s:%s" % (account, user), cfaccount, 1)
 return groups

View File

@@ -111,4 +111,3 @@ def filter_factory(global_conf, **local_conf):
 def domain_filter(app):
 return DomainRemapMiddleware(app, conf)
 return domain_filter
-

View File

@@ -407,7 +407,8 @@ class ObjectReplicator(Daemon):
 conn.getresponse().read()
 self.suffix_sync += len(suffixes)
 except (Exception, Timeout):
-self.logger.exception(_("Error syncing with node: %s") % node)
+self.logger.exception(_("Error syncing with node: %s") %
+node)
 self.suffix_count += len(local_hash)
 except (Exception, Timeout):
 self.logger.exception(_("Error syncing partition"))

View File

@@ -55,7 +55,8 @@ class AccountStat(Daemon):
self.logger.info(_("Gathering account stats")) self.logger.info(_("Gathering account stats"))
start = time.time() start = time.time()
self.find_and_process() self.find_and_process()
self.logger.info(_("Gathering account stats complete (%0.2f minutes)") % self.logger.info(
_("Gathering account stats complete (%0.2f minutes)") %
((time.time() - start) / 60)) ((time.time() - start) / 60))
def find_and_process(self): def find_and_process(self):
@@ -70,8 +71,8 @@ class AccountStat(Daemon):
 # Account Name, Container Count, Object Count, Bytes Used
 for device in os.listdir(self.devices):
 if self.mount_check and not check_mount(self.devices, device):
-self.logger.error(_("Device %s is not mounted, skipping.") %
-device)
+self.logger.error(
+_("Device %s is not mounted, skipping.") % device)
 continue
 accounts = os.path.join(self.devices,
 device,

View File

@@ -280,7 +280,8 @@ class LogProcessorDaemon(Daemon):
 logs_to_process = self.log_processor.get_data_list(lookback_start,
 lookback_end,
 already_processed_files)
-self.logger.info(_('loaded %d files to process') % len(logs_to_process))
+self.logger.info(_('loaded %d files to process') %
+len(logs_to_process))
 if not logs_to_process:
 self.logger.info(_("Log processing done (%0.2f minutes)") %
 ((time.time() - start) / 60))

View File

@@ -45,7 +45,7 @@ class TestContainerController(unittest.TestCase):
 def tearDown(self):
 """ Tear down for testing swift.object_server.ObjectController """
-rmtree(self.testdir, ignore_errors=1)
+rmtree(os.path.dirname(self.testdir), ignore_errors=1)

 def test_acl_container(self):
 # Ensure no acl by default
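Several test modules in this commit make the same tearDown change: remove the parent of self.testdir rather than self.testdir itself. A minimal sketch of the pattern being assumed, where the test directory lives inside a throwaway parent so cleaning up the parent removes everything the test created (the class and path names are hypothetical):

    import os
    import unittest
    from shutil import rmtree
    from tempfile import mkdtemp

    class ExampleTestCase(unittest.TestCase):

        def setUp(self):
            # Hypothetical layout: testdir sits under a unique throwaway parent.
            self.testdir = os.path.join(mkdtemp(), 'tmp_test_example')
            os.makedirs(self.testdir)

        def tearDown(self):
            # Mirrors the change above: remove the parent, not just testdir.
            rmtree(os.path.dirname(self.testdir), ignore_errors=1)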

View File

@@ -51,7 +51,7 @@ class TestContainerUpdater(unittest.TestCase):
 os.mkdir(self.sda1)

 def tearDown(self):
-rmtree(self.testdir, ignore_errors=1)
+rmtree(os.path.dirname(self.testdir), ignore_errors=1)

 def test_creation(self):
 cu = container_updater.ContainerUpdater({

View File

@@ -56,7 +56,7 @@ class TestAuditor(unittest.TestCase):
 mount_check='false')

 def tearDown(self):
-rmtree(self.testdir, ignore_errors=1)
+rmtree(os.path.dirname(self.testdir), ignore_errors=1)

 def test_object_audit_extra_data(self):
 self.auditor = auditor.ObjectAuditor(self.conf)
@@ -123,21 +123,17 @@ class TestAuditor(unittest.TestCase):
 self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)

 def test_object_audit_no_meta(self):
-self.auditor = auditor.ObjectAuditor(self.conf)
 cur_part = '0'
 disk_file = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o')
-data = '0' * 1024
-etag = md5()
-pre_quarantines = self.auditor.quarantines
-with disk_file.mkstemp() as (fd, tmppath):
-os.write(fd, data)
-etag.update(data)
-etag = etag.hexdigest()
 timestamp = str(normalize_timestamp(time.time()))
-os.fsync(fd)
+path = os.path.join(disk_file.datadir, timestamp + '.data')
+mkdirs(disk_file.datadir)
+fp = open(path, 'w')
+fp.write('0' * 1024)
+fp.close()
 invalidate_hash(os.path.dirname(disk_file.datadir))
-renamer(tmppath, os.path.join(disk_file.datadir,
-timestamp + '.data'))
+self.auditor = auditor.ObjectAuditor(self.conf)
+pre_quarantines = self.auditor.quarantines
 self.auditor.object_audit(
 os.path.join(disk_file.datadir, timestamp + '.data'),
 'sda', cur_part)

View File

@@ -53,7 +53,7 @@ class TestObjectController(unittest.TestCase):
 def tearDown(self):
 """ Tear down for testing swift.object_server.ObjectController """
-rmtree(self.testdir)
+rmtree(os.path.dirname(self.testdir))

 def test_POST_update_meta(self):
 """ Test swift.object_server.ObjectController.POST """

View File

@@ -142,7 +142,7 @@ def teardown():
 for server in _test_coros:
 server.kill()
 proxy_server.CONTAINER_LISTING_LIMIT = _orig_container_listing_limit
-rmtree(_testdir)
+rmtree(os.path.dirname(_testdir))

 def fake_http_connect(*code_iter, **kwargs):
@@ -3425,5 +3425,7 @@ class TestSegmentedIterable(unittest.TestCase):
 if __name__ == '__main__':
 setup()
+try:
 unittest.main()
+finally:
 teardown()
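The final hunk wraps unittest.main() in try/finally so the module-level teardown() still runs when the test run fails or exits early; unittest.main() raises SystemExit by default, and a finally block executes even then. A minimal sketch of the same pattern with hypothetical setup/teardown helpers:

    import unittest

    def setup():
        # Hypothetical module-level fixture setup (temp dirs, test servers, ...).
        pass

    def teardown():
        # Hypothetical module-level cleanup.
        pass

    class ExampleTest(unittest.TestCase):
        def test_noop(self):
            self.assertTrue(True)

    if __name__ == '__main__':
        setup()
        try:
            # unittest.main() raises SystemExit when it finishes; the finally
            # block still runs, so cleanup happens even on failure.
            unittest.main()
        finally:
            teardown()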