 3d3ed34f44
			
		
	
	3d3ed34f44
	
	
	
		
			
			Documentation, including a list of metrics reported and their semantics,
is in the Admin Guide in a new section, "Reporting Metrics to StatsD".
An optional "metric prefix" may be configured which will be prepended to
every metric name sent to StatsD.
Here is the rationale for doing a deep integration like this versus only
sending metrics to StatsD in middleware.  It's the only way to report
some internal activities of Swift in a real-time manner. So to have one
way of reporting to StatsD and one place/style of configuration, even
some things (like, say, timing of PUT requests into the proxy-server)
which could be logged via middleware are consistently logged the same
way (deep integration via the logger delegate methods).
When log_statsd_host is configured, get_logger() injects a
swift.common.utils.StatsdClient object into the logger as
logger.statsd_client.  Then a set of delegate methods on LogAdapter
either pass through to the StatsdClient object or become no-ops. This
allows StatsD logging to look like:
    self.logger.increment('some.metric.here')
and do the right thing in all cases and with no messy conditional logic.
I wanted to use the pystatsd module for the StatsD client, but the
version on PyPI is lagging the git repo (and is missing both the prefix
functionality and timing_since() method).  So I wrote my
swift.common.utils.StatsdClient.  The interface is the same as
pystatsd.Client, but the code was written from scratch.  It's pretty
simple, and the tests I added cover it.  This also frees Swift from an
optional dependency on the pystatsd module, making this feature easier
to enable.
There's test coverage for the new code and all existing tests continue
to pass.
Refactored out _one_audit_pass() method in swift/account/auditor.py and
swift/container/auditor.py.
Fixed some misc. PEP8 violations.
Misc test cleanups and refactorings (particularly the way "fake logging"
is handled).
Change-Id: Ie968a9ae8771f59ee7591e2ae11999c44bfe33b2
		
	
		
			
				
	
	
		
			286 lines
		
	
	
		
			12 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			286 lines
		
	
	
		
			12 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
| # Copyright (c) 2010-2012 OpenStack, LLC.
 | |
| #
 | |
| # Licensed under the Apache License, Version 2.0 (the "License");
 | |
| # you may not use this file except in compliance with the License.
 | |
| # You may obtain a copy of the License at
 | |
| #
 | |
| #    http://www.apache.org/licenses/LICENSE-2.0
 | |
| #
 | |
| # Unless required by applicable law or agreed to in writing, software
 | |
| # distributed under the License is distributed on an "AS IS" BASIS,
 | |
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 | |
| # implied.
 | |
| # See the License for the specific language governing permissions and
 | |
| # limitations under the License.
 | |
| 
 | |
| import logging
 | |
| import os
 | |
| import signal
 | |
| import sys
 | |
| import time
 | |
| from random import random, shuffle
 | |
| from tempfile import mkstemp
 | |
| 
 | |
| from eventlet import spawn, patcher, Timeout
 | |
| 
 | |
| import swift.common.db
 | |
| from swift.container.server import DATADIR
 | |
| from swift.common.bufferedhttp import http_connect
 | |
| from swift.common.db import ContainerBroker
 | |
| from swift.common.exceptions import ConnectionTimeout
 | |
| from swift.common.ring import Ring
 | |
| from swift.common.utils import get_logger, TRUE_VALUES
 | |
| from swift.common.daemon import Daemon
 | |
| from swift.common.http import is_success, HTTP_INTERNAL_SERVER_ERROR
 | |
| 
 | |
| 
 | |
class ContainerUpdater(Daemon):
    """Update container information in account listings."""

    def __init__(self, conf):
        """
        :param conf: configuration mapping, typically the
                     [container-updater] section of container-server.conf
        """
        self.conf = conf
        self.logger = get_logger(conf, log_route='container-updater')
        self.devices = conf.get('devices', '/srv/node')
        # Use the shared TRUE_VALUES set so boolean config parsing is
        # consistent with db_preallocation below and the rest of Swift
        # (TRUE_VALUES has the same members as the old inline tuple).
        self.mount_check = \
            conf.get('mount_check', 'true').lower() in TRUE_VALUES
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        # Seconds between full sweeps in run_forever()
        self.interval = int(conf.get('interval', 300))
        # Loaded lazily by get_account_ring()
        self.account_ring = None
        # Maximum number of forked sweep workers running at once
        self.concurrency = int(conf.get('concurrency', 4))
        # Sleep between container DBs so a sweep doesn't saturate the node
        self.slowdown = float(conf.get('slowdown', 0.01))
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        # Per-sweep counters; reset by each forked worker and run_once()
        self.no_changes = 0
        self.successes = 0
        self.failures = 0
        # account name -> unix time until which updates are suppressed
        self.account_suppressions = {}
        self.account_suppression_time = \
            float(conf.get('account_suppression_time', 60))
        # In a forked child, an open file where new suppressions are
        # recorded for the parent to merge (see run_forever)
        self.new_account_suppressions = None
        swift.common.db.DB_PREALLOCATION = \
            conf.get('db_preallocation', 't').lower() in TRUE_VALUES
 | |
|     def get_account_ring(self):
 | |
|         """Get the account ring.  Load it if it hasn't been yet."""
 | |
|         if not self.account_ring:
 | |
|             self.account_ring = Ring(self.swift_dir, ring_name='account')
 | |
|         return self.account_ring
 | |
| 
 | |
|     def get_paths(self):
 | |
|         """
 | |
|         Get paths to all of the partitions on each drive to be processed.
 | |
| 
 | |
|         :returns: a list of paths
 | |
|         """
 | |
|         paths = []
 | |
|         for device in os.listdir(self.devices):
 | |
|             dev_path = os.path.join(self.devices, device)
 | |
|             if self.mount_check and not os.path.ismount(dev_path):
 | |
|                 self.logger.warn(_('%s is not mounted'), device)
 | |
|                 continue
 | |
|             con_path = os.path.join(dev_path, DATADIR)
 | |
|             if not os.path.exists(con_path):
 | |
|                 continue
 | |
|             for partition in os.listdir(con_path):
 | |
|                 paths.append(os.path.join(con_path, partition))
 | |
|         shuffle(paths)
 | |
|         return paths
 | |
| 
 | |
|     def _load_suppressions(self, filename):
 | |
|         try:
 | |
|             with open(filename, 'r') as tmpfile:
 | |
|                 for line in tmpfile:
 | |
|                     account, until = line.split()
 | |
|                     until = float(until)
 | |
|                     self.account_suppressions[account] = until
 | |
|         except Exception:
 | |
|             self.logger.exception(
 | |
|                 _('ERROR with loading suppressions from %s: ') % filename)
 | |
|         finally:
 | |
|             os.unlink(filename)
 | |
| 
 | |
    def run_forever(self, *args, **kwargs):
        """
        Run the updater continuously: fork up to self.concurrency worker
        processes per sweep, one per partition path, and sleep between
        sweeps.
        """
        # Stagger startup so updaters across the cluster don't sweep in
        # lockstep.
        time.sleep(random() * self.interval)
        while True:
            self.logger.info(_('Begin container update sweep'))
            begin = time.time()
            now = time.time()
            # Drop suppressions whose "until" time has passed.
            expired_suppressions = \
               [a for a, u in self.account_suppressions.iteritems() if u < now]
            for account in expired_suppressions:
                del self.account_suppressions[account]
            # Maps child pid -> temp file where that child records any new
            # account suppressions for the parent to merge after it exits.
            pid2filename = {}
            # read from account ring to ensure it's fresh
            self.get_account_ring().get_nodes('')
            for path in self.get_paths():
                # Throttle: reap a finished child before forking beyond
                # self.concurrency workers.
                while len(pid2filename) >= self.concurrency:
                    pid = os.wait()[0]
                    try:
                        self._load_suppressions(pid2filename[pid])
                    finally:
                        del pid2filename[pid]
                fd, tmpfilename = mkstemp()
                os.close(fd)
                pid = os.fork()
                if pid:
                    # Parent: remember where this child writes suppressions.
                    pid2filename[pid] = tmpfilename
                else:
                    # Child: restore default SIGTERM handling, green the
                    # socket module for eventlet, sweep exactly one
                    # partition path, then exit without returning.
                    signal.signal(signal.SIGTERM, signal.SIG_DFL)
                    patcher.monkey_patch(all=False, socket=True)
                    self.no_changes = 0
                    self.successes = 0
                    self.failures = 0
                    self.new_account_suppressions = open(tmpfilename, 'w')
                    forkbegin = time.time()
                    self.container_sweep(path)
                    elapsed = time.time() - forkbegin
                    self.logger.debug(
                        _('Container update sweep of %(path)s completed: '
                          '%(elapsed).02fs, %(success)s successes, %(fail)s '
                          'failures, %(no_change)s with no changes'),
                        {'path': path, 'elapsed': elapsed,
                         'success': self.successes, 'fail': self.failures,
                         'no_change': self.no_changes})
                    sys.exit()
            # Reap any remaining children and merge their suppressions.
            while pid2filename:
                pid = os.wait()[0]
                try:
                    self._load_suppressions(pid2filename[pid])
                finally:
                    del pid2filename[pid]
            elapsed = time.time() - begin
            self.logger.info(_('Container update sweep completed: %.02fs'),
                             elapsed)
            if elapsed < self.interval:
                time.sleep(self.interval - elapsed)
 | |
|     def run_once(self, *args, **kwargs):
 | |
|         """
 | |
|         Run the updater once.
 | |
|         """
 | |
|         patcher.monkey_patch(all=False, socket=True)
 | |
|         self.logger.info(_('Begin container update single threaded sweep'))
 | |
|         begin = time.time()
 | |
|         self.no_changes = 0
 | |
|         self.successes = 0
 | |
|         self.failures = 0
 | |
|         for path in self.get_paths():
 | |
|             self.container_sweep(path)
 | |
|         elapsed = time.time() - begin
 | |
|         self.logger.info(_('Container update single threaded sweep completed: '
 | |
|             '%(elapsed).02fs, %(success)s successes, %(fail)s failures, '
 | |
|             '%(no_change)s with no changes'),
 | |
|             {'elapsed': elapsed, 'success': self.successes,
 | |
|              'fail': self.failures, 'no_change': self.no_changes})
 | |
| 
 | |
|     def container_sweep(self, path):
 | |
|         """
 | |
|         Walk the path looking for container DBs and process them.
 | |
| 
 | |
|         :param path: path to walk
 | |
|         """
 | |
|         for root, dirs, files in os.walk(path):
 | |
|             for file in files:
 | |
|                 if file.endswith('.db'):
 | |
|                     self.process_container(os.path.join(root, file))
 | |
|                     time.sleep(self.slowdown)
 | |
| 
 | |
    def process_container(self, dbfile):
        """
        Process a container, and update the information in the account.

        :param dbfile: container DB to process
        """
        start_time = time.time()
        broker = ContainerBroker(dbfile, logger=self.logger)
        info = broker.get_info()
        # Don't send updates if the container was auto-created since it
        # definitely doesn't have up to date statistics.
        if float(info['put_timestamp']) <= 0:
            return
        # Skip accounts whose updates recently failed; they are suppressed
        # until the recorded unix time.
        if self.account_suppressions.get(info['account'], 0) > time.time():
            return
        # Only report when something changed since the last report.
        if info['put_timestamp'] > info['reported_put_timestamp'] or \
                info['delete_timestamp'] > info['reported_delete_timestamp'] \
                or info['object_count'] != info['reported_object_count'] or \
                info['bytes_used'] != info['reported_bytes_used']:
            container = '/%s/%s' % (info['account'], info['container'])
            part, nodes = self.get_account_ring().get_nodes(info['account'])
            # Report to every account replica concurrently via greenthreads.
            events = [spawn(self.container_report, node, part, container,
                            info['put_timestamp'], info['delete_timestamp'],
                            info['object_count'], info['bytes_used'])
                      for node in nodes]
            successes = 0
            failures = 0
            for event in events:
                if is_success(event.wait()):
                    successes += 1
                else:
                    failures += 1
            # A majority of replicas must accept for the report to count
            # as sent.
            if successes > failures:
                self.logger.increment('successes')
                self.successes += 1
                self.logger.debug(
                    _('Update report sent for %(container)s %(dbfile)s'),
                    {'container': container, 'dbfile': dbfile})
                broker.reported(info['put_timestamp'],
                                info['delete_timestamp'], info['object_count'],
                                info['bytes_used'])
            else:
                self.logger.increment('failures')
                self.failures += 1
                self.logger.debug(
                    _('Update report failed for %(container)s %(dbfile)s'),
                    {'container': container, 'dbfile': dbfile})
                # Suppress further updates for this account for a while; a
                # forked child also records the suppression to its temp
                # file for the parent process to merge (see run_forever).
                self.account_suppressions[info['account']] = until = \
                    time.time() + self.account_suppression_time
                if self.new_account_suppressions:
                    print >>self.new_account_suppressions, \
                        info['account'], until
            # Only track timing data for attempted updates:
            self.logger.timing_since('timing', start_time)
        else:
            self.logger.increment('no_changes')
            self.no_changes += 1
 | |
    def container_report(self, node, part, container, put_timestamp,
                         delete_timestamp, count, bytes):
        """
        Report container info to an account server.

        :param node: node dictionary from the account ring
        :param part: partition the account is on
        :param container: container name
        :param put_timestamp: put timestamp
        :param delete_timestamp: delete timestamp
        :param count: object count in the container
        :param bytes: bytes used in the container
        :returns: HTTP status of the account server's response, or
                  HTTP_INTERNAL_SERVER_ERROR if the request failed or
                  timed out
        """
        # Bound only the connection establishment with conn_timeout.
        with ConnectionTimeout(self.conn_timeout):
            try:
                conn = http_connect(
                    node['ip'], node['port'], node['device'], part,
                    'PUT', container,
                    headers={'X-Put-Timestamp': put_timestamp,
                             'X-Delete-Timestamp': delete_timestamp,
                             'X-Object-Count': count,
                             'X-Bytes-Used': bytes,
                             'X-Account-Override-Deleted': 'yes'})
            except (Exception, Timeout):
                self.logger.exception(_('ERROR account update failed with '
                    '%(ip)s:%(port)s/%(device)s (will retry later): '), node)
                return HTTP_INTERNAL_SERVER_ERROR
        # Separately bound the response wait with node_timeout.
        with Timeout(self.node_timeout):
            try:
                resp = conn.getresponse()
                resp.read()
                return resp.status
            except (Exception, Timeout):
                # Only log full tracebacks at DEBUG; these failures are
                # retried later and would otherwise be noisy.
                if self.logger.getEffectiveLevel() <= logging.DEBUG:
                    self.logger.exception(
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                return HTTP_INTERNAL_SERVER_ERROR
 |