From e28e1a2f41f38bb300884b25c8ea6b333da9488b Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Tue, 26 Jan 2021 10:54:45 -0500 Subject: [PATCH] Move trace methods from utils to volume_utils This continues moving code only used by volume drivers or the volume manager to volume_utils. The result of this is that fewer drivers import cinder.utils (and everything it imports), and less code is loaded by non-volume cinder code (api service, cinder-manage, etc.) Closes-Bug: #1912278 Change-Id: I5aa82cf0de4c70f53ddc998bf25e64f4ad8f5774 --- cinder/tests/fake_driver.py | 43 ++- cinder/tests/unit/test_utils.py | 325 +---------------- cinder/tests/unit/test_volume_utils.py | 326 ++++++++++++++++++ cinder/utils.py | 163 --------- cinder/volume/driver.py | 3 +- cinder/volume/drivers/ceph/rbd_iscsi.py | 15 +- cinder/volume/drivers/datera/datera_iscsi.py | 6 +- .../volume/drivers/dell_emc/unity/adapter.py | 12 +- cinder/volume/drivers/hitachi/hbsd_fc.py | 42 +-- cinder/volume/drivers/hitachi/hbsd_iscsi.py | 42 +-- cinder/volume/drivers/hpe/hpe_3par_base.py | 56 +-- cinder/volume/drivers/hpe/hpe_3par_fc.py | 6 +- cinder/volume/drivers/hpe/hpe_3par_iscsi.py | 9 +- .../drivers/ibm/storwize_svc/replication.py | 3 +- .../ibm/storwize_svc/storwize_svc_common.py | 20 +- .../drivers/inspur/as13000/as13000_driver.py | 62 ++-- .../inspur/instorage/instorage_common.py | 24 +- .../drivers/inspur/instorage/instorage_fc.py | 6 +- .../inspur/instorage/instorage_iscsi.py | 10 +- .../drivers/inspur/instorage/replication.py | 5 +- .../drivers/kaminario/kaminario_common.py | 50 +-- .../volume/drivers/kaminario/kaminario_fc.py | 10 +- .../drivers/kaminario/kaminario_iscsi.py | 11 +- cinder/volume/drivers/macrosan/driver.py | 32 +- cinder/volume/drivers/nec/volume_helper.py | 3 +- .../drivers/netapp/dataontap/block_base.py | 3 +- .../drivers/netapp/dataontap/block_cmode.py | 3 +- .../drivers/netapp/dataontap/client/api.py | 4 +- .../netapp/dataontap/client/client_base.py | 3 +- .../netapp/dataontap/client/client_cmode.py | 5 +- .../drivers/netapp/dataontap/nfs_base.py | 2 +- .../drivers/netapp/dataontap/nfs_cmode.py | 3 +- .../drivers/netapp/dataontap/utils/utils.py | 4 +- cinder/volume/drivers/remotefs.py | 6 +- cinder/volume/drivers/rsd.py | 23 +- cinder/volume/drivers/stx/client.py | 4 +- cinder/volume/drivers/stx/common.py | 4 +- .../drivers/toyou/acs5000/acs5000_common.py | 4 +- cinder/volume/volume_utils.py | 167 +++++++++ 39 files changed, 761 insertions(+), 758 deletions(-) diff --git a/cinder/tests/fake_driver.py b/cinder/tests/fake_driver.py index 03bdc3f0362..547c405ae5f 100644 --- a/cinder/tests/fake_driver.py +++ b/cinder/tests/fake_driver.py @@ -18,7 +18,6 @@ from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit.brick import fake_lvm -from cinder import utils from cinder.volume import driver from cinder.volume.drivers import lvm from cinder.volume import volume_utils @@ -38,12 +37,12 @@ class FakeLoggingVolumeDriver(lvm.LVMVolumeDriver): None, 'default', self.fake_execute) - @utils.trace_method + @volume_utils.trace_method def check_for_setup_error(self): """No setup necessary in fake mode.""" pass - @utils.trace_method + @volume_utils.trace_method def create_volume(self, volume): """Creates a volume.""" super(FakeLoggingVolumeDriver, self).create_volume(volume) @@ -60,51 +59,51 @@ class FakeLoggingVolumeDriver(lvm.LVMVolumeDriver): if model_update: return model_update - @utils.trace_method + @volume_utils.trace_method def delete_volume(self, volume): 
pass - @utils.trace_method + @volume_utils.trace_method def create_snapshot(self, snapshot): pass - @utils.trace_method + @volume_utils.trace_method def delete_snapshot(self, snapshot): pass - @utils.trace_method + @volume_utils.trace_method def ensure_export(self, context, volume): pass - @utils.trace_method + @volume_utils.trace_method def create_export(self, context, volume, connector): pass - @utils.trace_method + @volume_utils.trace_method def remove_export(self, context, volume): pass - @utils.trace_method + @volume_utils.trace_method def create_export_snapshot(self, context, snapshot): pass - @utils.trace_method + @volume_utils.trace_method def remove_export_snapshot(self, context, snapshot): pass - @utils.trace_method + @volume_utils.trace_method def terminate_connection_snapshot(self, snapshot, connector): pass - @utils.trace_method + @volume_utils.trace_method def create_cloned_volume(self, volume, src_vol): pass - @utils.trace_method + @volume_utils.trace_method def create_volume_from_snapshot(self, volume, snapshot): pass - @utils.trace_method + @volume_utils.trace_method def initialize_connection(self, volume, connector): # NOTE(thangp): There are several places in the core cinder code where # the volume passed through is a dict and not an oslo_versionedobject. @@ -126,18 +125,18 @@ class FakeLoggingVolumeDriver(lvm.LVMVolumeDriver): return {'driver_volume_type': 'iscsi', 'data': {'access_mode': access_mode}} - @utils.trace_method + @volume_utils.trace_method def initialize_connection_snapshot(self, snapshot, connector): return { 'driver_volume_type': 'iscsi', } - @utils.trace_method + @volume_utils.trace_method def terminate_connection(self, volume, connector, **kwargs): pass # Replication Group (Tiramisu) - @utils.trace_method + @volume_utils.trace_method def enable_replication(self, context, group, volumes): """Enables replication for a group and volumes in the group.""" model_update = { @@ -152,7 +151,7 @@ class FakeLoggingVolumeDriver(lvm.LVMVolumeDriver): return model_update, volume_model_updates # Replication Group (Tiramisu) - @utils.trace_method + @volume_utils.trace_method def disable_replication(self, context, group, volumes): """Disables replication for a group and volumes in the group.""" model_update = { @@ -167,7 +166,7 @@ class FakeLoggingVolumeDriver(lvm.LVMVolumeDriver): return model_update, volume_model_updates # Replication Group (Tiramisu) - @utils.trace_method + @volume_utils.trace_method def failover_replication(self, context, group, volumes, secondary_backend_id=None): """Fails over replication for a group and volumes in the group.""" @@ -183,7 +182,7 @@ class FakeLoggingVolumeDriver(lvm.LVMVolumeDriver): return model_update, volume_model_updates # Replication Group (Tiramisu) - @utils.trace_method + @volume_utils.trace_method def create_group(self, context, group): """Creates a group.""" model_update = super(FakeLoggingVolumeDriver, self).create_group( @@ -424,6 +423,6 @@ class FakeGateDriver(lvm.LVMVolumeDriver): class FakeHAReplicatedLoggingVolumeDriver(FakeLoggingVolumeDriver): SUPPORTS_ACTIVE_ACTIVE = True - @utils.trace_method + @volume_utils.trace_method def failover_completed(self, context, active_backend_id=None): pass diff --git a/cinder/tests/unit/test_utils.py b/cinder/tests/unit/test_utils.py index fb2417f9f09..ec1b63da993 100644 --- a/cinder/tests/unit/test_utils.py +++ b/cinder/tests/unit/test_utils.py @@ -13,11 +13,9 @@ # under the License. 
import datetime -import functools import json import os import sys -import time from unittest import mock import ddt @@ -1060,328 +1058,7 @@ class TestRetryDecorator(test.TestCase): @ddt.ddt -class LogTracingTestCase(test.TestCase): - - def test_utils_setup_tracing(self): - self.mock_object(utils, 'LOG') - - utils.setup_tracing(None) - self.assertFalse(utils.TRACE_API) - self.assertFalse(utils.TRACE_METHOD) - self.assertEqual(0, utils.LOG.warning.call_count) - - utils.setup_tracing(['method']) - self.assertFalse(utils.TRACE_API) - self.assertTrue(utils.TRACE_METHOD) - self.assertEqual(0, utils.LOG.warning.call_count) - - utils.setup_tracing(['method', 'api']) - self.assertTrue(utils.TRACE_API) - self.assertTrue(utils.TRACE_METHOD) - self.assertEqual(0, utils.LOG.warning.call_count) - - def test_utils_setup_tracing_invalid_key(self): - self.mock_object(utils, 'LOG') - - utils.setup_tracing(['fake']) - - self.assertFalse(utils.TRACE_API) - self.assertFalse(utils.TRACE_METHOD) - self.assertEqual(1, utils.LOG.warning.call_count) - - def test_utils_setup_tracing_valid_and_invalid_key(self): - self.mock_object(utils, 'LOG') - - utils.setup_tracing(['method', 'fake']) - - self.assertFalse(utils.TRACE_API) - self.assertTrue(utils.TRACE_METHOD) - self.assertEqual(1, utils.LOG.warning.call_count) - - def test_trace_no_tracing(self): - self.mock_object(utils, 'LOG') - - @utils.trace_method - def _trace_test_method(*args, **kwargs): - return 'OK' - - utils.setup_tracing(None) - - result = _trace_test_method() - - self.assertEqual('OK', result) - self.assertEqual(0, utils.LOG.debug.call_count) - - def test_utils_trace_method(self): - self.mock_object(utils, 'LOG') - - @utils.trace_method - def _trace_test_method(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['method']) - - result = _trace_test_method() - self.assertEqual('OK', result) - self.assertEqual(2, utils.LOG.debug.call_count) - - def test_utils_trace_api(self): - self.mock_object(utils, 'LOG') - - @utils.trace_api - def _trace_test_api(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['api']) - - result = _trace_test_api() - self.assertEqual('OK', result) - self.assertEqual(2, utils.LOG.debug.call_count) - - def test_utils_trace_api_filtered(self): - self.mock_object(utils, 'LOG') - - def filter_func(all_args): - return False - - @utils.trace_api(filter_function=filter_func) - def _trace_test_api(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['api']) - - result = _trace_test_api() - self.assertEqual('OK', result) - self.assertEqual(0, utils.LOG.debug.call_count) - - def test_utils_trace_filtered(self): - self.mock_object(utils, 'LOG') - - def filter_func(all_args): - return False - - @utils.trace(filter_function=filter_func) - def _trace_test(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['api']) - - result = _trace_test() - self.assertEqual('OK', result) - self.assertEqual(0, utils.LOG.debug.call_count) - - def test_utils_trace_method_default_logger(self): - mock_log = self.mock_object(utils, 'LOG') - - @utils.trace_method - def _trace_test_method_custom_logger(*args, **kwargs): - return 'OK' - utils.setup_tracing(['method']) - - result = _trace_test_method_custom_logger() - - self.assertEqual('OK', result) - self.assertEqual(2, mock_log.debug.call_count) - - def test_utils_trace_method_inner_decorator(self): - mock_logging = self.mock_object(utils, 'logging') - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - mock_logging.getLogger = mock.Mock(return_value=mock_log) - - def 
_test_decorator(f): - def blah(*args, **kwargs): - return f(*args, **kwargs) - return blah - - @_test_decorator - @utils.trace_method - def _trace_test_method(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['method']) - - result = _trace_test_method(self) - - self.assertEqual('OK', result) - self.assertEqual(2, mock_log.debug.call_count) - # Ensure the correct function name was logged - for call in mock_log.debug.call_args_list: - self.assertIn('_trace_test_method', str(call)) - self.assertNotIn('blah', str(call)) - - def test_utils_trace_method_outer_decorator(self): - mock_logging = self.mock_object(utils, 'logging') - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - mock_logging.getLogger = mock.Mock(return_value=mock_log) - - def _test_decorator(f): - def blah(*args, **kwargs): - return f(*args, **kwargs) - return blah - - @utils.trace_method - @_test_decorator - def _trace_test_method(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['method']) - - result = _trace_test_method(self) - - self.assertEqual('OK', result) - self.assertEqual(2, mock_log.debug.call_count) - # Ensure the incorrect function name was logged - for call in mock_log.debug.call_args_list: - self.assertNotIn('_trace_test_method', str(call)) - self.assertIn('blah', str(call)) - - def test_utils_trace_method_outer_decorator_with_functools(self): - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - self.mock_object(utils.logging, 'getLogger', mock_log) - mock_log = self.mock_object(utils, 'LOG') - - def _test_decorator(f): - @functools.wraps(f) - def wraps(*args, **kwargs): - return f(*args, **kwargs) - return wraps - - @utils.trace_method - @_test_decorator - def _trace_test_method(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['method']) - - result = _trace_test_method() - - self.assertEqual('OK', result) - self.assertEqual(2, mock_log.debug.call_count) - # Ensure the incorrect function name was logged - for call in mock_log.debug.call_args_list: - self.assertIn('_trace_test_method', str(call)) - self.assertNotIn('wraps', str(call)) - - def test_utils_trace_method_with_exception(self): - self.LOG = self.mock_object(utils, 'LOG') - - @utils.trace_method - def _trace_test_method(*args, **kwargs): - raise exception.APITimeout('test message') - - utils.setup_tracing(['method']) - - self.assertRaises(exception.APITimeout, _trace_test_method) - - exception_log = self.LOG.debug.call_args_list[1] - self.assertIn('exception', str(exception_log)) - self.assertIn('test message', str(exception_log)) - - def test_utils_trace_method_with_time(self): - mock_logging = self.mock_object(utils, 'logging') - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - mock_logging.getLogger = mock.Mock(return_value=mock_log) - - mock_time = mock.Mock(side_effect=[3.1, 6]) - self.mock_object(time, 'time', mock_time) - - @utils.trace_method - def _trace_test_method(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['method']) - - result = _trace_test_method(self) - - self.assertEqual('OK', result) - return_log = mock_log.debug.call_args_list[1] - self.assertIn('2900', str(return_log)) - - def test_utils_trace_wrapper_class(self): - mock_logging = self.mock_object(utils, 'logging') - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - mock_logging.getLogger = mock.Mock(return_value=mock_log) - - utils.setup_tracing(['method']) - - class MyClass(object, metaclass=utils.TraceWrapperMetaclass): - def trace_test_method(self): - return 'OK' - - test_class = 
MyClass() - result = test_class.trace_test_method() - - self.assertEqual('OK', result) - self.assertEqual(2, mock_log.debug.call_count) - - def test_utils_trace_method_with_password_dict(self): - mock_logging = self.mock_object(utils, 'logging') - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - mock_logging.getLogger = mock.Mock(return_value=mock_log) - - @utils.trace_method - def _trace_test_method(*args, **kwargs): - return {'something': 'test', - 'password': 'Now you see me'} - - utils.setup_tracing(['method']) - result = _trace_test_method(self) - expected_unmasked_dict = {'something': 'test', - 'password': 'Now you see me'} - - self.assertEqual(expected_unmasked_dict, result) - self.assertEqual(2, mock_log.debug.call_count) - self.assertIn("'password': '***'", - str(mock_log.debug.call_args_list[1])) - - def test_utils_trace_method_with_password_str(self): - mock_logging = self.mock_object(utils, 'logging') - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - mock_logging.getLogger = mock.Mock(return_value=mock_log) - - @utils.trace_method - def _trace_test_method(*args, **kwargs): - return "'adminPass': 'Now you see me'" - - utils.setup_tracing(['method']) - result = _trace_test_method(self) - expected_unmasked_str = "'adminPass': 'Now you see me'" - - self.assertEqual(expected_unmasked_str, result) - self.assertEqual(2, mock_log.debug.call_count) - self.assertIn("'adminPass': '***'", - str(mock_log.debug.call_args_list[1])) - - def test_utils_trace_method_with_password_in_formal_params(self): - mock_logging = self.mock_object(utils, 'logging') - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - mock_logging.getLogger = mock.Mock(return_value=mock_log) - - @utils.trace - def _trace_test_method(*args, **kwargs): - self.assertEqual('verybadpass', - kwargs['test_args']['data']['password']) - pass - - test_args = { - 'data': { - 'password': 'verybadpass' - } - } - _trace_test_method(self, test_args=test_args) - - self.assertEqual(2, mock_log.debug.call_count) - self.assertIn("'password': '***'", - str(mock_log.debug.call_args_list[0])) - +class TestCalculateVirtualFree(test.TestCase): @ddt.data( {'total': 30.01, 'free': 28.01, 'provisioned': 2.0, 'max_ratio': 1.0, 'thin_support': False, 'thick_support': True, diff --git a/cinder/tests/unit/test_volume_utils.py b/cinder/tests/unit/test_volume_utils.py index 504171da8f8..d859e4a1437 100644 --- a/cinder/tests/unit/test_volume_utils.py +++ b/cinder/tests/unit/test_volume_utils.py @@ -17,7 +17,9 @@ import datetime +import functools import io +import time from unittest import mock from castellan import key_manager @@ -1229,3 +1231,327 @@ class VolumeUtilsTestCase(test.TestCase): volume_utils.get_backend_configuration('backendA') mock_configuration.assert_called_with(driver.volume_opts, config_group='backendA') + + +@ddt.ddt +class LogTracingTestCase(test.TestCase): + + def test_utils_setup_tracing(self): + self.mock_object(volume_utils, 'LOG') + + volume_utils.setup_tracing(None) + self.assertFalse(volume_utils.TRACE_API) + self.assertFalse(volume_utils.TRACE_METHOD) + self.assertEqual(0, volume_utils.LOG.warning.call_count) + + volume_utils.setup_tracing(['method']) + self.assertFalse(volume_utils.TRACE_API) + self.assertTrue(volume_utils.TRACE_METHOD) + self.assertEqual(0, volume_utils.LOG.warning.call_count) + + volume_utils.setup_tracing(['method', 'api']) + self.assertTrue(volume_utils.TRACE_API) + self.assertTrue(volume_utils.TRACE_METHOD) + self.assertEqual(0, 
volume_utils.LOG.warning.call_count) + + def test_utils_setup_tracing_invalid_key(self): + self.mock_object(volume_utils, 'LOG') + + volume_utils.setup_tracing(['fake']) + + self.assertFalse(volume_utils.TRACE_API) + self.assertFalse(volume_utils.TRACE_METHOD) + self.assertEqual(1, volume_utils.LOG.warning.call_count) + + def test_utils_setup_tracing_valid_and_invalid_key(self): + self.mock_object(volume_utils, 'LOG') + + volume_utils.setup_tracing(['method', 'fake']) + + self.assertFalse(volume_utils.TRACE_API) + self.assertTrue(volume_utils.TRACE_METHOD) + self.assertEqual(1, volume_utils.LOG.warning.call_count) + + def test_trace_no_tracing(self): + self.mock_object(volume_utils, 'LOG') + + @volume_utils.trace_method + def _trace_test_method(*args, **kwargs): + return 'OK' + + volume_utils.setup_tracing(None) + + result = _trace_test_method() + + self.assertEqual('OK', result) + self.assertEqual(0, volume_utils.LOG.debug.call_count) + + def test_utils_trace_method(self): + self.mock_object(volume_utils, 'LOG') + + @volume_utils.trace_method + def _trace_test_method(*args, **kwargs): + return 'OK' + + volume_utils.setup_tracing(['method']) + + result = _trace_test_method() + self.assertEqual('OK', result) + self.assertEqual(2, volume_utils.LOG.debug.call_count) + + def test_utils_trace_api(self): + self.mock_object(volume_utils, 'LOG') + + @volume_utils.trace_api + def _trace_test_api(*args, **kwargs): + return 'OK' + + volume_utils.setup_tracing(['api']) + + result = _trace_test_api() + self.assertEqual('OK', result) + self.assertEqual(2, volume_utils.LOG.debug.call_count) + + def test_utils_trace_api_filtered(self): + self.mock_object(volume_utils, 'LOG') + + def filter_func(all_args): + return False + + @volume_utils.trace_api(filter_function=filter_func) + def _trace_test_api(*args, **kwargs): + return 'OK' + + volume_utils.setup_tracing(['api']) + + result = _trace_test_api() + self.assertEqual('OK', result) + self.assertEqual(0, volume_utils.LOG.debug.call_count) + + def test_utils_trace_filtered(self): + self.mock_object(volume_utils, 'LOG') + + def filter_func(all_args): + return False + + @volume_utils.trace(filter_function=filter_func) + def _trace_test(*args, **kwargs): + return 'OK' + + volume_utils.setup_tracing(['api']) + + result = _trace_test() + self.assertEqual('OK', result) + self.assertEqual(0, volume_utils.LOG.debug.call_count) + + def test_utils_trace_method_default_logger(self): + mock_log = self.mock_object(volume_utils, 'LOG') + + @volume_utils.trace_method + def _trace_test_method_custom_logger(*args, **kwargs): + return 'OK' + volume_utils.setup_tracing(['method']) + + result = _trace_test_method_custom_logger() + + self.assertEqual('OK', result) + self.assertEqual(2, mock_log.debug.call_count) + + def test_utils_trace_method_inner_decorator(self): + mock_logging = self.mock_object(volume_utils, 'logging') + mock_log = mock.Mock() + mock_log.isEnabledFor = lambda x: True + mock_logging.getLogger = mock.Mock(return_value=mock_log) + + def _test_decorator(f): + def blah(*args, **kwargs): + return f(*args, **kwargs) + return blah + + @_test_decorator + @volume_utils.trace_method + def _trace_test_method(*args, **kwargs): + return 'OK' + + volume_utils.setup_tracing(['method']) + + result = _trace_test_method(self) + + self.assertEqual('OK', result) + self.assertEqual(2, mock_log.debug.call_count) + # Ensure the correct function name was logged + for call in mock_log.debug.call_args_list: + self.assertIn('_trace_test_method', str(call)) + self.assertNotIn('blah', 
str(call)) + + def test_utils_trace_method_outer_decorator(self): + mock_logging = self.mock_object(volume_utils, 'logging') + mock_log = mock.Mock() + mock_log.isEnabledFor = lambda x: True + mock_logging.getLogger = mock.Mock(return_value=mock_log) + + def _test_decorator(f): + def blah(*args, **kwargs): + return f(*args, **kwargs) + return blah + + @volume_utils.trace_method + @_test_decorator + def _trace_test_method(*args, **kwargs): + return 'OK' + + volume_utils.setup_tracing(['method']) + + result = _trace_test_method(self) + + self.assertEqual('OK', result) + self.assertEqual(2, mock_log.debug.call_count) + # Ensure the incorrect function name was logged + for call in mock_log.debug.call_args_list: + self.assertNotIn('_trace_test_method', str(call)) + self.assertIn('blah', str(call)) + + def test_utils_trace_method_outer_decorator_with_functools(self): + mock_log = mock.Mock() + mock_log.isEnabledFor = lambda x: True + self.mock_object(utils.logging, 'getLogger', mock_log) + mock_log = self.mock_object(volume_utils, 'LOG') + + def _test_decorator(f): + @functools.wraps(f) + def wraps(*args, **kwargs): + return f(*args, **kwargs) + return wraps + + @volume_utils.trace_method + @_test_decorator + def _trace_test_method(*args, **kwargs): + return 'OK' + + volume_utils.setup_tracing(['method']) + + result = _trace_test_method() + + self.assertEqual('OK', result) + self.assertEqual(2, mock_log.debug.call_count) + # Ensure the incorrect function name was logged + for call in mock_log.debug.call_args_list: + self.assertIn('_trace_test_method', str(call)) + self.assertNotIn('wraps', str(call)) + + def test_utils_trace_method_with_exception(self): + self.LOG = self.mock_object(volume_utils, 'LOG') + + @volume_utils.trace_method + def _trace_test_method(*args, **kwargs): + raise exception.APITimeout('test message') + + volume_utils.setup_tracing(['method']) + + self.assertRaises(exception.APITimeout, _trace_test_method) + + exception_log = self.LOG.debug.call_args_list[1] + self.assertIn('exception', str(exception_log)) + self.assertIn('test message', str(exception_log)) + + def test_utils_trace_method_with_time(self): + mock_logging = self.mock_object(volume_utils, 'logging') + mock_log = mock.Mock() + mock_log.isEnabledFor = lambda x: True + mock_logging.getLogger = mock.Mock(return_value=mock_log) + + mock_time = mock.Mock(side_effect=[3.1, 6]) + self.mock_object(time, 'time', mock_time) + + @volume_utils.trace_method + def _trace_test_method(*args, **kwargs): + return 'OK' + + volume_utils.setup_tracing(['method']) + + result = _trace_test_method(self) + + self.assertEqual('OK', result) + return_log = mock_log.debug.call_args_list[1] + self.assertIn('2900', str(return_log)) + + def test_utils_trace_wrapper_class(self): + mock_logging = self.mock_object(volume_utils, 'logging') + mock_log = mock.Mock() + mock_log.isEnabledFor = lambda x: True + mock_logging.getLogger = mock.Mock(return_value=mock_log) + + volume_utils.setup_tracing(['method']) + + class MyClass(object, metaclass=volume_utils.TraceWrapperMetaclass): + def trace_test_method(self): + return 'OK' + + test_class = MyClass() + result = test_class.trace_test_method() + + self.assertEqual('OK', result) + self.assertEqual(2, mock_log.debug.call_count) + + def test_utils_trace_method_with_password_dict(self): + mock_logging = self.mock_object(volume_utils, 'logging') + mock_log = mock.Mock() + mock_log.isEnabledFor = lambda x: True + mock_logging.getLogger = mock.Mock(return_value=mock_log) + + @volume_utils.trace_method + def 
_trace_test_method(*args, **kwargs): + return {'something': 'test', + 'password': 'Now you see me'} + + volume_utils.setup_tracing(['method']) + result = _trace_test_method(self) + expected_unmasked_dict = {'something': 'test', + 'password': 'Now you see me'} + + self.assertEqual(expected_unmasked_dict, result) + self.assertEqual(2, mock_log.debug.call_count) + self.assertIn("'password': '***'", + str(mock_log.debug.call_args_list[1])) + + def test_utils_trace_method_with_password_str(self): + mock_logging = self.mock_object(volume_utils, 'logging') + mock_log = mock.Mock() + mock_log.isEnabledFor = lambda x: True + mock_logging.getLogger = mock.Mock(return_value=mock_log) + + @volume_utils.trace_method + def _trace_test_method(*args, **kwargs): + return "'adminPass': 'Now you see me'" + + volume_utils.setup_tracing(['method']) + result = _trace_test_method(self) + expected_unmasked_str = "'adminPass': 'Now you see me'" + + self.assertEqual(expected_unmasked_str, result) + self.assertEqual(2, mock_log.debug.call_count) + self.assertIn("'adminPass': '***'", + str(mock_log.debug.call_args_list[1])) + + def test_utils_trace_method_with_password_in_formal_params(self): + mock_logging = self.mock_object(volume_utils, 'logging') + mock_log = mock.Mock() + mock_log.isEnabledFor = lambda x: True + mock_logging.getLogger = mock.Mock(return_value=mock_log) + + @volume_utils.trace + def _trace_test_method(*args, **kwargs): + self.assertEqual('verybadpass', + kwargs['test_args']['data']['password']) + pass + + test_args = { + 'data': { + 'password': 'verybadpass' + } + } + _trace_test_method(self, test_args=test_args) + + self.assertEqual(2, mock_log.debug.call_count) + self.assertIn("'password': '***'", + str(mock_log.debug.call_args_list[0])) diff --git a/cinder/utils.py b/cinder/utils.py index 4d807c7674d..20c530b668c 100644 --- a/cinder/utils.py +++ b/cinder/utils.py @@ -17,13 +17,11 @@ """Utilities and helper functions.""" -import abc from collections import OrderedDict import contextlib import datetime import functools import inspect -import logging as py_logging import math import multiprocessing import operator @@ -34,8 +32,6 @@ import shutil import stat import sys import tempfile -import time -import types import eventlet from eventlet import tpool @@ -56,9 +52,6 @@ CONF = cfg.CONF LOG = logging.getLogger(__name__) ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" -VALID_TRACE_FLAGS = {'method', 'api'} -TRACE_METHOD = False -TRACE_API = False INITIAL_AUTO_MOSR = 20 INFINITE_UNKNOWN_VALUES = ('infinite', 'unknown') @@ -629,162 +622,6 @@ def convert_str(text): return text -def trace_method(f): - """Decorates a function if TRACE_METHOD is true.""" - @functools.wraps(f) - def trace_method_logging_wrapper(*args, **kwargs): - if TRACE_METHOD: - return trace(f)(*args, **kwargs) - return f(*args, **kwargs) - return trace_method_logging_wrapper - - -def trace_api(*dec_args, **dec_kwargs): - """Decorates a function if TRACE_API is true.""" - - def _decorator(f): - @functools.wraps(f) - def trace_api_logging_wrapper(*args, **kwargs): - if TRACE_API: - return trace(f, *dec_args, **dec_kwargs)(*args, **kwargs) - return f(*args, **kwargs) - return trace_api_logging_wrapper - - if len(dec_args) == 0: - # filter_function is passed and args does not contain f - return _decorator - else: - # filter_function is not passed - return _decorator(dec_args[0]) - - -def trace(*dec_args, **dec_kwargs): - """Trace calls to the decorated function. 
- - This decorator should always be defined as the outermost decorator so it - is defined last. This is important so it does not interfere - with other decorators. - - Using this decorator on a function will cause its execution to be logged at - `DEBUG` level with arguments, return values, and exceptions. - - :returns: a function decorator - """ - - def _decorator(f): - - func_name = f.__name__ - - @functools.wraps(f) - def trace_logging_wrapper(*args, **kwargs): - filter_function = dec_kwargs.get('filter_function') - - if len(args) > 0: - maybe_self = args[0] - else: - maybe_self = kwargs.get('self', None) - - if maybe_self and hasattr(maybe_self, '__module__'): - logger = logging.getLogger(maybe_self.__module__) - else: - logger = LOG - - # NOTE(ameade): Don't bother going any further if DEBUG log level - # is not enabled for the logger. - if not logger.isEnabledFor(py_logging.DEBUG): - return f(*args, **kwargs) - - all_args = inspect.getcallargs(f, *args, **kwargs) - - pass_filter = filter_function is None or filter_function(all_args) - - if pass_filter: - logger.debug('==> %(func)s: call %(all_args)r', - {'func': func_name, - 'all_args': strutils.mask_password( - str(all_args))}) - - start_time = time.time() * 1000 - try: - result = f(*args, **kwargs) - except Exception as exc: - total_time = int(round(time.time() * 1000)) - start_time - logger.debug('<== %(func)s: exception (%(time)dms) %(exc)r', - {'func': func_name, - 'time': total_time, - 'exc': exc}) - raise - total_time = int(round(time.time() * 1000)) - start_time - - if isinstance(result, dict): - mask_result = strutils.mask_dict_password(result) - elif isinstance(result, str): - mask_result = strutils.mask_password(result) - else: - mask_result = result - - if pass_filter: - logger.debug('<== %(func)s: return (%(time)dms) %(result)r', - {'func': func_name, - 'time': total_time, - 'result': mask_result}) - return result - return trace_logging_wrapper - - if len(dec_args) == 0: - # filter_function is passed and args does not contain f - return _decorator - else: - # filter_function is not passed - return _decorator(dec_args[0]) - - -class TraceWrapperMetaclass(type): - """Metaclass that wraps all methods of a class with trace_method. - - This metaclass will cause every function inside of the class to be - decorated with the trace_method decorator. - - To use the metaclass you define a class like so: - class MyClass(object, metaclass=utils.TraceWrapperMetaclass): - """ - def __new__(meta, classname, bases, classDict): - newClassDict = {} - for attributeName, attribute in classDict.items(): - if isinstance(attribute, types.FunctionType): - # replace it with a wrapped version - attribute = functools.update_wrapper(trace_method(attribute), - attribute) - newClassDict[attributeName] = attribute - - return type.__new__(meta, classname, bases, newClassDict) - - -class TraceWrapperWithABCMetaclass(abc.ABCMeta, TraceWrapperMetaclass): - """Metaclass that wraps all methods of a class with trace.""" - pass - - -def setup_tracing(trace_flags): - """Set global variables for each trace flag. - - Sets variables TRACE_METHOD and TRACE_API, which represent - whether to log methods or api traces. 
- - :param trace_flags: a list of strings - """ - global TRACE_METHOD - global TRACE_API - try: - trace_flags = [flag.strip() for flag in trace_flags] - except TypeError: # Handle when trace_flags is None or a test mock - trace_flags = [] - for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS): - LOG.warning('Invalid trace flag: %s', invalid_flag) - TRACE_METHOD = 'method' in trace_flags - TRACE_API = 'api' in trace_flags - - def build_or_str(elements, str_format=None): """Builds a string of elements joined by 'or'. diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py index ebd2599aba6..d8b90e14a62 100644 --- a/cinder/volume/driver.py +++ b/cinder/volume/driver.py @@ -414,7 +414,8 @@ class BaseVD(object, metaclass=abc.ABCMeta): self.configuration.append_config_values(backup_opts) self.configuration.append_config_values(image_opts) self.configuration.append_config_values(fqdn_opts) - utils.setup_tracing(self.configuration.safe_get('trace_flags')) + volume_utils.setup_tracing( + self.configuration.safe_get('trace_flags')) # NOTE(geguileo): Don't allow to start if we are enabling # replication on a cluster service with a backend that doesn't diff --git a/cinder/volume/drivers/ceph/rbd_iscsi.py b/cinder/volume/drivers/ceph/rbd_iscsi.py index 3c99ce13e5e..43d24983b44 100644 --- a/cinder/volume/drivers/ceph/rbd_iscsi.py +++ b/cinder/volume/drivers/ceph/rbd_iscsi.py @@ -20,7 +20,6 @@ from oslo_utils import netutils from cinder import exception from cinder.i18n import _ from cinder import interface -from cinder import utils from cinder.volume import configuration from cinder.volume.drivers import rbd from cinder.volume import volume_utils @@ -243,7 +242,7 @@ class RBDISCSIDriver(rbd.RBDDriver): resp, disks = self.client.get_disks() return disks['disks'] - @utils.trace + @volume_utils.trace def create_disk(self, volume_name): """Register the volume with the iscsi gateways. 
@@ -265,7 +264,7 @@ class RBDISCSIDriver(rbd.RBDDriver): raise exception.VolumeBackendAPIException( data=ex.get_description()) - @utils.trace + @volume_utils.trace def register_disk(self, target_iqn, volume_name): """Register the disk with the target_iqn.""" lun_name = self._lun_name(volume_name) @@ -288,7 +287,7 @@ class RBDISCSIDriver(rbd.RBDDriver): raise exception.VolumeBackendAPIException( data=ex.get_description()) - @utils.trace + @volume_utils.trace def unregister_disk(self, target_iqn, volume_name): """Unregister the volume from the gateway.""" lun_name = self._lun_name(volume_name) @@ -300,7 +299,7 @@ class RBDISCSIDriver(rbd.RBDDriver): raise exception.VolumeBackendAPIException( data=ex.get_description()) - @utils.trace + @volume_utils.trace def export_disk(self, initiator_iqn, volume_name, iscsi_config): """Export a volume to an initiator.""" lun_name = self._lun_name(volume_name) @@ -323,7 +322,7 @@ class RBDISCSIDriver(rbd.RBDDriver): raise exception.VolumeBackendAPIException( data=ex.get_description()) - @utils.trace + @volume_utils.trace def unexport_disk(self, initiator_iqn, volume_name, iscsi_config): """Remove a volume from an initiator.""" lun_name = self._lun_name(volume_name) @@ -354,7 +353,7 @@ class RBDISCSIDriver(rbd.RBDDriver): luns = client['luns'] return luns - @utils.trace + @volume_utils.trace def initialize_connection(self, volume, connector): """Export a volume to a host.""" # create client @@ -480,7 +479,7 @@ class RBDISCSIDriver(rbd.RBDDriver): self._delete_disk(volume) - @utils.trace + @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): """Unexport the volume from the gateway.""" iscsi_config = self._get_config() diff --git a/cinder/volume/drivers/datera/datera_iscsi.py b/cinder/volume/drivers/datera/datera_iscsi.py index b7ea0c92b74..6ef4c213cdd 100644 --- a/cinder/volume/drivers/datera/datera_iscsi.py +++ b/cinder/volume/drivers/datera/datera_iscsi.py @@ -25,11 +25,11 @@ import six from cinder import exception from cinder.i18n import _ from cinder import interface -from cinder import utils import cinder.volume.drivers.datera.datera_api21 as api21 import cinder.volume.drivers.datera.datera_api22 as api22 import cinder.volume.drivers.datera.datera_common as datc from cinder.volume.drivers.san import san +from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @@ -103,7 +103,7 @@ CONF.import_opt('driver_use_ssl', 'cinder.volume.driver') CONF.register_opts(d_opts) -@six.add_metaclass(utils.TraceWrapperWithABCMetaclass) +@six.add_metaclass(volume_utils.TraceWrapperWithABCMetaclass) @interface.volumedriver class DateraDriver(san.SanISCSIDriver, api21.DateraApi, api22.DateraApi): """The OpenStack Datera iSCSI volume driver. 
@@ -211,7 +211,7 @@ class DateraDriver(san.SanISCSIDriver, api21.DateraApi, api22.DateraApi): self.datera_debug = self.configuration.datera_debug if self.datera_debug: - utils.setup_tracing(['method']) + volume_utils.setup_tracing(['method']) self.tenant_id = self.configuration.datera_tenant_id if self.tenant_id is None: self.tenant_id = '' diff --git a/cinder/volume/drivers/dell_emc/unity/adapter.py b/cinder/volume/drivers/dell_emc/unity/adapter.py index a73e193b987..93a8eb7d95f 100644 --- a/cinder/volume/drivers/dell_emc/unity/adapter.py +++ b/cinder/volume/drivers/dell_emc/unity/adapter.py @@ -530,7 +530,7 @@ class CommonAdapter(object): } return conn_info - @cinder_utils.trace + @volume_utils.trace def initialize_connection(self, volume, connector): lun = self.client.get_lun(lun_id=self.get_lun_id(volume)) return self._initialize_connection(lun, connector, volume.id) @@ -578,7 +578,7 @@ class CommonAdapter(object): 'data': data, } - @cinder_utils.trace + @volume_utils.trace def terminate_connection(self, volume, connector): lun = self.client.get_lun(lun_id=self.get_lun_id(volume)) # None `connector` indicates force detach, then detach all even the @@ -952,17 +952,17 @@ class CommonAdapter(object): def get_pool_id_by_name(self, name): return self.client.get_pool_id_by_name(name=name) - @cinder_utils.trace + @volume_utils.trace def initialize_connection_snapshot(self, snapshot, connector): snap = self.client.get_snap(snapshot.name) return self._initialize_connection(snap, connector, snapshot.id) - @cinder_utils.trace + @volume_utils.trace def terminate_connection_snapshot(self, snapshot, connector): snap = self.client.get_snap(snapshot.name) return self._terminate_connection(snap, connector) - @cinder_utils.trace + @volume_utils.trace def restore_snapshot(self, volume, snapshot): return self.client.restore_snapshot(snapshot.name) @@ -1265,7 +1265,7 @@ class CommonAdapter(object): """The failover only happens manually, no need to update the status.""" return [], [] - @cinder_utils.trace + @volume_utils.trace def failover(self, volumes, secondary_id=None, groups=None): # TODO(ryan) support group failover after group bp merges # https://review.opendev.org/#/c/574119/ diff --git a/cinder/volume/drivers/hitachi/hbsd_fc.py b/cinder/volume/drivers/hitachi/hbsd_fc.py index b7f990b9fec..0a0c11f7442 100644 --- a/cinder/volume/drivers/hitachi/hbsd_fc.py +++ b/cinder/volume/drivers/hitachi/hbsd_fc.py @@ -17,11 +17,11 @@ from oslo_config import cfg from cinder import interface -from cinder import utils as cinder_utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.hitachi import hbsd_common as common from cinder.volume.drivers.hitachi import hbsd_utils as utils +from cinder.volume import volume_utils FC_VOLUME_OPTS = [ cfg.BoolOpt( @@ -87,32 +87,32 @@ class HBSDFCDriver(driver.FibreChannelDriver): def check_for_setup_error(self): pass - @cinder_utils.trace + @volume_utils.trace def create_volume(self, volume): """Create a volume and return its properties.""" return self.common.create_volume(volume) - @cinder_utils.trace + @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot and return its properties.""" return self.common.create_volume_from_snapshot(volume, snapshot) - @cinder_utils.trace + @volume_utils.trace def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume and return its properties.""" return self.common.create_cloned_volume(volume, src_vref) - 
@cinder_utils.trace + @volume_utils.trace def delete_volume(self, volume): """Delete the specified volume.""" self.common.delete_volume(volume) - @cinder_utils.trace + @volume_utils.trace def create_snapshot(self, snapshot): """Create a snapshot from a volume and return its properties.""" return self.common.create_snapshot(snapshot) - @cinder_utils.trace + @volume_utils.trace def delete_snapshot(self, snapshot): """Delete the specified snapshot.""" self.common.delete_snapshot(snapshot) @@ -129,7 +129,7 @@ class HBSDFCDriver(driver.FibreChannelDriver): self.get_goodness_function()) return data - @cinder_utils.trace + @volume_utils.trace def update_migrated_volume( self, ctxt, volume, new_volume, original_volume_status): """Do any remaining jobs after migration.""" @@ -137,34 +137,34 @@ class HBSDFCDriver(driver.FibreChannelDriver): super(HBSDFCDriver, self).update_migrated_volume( ctxt, volume, new_volume, original_volume_status) - @cinder_utils.trace + @volume_utils.trace def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" super(HBSDFCDriver, self).copy_image_to_volume( context, volume, image_service, image_id) self.common.discard_zero_page(volume) - @cinder_utils.trace + @volume_utils.trace def extend_volume(self, volume, new_size): """Extend the specified volume to the specified size.""" self.common.extend_volume(volume, new_size) - @cinder_utils.trace + @volume_utils.trace def manage_existing(self, volume, existing_ref): """Return volume properties which Cinder needs to manage the volume.""" return self.common.manage_existing(volume, existing_ref) - @cinder_utils.trace + @volume_utils.trace def manage_existing_get_size(self, volume, existing_ref): """Return the size[GB] of the specified volume.""" return self.common.manage_existing_get_size(existing_ref) - @cinder_utils.trace + @volume_utils.trace def unmanage(self, volume): """Prepare the volume for removing it from Cinder management.""" self.common.unmanage(volume) - @cinder_utils.trace + @volume_utils.trace def do_setup(self, context): """Prepare for the startup of the driver.""" self.common.do_setup(context) @@ -187,12 +187,12 @@ class HBSDFCDriver(driver.FibreChannelDriver): def remove_export_snapshot(self, context, snapshot): pass - @cinder_utils.trace + @volume_utils.trace def initialize_connection(self, volume, connector): """Initialize connection between the server and the volume.""" return self.common.initialize_connection(volume, connector) - @cinder_utils.trace + @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): """Terminate connection between the server and the volume.""" if connector is None: @@ -201,22 +201,22 @@ class HBSDFCDriver(driver.FibreChannelDriver): return self.common.terminate_connection(volume, connector) - @cinder_utils.trace + @volume_utils.trace def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Initialize connection between the server and the snapshot.""" return self.common.initialize_connection(snapshot, connector) - @cinder_utils.trace + @volume_utils.trace def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Terminate connection between the server and the snapshot.""" self.common.terminate_connection(snapshot, connector) - @cinder_utils.trace + @volume_utils.trace def unmanage_snapshot(self, snapshot): """Prepare the snapshot for removing it from Cinder management.""" return self.common.unmanage_snapshot(snapshot) - @cinder_utils.trace + 
@volume_utils.trace def retype(self, ctxt, volume, new_type, diff, host): """Retype the specified volume.""" return self.common.retype() @@ -224,7 +224,7 @@ class HBSDFCDriver(driver.FibreChannelDriver): def backup_use_temp_snapshot(self): return True - @cinder_utils.trace + @volume_utils.trace def revert_to_snapshot(self, context, volume, snapshot): """Rollback the specified snapshot""" return self.common.revert_to_snapshot(volume, snapshot) diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py index 5b749663ba7..57b91ca87da 100644 --- a/cinder/volume/drivers/hitachi/hbsd_iscsi.py +++ b/cinder/volume/drivers/hitachi/hbsd_iscsi.py @@ -15,10 +15,10 @@ """iSCSI module for Hitachi HBSD Driver.""" from cinder import interface -from cinder import utils as cinder_utils from cinder.volume import driver from cinder.volume.drivers.hitachi import hbsd_common as common from cinder.volume.drivers.hitachi import hbsd_utils as utils +from cinder.volume import volume_utils MSG = utils.HBSDMsg @@ -71,32 +71,32 @@ class HBSDISCSIDriver(driver.ISCSIDriver): def check_for_setup_error(self): pass - @cinder_utils.trace + @volume_utils.trace def create_volume(self, volume): """Create a volume and return its properties.""" return self.common.create_volume(volume) - @cinder_utils.trace + @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot and return its properties.""" return self.common.create_volume_from_snapshot(volume, snapshot) - @cinder_utils.trace + @volume_utils.trace def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume and return its properties.""" return self.common.create_cloned_volume(volume, src_vref) - @cinder_utils.trace + @volume_utils.trace def delete_volume(self, volume): """Delete the specified volume.""" self.common.delete_volume(volume) - @cinder_utils.trace + @volume_utils.trace def create_snapshot(self, snapshot): """Create a snapshot from a volume and return its properties.""" return self.common.create_snapshot(snapshot) - @cinder_utils.trace + @volume_utils.trace def delete_snapshot(self, snapshot): """Delete the specified snapshot.""" self.common.delete_snapshot(snapshot) @@ -113,7 +113,7 @@ class HBSDISCSIDriver(driver.ISCSIDriver): self.get_goodness_function()) return data - @cinder_utils.trace + @volume_utils.trace def update_migrated_volume( self, ctxt, volume, new_volume, original_volume_status): """Do any remaining jobs after migration.""" @@ -121,34 +121,34 @@ class HBSDISCSIDriver(driver.ISCSIDriver): super(HBSDISCSIDriver, self).update_migrated_volume( ctxt, volume, new_volume, original_volume_status) - @cinder_utils.trace + @volume_utils.trace def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" super(HBSDISCSIDriver, self).copy_image_to_volume( context, volume, image_service, image_id) self.common.discard_zero_page(volume) - @cinder_utils.trace + @volume_utils.trace def extend_volume(self, volume, new_size): """Extend the specified volume to the specified size.""" self.common.extend_volume(volume, new_size) - @cinder_utils.trace + @volume_utils.trace def manage_existing(self, volume, existing_ref): """Return volume properties which Cinder needs to manage the volume.""" return self.common.manage_existing(volume, existing_ref) - @cinder_utils.trace + @volume_utils.trace def manage_existing_get_size(self, volume, existing_ref): """Return the 
size[GB] of the specified volume.""" return self.common.manage_existing_get_size(existing_ref) - @cinder_utils.trace + @volume_utils.trace def unmanage(self, volume): """Prepare the volume for removing it from Cinder management.""" self.common.unmanage(volume) - @cinder_utils.trace + @volume_utils.trace def do_setup(self, context): """Prepare for the startup of the driver.""" self.common.do_setup(context) @@ -171,12 +171,12 @@ class HBSDISCSIDriver(driver.ISCSIDriver): def remove_export_snapshot(self, context, snapshot): pass - @cinder_utils.trace + @volume_utils.trace def initialize_connection(self, volume, connector): """Initialize connection between the server and the volume.""" return self.common.initialize_connection(volume, connector) - @cinder_utils.trace + @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): """Terminate connection between the server and the volume.""" if connector is None: @@ -185,22 +185,22 @@ class HBSDISCSIDriver(driver.ISCSIDriver): return self.common.terminate_connection(volume, connector) - @cinder_utils.trace + @volume_utils.trace def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Initialize connection between the server and the snapshot.""" return self.common.initialize_connection(snapshot, connector) - @cinder_utils.trace + @volume_utils.trace def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Terminate connection between the server and the snapshot.""" self.common.terminate_connection(snapshot, connector) - @cinder_utils.trace + @volume_utils.trace def unmanage_snapshot(self, snapshot): """Prepare the snapshot for removing it from Cinder management.""" return self.common.unmanage_snapshot(snapshot) - @cinder_utils.trace + @volume_utils.trace def retype(self, ctxt, volume, new_type, diff, host): """Retype the specified volume.""" return self.common.retype() @@ -208,7 +208,7 @@ class HBSDISCSIDriver(driver.ISCSIDriver): def backup_use_temp_snapshot(self): return True - @cinder_utils.trace + @volume_utils.trace def revert_to_snapshot(self, context, volume, snapshot): """Rollback the specified snapshot""" return self.common.revert_to_snapshot(volume, snapshot) diff --git a/cinder/volume/drivers/hpe/hpe_3par_base.py b/cinder/volume/drivers/hpe/hpe_3par_base.py index 575e5c0968f..b6eb1d9ec2e 100644 --- a/cinder/volume/drivers/hpe/hpe_3par_base.py +++ b/cinder/volume/drivers/hpe/hpe_3par_base.py @@ -34,10 +34,10 @@ from oslo_log import log as logging from cinder import exception from cinder.i18n import _ -from cinder import utils from cinder.volume import driver from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon from cinder.volume.drivers.san import san +from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @@ -117,7 +117,7 @@ class HPE3PARDriverBase(driver.ManageableVD, return int(volume.get("replication_driver_data")) return None - @utils.trace + @volume_utils.trace def get_volume_stats(self, refresh=False): # NOTE(geguileo): We don't need to login to the backed if we are not # going to refresh the stats, furthermore if we login, then we'll @@ -146,7 +146,7 @@ class HPE3PARDriverBase(driver.ManageableVD, """Setup errors are already checked for in do_setup so return pass.""" pass - @utils.trace + @volume_utils.trace def create_volume(self, volume): common = self._login() try: @@ -154,7 +154,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def create_cloned_volume(self, volume, 
src_vref): """Clone an existing volume.""" common = self._login() @@ -163,7 +163,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def delete_volume(self, volume): common = self._login() try: @@ -171,7 +171,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. @@ -183,7 +183,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def create_snapshot(self, snapshot): common = self._login() try: @@ -191,7 +191,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def delete_snapshot(self, snapshot): common = self._login() try: @@ -199,7 +199,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def extend_volume(self, volume, new_size): common = self._login() try: @@ -207,7 +207,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def create_group(self, context, group): common = self._login() try: @@ -215,7 +215,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): @@ -227,7 +227,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def delete_group(self, context, group, volumes): common = self._login() try: @@ -235,7 +235,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def update_group(self, context, group, add_volumes=None, remove_volumes=None): common = self._login() @@ -245,7 +245,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def create_group_snapshot(self, context, group_snapshot, snapshots): common = self._login() try: @@ -254,7 +254,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def delete_group_snapshot(self, context, group_snapshot, snapshots): common = self._login() try: @@ -263,7 +263,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def manage_existing(self, volume, existing_ref): common = self._login() try: @@ -271,7 +271,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def manage_existing_snapshot(self, snapshot, existing_ref): common = self._login() try: @@ -279,7 +279,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def manage_existing_get_size(self, volume, existing_ref): common = self._login() try: @@ -287,7 +287,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def manage_existing_snapshot_get_size(self, snapshot, existing_ref): common = self._login() try: @@ -296,7 +296,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def unmanage(self, volume): common = self._login() try: @@ -304,7 +304,7 @@ class 
HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def unmanage_snapshot(self, snapshot): common = self._login() try: @@ -312,7 +312,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def retype(self, context, volume, new_type, diff, host): """Convert the volume to be of the new type.""" common = self._login() @@ -321,7 +321,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def migrate_volume(self, context, volume, host): if volume['status'] == 'in-use': protocol = host['capabilities']['storage_protocol'] @@ -339,7 +339,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def update_migrated_volume(self, context, volume, new_volume, original_volume_status): """Update the name of the migrated volume to it's new ID.""" @@ -350,7 +350,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def get_pool(self, volume): common = self._login() try: @@ -362,7 +362,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot.""" common = self._login() @@ -371,7 +371,7 @@ class HPE3PARDriverBase(driver.ManageableVD, finally: self._logout(common) - @utils.trace + @volume_utils.trace def failover_host(self, context, volumes, secondary_id=None, groups=None): """Force failover to a secondary replication target.""" common = self._login(timeout=30) @@ -455,7 +455,7 @@ class HPE3PARDriverBase(driver.ManageableVD, def initialize_connection(self, volume, connector): pass - @utils.trace + @volume_utils.trace def _init_vendor_properties(self): """Create a dictionary of vendor unique properties. diff --git a/cinder/volume/drivers/hpe/hpe_3par_fc.py b/cinder/volume/drivers/hpe/hpe_3par_fc.py index d4461ca8511..4fd702492e2 100644 --- a/cinder/volume/drivers/hpe/hpe_3par_fc.py +++ b/cinder/volume/drivers/hpe/hpe_3par_fc.py @@ -39,8 +39,8 @@ from oslo_utils.excutils import save_and_reraise_exception from cinder import coordination from cinder import interface -from cinder import utils from cinder.volume.drivers.hpe import hpe_3par_base as hpebasedriver +from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) @@ -164,7 +164,7 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase): return info_backend - @utils.trace + @volume_utils.trace @coordination.synchronized('3par-{volume.id}') def initialize_connection(self, volume, connector): """Assigns the volume to a server. 
@@ -292,7 +292,7 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase): finally: self._logout(common) - @utils.trace + @volume_utils.trace @coordination.synchronized('3par-{volume.id}') def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to detach a volume from an instance.""" diff --git a/cinder/volume/drivers/hpe/hpe_3par_iscsi.py b/cinder/volume/drivers/hpe/hpe_3par_iscsi.py index 0d809e048f1..ab851868d9c 100644 --- a/cinder/volume/drivers/hpe/hpe_3par_iscsi.py +++ b/cinder/volume/drivers/hpe/hpe_3par_iscsi.py @@ -42,7 +42,6 @@ from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface -from cinder import utils from cinder.volume.drivers.hpe import hpe_3par_base as hpebasedriver from cinder.volume import volume_utils @@ -284,7 +283,7 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase): "hpe3par_iscsi_ips list defined in " "cinder.conf.", iscsi_ip) - @utils.trace + @volume_utils.trace @coordination.synchronized('3par-{volume.id}') def initialize_connection(self, volume, connector): """Assigns the volume to a server. @@ -460,7 +459,7 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase): finally: self._logout(common) - @utils.trace + @volume_utils.trace @coordination.synchronized('3par-{volume.id}') def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to detach a volume from an instance.""" @@ -743,7 +742,7 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase): return model_update - @utils.trace + @volume_utils.trace def create_export(self, context, volume, connector): common = self._login() try: @@ -751,7 +750,7 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase): finally: self._logout(common) - @utils.trace + @volume_utils.trace def ensure_export(self, context, volume): """Ensure the volume still exists on the 3PAR. diff --git a/cinder/volume/drivers/ibm/storwize_svc/replication.py b/cinder/volume/drivers/ibm/storwize_svc/replication.py index 54ae29c7f88..927a49ea195 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/replication.py +++ b/cinder/volume/drivers/ibm/storwize_svc/replication.py @@ -27,6 +27,7 @@ from cinder.i18n import _ from cinder import ssh_utils from cinder import utils from cinder.volume.drivers.ibm.storwize_svc import storwize_const +from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @@ -36,7 +37,7 @@ class StorwizeSVCReplication(object): self.driver = driver self.target = replication_target or {} - @utils.trace + @volume_utils.trace def failover_volume_host(self, context, vref): # Make the aux volume writeable. 
try: diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py index 5a037738662..20b9c76ee4a 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py +++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py @@ -1213,7 +1213,7 @@ class StorwizeHelpers(object): return result_lun - @cinder_utils.trace + @volume_utils.trace def map_vol_to_host(self, volume_name, host_name, multihostmap): """Create a mapping between a volume to a host.""" @@ -2100,7 +2100,7 @@ class StorwizeHelpers(object): return None return resp[0] - @cinder_utils.trace + @volume_utils.trace def _check_delete_vdisk_fc_mappings(self, name, allow_snaps=True, allow_fctgt=False): """FlashCopy mapping check helper.""" @@ -2188,7 +2188,7 @@ class StorwizeHelpers(object): if not wait_for_copy or not len(mapping_ids): raise loopingcall.LoopingCallDone(retvalue=True) - @cinder_utils.trace + @volume_utils.trace def _check_vdisk_fc_mappings(self, name, allow_snaps=True, allow_fctgt=False): """FlashCopy mapping check helper.""" @@ -3636,7 +3636,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, LOG.debug("Exit: update volume copy status.") # #### V2.1 replication methods #### # - @cinder_utils.trace + @volume_utils.trace def failover_host(self, context, volumes, secondary_id=None, groups=None): if not self._replica_enabled: msg = _("Replication is not properly enabled on backend.") @@ -4199,7 +4199,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, self._update_storwize_state(self._aux_state, self._aux_backend_helpers) # Replication Group (Tiramisu) - @cinder_utils.trace + @volume_utils.trace def enable_replication(self, context, group, volumes): """Enables replication for a group and volumes in the group.""" model_update = {'replication_status': fields.ReplicationStatus.ENABLED} @@ -4233,7 +4233,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, 'replication_status': model_update['replication_status']}) return model_update, volumes_update - @cinder_utils.trace + @volume_utils.trace def disable_replication(self, context, group, volumes): """Disables replication for a group and volumes in the group.""" model_update = { @@ -4265,7 +4265,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, 'replication_status': model_update['replication_status']}) return model_update, volumes_update - @cinder_utils.trace + @volume_utils.trace def failover_replication(self, context, group, volumes, secondary_backend_id=None): """Fails over replication for a group and volumes in the group.""" @@ -4356,7 +4356,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, raise exception.UnableToFailOver(reason=msg) return model_update - @cinder_utils.trace + @volume_utils.trace def _rep_grp_failover(self, ctxt, group): """Fail over all the volume in the replication group.""" model_update = { @@ -4374,7 +4374,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, LOG.exception(msg) raise exception.UnableToFailOver(reason=msg) - @cinder_utils.trace + @volume_utils.trace def _sync_with_aux_grp(self, ctxt, rccg_name): try: rccg = self._helpers.get_rccg(rccg_name) @@ -5624,7 +5624,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, return model_update, snapshots_model - @cinder_utils.trace + @volume_utils.trace def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot.""" if snapshot.volume_size != volume.size: diff --git a/cinder/volume/drivers/inspur/as13000/as13000_driver.py b/cinder/volume/drivers/inspur/as13000/as13000_driver.py index 
f144860a117..26ca0e68186 100644 --- a/cinder/volume/drivers/inspur/as13000/as13000_driver.py +++ b/cinder/volume/drivers/inspur/as13000/as13000_driver.py @@ -89,7 +89,7 @@ class RestAPIExecutor(object): raise @staticmethod - @utils.trace_method + @volume_utils.trace_method def do_request(cmd, url, header, data): """Send request to the storage and handle the response.""" if cmd in ['post', 'get', 'put', 'delete']: @@ -113,7 +113,7 @@ class RestAPIExecutor(object): return response - @utils.trace + @volume_utils.trace def send_api(self, method, params=None, request_type='post'): if params: params = json.dumps(params) @@ -205,7 +205,7 @@ class AS13000Driver(san.SanISCSIDriver): def get_driver_options(): return inspur_as13000_opts - @utils.trace + @volume_utils.trace def do_setup(self, context): # get tokens for the driver self._rest.login() @@ -231,7 +231,7 @@ class AS13000Driver(san.SanISCSIDriver): self._check_meta_pool() - @utils.trace + @volume_utils.trace def check_for_setup_error(self): """Do check to make sure service is available.""" # check the required flags in conf @@ -276,7 +276,7 @@ class AS13000Driver(san.SanISCSIDriver): LOG.error(msg) raise exception.InvalidInput(reason=msg) - @utils.trace + @volume_utils.trace def create_volume(self, volume): """Create volume in the backend.""" pool = volume_utils.extract_host(volume.host, level='pool') @@ -295,7 +295,7 @@ class AS13000Driver(san.SanISCSIDriver): self._rest.send_rest_api(method=method, params=params, request_type=request_type) - @utils.trace + @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Create a new volume base on a specific snapshot.""" if snapshot.volume_size > volume.size: @@ -343,7 +343,7 @@ class AS13000Driver(san.SanISCSIDriver): if volume.size > snapshot.volume_size: self.extend_volume(volume, volume.size) - @utils.trace + @volume_utils.trace def create_cloned_volume(self, volume, src_vref): """Clone a volume.""" if src_vref.size > volume.size: @@ -374,7 +374,7 @@ class AS13000Driver(san.SanISCSIDriver): if volume.size > src_vref.size: self.extend_volume(volume, volume.size) - @utils.trace + @volume_utils.trace def extend_volume(self, volume, new_size): """Extend volume to new size.""" name = self._trans_name_down(volume.name) @@ -395,7 +395,7 @@ class AS13000Driver(san.SanISCSIDriver): params=params, request_type=request_type) - @utils.trace + @volume_utils.trace def delete_volume(self, volume): """Delete volume from AS13000.""" name = self._trans_name_down(volume.name) @@ -412,7 +412,7 @@ class AS13000Driver(san.SanISCSIDriver): request_type = 'delete' self._rest.send_rest_api(method=method, request_type=request_type) - @utils.trace + @volume_utils.trace def create_snapshot(self, snapshot): """Create snapshot of volume in backend. 
@@ -438,7 +438,7 @@ class AS13000Driver(san.SanISCSIDriver): self._rest.send_rest_api(method=method, params=params, request_type=request_type) - @utils.trace + @volume_utils.trace def delete_snapshot(self, snapshot): """Delete snapshot of volume.""" source_volume = snapshot.volume @@ -457,7 +457,7 @@ class AS13000Driver(san.SanISCSIDriver): request_type = 'delete' self._rest.send_rest_api(method=method, request_type=request_type) - @utils.trace + @volume_utils.trace def _update_volume_stats(self): """Update the backend stats including driver info and pools info.""" @@ -490,7 +490,7 @@ class AS13000Driver(san.SanISCSIDriver): ipaddr = '[%s]' % ip return '%(ip)s:%(port)s' % {'ip': ipaddr, 'port': port} - @utils.trace + @volume_utils.trace def initialize_connection(self, volume, connector, **kwargs): """Initialize connection steps: @@ -567,7 +567,7 @@ class AS13000Driver(san.SanISCSIDriver): return {'driver_volume_type': 'iscsi', 'data': connection_data} - @utils.trace + @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): """Delete lun from target. @@ -613,7 +613,7 @@ class AS13000Driver(san.SanISCSIDriver): return pools_info - @utils.trace + @volume_utils.trace def _get_pools_stats(self): """Generate the pool stat information.""" pools_info = self._get_pools_info(self.pools) @@ -637,7 +637,7 @@ class AS13000Driver(san.SanISCSIDriver): return pools - @utils.trace + @volume_utils.trace def _get_target_from_conn(self, host_ip): """Get target information base on the host ip.""" host_exist = False @@ -654,7 +654,7 @@ class AS13000Driver(san.SanISCSIDriver): return host_exist, target_name, node - @utils.trace + @volume_utils.trace def _get_target_list(self): """Get a list of all targets in the backend.""" method = 'block/target/detail' @@ -663,7 +663,7 @@ class AS13000Driver(san.SanISCSIDriver): request_type=request_type) return data - @utils.trace + @volume_utils.trace def _create_target(self, target_name, target_node): """Create a target on the specified node.""" method = 'block/target' @@ -673,7 +673,7 @@ class AS13000Driver(san.SanISCSIDriver): params=params, request_type=request_type) - @utils.trace + @volume_utils.trace def _delete_target(self, target_name): """Delete all target of all the node.""" method = 'block/target?name=%s' % target_name @@ -681,7 +681,7 @@ class AS13000Driver(san.SanISCSIDriver): self._rest.send_rest_api(method=method, request_type=request_type) - @utils.trace + @volume_utils.trace def _add_chap_to_target(self, target_name, chap_username, chap_password): """Add CHAP to target.""" method = 'block/chap/bond' @@ -693,7 +693,7 @@ class AS13000Driver(san.SanISCSIDriver): params=params, request_type=request_type) - @utils.trace + @volume_utils.trace def _add_host_to_target(self, host_ip, target_name): """Add the authority of host to target.""" method = 'block/host' @@ -703,7 +703,7 @@ class AS13000Driver(san.SanISCSIDriver): params=params, request_type=request_type) - @utils.trace + @volume_utils.trace @utils.retry(exceptions=exception.VolumeDriverException, interval=1, retries=3) @@ -721,14 +721,14 @@ class AS13000Driver(san.SanISCSIDriver): params=params, request_type=request_type) - @utils.trace + @volume_utils.trace def _delete_lun_from_target(self, target_name, lun_id): """Delete lun from target_name.""" method = 'block/lun?name=%s&id=%s&force=1' % (target_name, lun_id) request_type = 'delete' self._rest.send_rest_api(method=method, request_type=request_type) - @utils.trace + @volume_utils.trace def _get_lun_list(self, target_name): """Get 
all lun list of the target.""" method = 'block/lun?name=%s' % target_name @@ -736,7 +736,7 @@ class AS13000Driver(san.SanISCSIDriver): return self._rest.send_rest_api(method=method, request_type=request_type) - @utils.trace + @volume_utils.trace def _snapshot_lock_op(self, op, vol_name, snap_name, pool_name): """Lock or unlock a snapshot to protect the snapshot. @@ -751,7 +751,7 @@ class AS13000Driver(san.SanISCSIDriver): params=params, request_type=request_type) - @utils.trace + @volume_utils.trace def _filling_volume(self, name, pool): """Filling a volume so that make it independently.""" method = 'block/lvm/filling' @@ -774,7 +774,7 @@ class AS13000Driver(san.SanISCSIDriver): msg = (_('Volume %s is not filled.') % name) raise exception.VolumeDriverException(msg) - @utils.trace + @volume_utils.trace def _check_volume(self, volume): """Check if the volume exists in the backend.""" pool = volume_utils.extract_host(volume.host, 'pool') @@ -789,7 +789,7 @@ class AS13000Driver(san.SanISCSIDriver): eventlet.sleep(1) return False - @utils.trace + @volume_utils.trace def _get_volumes(self, pool): """Get all the volumes in the pool.""" method = 'block/lvm?pool=%s' % pool @@ -797,7 +797,7 @@ class AS13000Driver(san.SanISCSIDriver): return self._rest.send_rest_api(method=method, request_type=request_type) - @utils.trace + @volume_utils.trace def _get_cluster_status(self): """Get all nodes of the backend.""" method = 'cluster/node' @@ -805,7 +805,7 @@ class AS13000Driver(san.SanISCSIDriver): return self._rest.send_rest_api(method=method, request_type=request_type) - @utils.trace + @volume_utils.trace def _get_lun_id(self, volume, target_name): """Get lun id of the voluem in a target.""" pool = volume_utils.extract_host(volume.host, level='pool') @@ -827,7 +827,7 @@ class AS13000Driver(san.SanISCSIDriver): """ return name.replace('-', '_') - @utils.trace + @volume_utils.trace def _unit_convert(self, capacity): """Convert all units to GB. 
diff --git a/cinder/volume/drivers/inspur/instorage/instorage_common.py b/cinder/volume/drivers/inspur/instorage/instorage_common.py index 9feaaabd170..6ba594b7118 100644 --- a/cinder/volume/drivers/inspur/instorage/instorage_common.py +++ b/cinder/volume/drivers/inspur/instorage/instorage_common.py @@ -170,7 +170,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver): def get_driver_options(): return instorage_mcs_opts - @cinder_utils.trace + @volume_utils.trace def do_setup(self, ctxt): """Check that we have all configuration details from the storage.""" # InStorage has the limitation that can not burst more than 3 new ssh @@ -239,7 +239,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver): msg = _('Failed getting details for pool %s.') % pool raise exception.InvalidInput(reason=msg) - @cinder_utils.trace + @volume_utils.trace def check_for_setup_error(self): """Ensure that the flags are set properly.""" @@ -411,7 +411,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver): volume_type=volume_type, volume_metadata=volume_metadata) - @cinder_utils.trace + @volume_utils.trace def create_volume(self, volume): opts = self._get_vdisk_params( volume.volume_type_id, @@ -527,7 +527,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver): def extend_volume(self, volume, new_size): self._extend_volume_op(volume, new_size) - @cinder_utils.trace + @volume_utils.trace def _extend_volume_op(self, volume, new_size, old_size=None): volume_name = self._get_target_vol(volume) ret = self._assistant.ensure_vdisk_no_lc_mappings(volume_name, @@ -568,7 +568,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver): else: self._assistant.extend_vdisk(volume_name, extend_amt) - @cinder_utils.trace + @volume_utils.trace def delete_volume(self, volume): ctxt = context.get_admin_context() @@ -688,7 +688,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver): copy_op[1]) LOG.debug("Exit: update volume copy status.") - @cinder_utils.trace + @volume_utils.trace def migrate_volume(self, ctxt, volume, host): """Migrate directly if source and dest are managed by same storage. @@ -719,7 +719,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver): self._add_vdisk_copy_op(ctxt, volume, new_op) return (True, None) - @cinder_utils.trace + @volume_utils.trace def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. 
@@ -1270,7 +1270,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver): return vdisk # #### V2.1 replication methods #### # - @cinder_utils.trace + @volume_utils.trace def failover_host(self, context, volumes, secondary_id=None): if not self._replica_enabled: msg = _("Replication is not properly enabled on backend.") @@ -1329,7 +1329,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver): self._update_volume_stats() return instorage_const.FAILBACK_VALUE, volumes_update - @cinder_utils.trace + @volume_utils.trace def _failback_replica_volumes(self, ctxt, rep_volumes): volumes_update = [] @@ -1389,7 +1389,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver): 'replication_driver_data': ''}}) return volumes_update - @cinder_utils.trace + @volume_utils.trace def _sync_with_aux(self, ctxt, volumes): try: rep_mgr = self._get_replica_mgr() @@ -1443,7 +1443,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver): 'error:%(err)s.', {'volume': tgt_volume, 'err': ex}) - @cinder_utils.trace + @volume_utils.trace def _wait_replica_vol_ready(self, ctxt, volume): def _replica_vol_ready(): rep_info = self._assistant.get_relationship_info(volume) @@ -1509,7 +1509,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver): self._update_volume_stats() return self._active_backend_id, volumes_update - @cinder_utils.trace + @volume_utils.trace def _failover_replica_volumes(self, ctxt, rep_volumes): volumes_update = [] diff --git a/cinder/volume/drivers/inspur/instorage/instorage_fc.py b/cinder/volume/drivers/inspur/instorage/instorage_fc.py index 0de27f1805a..f4b6de6e8e1 100644 --- a/cinder/volume/drivers/inspur/instorage/instorage_fc.py +++ b/cinder/volume/drivers/inspur/instorage/instorage_fc.py @@ -25,9 +25,9 @@ from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface -from cinder import utils as cinder_utils from cinder.volume import driver from cinder.volume.drivers.inspur.instorage import instorage_common +from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) @@ -67,7 +67,7 @@ class InStorageMCSFCDriver(instorage_common.InStorageMCSCommonDriver, return i_t_map - @cinder_utils.trace + @volume_utils.trace @coordination.synchronized('instorage-host' '{self._state[system_id]}' '{connector[host]}') @@ -185,7 +185,7 @@ class InStorageMCSFCDriver(instorage_common.InStorageMCSCommonDriver, **kwargs) return _do_terminate_connection_locked() - @cinder_utils.trace + @volume_utils.trace def _do_terminate_connection(self, volume, connector, **kwargs): """Cleanup after an FC connection has been terminated. 
diff --git a/cinder/volume/drivers/inspur/instorage/instorage_iscsi.py b/cinder/volume/drivers/inspur/instorage/instorage_iscsi.py index 7928088f407..68bf7b55686 100644 --- a/cinder/volume/drivers/inspur/instorage/instorage_iscsi.py +++ b/cinder/volume/drivers/inspur/instorage/instorage_iscsi.py @@ -50,9 +50,9 @@ from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface -from cinder import utils as cinder_utils from cinder.volume import driver from cinder.volume.drivers.inspur.instorage import instorage_common +from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @@ -90,7 +90,7 @@ class InStorageMCSISCSIDriver(instorage_common.InStorageMCSCommonDriver, self.configuration.append_config_values( instorage_mcs_iscsi_opts) - @cinder_utils.trace + @volume_utils.trace @coordination.synchronized('instorage-host' '{self._state[system_id]}' '{connector[host]}') @@ -132,7 +132,7 @@ class InStorageMCSISCSIDriver(instorage_common.InStorageMCSCommonDriver, return {'driver_volume_type': 'iscsi', 'data': properties} - @cinder_utils.trace + @volume_utils.trace def _get_single_iscsi_data(self, volume, connector, lun_id, chap_secret): volume_name = self._get_target_vol(volume) volume_attributes = self._assistant.get_vdisk_attributes(volume_name) @@ -200,7 +200,7 @@ class InStorageMCSISCSIDriver(instorage_common.InStorageMCSCommonDriver, discovery_auth_password=chap_secret) return properties - @cinder_utils.trace + @volume_utils.trace def _get_multi_iscsi_data(self, volume, connector, lun_id, properties): try: resp = self._assistant.ssh.lsportip() @@ -256,7 +256,7 @@ class InStorageMCSISCSIDriver(instorage_common.InStorageMCSCommonDriver, return self._do_terminate_connection(volume, connector, **kwargs) return _do_terminate_connection_locked() - @cinder_utils.trace + @volume_utils.trace def _do_terminate_connection(self, volume, connector, **kwargs): """Cleanup after an iSCSI connection has been terminated. 
diff --git a/cinder/volume/drivers/inspur/instorage/replication.py b/cinder/volume/drivers/inspur/instorage/replication.py index e9df361bdc9..47c74026919 100644 --- a/cinder/volume/drivers/inspur/instorage/replication.py +++ b/cinder/volume/drivers/inspur/instorage/replication.py @@ -28,6 +28,7 @@ from cinder.objects import fields from cinder import ssh_utils from cinder import utils as cinder_utils from cinder.volume.drivers.inspur.instorage import instorage_const +from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @@ -142,7 +143,7 @@ class InStorageMCSReplication(object): self.target = replication_target or {} self.target_assistant = target_assistant - @cinder_utils.trace + @volume_utils.trace def volume_replication_setup(self, context, vref): target_vol_name = instorage_const.REPLICA_AUX_VOL_PREFIX + vref.name try: @@ -167,7 +168,7 @@ class InStorageMCSReplication(object): LOG.error(msg) raise exception.VolumeDriverException(message=msg) - @cinder_utils.trace + @volume_utils.trace def failover_volume_host(self, context, vref): target_vol = instorage_const.REPLICA_AUX_VOL_PREFIX + vref.name diff --git a/cinder/volume/drivers/kaminario/kaminario_common.py b/cinder/volume/drivers/kaminario/kaminario_common.py index 7a644b20303..498e9efc9d4 100644 --- a/cinder/volume/drivers/kaminario/kaminario_common.py +++ b/cinder/volume/drivers/kaminario/kaminario_common.py @@ -134,7 +134,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): 'volume_dd_blocksize', 'unique_fqdn_network') return kaminario_opts + additional_opts - @utils.trace + @volume_utils.trace def check_for_setup_error(self): if krest is None: msg = _("Unable to import 'krest' python module.") @@ -183,12 +183,12 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): raise exception.InvalidInput(reason=msg) self.replica = Replication(replica[0]) - @utils.trace + @volume_utils.trace def do_setup(self, context): super(KaminarioCinderDriver, self).do_setup(context) self._check_ops() - @utils.trace + @volume_utils.trace def create_volume(self, volume): """Volume creation in K2 needs a volume group. @@ -222,7 +222,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): if self._get_is_replica(volume.volume_type) and self.replica: self._create_volume_replica(volume, vg, vol, self.replica.rpo) - @utils.trace + @volume_utils.trace def _create_volume_replica(self, volume, vg, vol, rpo): """Volume replica creation in K2 needs session and remote volume. @@ -280,7 +280,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): self._delete_by_ref(self.client, "volume_groups", vg.name, "vg") raise KaminarioCinderDriverException(reason=ex) - @utils.trace + @volume_utils.trace def _create_failover_volume_replica(self, volume, vg_name, vol_name): """Volume replica creation in K2 needs session and remote volume. 
@@ -335,14 +335,14 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): self._delete_by_ref(self.client, "volume_groups", vg_name, "vg") raise KaminarioCinderDriverException(reason=ex) - @utils.trace + @volume_utils.trace def _delete_by_ref(self, device, url, name, msg): rs = device.search(url, name=name) for result in rs.hits: result.delete() LOG.debug("Deleting %(msg)s: %(name)s", {'msg': msg, 'name': name}) - @utils.trace + @volume_utils.trace def _failover_volume(self, volume): """Promoting a secondary volume to primary volume.""" session_name = self.get_session_name(volume.id) @@ -355,7 +355,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): LOG.debug("The target session: %s state is " "changed to failed_over ", rsession_name) - @utils.trace + @volume_utils.trace def failover_host(self, context, volumes, secondary_id=None, groups=None): """Failover to replication target.""" volume_updates = [] @@ -509,7 +509,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): back_end_ip = self.replica.backend_id return back_end_ip, volume_updates, [] - @utils.trace + @volume_utils.trace def _create_volume_replica_user_snap(self, k2, sess): snap = k2.new("snapshots") snap.is_application_consistent = "False" @@ -538,7 +538,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): self.snap_updates.remove(update) eventlet.sleep(1) - @utils.trace + @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Create volume from snapshot. @@ -603,7 +603,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): "failed", {"vol": vol_name, "view": view_name}) raise KaminarioCinderDriverException(reason=ex) - @utils.trace + @volume_utils.trace def create_cloned_volume(self, volume, src_vref): """Create a clone from source volume. @@ -648,7 +648,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): LOG.exception("Create a clone: %s failed.", clone_name) raise KaminarioCinderDriverException(reason=ex) - @utils.trace + @volume_utils.trace def delete_volume(self, volume): """Volume in K2 exists in a volume group. 
@@ -673,7 +673,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): LOG.exception("Deletion of volume %s failed.", vol_name) raise KaminarioCinderDriverException(reason=ex) - @utils.trace + @volume_utils.trace def _delete_volume_replica(self, volume, vg_name, vol_name): rvg_name = self.get_rep_name(vg_name) rvol_name = self.get_rep_name(vol_name) @@ -707,7 +707,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): self._delete_by_ref(self.target, "volume_groups", rvg_name, "remote vg") - @utils.trace + @volume_utils.trace def _delete_failover_volume_replica(self, volume, vg_name, vol_name): rvg_name = self.get_rep_name(vg_name) rvol_name = self.get_rep_name(vol_name) @@ -732,7 +732,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): obj.refresh() eventlet.sleep(1) - @utils.trace + @volume_utils.trace def get_volume_stats(self, refresh=False): if refresh: self.update_volume_stats() @@ -754,7 +754,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): def remove_export(self, context, volume): pass - @utils.trace + @volume_utils.trace def create_snapshot(self, snapshot): """Create a snapshot from a volume_group.""" vg_name = self.get_volume_group_name(snapshot.volume_id) @@ -772,7 +772,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): LOG.exception("Creation of snapshot: %s failed.", snap_name) raise KaminarioCinderDriverException(reason=ex) - @utils.trace + @volume_utils.trace def delete_snapshot(self, snapshot): """Delete a snapshot.""" snap_name = self.get_snap_name(snapshot.id) @@ -785,7 +785,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): LOG.exception("Deletion of snapshot: %s failed.", snap_name) raise KaminarioCinderDriverException(reason=ex) - @utils.trace + @volume_utils.trace def extend_volume(self, volume, new_size): """Extend volume.""" vol_name = self.get_volume_name(volume.id) @@ -867,7 +867,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): """Return the corresponding replication names.""" return "r{0}".format(name) - @utils.trace + @volume_utils.trace def _delete_host_by_name(self, name): """Deleting host by name.""" host_rs = self.client.search("hosts", name=name) @@ -911,7 +911,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): def initialize_connection(self, volume, connector): pass - @utils.trace + @volume_utils.trace def terminate_connection(self, volume, connector): """Terminate connection of volume from host.""" # Get volume object @@ -947,7 +947,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): else: LOG.warning("Host: %s not found on K2.", host_name) - @utils.trace + @volume_utils.trace def k2_initialize_connection(self, volume, connector): # Get volume object. 
if type(volume).__name__ != 'RestObject': @@ -1006,7 +1006,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): return True return False - @utils.trace + @volume_utils.trace def manage_existing(self, volume, existing_ref): vol_name = existing_ref['source-name'] new_name = self.get_volume_name(volume.id) @@ -1057,7 +1057,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): vg.save() raise - @utils.trace + @volume_utils.trace def manage_existing_get_size(self, volume, existing_ref): vol_name = existing_ref['source-name'] v_rs = self.client.search("volumes", name=vol_name) @@ -1070,7 +1070,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): existing_ref=existing_ref, reason=_('Unable to get size of manage volume.')) - @utils.trace + @volume_utils.trace def after_volume_copy(self, ctxt, volume, new_volume, remote=None): self.delete_volume(volume) vg_name_old = self.get_volume_group_name(volume.id) @@ -1084,7 +1084,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): vol_new.name = vol_name_old vol_new.save() - @utils.trace + @volume_utils.trace def retype(self, ctxt, volume, new_type, diff, host): old_type = volume.get('volume_type') vg_name = self.get_volume_group_name(volume.id) diff --git a/cinder/volume/drivers/kaminario/kaminario_fc.py b/cinder/volume/drivers/kaminario/kaminario_fc.py index ad30b2ceea8..f1db77db2cb 100644 --- a/cinder/volume/drivers/kaminario/kaminario_fc.py +++ b/cinder/volume/drivers/kaminario/kaminario_fc.py @@ -18,8 +18,8 @@ from oslo_log import log as logging from cinder import coordination from cinder.i18n import _ from cinder.objects import fields -from cinder import utils from cinder.volume.drivers.kaminario import kaminario_common as common +from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER @@ -42,13 +42,13 @@ class KaminarioFCDriver(common.KaminarioCinderDriver): # ThirdPartySystems wiki page name CI_WIKI_NAME = "Kaminario_K2_CI" - @utils.trace + @volume_utils.trace def __init__(self, *args, **kwargs): super(KaminarioFCDriver, self).__init__(*args, **kwargs) self._protocol = 'FC' self.lookup_service = fczm_utils.create_lookup_service() - @utils.trace + @volume_utils.trace @coordination.synchronized('{self.k2_lock_name}') def initialize_connection(self, volume, connector): """Attach K2 volume to host.""" @@ -98,7 +98,7 @@ class KaminarioFCDriver(common.KaminarioCinderDriver): init_pwwn.append((port.pwwn).replace(':', '')) return init_host_name, init_pwwn - @utils.trace + @volume_utils.trace @coordination.synchronized('{self.k2_lock_name}') def terminate_connection(self, volume, connector, **kwargs): if connector is None: @@ -147,7 +147,7 @@ class KaminarioFCDriver(common.KaminarioCinderDriver): raise common.KaminarioCinderDriverException(reason=msg) return target_wwpns - @utils.trace + @volume_utils.trace def _get_host_object(self, connector): host_name = self.get_initiator_host_name(connector) LOG.debug("Searching initiator hostname: %s in K2.", host_name) diff --git a/cinder/volume/drivers/kaminario/kaminario_iscsi.py b/cinder/volume/drivers/kaminario/kaminario_iscsi.py index 263c2682a57..5d97c3ce104 100644 --- a/cinder/volume/drivers/kaminario/kaminario_iscsi.py +++ b/cinder/volume/drivers/kaminario/kaminario_iscsi.py @@ -19,13 +19,12 @@ from cinder import coordination from cinder.i18n import _ from cinder import interface from cinder.objects import fields -from cinder import utils from 
cinder.volume.drivers.kaminario import kaminario_common as common +from cinder.volume import volume_utils ISCSI_TCP_PORT = "3260" K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER LOG = logging.getLogger(__name__) -utils.trace = common.utils.trace @interface.volumedriver @@ -48,12 +47,12 @@ class KaminarioISCSIDriver(common.KaminarioCinderDriver): # ThirdPartySystems wiki page name CI_WIKI_NAME = "Kaminario_K2_CI" - @utils.trace + @volume_utils.trace def __init__(self, *args, **kwargs): super(KaminarioISCSIDriver, self).__init__(*args, **kwargs) self._protocol = 'iSCSI' - @utils.trace + @volume_utils.trace @coordination.synchronized('{self.k2_lock_name}') def initialize_connection(self, volume, connector): """Attach K2 volume to host.""" @@ -83,7 +82,7 @@ class KaminarioISCSIDriver(common.KaminarioCinderDriver): target_luns=[lun] * len(target_iqns)) return result - @utils.trace + @volume_utils.trace @coordination.synchronized('{self.k2_lock_name}') def terminate_connection(self, volume, connector, **kwargs): # To support replication failback @@ -122,7 +121,7 @@ class KaminarioISCSIDriver(common.KaminarioCinderDriver): raise common.KaminarioCinderDriverException(reason=msg) return iscsi_portals, target_iqns - @utils.trace + @volume_utils.trace def _get_host_object(self, connector): host_name = self.get_initiator_host_name(connector) LOG.debug("Searching initiator hostname: %s in K2.", host_name) diff --git a/cinder/volume/drivers/macrosan/driver.py b/cinder/volume/drivers/macrosan/driver.py index c25f302104e..3f4dcc2003c 100644 --- a/cinder/volume/drivers/macrosan/driver.py +++ b/cinder/volume/drivers/macrosan/driver.py @@ -411,7 +411,7 @@ class MacroSANBaseDriver(driver.VolumeDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def create_volume(self, volume): """Create a volume.""" name = volume['name'] @@ -466,7 +466,7 @@ class MacroSANBaseDriver(driver.VolumeDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def delete_volume(self, volume): """Delete a volume.""" name = self._volume_name(volume) @@ -525,7 +525,7 @@ class MacroSANBaseDriver(driver.VolumeDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def create_snapshot(self, snapshot): """Create a snapshot.""" volume = snapshot['volume'] @@ -554,7 +554,7 @@ class MacroSANBaseDriver(driver.VolumeDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def delete_snapshot(self, snapshot): """Delete a snapshot.""" volume = snapshot['volume'] @@ -606,7 +606,7 @@ class MacroSANBaseDriver(driver.VolumeDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot.""" snapshot_volume = snapshot['volume'] @@ -647,7 +647,7 @@ class MacroSANBaseDriver(driver.VolumeDriver): self._delete_snapshot(snp_name, src_vol_name, pointid) @record_request_id - @utils.trace + @volume_utils.trace def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume.""" vol_name = volume['id'] @@ -687,7 +687,7 @@ class MacroSANBaseDriver(driver.VolumeDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def extend_volume(self, volume, new_size): """Extend a volume.""" name = self._volume_name(volume) @@ -747,7 +747,7 @@ class MacroSANBaseDriver(driver.VolumeDriver): self._stats = data @record_request_id - @utils.trace + 
@volume_utils.trace def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status=None): """Return model update for migrated volume.""" @@ -772,7 +772,7 @@ class MacroSANBaseDriver(driver.VolumeDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def initialize_connection_snapshot(self, snapshot, connector, **kwargs): volume = snapshot['volume'] provider = snapshot['provider_location'] @@ -811,7 +811,7 @@ class MacroSANBaseDriver(driver.VolumeDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def manage_existing(self, volume, external_ref): vol_params = self._parse_volume_params(volume) self._check_volume_params(vol_params) @@ -901,7 +901,7 @@ class MacroSANBaseDriver(driver.VolumeDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def manage_existing_snapshot(self, snapshot, existing_ref): volume = snapshot['volume'] src_name = self._get_existing_snapname(existing_ref).lstrip('_') @@ -948,7 +948,7 @@ class MacroSANBaseDriver(driver.VolumeDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def migrate_volume(self, ctxt, volume, host): if not self.migration_valid(volume, host): return False, None @@ -1148,7 +1148,7 @@ class MacroSANISCSIDriver(MacroSANBaseDriver, driver.ISCSIDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" @@ -1184,7 +1184,7 @@ class MacroSANISCSIDriver(MacroSANBaseDriver, driver.ISCSIDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" @@ -1433,7 +1433,7 @@ class MacroSANFCDriver(MacroSANBaseDriver, driver.FibreChannelDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" @@ -1496,7 +1496,7 @@ class MacroSANFCDriver(MacroSANBaseDriver, driver.FibreChannelDriver): @synchronized(lock_name) @record_request_id - @utils.trace + @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" diff --git a/cinder/volume/drivers/nec/volume_helper.py b/cinder/volume/drivers/nec/volume_helper.py index 5ae477aea1f..6bca861ef49 100644 --- a/cinder/volume/drivers/nec/volume_helper.py +++ b/cinder/volume/drivers/nec/volume_helper.py @@ -24,7 +24,6 @@ from oslo_utils import units from cinder import coordination from cinder import exception from cinder.i18n import _ -from cinder import utils from cinder.volume.drivers.nec import cli from cinder.volume.drivers.nec import volume_common from cinder.volume import volume_utils @@ -1322,7 +1321,7 @@ class MStorageDriver(volume_common.MStorageVolumeCommon): '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) - @utils.trace + @volume_utils.trace def _fc_terminate_connection(self, vol_or_snap, connector, is_snapshot=False): """Disallow connection from connector.""" diff --git a/cinder/volume/drivers/netapp/dataontap/block_base.py b/cinder/volume/drivers/netapp/dataontap/block_base.py index 96c81d46d0f..d65bfbca970 100644 --- a/cinder/volume/drivers/netapp/dataontap/block_base.py +++ b/cinder/volume/drivers/netapp/dataontap/block_base.py @@ -38,7 +38,6 @@ import 
six from cinder import exception from cinder.i18n import _ -from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls from cinder.volume.drivers.netapp import options as na_opts @@ -71,7 +70,7 @@ class NetAppLun(object): self.handle, self.name, self.size, self.metadata) -@six.add_metaclass(utils.TraceWrapperMetaclass) +@six.add_metaclass(volume_utils.TraceWrapperMetaclass) class NetAppBlockStorageLibrary(object): """NetApp block storage library for Data ONTAP.""" diff --git a/cinder/volume/drivers/netapp/dataontap/block_cmode.py b/cinder/volume/drivers/netapp/dataontap/block_cmode.py index d5395405b30..a4b3a5b586d 100644 --- a/cinder/volume/drivers/netapp/dataontap/block_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/block_cmode.py @@ -31,7 +31,6 @@ import six from cinder import exception from cinder.i18n import _ from cinder.objects import fields -from cinder import utils from cinder.volume.drivers.netapp.dataontap import block_base from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap.utils import capabilities @@ -46,7 +45,7 @@ from cinder.volume import volume_utils LOG = logging.getLogger(__name__) -@six.add_metaclass(utils.TraceWrapperMetaclass) +@six.add_metaclass(volume_utils.TraceWrapperMetaclass) class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary, data_motion.DataMotionMixin): """NetApp block storage library for Data ONTAP (Cluster-mode).""" diff --git a/cinder/volume/drivers/netapp/dataontap/client/api.py b/cinder/volume/drivers/netapp/dataontap/client/api.py index e7504c204a6..ada92e7f107 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/api.py +++ b/cinder/volume/drivers/netapp/dataontap/client/api.py @@ -32,8 +32,8 @@ from six.moves import urllib from cinder import exception from cinder.i18n import _ from cinder import ssh_utils -from cinder import utils from cinder.volume.drivers.netapp import utils as na_utils +from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @@ -180,7 +180,7 @@ class NaServer(object): """Set the vserver to use if tunneling gets enabled.""" self._vserver = vserver - @utils.trace_api(filter_function=na_utils.trace_filter_func_api) + @volume_utils.trace_api(filter_function=na_utils.trace_filter_func_api) def send_http_request(self, na_element, enable_tunneling=False): """Invoke the API on the server.""" if not na_element or not isinstance(na_element, NaElement): diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_base.py b/cinder/volume/drivers/netapp/dataontap/client/client_base.py index 0a94d204ec0..cfb970fb5a4 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/client_base.py +++ b/cinder/volume/drivers/netapp/dataontap/client/client_base.py @@ -26,6 +26,7 @@ from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp import utils as na_utils +from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @@ -33,7 +34,7 @@ DELETED_PREFIX = 'deleted_cinder_' MAX_SIZE_FOR_A_LUN = '17555678822400' -@six.add_metaclass(utils.TraceWrapperMetaclass) +@six.add_metaclass(volume_utils.TraceWrapperMetaclass) class Client(object): def __init__(self, **kwargs): diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py b/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py index 
43c715ae38d..0d4885f9ac6 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py @@ -27,7 +27,6 @@ import six from cinder import exception from cinder.i18n import _ -from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base from cinder.volume.drivers.netapp import utils as na_utils @@ -37,7 +36,7 @@ LOG = logging.getLogger(__name__) DEFAULT_MAX_PAGE_LENGTH = 50 -@six.add_metaclass(utils.TraceWrapperMetaclass) +@six.add_metaclass(volume_utils.TraceWrapperMetaclass) class Client(client_base.Client): def __init__(self, **kwargs): @@ -1334,7 +1333,7 @@ class Client(client_base.Client): } self.connection.send_request('sis-set-config', api_args) - @utils.trace_method + @volume_utils.trace_method def delete_file(self, path_to_file): """Delete file at path.""" diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_base.py b/cinder/volume/drivers/netapp/dataontap/nfs_base.py index 0bda913df2b..79614e4528a 100644 --- a/cinder/volume/drivers/netapp/dataontap/nfs_base.py +++ b/cinder/volume/drivers/netapp/dataontap/nfs_base.py @@ -54,7 +54,7 @@ CONF = cfg.CONF HOUSEKEEPING_INTERVAL_SECONDS = 600 # ten minutes -@six.add_metaclass(utils.TraceWrapperWithABCMetaclass) +@six.add_metaclass(volume_utils.TraceWrapperWithABCMetaclass) class NetAppNfsDriver(driver.ManageableVD, driver.CloneableImageVD, nfs.NfsDriver): diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py index 5751932b2a2..a00cb01ad31 100644 --- a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py @@ -33,7 +33,6 @@ from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.objects import fields -from cinder import utils from cinder.volume.drivers.netapp.dataontap import nfs_base from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap.utils import capabilities @@ -49,7 +48,7 @@ LOG = logging.getLogger(__name__) @interface.volumedriver -@six.add_metaclass(utils.TraceWrapperWithABCMetaclass) +@six.add_metaclass(volume_utils.TraceWrapperWithABCMetaclass) class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver, data_motion.DataMotionMixin): """NetApp NFS driver for Data ONTAP (Cluster-mode).""" diff --git a/cinder/volume/drivers/netapp/dataontap/utils/utils.py b/cinder/volume/drivers/netapp/dataontap/utils/utils.py index b8b66252a19..33fb546b349 100644 --- a/cinder/volume/drivers/netapp/dataontap/utils/utils.py +++ b/cinder/volume/drivers/netapp/dataontap/utils/utils.py @@ -23,11 +23,11 @@ from oslo_config import cfg from cinder import exception from cinder.i18n import _ -from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp import options as na_opts +from cinder.volume import volume_utils CONF = cfg.CONF @@ -71,7 +71,7 @@ def get_client_for_backend(backend_name, vserver_name=None): hostname=config.netapp_server_hostname, port=config.netapp_server_port, vserver=vserver_name or config.netapp_vserver, - trace=utils.TRACE_API, + trace=volume_utils.TRACE_API, api_trace_pattern=config.netapp_api_trace_pattern) return client diff --git a/cinder/volume/drivers/remotefs.py 
b/cinder/volume/drivers/remotefs.py index 67980b55d18..0c66b03dff6 100644 --- a/cinder/volume/drivers/remotefs.py +++ b/cinder/volume/drivers/remotefs.py @@ -181,7 +181,7 @@ class RemoteFSDriver(driver.BaseVD): """Just to override parent behavior.""" pass - @utils.trace + @volume_utils.trace def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. @@ -268,7 +268,7 @@ class RemoteFSDriver(driver.BaseVD): acceptable_states=acceptable_states, current_state=current_state)) - @utils.trace + @volume_utils.trace def create_volume(self, volume): """Creates a volume. @@ -343,7 +343,7 @@ class RemoteFSDriver(driver.BaseVD): LOG.debug('Available shares %s', self._mounted_shares) - @utils.trace + @volume_utils.trace def delete_volume(self, volume): """Deletes a logical volume. diff --git a/cinder/volume/drivers/rsd.py b/cinder/volume/drivers/rsd.py index 9c82e7fcdfe..e0e1013dd05 100644 --- a/cinder/volume/drivers/rsd.py +++ b/cinder/volume/drivers/rsd.py @@ -31,6 +31,7 @@ from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume import driver +from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @@ -539,7 +540,7 @@ class RSDDriver(driver.VolumeDriver): def get_driver_options(): return RSD_OPTS - @utils.trace + @volume_utils.trace def do_setup(self, context): self.rsdClient = RSDClient.initialize( self.configuration.podm_url, @@ -550,13 +551,13 @@ class RSDDriver(driver.VolumeDriver): def check_for_setup_error(self): pass - @utils.trace + @volume_utils.trace def create_volume(self, volume): size_in_gb = int(volume['size']) volume_url = self.rsdClient.create_volume(size_in_gb) return {'provider_location': volume_url} - @utils.trace + @volume_utils.trace def delete_volume(self, volume): volume_url = volume['provider_location'] if not volume_url: @@ -570,7 +571,7 @@ class RSDDriver(driver.VolumeDriver): volume_name=volume.name, ignore_non_exist=True) - @utils.trace + @volume_utils.trace def _update_volume_stats(self): backend_name = ( self.configuration.safe_get('volume_backend_name') or 'RSD') @@ -597,7 +598,7 @@ class RSDDriver(driver.VolumeDriver): # SinglePool self._stats['pools'] = [spool] - @utils.trace + @volume_utils.trace def initialize_connection(self, volume, connector, **kwargs): uuid = connector.get("system uuid") if not uuid: @@ -626,7 +627,7 @@ class RSDDriver(driver.VolumeDriver): } return conn_info - @utils.trace + @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): if connector is None: # None connector means force-detach @@ -658,7 +659,7 @@ class RSDDriver(driver.VolumeDriver): def remove_export(self, context, volume): pass - @utils.trace + @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): snap_url = snapshot.provider_location old_size_in_gb = snapshot.volume_size @@ -673,14 +674,14 @@ class RSDDriver(driver.VolumeDriver): raise return {'provider_location': volume_url} - @utils.trace + @volume_utils.trace def create_snapshot(self, snapshot): volume_url = snapshot.volume.provider_location snap_url = self.rsdClient.create_snap(volume_url) snapshot.provider_location = snap_url snapshot.save() - @utils.trace + @volume_utils.trace def delete_snapshot(self, snapshot): snap_url = snapshot.provider_location if not snap_url: @@ -689,7 +690,7 @@ class RSDDriver(driver.VolumeDriver): volume_name=snapshot.name, ignore_non_exist=True) - @utils.trace + @volume_utils.trace def extend_volume(self, volume, new_size): volume_url = 
volume.provider_location self.rsdClient.extend_volume(volume_url, new_size) @@ -699,7 +700,7 @@ class RSDDriver(driver.VolumeDriver): image_service): return None, False - @utils.trace + @volume_utils.trace def create_cloned_volume(self, volume, src_vref): volume_url = src_vref.provider_location old_size_in_gb = src_vref.size diff --git a/cinder/volume/drivers/stx/client.py b/cinder/volume/drivers/stx/client.py index 5b2c1e28cb9..f3a6951abde 100644 --- a/cinder/volume/drivers/stx/client.py +++ b/cinder/volume/drivers/stx/client.py @@ -29,13 +29,13 @@ import six from cinder import coordination from cinder.i18n import _ -from cinder import utils import cinder.volume.drivers.stx.exception as stx_exception +from cinder.volume import volume_utils LOG = logging.getLogger(__name__) -@six.add_metaclass(utils.TraceWrapperMetaclass) +@six.add_metaclass(volume_utils.TraceWrapperMetaclass) class STXClient(object): def __init__(self, host, login, password, protocol, ssl_verify): self._mgmt_ip_addrs = list(map(str.strip, host.split(','))) diff --git a/cinder/volume/drivers/stx/common.py b/cinder/volume/drivers/stx/common.py index a2394b265c2..f9734d1bec4 100644 --- a/cinder/volume/drivers/stx/common.py +++ b/cinder/volume/drivers/stx/common.py @@ -26,10 +26,10 @@ import six from cinder import exception from cinder.i18n import _ from cinder.objects import fields -from cinder import utils from cinder.volume import configuration import cinder.volume.drivers.stx.client as client import cinder.volume.drivers.stx.exception as stx_exception +from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @@ -54,7 +54,7 @@ CONF.register_opts(common_opts, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(iscsi_opts, group=configuration.SHARED_CONF_GROUP) -@six.add_metaclass(utils.TraceWrapperMetaclass) +@six.add_metaclass(volume_utils.TraceWrapperMetaclass) class STXCommon(object): VERSION = "2.0" diff --git a/cinder/volume/drivers/toyou/acs5000/acs5000_common.py b/cinder/volume/drivers/toyou/acs5000/acs5000_common.py index f69bf1c0f20..d67bbe588cc 100644 --- a/cinder/volume/drivers/toyou/acs5000/acs5000_common.py +++ b/cinder/volume/drivers/toyou/acs5000/acs5000_common.py @@ -355,7 +355,7 @@ class Acs5000CommonDriver(san.SanDriver, 'code_level': None, 'version': None} - @cinder_utils.trace_method + @volume_utils.trace_method def do_setup(self, ctxt): """Check that we have all configuration details from the storage.""" self._validate_pools_exist() @@ -383,7 +383,7 @@ class Acs5000CommonDriver(san.SanDriver, raise exception.InvalidInput(reason=msg) return True - @cinder_utils.trace_method + @volume_utils.trace_method def check_for_setup_error(self): """Ensure that the params are set properly.""" if self._state['system_name'] is None: diff --git a/cinder/volume/volume_utils.py b/cinder/volume/volume_utils.py index 08870aa26ed..23773f9dba5 100644 --- a/cinder/volume/volume_utils.py +++ b/cinder/volume/volume_utils.py @@ -14,9 +14,12 @@ """Volume-related Utilities and helpers.""" +import abc import ast import functools +import inspect import json +import logging as py_logging import math import operator import os @@ -26,6 +29,7 @@ import re import socket import tempfile import time +import types import uuid from castellan.common.credentials import keystone_password @@ -76,6 +80,9 @@ IMAGE_ATTRIBUTES = ( 'min_ram', 'size', ) +VALID_TRACE_FLAGS = {'method', 'api'} +TRACE_API = False +TRACE_METHOD = False def null_safe_str(s): @@ -1363,3 +1370,163 @@ def brick_detach_volume_encryptor(attach_info, 
encryption):
     encryptor = brick_get_encryptor(connection_info,
                                     **encryption)
     encryptor.detach_volume(**encryption)
+
+
+# NOTE: the trace methods are included in volume_utils because
+# they are currently only called by code in the volume area
+# of Cinder. These can be moved to a different file if they
+# are needed elsewhere.
+def trace(*dec_args, **dec_kwargs):
+    """Trace calls to the decorated function.
+
+    This decorator should always be defined as the outermost decorator so it
+    is defined last. This is important so it does not interfere
+    with other decorators.
+
+    Using this decorator on a function will cause its execution to be logged at
+    `DEBUG` level with arguments, return values, and exceptions.
+
+    :returns: a function decorator
+    """
+
+    def _decorator(f):
+
+        func_name = f.__name__
+
+        @functools.wraps(f)
+        def trace_logging_wrapper(*args, **kwargs):
+            filter_function = dec_kwargs.get('filter_function')
+
+            if len(args) > 0:
+                maybe_self = args[0]
+            else:
+                maybe_self = kwargs.get('self', None)
+
+            if maybe_self and hasattr(maybe_self, '__module__'):
+                logger = logging.getLogger(maybe_self.__module__)
+            else:
+                logger = LOG
+
+            # NOTE(ameade): Don't bother going any further if DEBUG log level
+            # is not enabled for the logger.
+            if not logger.isEnabledFor(py_logging.DEBUG):
+                return f(*args, **kwargs)
+
+            all_args = inspect.getcallargs(f, *args, **kwargs)
+
+            pass_filter = filter_function is None or filter_function(all_args)
+
+            if pass_filter:
+                logger.debug('==> %(func)s: call %(all_args)r',
+                             {'func': func_name,
+                              'all_args': strutils.mask_password(
+                                  str(all_args))})
+
+            start_time = time.time() * 1000
+            try:
+                result = f(*args, **kwargs)
+            except Exception as exc:
+                total_time = int(round(time.time() * 1000)) - start_time
+                logger.debug('<== %(func)s: exception (%(time)dms) %(exc)r',
+                             {'func': func_name,
+                              'time': total_time,
+                              'exc': exc})
+                raise
+            total_time = int(round(time.time() * 1000)) - start_time
+
+            if isinstance(result, dict):
+                mask_result = strutils.mask_dict_password(result)
+            elif isinstance(result, str):
+                mask_result = strutils.mask_password(result)
+            else:
+                mask_result = result
+
+            if pass_filter:
+                logger.debug('<== %(func)s: return (%(time)dms) %(result)r',
+                             {'func': func_name,
+                              'time': total_time,
+                              'result': mask_result})
+            return result
+        return trace_logging_wrapper
+
+    if len(dec_args) == 0:
+        # filter_function is passed and args does not contain f
+        return _decorator
+    else:
+        # filter_function is not passed
+        return _decorator(dec_args[0])
+
+
+def trace_api(*dec_args, **dec_kwargs):
+    """Decorates a function if TRACE_API is true."""
+
+    def _decorator(f):
+        @functools.wraps(f)
+        def trace_api_logging_wrapper(*args, **kwargs):
+            if TRACE_API:
+                return trace(f, *dec_args, **dec_kwargs)(*args, **kwargs)
+            return f(*args, **kwargs)
+        return trace_api_logging_wrapper
+
+    if len(dec_args) == 0:
+        # filter_function is passed and args does not contain f
+        return _decorator
+    else:
+        # filter_function is not passed
+        return _decorator(dec_args[0])
+
+
+def trace_method(f):
+    """Decorates a function if TRACE_METHOD is true."""
+    @functools.wraps(f)
+    def trace_method_logging_wrapper(*args, **kwargs):
+        if TRACE_METHOD:
+            return trace(f)(*args, **kwargs)
+        return f(*args, **kwargs)
+    return trace_method_logging_wrapper
+
+
+class TraceWrapperMetaclass(type):
+    """Metaclass that wraps all methods of a class with trace_method.
+
+    This metaclass will cause every function inside of the class to be
+    decorated with the trace_method decorator.
+
+    To use the metaclass you define a class like so:
+    class MyClass(object, metaclass=volume_utils.TraceWrapperMetaclass):
+    """
+    def __new__(meta, classname, bases, classDict):
+        newClassDict = {}
+        for attributeName, attribute in classDict.items():
+            if isinstance(attribute, types.FunctionType):
+                # replace it with a wrapped version
+                attribute = functools.update_wrapper(trace_method(attribute),
+                                                     attribute)
+            newClassDict[attributeName] = attribute
+
+        return type.__new__(meta, classname, bases, newClassDict)
+
+
+class TraceWrapperWithABCMetaclass(abc.ABCMeta, TraceWrapperMetaclass):
+    """Metaclass that wraps all methods of a class with trace."""
+    pass
+
+
+def setup_tracing(trace_flags):
+    """Set global variables for each trace flag.
+
+    Sets variables TRACE_METHOD and TRACE_API, which represent
+    whether to log methods or api traces.
+
+    :param trace_flags: a list of strings
+    """
+    global TRACE_METHOD
+    global TRACE_API
+    try:
+        trace_flags = [flag.strip() for flag in trace_flags]
+    except TypeError:  # Handle when trace_flags is None or a test mock
+        trace_flags = []
+    for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS):
+        LOG.warning('Invalid trace flag: %s', invalid_flag)
+    TRACE_METHOD = 'method' in trace_flags
+    TRACE_API = 'api' in trace_flags
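
A minimal usage sketch of the relocated trace helpers, assuming a Cinder
development environment where cinder.volume.volume_utils from this change is
importable. ExampleDriver, its method bodies, and the direct setup_tracing()
call are illustrative assumptions and not part of the patch; only the
volume_utils names come from the code added above.

    # Hypothetical driver used only to illustrate the decorators above.
    from cinder.volume import volume_utils


    class ExampleDriver(object):

        @volume_utils.trace_method
        def create_volume(self, volume):
            # Traced only when 'method' is in the configured trace flags.
            return {'provider_location': None}

        @volume_utils.trace
        def delete_volume(self, volume):
            # Traced whenever DEBUG logging is enabled for this module,
            # independent of the trace flags.
            pass


    # Normally driven by the trace_flags config option at service startup;
    # called directly here only to show its effect on trace_method above.
    volume_utils.setup_tracing(['method'])

    driver = ExampleDriver()
    driver.create_volume({'id': 'fake-id', 'size': 1})
    driver.delete_volume({'id': 'fake-id'})

Classes that should trace every method can instead opt in wholesale, as
several drivers in this change already do, with
@six.add_metaclass(volume_utils.TraceWrapperMetaclass), or
TraceWrapperWithABCMetaclass when the class also relies on abc.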