From 14ad28105791e4506e3bb20bf726e2bd3d70ea7b Mon Sep 17 00:00:00 2001 From: xiaodongwang Date: Thu, 25 Jun 2015 13:56:20 -0700 Subject: [PATCH] move adapter from db to memory Change-Id: I366052e23d72dd94229513d6a0992338d0d44638 --- bin/clean_installers.py | 2 +- bin/client.py | 113 +- compass/actions/delete.py | 28 +- compass/actions/deploy.py | 7 +- compass/actions/health_check/base.py | 31 +- compass/actions/install_callback.py | 12 +- compass/actions/poll_switch.py | 15 + compass/actions/update_progress.py | 27 +- compass/actions/util.py | 21 + compass/api/api.py | 949 ++++++-- compass/api/api.raml | 80 +- compass/apiclient/example.py | 10 +- compass/apiclient/restful.py | 17 +- compass/db/api/adapter.py | 369 +-- compass/db/api/adapter_holder.py | 95 +- compass/db/api/cluster.py | 1976 +++++++++-------- compass/db/api/database.py | 212 +- compass/db/api/health_check_report.py | 112 +- compass/db/api/host.py | 611 ++--- compass/db/api/installer.py | 53 - compass/db/api/machine.py | 88 +- compass/db/api/metadata.py | 651 +++--- compass/db/api/metadata_holder.py | 672 ++++-- compass/db/api/network.py | 58 +- compass/db/api/permission.py | 32 +- compass/db/api/switch.py | 870 +++++--- compass/db/api/user.py | 253 ++- compass/db/api/user_log.py | 20 +- compass/db/api/utils.py | 655 +++++- compass/db/exception.py | 1 + compass/db/models.py | 1520 +++---------- .../os_installers/cobbler/cobbler.py | 2 +- compass/deployment/utils/constants.py | 1 - compass/log_analyzor/adapter_matcher.py | 12 +- compass/log_analyzor/environment.py | 29 + compass/log_analyzor/line_matcher.py | 2 +- compass/log_analyzor/progress_calculator.py | 170 +- compass/tasks/tasks.py | 3 + compass/tests/actions/deploy/test_deploy.py | 14 +- .../update_progress/test_update_progress.py | 34 +- compass/tests/api/data/adapter/ceph.conf | 1 - compass/tests/api/data/adapter/openstack.conf | 1 - .../tests/api/data/flavor/openstack_chef.conf | 16 - .../api/data/flavor_mapping/allinone.conf | 4 +- 
.../api/data/flavor_metadata/allinone.conf | 1 + .../tests/api/data/os_mapping/os_mapping.conf | 3 +- compass/tests/api/test_api.py | 60 +- compass/tests/api/test_health_check_api.py | 19 +- compass/tests/db/api/base.py | 10 +- compass/tests/db/api/data/adapter/ceph.conf | 1 - .../tests/db/api/data/adapter/openstack.conf | 1 - .../data/flavor_mapping/ha-multinodes.conf | 4 +- .../data/flavor_metadata/HA-multinodes.conf | 1 + .../db/api/data/flavor_metadata/allinone.conf | 1 + .../single-contoller-multi-compute.conf | 1 + .../db/api/data/os_mapping/os_mapping.conf | 3 +- compass/tests/db/api/test_adapter_holder.py | 166 +- compass/tests/db/api/test_cluster.py | 16 +- compass/tests/db/api/test_host.py | 10 +- compass/tests/db/api/test_metadata_holder.py | 62 +- compass/tests/db/api/test_switch.py | 12 +- compass/tests/db/api/test_user.py | 7 +- compass/tests/db/api/test_user_log.py | 9 +- compass/tests/db/api/test_utils.py | 75 +- .../os_installers/cobbler/test_cobbler.py | 14 +- .../pk_installers/chef_installer/test_chef.py | 13 +- .../installers/test_config_manager.py | 15 +- .../deployment/installers/test_installer.py | 15 +- .../tests/deployment/test_data/config_data.py | 1 - .../tests/deployment/test_deploy_manager.py | 8 + .../log_analyzor/test_adapter_matcher.py | 12 +- compass/utils/logsetting.py | 8 + compass/utils/setting_wrapper.py | 140 +- compass/utils/util.py | 70 +- conf/adapter/ceph.conf | 1 - conf/adapter/chef_ceph.conf | 2 +- conf/adapter/chef_ceph_openstack.conf | 1 - conf/adapter/openstack.conf | 1 - conf/flavor_mapping/allinone.conf | 4 +- conf/flavor_mapping/ceph_firefly.conf | 4 +- .../ceph_openstack_multinodes.conf | 4 +- .../ceph_openstack_single_controller.conf | 4 +- conf/flavor_mapping/ha-multinodes.conf | 4 +- conf/flavor_mapping/multinodes.conf | 4 +- .../single-contoller-multi-compute.conf | 4 +- conf/flavor_metadata/HA-multinodes.conf | 1 + conf/flavor_metadata/allinone.conf | 1 + conf/flavor_metadata/ceph_firefly.conf | 1 + 
.../ceph_openstack_multinodes.conf | 1 + .../ceph_openstack_single_controller.conf | 1 + conf/flavor_metadata/multinodes.conf | 1 + .../single-contoller-multi-compute.conf | 1 + conf/os_mapping/os_mapping.conf | 3 +- .../progress_calculator.conf | 34 +- conf/role/openstack_chef.conf | 6 +- install/ansible.sh | 2 +- install/chef.sh | 8 +- regtest/ansible/ci/conf/allinone.conf | 1 - regtest/ansible/ci/conf/four-ansible.conf | 1 - regtest/ansible/ci/conf/two.conf | 1 - regtest/ansible/ci/deploy-vm.sh | 1 - regtest/ansible/ci/test.sh | 1 - regtest/ansible/install/regtest.yml | 1 - .../regtest-compass/templates/client.sh.j2 | 1 - regtest/regtest.conf | 3 +- regtest/regtest.sh | 2 +- regtest/regtest6.conf | 1 - requirements.txt | 1 - test-requirements.txt | 4 +- 109 files changed, 5972 insertions(+), 4746 deletions(-) delete mode 100644 compass/db/api/installer.py create mode 100644 compass/log_analyzor/environment.py diff --git a/bin/clean_installers.py b/bin/clean_installers.py index 6b1ae3ed..ae6dab2f 100755 --- a/bin/clean_installers.py +++ b/bin/clean_installers.py @@ -75,7 +75,7 @@ def clean_installers(): filtered_os_installers[os_installer_name] = os_installer else: logging.info( - 'ignore os isntaller %s', os_installer_name + 'ignore os installer %s', os_installer_name ) else: logging.info( diff --git a/bin/client.py b/bin/client.py index fa936e17..350a85b8 100755 --- a/bin/client.py +++ b/bin/client.py @@ -75,9 +75,6 @@ flags.add('adapter_name', flags.add('adapter_os_pattern', help='adapter os name', default=r'^(?i)centos.*') -flags.add('adapter_target_system_pattern', - help='adapter target system name', - default='^openstack$') flags.add('adapter_flavor_pattern', help='adapter flavor name', default='allinone') @@ -342,120 +339,58 @@ def _poll_switches(client): def _get_adapter(client): """get adapter.""" - status, resp = client.list_adapters() + adapter_name = flags.OPTIONS.adapter_name + status, resp = client.list_adapters( + name=adapter_name + ) 
logging.info( - 'get all adapters status: %s, resp: %s', - status, resp + 'get all adapters for name %s status: %s, resp: %s', + adapter_name, status, resp ) if status >= 400: msg = 'failed to get adapters' raise Exception(msg) - adapter_name = flags.OPTIONS.adapter_name + if not resp: + msg = 'no adapter found' + raise Exception(msg) + + adapter = resp[0] os_pattern = flags.OPTIONS.adapter_os_pattern if os_pattern: os_re = re.compile(os_pattern) else: os_re = None - target_system_pattern = flags.OPTIONS.adapter_target_system_pattern - if target_system_pattern: - target_system_re = re.compile(target_system_pattern) - else: - target_system_re = None flavor_pattern = flags.OPTIONS.adapter_flavor_pattern if flavor_pattern: flavor_re = re.compile(flavor_pattern) else: flavor_re = None - adapter_id = None + + adapter_id = adapter['id'] os_id = None - distributed_system_id = None flavor_id = None - adapter = None - for item in resp: - adapter_id = None - os_id = None - flavor_id = None - adapter = item - for supported_os in adapter['supported_oses']: - if not os_re or os_re.match(supported_os['name']): - os_id = supported_os['os_id'] - break - - if not os_id: - logging.info('no os found for adapter %s', adapter) - continue - - if 'flavors' in adapter: - for flavor in adapter['flavors']: - if not flavor_re or flavor_re.match(flavor['name']): - flavor_id = flavor['id'] - break - - if adapter_name: - if adapter['name'] == adapter_name: - adapter_id = adapter['id'] - logging.info('adapter name %s matches: %s', - adapter_name, adapter) - else: - logging.info('adapter name %s does not match %s', - adapter_name, adapter) - elif ( - 'distributed_system_name' in item and - adapter['distributed_system_name'] - ): - if ( - target_system_re and - target_system_re.match(adapter['distributed_system_name']) - ): - adapter_id = adapter['id'] - logging.info( - 'distributed system name pattern %s matches: %s', - target_system_pattern, adapter - ) - else: - logging.info( - 'distributed 
system name pattern %s does not match: %s', - target_system_pattern, adapter - ) - else: - if not target_system_re: - adapter_id = adapter['id'] - logging.info( - 'os only adapter matches no target_system_pattern' - ) - else: - logging.info( - 'distributed system name pattern defined ' - 'but the adapter does not have ' - 'distributed_system_name attributes' - ) - - if adapter_id and target_system_re: - distributed_system_id = adapter['distributed_system_id'] - - if adapter_id: - logging.info('adadpter matches: %s', adapter) + for supported_os in adapter['supported_oses']: + if not os_re or os_re.match(supported_os['name']): + os_id = supported_os['os_id'] break - if not adapter_id: - msg = 'no adapter found' - raise Exception(msg) + if 'flavors' in adapter: + for flavor in adapter['flavors']: + if not flavor_re or flavor_re.match(flavor['name']): + flavor_id = flavor['id'] + break if not os_id: msg = 'no os found for %s' % os_pattern raise Exception(msg) - if target_system_re and not distributed_system_id: - msg = 'no distributed system found for %s' % target_system_pattern - raise Exception(msg) - if flavor_re and not flavor_id: msg = 'no flavor found for %s' % flavor_pattern raise Exception(msg) logging.info('adpater for deploying a cluster: %s', adapter_id) - return (adapter_id, os_id, distributed_system_id, flavor_id) + return (adapter_id, os_id, flavor_id) def _add_subnets(client): @@ -1059,14 +994,14 @@ def main(): machines = _get_machines(client) logging.info('machines are %s', machines) subnet_mapping = _add_subnets(client) - adapter_id, os_id, distributed_system_id, flavor_id = _get_adapter(client) + adapter_id, os_id, flavor_id = _get_adapter(client) cluster_id, host_mapping, role_mapping = _add_cluster( client, adapter_id, os_id, flavor_id, machines) host_ips = _set_host_networking( client, host_mapping, subnet_mapping ) _set_cluster_os_config(client, cluster_id, host_ips) - if distributed_system_id: + if flavor_id: _set_cluster_package_config(client, 
cluster_id) if role_mapping: _set_hosts_roles(client, cluster_id, host_mapping, role_mapping) diff --git a/compass/actions/delete.py b/compass/actions/delete.py index 010e9ebd..d89994d0 100644 --- a/compass/actions/delete.py +++ b/compass/actions/delete.py @@ -28,10 +28,15 @@ def delete_cluster( cluster_id, host_id_list, username=None, delete_underlying_host=False ): - """Delete cluster. + """Delete cluster and all clusterhosts on it. :param cluster_id: id of the cluster. :type cluster_id: int + :param host_id_list: list of host id. + :type host_id_list: list of int. + + If delete_underlying_host is set, all underlying hosts will + be deleted. .. note:: The function should be called out of database session. @@ -66,6 +71,19 @@ def delete_cluster_host( cluster_id, host_id, username=None, delete_underlying_host=False ): + """Delete clusterhost. + + :param cluster_id: id of the cluster. + :type cluster_id: int + :param host_id: id of the host. + :type host_id: int + + If delete_underlying_host is set, the underlying host + will be deleted too. + + .. note:: + The function should be called out of database session. + """ with util.lock('serialized_action', timeout=100) as lock: if not lock: raise Exception('failed to acquire lock to delete clusterhost') @@ -94,6 +112,14 @@ def delete_cluster_host( def delete_host( host_id, cluster_id_list, username=None ): + """Delete host and all clusterhosts on it. + + :param host_id: id of the host. + :type host_id: int + + .. note:: + The function should be called out of database session. 
+ """ with util.lock('serialized_action', timeout=100) as lock: if not lock: raise Exception('failed to acquire lock to delete host') diff --git a/compass/actions/deploy.py b/compass/actions/deploy.py index 711bb5ef..e5fbe9d6 100644 --- a/compass/actions/deploy.py +++ b/compass/actions/deploy.py @@ -128,16 +128,17 @@ def health_check(cluster_id, report_uri, username): except Exception as exc: logging.error("health_check exception: ============= %s" % exc) data = {'state': 'error', 'error_message': str(exc), 'report': {}} - reports = health_check_db.list_health_reports(user, cluster_id) + reports = health_check_db.list_health_reports( + cluster_id, user=user) if not reports: # Exception before executing command remotely for health check. # No reports names sending back yet. Create a report name = 'pre_remote_health_check' health_check_db.add_report_record( - cluster_id, name=name, **data + cluster_id, name, user=user, **data ) - health_check_db.update_multi_reports(cluster_id, **data) + health_check_db.update_multi_reports(cluster_id, user=user, **data) class ServerPowerMgmt(object): diff --git a/compass/actions/health_check/base.py b/compass/actions/health_check/base.py index 141369c0..22b6fae4 100644 --- a/compass/actions/health_check/base.py +++ b/compass/actions/health_check/base.py @@ -14,9 +14,7 @@ """Base class for Compass Health Check.""" from compass.actions.health_check import utils as health_check_utils -from compass.db.api import database -from compass.db.api import utils -from compass.db import models +from compass.db.api import adapter as adapter_api from compass.utils import setting_wrapper as setting @@ -28,30 +26,25 @@ class BaseCheck(object): self.code = 1 self.messages = [] self.dist, self.version, self.release = health_check_utils.get_dist() + adapter_api.load_adapters_internal() self.os_installer = self._get_os_installer() self.package_installer = self._get_package_installer() def _get_os_installer(self): - with database.session() as session: - 
installer = utils.get_db_object( - session, models.OSInstaller - ) - os_installer = {} - os_installer['name'] = health_check_utils.strip_name( - installer.name) - os_installer.update(installer.settings) + installer = adapter_api.OS_INSTALLERS.values()[0] + os_installer = {} + os_installer['name'] = health_check_utils.strip_name( + installer['name']) + os_installer.update(installer['settings']) return os_installer def _get_package_installer(self): package_installer = {} - with database.session() as session: - installer = session.query( - models.PackageInstaller - ).first() - package_installer = {} - package_installer['name'] = health_check_utils.strip_name( - installer.name) - package_installer.update(installer.settings) + installer = adapter_api.PACKAGE_INSTALLERS.values()[0] + package_installer = {} + package_installer['name'] = health_check_utils.strip_name( + installer['name']) + package_installer.update(installer['settings']) return package_installer def _set_status(self, code, message): diff --git a/compass/actions/install_callback.py b/compass/actions/install_callback.py index ba4b4dfb..dcf278ac 100644 --- a/compass/actions/install_callback.py +++ b/compass/actions/install_callback.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Module to reinstall a given cluster +"""Module to receive installation callback. .. moduleauthor:: Xiaodong Wang """ @@ -34,6 +34,8 @@ def os_installed( :param host_id: host that os is installed. :type host_id: integer + :param clusterhosts_ready: the clusterhosts that should trigger ready. + :param clusters_os_ready: the cluster that should trigger os ready. .. note:: The function should be called out of database session. @@ -110,6 +112,11 @@ def package_installed( ): """Callback when package is installed. + :param cluster_id: cluster id. + :param host_id: host id. + :param cluster_ready: if the cluster should trigger ready. 
+ :param host_ready: if the host should trigger ready. + .. note:: The function should be called out of database session. """ @@ -153,6 +160,9 @@ def cluster_installed( ): """Callback when cluster is installed. + :param cluster_id: cluster id + :param clusterhosts_ready: clusterhosts that should trigger ready. + .. note:: The function should be called out of database session. """ diff --git a/compass/actions/poll_switch.py b/compass/actions/poll_switch.py index 0e7f2850..d759a8cc 100644 --- a/compass/actions/poll_switch.py +++ b/compass/actions/poll_switch.py @@ -24,6 +24,15 @@ from compass.hdsdiscovery.hdmanager import HDManager def _poll_switch(ip_addr, credentials, req_obj='mac', oper="SCAN"): + """Poll switch by ip addr. + + + Args: + ip_addr: ip addr of the switch. + credentials: credentials of the switch. + + Returns: switch attributes dict and list of machine attributes dict. + """ under_monitoring = 'under_monitoring' unreachable = 'unreachable' polling_error = 'error' @@ -124,6 +133,12 @@ def poll_switch(poller_email, ip_addr, credentials, 'failed to acquire lock to poll switch %s' % ip_addr ) + # TODO(grace): before repoll the switch, set the state to repolling. + # and when the poll switch is timeout, set the state to error. + # the frontend should only consider some main state like INTIALIZED, + # ERROR and SUCCESSFUL, REPOLLING is as an intermediate state to + # indicate the switch is in learning the mac of the machines connected + # to it. 
logging.debug('poll switch: %s', ip_addr) switch_dict, machine_dicts = _poll_switch( ip_addr, credentials, req_obj=req_obj, oper=oper diff --git a/compass/actions/update_progress.py b/compass/actions/update_progress.py index 5581f102..67a9963f 100644 --- a/compass/actions/update_progress.py +++ b/compass/actions/update_progress.py @@ -147,19 +147,6 @@ def update_progress(): ) continue clusterhost_id = clusterhost['clusterhost_id'] - if 'distributed_system_name' not in clusterhost: - logging.error( - 'distributed_system_name is not in clusterhost %s', - clusterhost - ) - continue - clusterhost_dirname = setting.CLUSTERHOST_INATALLATION_LOGDIR_NAME - if clusterhost_dirname not in clusterhost: - logging.error( - '%s is not in clusterhost %s', - clusterhost_dirname, clusterhost - ) - continue if 'cluster_id' not in clusterhost: logging.error( 'cluster_id not in clusterhost %s', @@ -176,6 +163,19 @@ def update_progress(): ) continue cluster, _ = cluster_mapping[cluster_id] + if 'flavor_name' not in cluster: + logging.error( + 'flavor_name is not in clusterhost %s related cluster', + clusterhost + ) + continue + clusterhost_dirname = setting.CLUSTERHOST_INATALLATION_LOGDIR_NAME + if clusterhost_dirname not in clusterhost: + logging.error( + '%s is not in clusterhost %s', + clusterhost_dirname, clusterhost + ) + continue adapter_id = cluster['adapter_id'] if adapter_id not in adapter_mapping: logging.info( @@ -196,6 +196,7 @@ def update_progress(): continue package_installer = adapter['package_installer'] clusterhost['package_installer'] = package_installer + clusterhost['adapter_name'] = adapter['name'] clusterhost_state = cluster_api.get_clusterhost_self_state( clusterhost_id, user=user ) diff --git a/compass/actions/util.py b/compass/actions/util.py index 34f403a8..4665c297 100644 --- a/compass/actions/util.py +++ b/compass/actions/util.py @@ -30,6 +30,14 @@ from compass.deployment.utils import constants as const @contextmanager def lock(lock_name, blocking=True, 
timeout=10): + """acquire a lock to do some actions. + + The lock is acquired by lock_name among the whole distributed + systems. + """ + # TODO(xicheng): in future we should explicitly told which redis + # server we want to talk to make the lock works on distributed + # systems. redis_instance = redis.Redis() instance_lock = redis_instance.lock(lock_name, timeout=timeout) owned = False @@ -220,6 +228,7 @@ class ActionHelper(object): @staticmethod def save_deployed_config(deployed_config, user): + """Save deployed config.""" cluster_config = deployed_config[const.CLUSTER] cluster_id = cluster_config[const.ID] del cluster_config[const.ID] @@ -259,6 +268,11 @@ class ActionHelper(object): def delete_cluster( cluster_id, host_id_list, user, delete_underlying_host=False ): + """Delete cluster. + + If delete_underlying_host is set, underlying hosts will also + be deleted. + """ if delete_underlying_host: for host_id in host_id_list: host_db.del_host( @@ -272,6 +286,10 @@ class ActionHelper(object): def delete_cluster_host( cluster_id, host_id, user, delete_underlying_host=False ): + """Delete clusterhost. + + If delete_underlying_host set, also delete underlying host. 
+ """ if delete_underlying_host: host_db.del_host( host_id, True, True, user=user @@ -288,6 +306,7 @@ class ActionHelper(object): @staticmethod def host_ready(host_id, from_database_only, user): + """Trigger host ready.""" host_db.update_host_state_internal( host_id, from_database_only=from_database_only, user=user, ready=True @@ -297,6 +316,7 @@ class ActionHelper(object): def cluster_host_ready( cluster_id, host_id, from_database_only, user ): + """Trigger clusterhost ready.""" cluster_db.update_cluster_host_state_internal( cluster_id, host_id, from_database_only=from_database_only, user=user, ready=True @@ -304,6 +324,7 @@ class ActionHelper(object): @staticmethod def cluster_ready(cluster_id, from_database_only, user): + """Trigger cluster ready.""" cluster_db.update_cluster_state_internal( cluster_id, from_database_only=from_database_only, user=user, ready=True diff --git a/compass/api/api.py b/compass/api/api.py index 163ef90a..84d5340f 100644 --- a/compass/api/api.py +++ b/compass/api/api.py @@ -51,14 +51,18 @@ from compass.utils import util def log_user_action(func): + """decorator used to log api request url.""" @functools.wraps(func) def decorated_api(*args, **kwargs): + # TODO(xicheng): save request args for GET + # and request data for POST/PUT. 
user_log_api.log_user_action(current_user.id, request.path) return func(*args, **kwargs) return decorated_api def update_user_token(func): + """decorator used to update user token expire time after api request.""" @functools.wraps(func) def decorated_api(*args, **kwargs): response = func(*args, **kwargs) @@ -73,12 +77,14 @@ def update_user_token(func): def _clean_data(data, keys): + """remove keys from dict.""" for key in keys: if key in data: del data[key] def _replace_data(data, key_mapping): + """replace key names in dict.""" for key, replaced_key in key_mapping.items(): if key in data: data[replaced_key] = data[key] @@ -86,6 +92,26 @@ def _replace_data(data, key_mapping): def _get_data(data, key): + """get key's value from request arg dict. + + When the value is list, return the element in the list + if the list size is one. If the list size is greater than one, + raise exception_handler.BadRequest. + + Example: data = {'a': ['b'], 'b': 5, 'c': ['d', 'e'], 'd': []} + _get_data(data, 'a') == 'b' + _get_data(data, 'b') == 5 + _get_data(data, 'c') raises exception_handler.BadRequest + _get_data(data, 'd') == None + _get_data(data, 'e') == None + + Usage: Used to parse the key-value pair in request.args to expected types. + Depends on the different flask plugins and what kind of parameters + passed in, the request.args format may be as below: + {'a': 'b'} or {'a': ['b']}. _get_data forces translate the + request.args to the format {'a': 'b'}. It raises exception when some + parameter declares multiple times. + """ if key in data: if isinstance(data[key], list): if data[key]: @@ -106,6 +132,24 @@ def _get_data(data, key): def _get_data_list(data, key): + """get key's value as list from request arg dict. + + If the value type is list, return it, otherwise return the list + whos only element is the value got from the dict. 
+ + Example: data = {'a': ['b'], 'b': 5, 'c': ['d', 'e'], 'd': []} + _get_data_list(data, 'a') == ['b'] + _get_data_list(data, 'b') == [5] + _get_data_list(data, 'd') == [] + _get_data_list(data, 'e') == [] + + Usage: Used to parse the key-value pair in request.args to expected types. + Depends on the different flask plugins and what kind of parameters + passed in, the request.args format may be as below: + {'a': 'b'} or {'a': ['b']}. _get_data_list forces translate the + request.args to the format {'a': ['b']}. It accepts the case that + some parameter declares multiple times. + """ if key in data: if isinstance(data[key], list): return data[key] @@ -116,38 +160,95 @@ def _get_data_list(data, key): def _get_request_data(): + """Convert reqeust data from string to python dict. + + If the request data is not json formatted, raises + exception_handler.BadRequest. + If the request data is not json formatted dict, raises + exception_handler.BadRequest + If the request data is empty, return default as empty dict. + + Usage: It is used to add or update a single resource. + """ if request.data: try: - return json.loads(request.data) + data = json.loads(request.data) except Exception: raise exception_handler.BadRequest( 'request data is not json formatted: %s' % request.data ) + if not isinstance(data, dict): + raise exception_handler.BadRequest( + 'request data is not json formatted dict: %s' % request.data + ) + return data else: return {} def _get_request_data_as_list(): + """Convert reqeust data from string to python list. + + If the request data is not json formatted, raises + exception_handler.BadRequest. + If the request data is not json formatted list, raises + exception_handler.BadRequest. + If the request data is empty, return default as empty list. + + Usage: It is used to batch add or update a list of resources. 
+ """ if request.data: try: - return json.loads(request.data) + data = json.loads(request.data) except Exception: raise exception_handler.BadRequest( 'request data is not json formatted: %s' % request.data ) + if not isinstance(data, list): + raise exception_handler.BadRequest( + 'request data is not json formatted list: %s' % request.data + ) + return data else: return [] def _bool_converter(value): + """Convert string value to bool. + + This function is used to convert value in requeset args to expected type. + If the key exists in request args but the value is not set, it means the + value should be true. + + Examples: + /?is_admin parsed to {'is_admin', None} and it should + be converted to {'is_admin': True}. + /?is_admin=0 parsed and converted to {'is_admin': False}. + /?is_admin=1 parsed and converted to {'is_admin': True}. + """ if not value: return True if value in ['False', 'false', '0']: return False - return True + if value in ['True', 'true', '1']: + return True + raise exception_handler.BadRequest( + '%r type is not bool' % value + ) def _int_converter(value): + """Convert string value to int. + + We do not use the int converter default exception since we want to make + sure the exact http response code. + + Raises: exception_handler.BadRequest if value can not be parsed to int. + + Examples: + /?count=10 parsed to {'count': '10'} and it should be + converted to {'count': 10}. + """ try: return int(value) except Exception: @@ -157,8 +258,18 @@ def _int_converter(value): def _get_request_args(**kwargs): + """Get request args as dict. + + The value in the dict is converted to expected type. + + Args: + kwargs: for each key, the value is the type converter. 
+ """ args = dict(request.args) - logging.debug('origin request args: %s', args) + logging.log( + logsetting.getLevelByName('fine'), + 'origin request args: %s', args + ) for key, value in args.items(): if key in kwargs: converter = kwargs[key] @@ -166,11 +277,40 @@ def _get_request_args(**kwargs): args[key] = [converter(item) for item in value] else: args[key] = converter(value) - logging.debug('request args: %s', args) + logging.log( + logsetting.getLevelByName('fine'), + 'request args: %s', args + ) return args def _group_data_action(data, **data_callbacks): + """Group api actions and pass data to grouped action callback. + + Example: + data = { + 'add_hosts': [{'name': 'a'}, {'name': 'b'}], + 'update_hosts': {'c': {'mac': '123'}}, + 'remove_hosts': ['d', 'e'] + } + data_callbacks = { + 'add_hosts': update_cluster_action, + 'update_hosts': update_cluster_action, + 'remove_hosts': update_cluster_action + } + it converts to update_cluster_action( + add_hosts=[{'name': 'a'}, {'name': 'b'}], + update_hosts={'c': {'mac': '123'}}, + remove_hosts=['d', 'e'] + ) + + Raises: + exception_handler.BadRequest if data is empty. + exception_handler.BadMethod if there are some keys in data but + not in data_callbacks. + exception_handler.BadRequest if it groups to multiple + callbacks. + """ if not data: raise exception_handler.BadRequest( 'no action to take' @@ -196,6 +336,7 @@ def _group_data_action(data, **data_callbacks): def _wrap_response(func, response_code): + """wrap function response to json formatted http response.""" def wrapped_func(*args, **kwargs): return utils.make_json_response( response_code, @@ -205,6 +346,20 @@ def _wrap_response(func, response_code): def _reformat_host_networks(networks): + """Reformat networks from list to dict. + + The key in the dict is the value of the key 'interface' + in each network. 
+ + Example: networks = [{'interface': 'eth0', 'ip': '10.1.1.1'}] + is reformatted to { + 'eth0': {'interface': 'eth0', 'ip': '10.1.1.1'} + } + + Usage: The networks got from db api is a list of network, + For better parsing in json frontend, we converted the + format into dict to easy reference. + """ network_mapping = {} for network in networks: if 'interface' in network: @@ -213,6 +368,7 @@ def _reformat_host_networks(networks): def _reformat_host(host): + """Reformat host's networks.""" if isinstance(host, list): return [_reformat_host(item) for item in host] if 'networks' in host: @@ -221,7 +377,12 @@ def _reformat_host(host): def _login(use_cookie): - """User login helper function.""" + """User login helper function. + + The request data should contain at least 'email' and 'password'. + The cookie expiration duration is defined in flask app config. + If user is not authenticated, it raises Unauthorized exception. + """ data = _get_request_data() if 'email' not in data or 'password' not in data: raise exception_handler.BadRequest( @@ -244,7 +405,7 @@ def _login(use_cookie): @app.route('/users/token', methods=['POST']) def get_token(): - """Get token from email and password after user authentication.""" + """user login and return token.""" return _login(False) @@ -271,7 +432,10 @@ def logout(): @login_required @update_user_token def list_users(): - """list users.""" + """list users. + + Supported paramters: ['email', 'is_admin', 'active'] + """ data = _get_request_args( is_admin=_bool_converter, active=_bool_converter @@ -286,7 +450,11 @@ def list_users(): @login_required @update_user_token def add_user(): - """add user.""" + """add user. 
+ + Must parameters: ['email', 'password'], + Optional paramters: ['is_admin', 'active'] + """ data = _get_request_data() user_dict = user_api.add_user(user=current_user, **data) return utils.make_json_response( @@ -299,7 +467,7 @@ def add_user(): @login_required @update_user_token def show_user(user_id): - """Get user.""" + """Get user by id.""" data = _get_request_args() return utils.make_json_response( 200, user_api.get_user(user_id, user=current_user, **data) @@ -311,7 +479,7 @@ def show_user(user_id): @login_required @update_user_token def show_current_user(): - """Get user.""" + """Get current user.""" data = _get_request_args() return utils.make_json_response( 200, user_api.get_current_user(user=current_user, **data) @@ -323,7 +491,13 @@ def show_current_user(): @login_required @update_user_token def update_user(user_id): - """Update user.""" + """Update user. + + Supported parameters by self: [ + 'email', 'firstname', 'lastname', 'password' + ] + Supported parameters by admin ['is_admin', 'active'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -340,7 +514,10 @@ def update_user(user_id): @login_required @update_user_token def delete_user(user_id): - """Delete user.""" + """Delete user. + + Delete is only permitted by admin user. + """ data = _get_request_data() return utils.make_json_response( 200, @@ -367,7 +544,14 @@ def list_user_permissions(user_id): @login_required @update_user_token def take_user_action(user_id): - """Take user action.""" + """Take user action. + + Support actions: [ + 'add_permissions', 'remove_permissions', + 'set_permissions', 'enable_user', + 'disable_user' + ] + """ data = _get_request_data() update_permissions_func = _wrap_response( functools.partial( @@ -429,7 +613,11 @@ def show_user_permission(user_id, permission_id): @login_required @update_user_token def add_user_permission(user_id): - """Add permission to a specific user.""" + """Add permission to a specific user. 
+ + add_user_permission is only permitted by admin user. + Must parameters: ['permission_id'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -441,7 +629,7 @@ def add_user_permission(user_id): @app.route( - '/users//permissions/', + '/users//permissions/', methods=['DELETE'] ) @log_user_action @@ -464,7 +652,10 @@ def delete_user_permission(user_id, permission_id): @login_required @update_user_token def list_permissions(): - """List permissions.""" + """List permissions. + + Supported filters: ['id', 'name', 'alias', 'description'] + """ data = _get_request_args() return utils.make_json_response( 200, @@ -486,6 +677,22 @@ def show_permission(permission_id): def _filter_timestamp(data): + """parse timestamp related params to db api understandable params. + + Example: + {'timestamp_start': '2005-12-23 12:00:00'} to + {'timestamp': {'ge': timestamp('2005-12-23 12:00:00')}}, + {'timestamp_end': '2005-12-23 12:00:00'} to + {'timestamp': {'le': timestamp('2005-12-23 12:00:00')}}, + {'timestamp_range': '2005-12-23 12:00:00,2005-12-24 12:00:00'} to + {'timestamp': {'between': [ + timestamp('2005-12-23 12:00:00'), + timestamp('2005-12-24 12:00:00') + ] + }} + + The timestamp related params can be declared multi times. + """ timestamp_filter = {} start = _get_data(data, 'timestamp_start') if start is not None: @@ -520,7 +727,13 @@ def _filter_timestamp(data): @login_required @update_user_token def list_all_user_actions(): - """List all users actions.""" + """List all users actions. + + Supported filters: [ + 'timestamp_start', 'timestamp_end', 'timestamp_range', + 'user_email' + ] + """ data = _get_request_args() _filter_timestamp(data) return utils.make_json_response( @@ -536,7 +749,12 @@ def list_all_user_actions(): @login_required @update_user_token def list_user_actions(user_id): - """List user actions.""" + """List user actions for specific user. 
+ + Supported filters: [ + 'timestamp_start', 'timestamp_end', 'timestamp_range', + ] + """ data = _get_request_args() _filter_timestamp(data) return utils.make_json_response( @@ -567,7 +785,7 @@ def delete_all_user_actions(): @login_required @update_user_token def delete_user_actions(user_id): - """Delete user actions.""" + """Delete user actions for specific user.""" data = _get_request_data() return utils.make_json_response( 200, @@ -577,7 +795,20 @@ def delete_user_actions(user_id): ) -def _filter_ip(data): +def _filter_switch_ip(data): + """filter switch ip related params to db/api understandable format. + + Examples: + {'switchIp': '10.0.0.1'} to {'ip_int': {'eq': int of '10.0.0.1'}} + {'switchIpStart': '10.0.0.1'} to + {'ip_int': {'ge': int of '10.0.0.1'}} + {'switchIpEnd': '10.0.0.1'} to + {'ip_int': {'le': int of '10.0.0.1'}} + {'switchIpRange': '10.0.0.1,10.0.0.254'} to + {'ip_int': {'between': [int of '10.0.0.1', int of '10.0.0.254']}} + + the switch ip related params can be declared multi times. + """ ip_filter = {} switch_ips = _get_data_list(data, 'switchIp') if switch_ips: @@ -621,9 +852,15 @@ def _filter_ip(data): @login_required @update_user_token def list_switches(): - """List switches.""" + """List switches. + + Supported filters: [ + 'switchIp', 'switchIpStart', 'switchIpEnd', + 'switchIpEnd', 'vendor', 'state' + ] + """ data = _get_request_args() - _filter_ip(data) + _filter_switch_ip(data) return utils.make_json_response( 200, switch_api.list_switches( @@ -649,8 +886,16 @@ def show_switch(switch_id): @login_required @update_user_token def add_switch(): - """add switch.""" + """add switch. 
+ + Must fields: ['ip'] + Optional fields: [ + 'credentials', 'vendor', 'state', + 'err_msg', 'filters' + ] + """ data = _get_request_data() + _replace_data(data, {'filters': 'machine_filters'}) return utils.make_json_response( 200, switch_api.add_switch(user=current_user, **data) @@ -662,8 +907,16 @@ def add_switch(): @login_required @update_user_token def add_switches(): - """add switches.""" - data = _get_request_data() + """batch add switches. + + request data is a list of dict. Each dict must contain ['ip'], + may contain [ + 'credentials', 'vendor', 'state', 'err_msg', 'filters' + ] + """ + data = _get_request_data_as_list() + for item_data in data: + _replace_data(item_data, {'filters': 'machine_filters'}) return utils.make_json_response( 200, switch_api.add_switches( @@ -677,8 +930,15 @@ def add_switches(): @login_required @update_user_token def update_switch(switch_id): - """update switch.""" + """update switch. + + Supported fields: [ + 'ip', 'credentials', 'vendor', 'state', + 'err_msg', 'filters' + ] + """ data = _get_request_data() + _replace_data(data, {'filters': 'machine_filters'}) return utils.make_json_response( 200, switch_api.update_switch(switch_id, user=current_user, **data) @@ -690,8 +950,14 @@ def update_switch(switch_id): @login_required @update_user_token def patch_switch(switch_id): - """patch switch.""" + """patch switch. 
+ + Supported fields: [ + 'credentials', 'filters' + ] + """ data = _get_request_data() + _replace_data(data, {'filters': 'machine_filters'}) return utils.make_json_response( 200, switch_api.patch_switch(switch_id, user=current_user, **data) @@ -711,6 +977,7 @@ def delete_switch(switch_id): ) +@util.deprecated @app.route("/switch-filters", methods=['GET']) @log_user_action @login_required @@ -718,7 +985,7 @@ def delete_switch(switch_id): def list_switch_filters(): """List switch filters.""" data = _get_request_args() - _filter_ip(data) + _filter_switch_ip(data) return utils.make_json_response( 200, switch_api.list_switch_filters( @@ -727,6 +994,7 @@ def list_switch_filters(): ) +@util.deprecated @app.route("/switch-filters/", methods=['GET']) @log_user_action @login_required @@ -740,6 +1008,7 @@ def show_switch_filters(switch_id): ) +@util.deprecated @app.route("/switch-filters/", methods=['PUT']) @log_user_action @login_required @@ -747,12 +1016,14 @@ def show_switch_filters(switch_id): def update_switch_filters(switch_id): """update switch filters.""" data = _get_request_data() + _replace_data(data, {'filters': 'machine_filters'}) return utils.make_json_response( 200, switch_api.update_switch_filters(switch_id, user=current_user, **data) ) +@util.deprecated @app.route("/switch-filters/", methods=['PATCH']) @log_user_action @login_required @@ -760,13 +1031,31 @@ def update_switch_filters(switch_id): def patch_switch_filters(switch_id): """patch switch filters.""" data = _get_request_data() + _replace_data(data, {'filters': 'machine_filters'}) return utils.make_json_response( 200, switch_api.patch_switch_filter(switch_id, user=current_user, **data) ) -def _filter_port(data): +def _filter_switch_port(data): + """Generate switch machine filters by switch port related fields. 
+ + Examples: + {'port': 'ae20'} to {'port': {'eq': 'ae20'}} + {'portStart': 20, 'portPrefix': 'ae', 'portSuffix': ''} to + {'port': {'startswith': 'ae', 'endswith': '', 'resp_ge': 20}} + {'portEnd': 20, 'portPrefix': 'ae', 'portSuffix': ''} to + {'port': {'startswith': 'ae', 'endswith': '', 'resp_le': 20}} + {'portRange': '20,40', 'portPrefix': 'ae', 'portSuffix': ''} to + {'port': { + 'startswith': 'ae', 'endswith': '', 'resp_range': [(20. 40)] + }} + + For each switch machines port, it extracts portNumber from + '' and filter the returned switch + machines by the filters. + """ port_filter = {} ports = _get_data_list(data, 'port') if ports: @@ -803,6 +1092,13 @@ def _filter_port(data): def _filter_general(data, key): + """Generate general filter for db/api returned list. + + Supported filter type: [ + 'resp_eq', 'resp_in', 'resp_le', 'resp_ge', + 'resp_gt', 'resp_lt', 'resp_match' + ] + """ general_filter = {} general = _get_data_list(data, key) if general: @@ -810,7 +1106,30 @@ def _filter_general(data, key): data[key] = general_filter -def _filter_tag(data): +def _filter_machine_tag(data): + """Generate filter for machine tag. + + Examples: + original returns: + [{'tag': { + 'city': 'beijing', + 'building': 'tsinghua main building', + 'room': '205', 'rack': 'a2b3', + 'stack': '20' + }},{'location': { + 'city': 'beijing', + 'building': 'tsinghua main building', + 'room': '205', 'rack': 'a2b2', + 'stack': '20' + }}] + filter: {'tag': 'room=205;rack=a2b3'} + filtered: [{'tag': { + 'city': 'beijing', + 'building': 'tsinghua main building', + 'room': '205', 'rack': 'a2b3', + 'stack': '20' + }}] + """ tag_filter = {} tags = _get_data_list(data, 'tag') if tags: @@ -822,7 +1141,30 @@ def _filter_tag(data): data['tag'] = tag_filter -def _filter_location(data): +def _filter_machine_location(data): + """Generate filter for machine location. 
+ + Examples: + original returns: + [{'location': { + 'city': 'beijing', + 'building': 'tsinghua main building', + 'room': '205', 'rack': 'a2b3', + 'stack': '20' + }},{'location': { + 'city': 'beijing', + 'building': 'tsinghua main building', + 'room': '205', 'rack': 'a2b2', + 'stack': '20' + }}] + filter: {'location': 'room=205;rack=a2b3'} + filtered: [{'location': { + 'city': 'beijing', + 'building': 'tsinghua main building', + 'room': '205', 'rack': 'a2b3', + 'stack': '20' + }}] + """ location_filter = {} locations = _get_data_list(data, 'location') if locations: @@ -839,12 +1181,18 @@ def _filter_location(data): @login_required @update_user_token def list_switch_machines(switch_id): - """Get switch machines.""" + """Get switch machines. + + Supported filters: [ + 'port', 'portStart', 'portEnd', 'portRange', + 'portPrefix', 'portSuffix', 'vlans', 'tag', 'location' + ] + """ data = _get_request_args(vlans=_int_converter) - _filter_port(data) + _filter_switch_port(data) _filter_general(data, 'vlans') - _filter_tag(data) - _filter_location(data) + _filter_machine_tag(data) + _filter_machine_location(data) return utils.make_json_response( 200, switch_api.list_switch_machines( @@ -858,13 +1206,22 @@ def list_switch_machines(switch_id): @login_required @update_user_token def list_switch_machines_hosts(switch_id): - """Get switch machines or hosts.""" + """Get switch machines or hosts. 
+ + Supported filters: [ + 'port', 'portStart', 'portEnd', 'portRange', + 'portPrefix', 'portSuffix', 'vlans', 'tag', 'location', + 'os_name', 'os_id' + ] + + """ data = _get_request_args(vlans=_int_converter, os_id=_int_converter) - _filter_port(data) + _filter_switch_port(data) _filter_general(data, 'vlans') - _filter_tag(data) - _filter_location(data) + _filter_machine_tag(data) + _filter_machine_location(data) _filter_general(data, 'os_name') + # TODO(xicheng): os_id filter should be removed later _filter_general(data, 'os_id') return utils.make_json_response( 200, @@ -879,7 +1236,11 @@ def list_switch_machines_hosts(switch_id): @login_required @update_user_token def add_switch_machine(switch_id): - """add switch machine.""" + """add switch machine. + + Must fields: ['mac', 'port'] + Optional fields: ['vlans', 'ipmi_credentials', 'tag', 'location'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -892,7 +1253,12 @@ def add_switch_machine(switch_id): @login_required @update_user_token def add_switch_machines(): - """add switch machines.""" + """batch add switch machines. + + request data is list of dict which contains switch machine fields. + Each dict must contain ['switch_ip', 'mac', 'port'], + may contain ['vlans', 'ipmi_credentials', 'tag', 'location']. + """ data = _get_request_data_as_list() return utils.make_json_response( 200, switch_api.add_switch_machines( @@ -927,7 +1293,12 @@ def show_switch_machine(switch_id, machine_id): @login_required @update_user_token def update_switch_machine(switch_id, machine_id): - """update switch machine.""" + """update switch machine. + + Supported fields: [ + 'port', 'vlans', 'ipmi_credentials', 'tag', 'location' + ] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -945,7 +1316,12 @@ def update_switch_machine(switch_id, machine_id): @login_required @update_user_token def patch_switch_machine(switch_id, machine_id): - """patch switch machine.""" + """patch switch machine. 
+ + Supported fields: [ + 'vlans', 'ipmi_credentials', 'tag', 'location' + ] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -978,11 +1354,17 @@ def delete_switch_machine(switch_id, machine_id): @login_required @update_user_token def take_switch_action(switch_id): - """update switch.""" + """take switch action. + + Supported actions: [ + 'find_machines', 'add_machines', 'remove_machines', + 'set_machines' + ] + """ data = _get_request_data() - poll_switch_machines_func = _wrap_response( + poll_switch_func = _wrap_response( functools.partial( - switch_api.poll_switch_machines, switch_id, user=current_user, + switch_api.poll_switch, switch_id, user=current_user, ), 202 ) @@ -994,7 +1376,7 @@ def take_switch_action(switch_id): ) return _group_data_action( data, - find_machines=poll_switch_machines_func, + find_machines=poll_switch_func, add_machines=update_switch_machines_func, remove_machines=update_switch_machines_func, set_machines=update_switch_machines_func @@ -1006,7 +1388,10 @@ def take_switch_action(switch_id): @login_required @update_user_token def take_machine_action(machine_id): - """update machine.""" + """take machine action. + + Supported actions: ['tag', 'poweron', 'poweroff', 'reset'] + """ data = _get_request_data() tag_func = _wrap_response( functools.partial( @@ -1046,13 +1431,21 @@ def take_machine_action(machine_id): @login_required @update_user_token def list_switchmachines(): - """List switch machines.""" + """List switch machines. 
+ + Supported filters: [ + 'vlans', 'switchIp', 'SwitchIpStart', + 'SwitchIpEnd', 'SwitchIpRange', 'port', + 'portStart', 'portEnd', 'portRange', + 'location', 'tag', 'mac' + ] + """ data = _get_request_args(vlans=_int_converter) - _filter_ip(data) - _filter_port(data) + _filter_switch_ip(data) + _filter_switch_port(data) _filter_general(data, 'vlans') - _filter_tag(data) - _filter_location(data) + _filter_machine_tag(data) + _filter_machine_location(data) return utils.make_json_response( 200, switch_api.list_switchmachines( @@ -1066,15 +1459,23 @@ def list_switchmachines(): @login_required @update_user_token def list_switchmachines_hosts(): - """List switch machines or hosts.""" + """List switch machines or hosts. + + Supported filters: [ + 'vlans', 'switchIp', 'SwitchIpStart', + 'SwitchIpEnd', 'SwitchIpRange', 'port', + 'portStart', 'portEnd', 'portRange', + 'location', 'tag', 'mac', 'os_name' + ] + + """ data = _get_request_args(vlans=_int_converter, os_id=_int_converter) - _filter_ip(data) - _filter_port(data) + _filter_switch_ip(data) + _filter_switch_port(data) _filter_general(data, 'vlans') - _filter_tag(data) - _filter_location(data) + _filter_machine_tag(data) + _filter_machine_location(data) _filter_general(data, 'os_name') - _filter_general(data, 'os_id') return utils.make_json_response( 200, switch_api.list_switchmachines_hosts( @@ -1109,7 +1510,12 @@ def show_switchmachine(switch_machine_id): @login_required @update_user_token def update_switchmachine(switch_machine_id): - """update switch machine.""" + """update switch machine. + + Support fields: [ + ''port', 'vlans', 'ipmi_credentials', 'tag', 'location' + ] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1124,7 +1530,12 @@ def update_switchmachine(switch_machine_id): @login_required @update_user_token def patch_switchmachine(switch_machine_id): - """patch switch machine.""" + """patch switch machine. 
+ + Support fields: [ + 'vlans', 'ipmi_credentials', 'tag', 'location' + ] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1154,10 +1565,15 @@ def delete_switchmachine(switch_machine_id): @login_required @update_user_token def list_machines(): - """List machines.""" + """List machines. + + Supported filters: [ + 'tag', 'location', 'mac' + ] + """ data = _get_request_args() - _filter_tag(data) - _filter_location(data) + _filter_machine_tag(data) + _filter_machine_location(data) return utils.make_json_response( 200, machine_api.list_machines( @@ -1186,7 +1602,12 @@ def show_machine(machine_id): @login_required @update_user_token def update_machine(machine_id): - """update machine.""" + """update machine. + + Supported fields: [ + 'tag', 'location', 'ipmi_credentials' + ] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1201,7 +1622,12 @@ def update_machine(machine_id): @login_required @update_user_token def patch_machine(machine_id): - """patch machine.""" + """patch machine. + + Supported fields: [ + 'tag', 'location', 'ipmi_credentials' + ] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1231,7 +1657,12 @@ def delete_machine(machine_id): @login_required @update_user_token def list_subnets(): - """List subnets.""" + """List subnets. + + Supported filters: [ + 'subnet', 'name' + ] + """ data = _get_request_args() return utils.make_json_response( 200, @@ -1261,7 +1692,11 @@ def show_subnet(subnet_id): @login_required @update_user_token def add_subnet(): - """add subnet.""" + """add subnet. + + Must fields: ['subnet'] + Optional fields: ['name'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1274,7 +1709,10 @@ def add_subnet(): @login_required @update_user_token def update_subnet(subnet_id): - """update subnet.""" + """update subnet. 
+ + Support fields: ['subnet', 'name'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1304,12 +1742,14 @@ def delete_subnet(subnet_id): @login_required @update_user_token def list_adapters(): - """List adapters.""" + """List adapters. + + Supported filters: [ + 'name' + ] + """ data = _get_request_args() _filter_general(data, 'name') - _filter_general(data, 'distributed_system_name') - _filter_general(data, 'os_installer_name') - _filter_general(data, 'package_installer_name') return utils.make_json_response( 200, adapter_api.list_adapters( @@ -1318,7 +1758,7 @@ def list_adapters(): ) -@app.route("/adapters/", methods=['GET']) +@app.route("/adapters/", methods=['GET']) @log_user_action @login_required @update_user_token @@ -1333,7 +1773,7 @@ def show_adapter(adapter_id): ) -@app.route("/adapters//metadata", methods=['GET']) +@app.route("/adapters//metadata", methods=['GET']) @log_user_action @login_required @update_user_token @@ -1348,7 +1788,7 @@ def show_adapter_metadata(adapter_id): ) -@app.route("/oses//metadata", methods=['GET']) +@app.route("/oses//metadata", methods=['GET']) @log_user_action @login_required @update_user_token @@ -1363,27 +1803,22 @@ def show_os_metadata(os_id): ) -@app.route("/oses//ui_metadata", methods=['GET']) +@app.route("/oses//ui_metadata", methods=['GET']) @log_user_action @login_required @update_user_token def convert_os_metadata(os_id): """Convert os metadata to ui os metadata.""" - metadatas = metadata_api.get_os_metadata( - os_id, user=current_user - ) - configs = util.load_configs(setting.OS_MAPPING_DIR) - metadata = metadatas['os_config'] - config = configs[0]['OS_CONFIG_MAPPING'] + data = _get_request_args() return utils.make_json_response( 200, - metadata_api.get_ui_metadata( - metadata, config + metadata_api.get_os_ui_metadata( + os_id, user=current_user, **data ) ) -@app.route("/flavors//metadata", methods=['GET']) +@app.route("/flavors//metadata", methods=['GET']) @log_user_action @login_required 
@update_user_token @@ -1398,35 +1833,23 @@ def show_flavor_metadata(flavor_id): ) -@app.route("/flavors//ui_metadata", methods=['GET']) +@app.route("/flavors//ui_metadata", methods=['GET']) @log_user_action @login_required @update_user_token def convert_flavor_metadata(flavor_id): - """Convert flavor metadat to ui flavor metadata.""" - metadatas = metadata_api.get_flavor_metadata( - flavor_id, user=current_user - ) - metadata = metadatas['flavor_config'] - flavor = metadata_api.get_flavor( - flavor_id, - user=current_user - ) - flavor_name = flavor['name'].replace('-', '_') - configs = util.load_configs(setting.FLAVOR_MAPPING_DIR) - for item in configs: - if flavor_name in item.keys(): - config = item[flavor_name] + """Convert flavor metadata to ui flavor metadata.""" + data = _get_request_args() return utils.make_json_response( 200, - metadata_api.get_ui_metadata( - metadata, config + metadata_api.get_flavor_ui_metadata( + flavor_id, user=current_user, **data ) ) @app.route( - "/adapters//oses//metadata", + "/adapters//oses//metadata", methods=['GET'] ) @log_user_action @@ -1448,7 +1871,12 @@ def show_adapter_os_metadata(adapter_id, os_id): @login_required @update_user_token def list_clusters(): - """List clusters.""" + """List clusters. + + Supported filters: [ + 'name', 'os_name', 'owner', 'adapter_name', 'flavor_name' + ] + """ data = _get_request_args() return utils.make_json_response( 200, @@ -1464,7 +1892,7 @@ def list_clusters(): @update_user_token def show_cluster(cluster_id): """Get cluster.""" - data = _get_request_args(adapter_id=_int_converter) + data = _get_request_args() return utils.make_json_response( 200, cluster_api.get_cluster( @@ -1478,7 +1906,11 @@ def show_cluster(cluster_id): @login_required @update_user_token def add_cluster(): - """add cluster.""" + """add cluster. 
+ + Must fields: ['name', 'adapter_id', 'os_id'] + Optional fields: ['flavor_id'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1491,7 +1923,10 @@ def add_cluster(): @login_required @update_user_token def update_cluster(cluster_id): - """update cluster.""" + """update cluster. + + Supported fields: ['name', 'reinstall_distributed_system'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1541,7 +1976,7 @@ def show_cluster_config(cluster_id): @login_required @update_user_token def show_cluster_metadata(cluster_id): - """Get cluster config.""" + """Get cluster metadata.""" data = _get_request_args() return utils.make_json_response( 200, @@ -1556,7 +1991,10 @@ def show_cluster_metadata(cluster_id): @login_required @update_user_token def update_cluster_config(cluster_id): - """update cluster config.""" + """update cluster config. + + Supported fields: ['os_config', 'package_config', 'config_step'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1571,7 +2009,10 @@ def update_cluster_config(cluster_id): @login_required @update_user_token def patch_cluster_config(cluster_id): - """patch cluster config.""" + """patch cluster config. + + Supported fields: ['os_config', 'package_config', 'config_step'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1599,7 +2040,13 @@ def delete_cluster_config(cluster_id): @login_required @update_user_token def take_cluster_action(cluster_id): - """take cluster action.""" + """take cluster action. 
+ + Supported actions: [ + 'add_hosts', 'remove_hosts', 'set_hosts', + 'review', 'deploy', 'check_health' + ] + """ data = _get_request_data() url_root = request.url_root @@ -1624,8 +2071,9 @@ def take_cluster_action(cluster_id): check_cluster_health_func = _wrap_response( functools.partial( health_report_api.start_check_cluster_health, - current_user, cluster_id, - '%s/clusters/%s/healthreports' % (url_root, cluster_id) + cluster_id, + '%s/clusters/%s/healthreports' % (url_root, cluster_id), + user=current_user ), 202 ) @@ -1657,20 +2105,33 @@ def get_cluster_state(cluster_id): @app.route("/clusters//healthreports", methods=['POST']) def create_health_reports(cluster_id): - """Create a health check report.""" + """Create a health check report. + + Must fields: ['name'] + Optional fields: [ + 'display_name', 'report', 'category', 'state', 'error_message' + ] + """ data = _get_request_data() output = [] + logging.info('create_health_reports for cluster %s: %s', + cluster_id, data) if 'report_list' in data: for report in data['report_list']: try: output.append( - health_report_api.add_report_record(cluster_id, **report) + health_report_api.add_report_record( + cluster_id, **report + ) ) - except Exception: + except Exception as error: + logging.exception(error) continue else: - output = health_report_api.add_report_record(cluster_id, **data) + output = health_report_api.add_report_record( + cluster_id, **data + ) return utils.make_json_response( 200, @@ -1680,11 +2141,20 @@ def create_health_reports(cluster_id): @app.route("/clusters//healthreports", methods=['PUT']) def bulk_update_reports(cluster_id): - """Bulk update reports.""" + """Bulk update reports. + + request data is a list of health report. 
+ Each health report must contain ['name'], + may contain [ + 'display_name', 'report', 'category', 'state', 'error_message' + ] + """ data = _get_request_data() return utils.make_json_response( 200, - health_report_api.update_multi_reports(cluster_id, **data) + health_report_api.update_multi_reports( + cluster_id, **data + ) ) @@ -1693,21 +2163,31 @@ def bulk_update_reports(cluster_id): @login_required @update_user_token def list_health_reports(cluster_id): + """list health report for a cluster.""" + data = _get_request_data() return utils.make_json_response( 200, - health_report_api.list_health_reports(current_user, cluster_id) + health_report_api.list_health_reports( + cluster_id, user=current_user, **data + ) ) @app.route("/clusters//healthreports/", methods=['PUT']) def update_health_report(cluster_id, name): + """Update cluster health report. + + Supported fields: ['report', 'state', 'error_message'] + """ data = _get_request_data() if 'error_message' not in data: data['error_message'] = "" return utils.make_json_response( 200, - health_report_api.update_report(cluster_id, name, **data) + health_report_api.update_report( + cluster_id, name, **data + ) ) @@ -1716,9 +2196,13 @@ def update_health_report(cluster_id, name): @login_required @update_user_token def get_health_report(cluster_id, name): + """Get health report by cluster id and name.""" + data = _get_request_data() return utils.make_json_response( 200, - health_report_api.get_health_report(current_user, cluster_id, name) + health_report_api.get_health_report( + cluster_id, name, user=current_user, **data + ) ) @@ -1787,7 +2271,11 @@ def show_clusterhost(clusterhost_id): @login_required @update_user_token def add_cluster_host(cluster_id): - """update cluster hosts.""" + """update cluster hosts. 
+ + Must fields: ['machine_id'] + Optional fields: ['name', 'reinstall_os', 'roles'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1803,7 +2291,10 @@ def add_cluster_host(cluster_id): @login_required @update_user_token def update_cluster_host(cluster_id, host_id): - """Update cluster host.""" + """Update cluster host. + + Supported fields: ['name', 'reinstall_os', 'roles'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1821,7 +2312,10 @@ def update_cluster_host(cluster_id, host_id): @login_required @update_user_token def update_clusterhost(clusterhost_id): - """Update cluster host.""" + """Update cluster host. + + Supported fields: ['name', 'reinstall_os', 'roles'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1839,7 +2333,10 @@ def update_clusterhost(clusterhost_id): @login_required @update_user_token def patch_cluster_host(cluster_id, host_id): - """Update cluster host.""" + """Update cluster host. + + Supported fields: ['roles'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1857,7 +2354,10 @@ def patch_cluster_host(cluster_id, host_id): @login_required @update_user_token def patch_clusterhost(clusterhost_id): - """Update cluster host.""" + """Update cluster host. + + Supported fields: ['roles'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1954,7 +2454,10 @@ def show_clusterhost_config(clusterhost_id): @login_required @update_user_token def update_cluster_host_config(cluster_id, host_id): - """update clusterhost config.""" + """update clusterhost config. + + Supported fields: ['os_config', package_config'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -1969,7 +2472,10 @@ def update_cluster_host_config(cluster_id, host_id): @login_required @update_user_token def update_clusterhost_config(clusterhost_id): - """update clusterhost config.""" + """update clusterhost config. 
+ + Supported fields: ['os_config', 'package_config'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -2086,7 +2592,10 @@ def show_clusterhost_state(clusterhost_id): @login_required @update_user_token def update_cluster_host_state(cluster_id, host_id): - """update clusterhost state.""" + """update clusterhost state. + + Supported fields: ['state', 'percentage', 'message', 'severity'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -2096,17 +2605,35 @@ def update_cluster_host_state(cluster_id, host_id): ) +@util.deprecated @app.route( "/clusters//hosts//state_internal", methods=['PUT', 'POST'] ) def update_cluster_host_state_internal(clustername, hostname): - """update clusterhost state.""" + """update clusterhost state. + + Supported fields: ['ready'] + """ + # TODO(xicheng): it should be merged into update_cluster_host_state. + # TODO(xicheng): the api is not login required and no user checking. data = _get_request_data() + clusters = cluster_api.list_clusters(name=clustername) + if not clusters: + raise exception_handler.ItemNotFound( + 'no clusters found for clustername %s' % clustername + ) + cluster_id = clusters[0]['id'] + hosts = host_api.list_hosts(name=hostname) + if not hosts: + raise exception_handler.ItemNotFound( + 'no hosts found for hostname %s' % hostname + ) + host_id = hosts[0]['id'] return utils.make_json_response( 200, cluster_api.update_clusterhost_state_internal( - clustername, hostname, **data + cluster_id, host_id, **data ) ) @@ -2119,7 +2646,10 @@ def update_cluster_host_state_internal(clustername, hostname): @login_required @update_user_token def update_clusterhost_state(clusterhost_id): - """update clusterhost state.""" + """update clusterhost state. 
+ + Supported fields: ['state', 'percentage', 'message', 'severity'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -2129,17 +2659,33 @@ def update_clusterhost_state(clusterhost_id): ) +@util.deprecated @app.route( "/clusterhosts//state_internal", methods=['PUT', 'POST'] ) def update_clusterhost_state_internal(clusterhost_name): - """update clusterhost state.""" + """update clusterhost state. + + Supported fields: ['ready'] + """ data = _get_request_data() + clusterhosts = cluster_api.list_clusterhosts() + clusterhost_id = None + for clusterhost in clusterhosts: + if clusterhost['name'] == clusterhost_name: + clusterhost_id = clusterhost['clusterhost_id'] + break + if not clusterhost_id: + raise exception_handler.ItemNotFound( + 'no clusterhost found for clusterhost_name %s' % ( + clusterhost_name + ) + ) return utils.make_json_response( 200, cluster_api.update_clusterhost_state_internal( - clusterhost_name, **data + clusterhost_id, **data ) ) @@ -2149,7 +2695,10 @@ def update_clusterhost_state_internal(clusterhost_name): @login_required @update_user_token def list_hosts(): - """List hosts.""" + """List hosts. + + Supported fields: ['name', 'os_name', 'owner', 'mac'] + """ data = _get_request_args() return utils.make_json_response( 200, @@ -2179,10 +2728,15 @@ def show_host(host_id): @login_required @update_user_token def list_machines_or_hosts(): - """Get host.""" + """Get list of machine of host if the host exists. + + Supported filters: [ + 'mac', 'tag', 'location', 'os_name', 'os_id' + ] + """ data = _get_request_args(os_id=_int_converter) - _filter_tag(data) - _filter_location(data) + _filter_machine_tag(data) + _filter_machine_location(data) _filter_general(data, 'os_name') _filter_general(data, 'os_id') return utils.make_json_response( @@ -2213,7 +2767,10 @@ def show_machine_or_host(host_id): @login_required @update_user_token def update_host(host_id): - """update host.""" + """update host. 
+ + Supported fields: ['name', 'reinstall_os'] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -2228,7 +2785,12 @@ def update_host(host_id): @login_required @update_user_token def update_hosts(): - """update hosts.""" + """update hosts. + + update a list of host as dict each may contains following keys: [ + 'name', 'reinstall_os' + ] + """ data = _get_request_data_as_list() return utils.make_json_response( 200, @@ -2334,7 +2896,12 @@ def delete_host_config(host_id): @login_required @update_user_token def list_host_networks(host_id): - """list host networks.""" + """list host networks. + + Supported filters: [ + 'interface', 'ip', 'is_mgmt', 'is_promiscuous' + ] + """ data = _get_request_args() return utils.make_json_response( 200, @@ -2351,7 +2918,12 @@ def list_host_networks(host_id): @login_required @update_user_token def list_hostnetworks(): - """list host networks.""" + """list host networks. + + Supported filters: [ + 'interface', 'ip', 'is_mgmt', 'is_promiscuous' + ] + """ data = _get_request_args( is_mgmt=_bool_converter, is_promiscuous=_bool_converter @@ -2402,7 +2974,11 @@ def show_hostnetwork(host_network_id): @login_required @update_user_token def add_host_network(host_id): - """add host network.""" + """add host network. + + Must fields: ['interface', 'ip', 'subnet_id'] + Optional fields: ['is_mgmt', 'is_promiscuous'] + """ data = _get_request_data() return utils.make_json_response( 200, host_api.add_host_network(host_id, user=current_user, **data) @@ -2414,7 +2990,12 @@ def add_host_network(host_id): @login_required @update_user_token def update_host_networks(): - """add host networks.""" + """add host networks. 
+ + update a list of host network each may contain [ + 'interface', 'ip', 'subnet_id', 'is_mgmt', 'is_promiscuous' + ] + """ data = _get_request_data_as_list() return utils.make_json_response( 200, host_api.add_host_networks( @@ -2430,7 +3011,13 @@ def update_host_networks(): @login_required @update_user_token def update_host_network(host_id, host_network_id): - """update host network.""" + """update host network. + + supported fields: [ + 'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt', + 'is_promiscuous' + ] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -2445,7 +3032,13 @@ def update_host_network(host_id, host_network_id): @login_required @update_user_token def update_hostnetwork(host_network_id): - """update host network.""" + """update host network. + + supported fields: [ + 'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt', + 'is_promiscuous' + ] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -2508,7 +3101,12 @@ def show_host_state(host_id): @login_required @update_user_token def update_host_state(host_id): - """update host state.""" + """update host state. + + Supported fields: [ + 'state', 'percentage', 'message', 'severity' + ] + """ data = _get_request_data() return utils.make_json_response( 200, @@ -2518,41 +3116,24 @@ def update_host_state(host_id): ) +@util.deprecated @app.route("/hosts//state_internal", methods=['PUT', 'POST']) def update_host_state_internal(hostname): - """update host state.""" + """update host state. 
+ + Supported fields: ['ready'] + """ data = _get_request_data() + hosts = host_api.list_hosts(name=hostname) + if not hosts: + raise exception_handler.ItemNotFound( + 'no hosts found for hostname %s' % hostname + ) + host_id = hosts[0]['id'] return utils.make_json_response( 200, host_api.update_host_state_internal( - hostname, **data - ) - ) - - -def _poweron_host(*args, **kwargs): - return utils.make_json_response( - 202, - host_api.poweron_host( - *args, **kwargs - ) - ) - - -def _poweroff_host(*args, **kwargs): - return utils.make_json_response( - 202, - host_api.poweroff_host( - *args, **kwargs - ) - ) - - -def _reset_host(*args, **kwargs): - return utils.make_json_response( - 202, - host_api.reset_host( - *args, **kwargs + host_id, **data ) ) @@ -2562,7 +3143,12 @@ def _reset_host(*args, **kwargs): @login_required @update_user_token def take_host_action(host_id): - """take host action.""" + """take host action. + + Supported actions: [ + 'poweron', 'poweroff', 'reset' + ] + """ data = _get_request_data() poweron_func = _wrap_response( functools.partial( @@ -2590,6 +3176,7 @@ def take_host_action(host_id): def _get_headers(*keys): + """Get proxied request headers.""" headers = {} for key in keys: if key in request.headers: @@ -2598,6 +3185,7 @@ def _get_headers(*keys): def _get_response_json(response): + """Get proxies request json formatted response.""" try: return response.json() except ValueError: @@ -2739,6 +3327,7 @@ def init(): database.init() adapter_api.load_adapters() metadata_api.load_metadatas() + adapter_api.load_flavors() if __name__ == '__main__': diff --git a/compass/api/api.raml b/compass/api/api.raml index 961c8324..6855b57d 100644 --- a/compass/api/api.raml +++ b/compass/api/api.raml @@ -1510,7 +1510,6 @@ mediaType: application/json "cobbler_url": "http://10.145.88.211/cobbler_api" } }, - "distributed_system_id": 1, "supported_oses": [ { "os_id": 1, @@ -1523,13 +1522,11 @@ mediaType: application/json "name": "CentOS-6.5-x86_64" } ], - 
"distributed_system_name": "openstack", "display_name": "OpenStack Icehouse", "id": 3 }] queryParameters: name: - distributed_system_name: description: Lists information for all adapters headers: Access-token: @@ -1870,44 +1867,41 @@ mediaType: application/json application/json: example: | [ - { - "created_at": "2014-10-18 23:01:23", - "os_name": "CentOS-6.5-x86_64", - "name": "cluster1", - "reinstall_distributed_system": true, - "distributed_system_id": 1, - "adapter_id": 3, - "updated_at": "2014-10-18 23:01:23", - "owner": "admin@huawei.com", - "os_id": 2, - "distributed_system_name": "openstack", - "distributed_system_installed": false, - "flavor": { - "display_name": "All-In-One", - "name": "allinone", - "roles": [ - { - "display_name": "all in one compute", - "description": "all in one compute", - "adapter_id": 3, - "role_id": 35, - "flavor_id": 4, - "optional": true, - "id": 35, - "name": "allinone-compute" - } - ], - "adapter_id": 3, - "template": "allinone.tmpl", - "id": 4 - }, - "id": 1 - } + { + "created_at": "2014-10-18 23:01:23", + "os_name": "CentOS-6.5-x86_64", + "name": "cluster1", + "reinstall_distributed_system": true, + "adapter_id": 3, + "updated_at": "2014-10-18 23:01:23", + "owner": "admin@huawei.com", + "os_id": 2, + "distributed_system_installed": false, + "flavor": { + "display_name": "All-In-One", + "name": "allinone", + "roles": [ + { + "display_name": "all in one compute", + "description": "all in one compute", + "adapter_id": 3, + "role_id": 35, + "flavor_id": 4, + "optional": true, + "id": 35, + "name": "allinone-compute" + } + ], + "adapter_id": 3, + "template": "allinone.tmpl", + "id": 4 + }, + "id": 1 + } ] queryParameters: name: os_name: - distributed_system_name: owner: adapter_name: flavor_name: @@ -1937,12 +1931,10 @@ mediaType: application/json "os_name": "CentOS-6.5-x86_64", "name": "cluster1", "reinstall_distributed_system": true, - "distributed_system_id": 1, "adapter_id": 3, "updated_at": "2014-10-18 23:01:23", "owner": 
"admin@huawei.com", "os_id": 2, - "distributed_system_name": "openstack", "distributed_system_installed": false, "flavor": { "display_name": "All-In-One", @@ -1990,12 +1982,10 @@ mediaType: application/json "os_name": "CentOS-6.5-x86_64", "name": "cluster1", "reinstall_distributed_system": true, - "distributed_system_id": 1, "adapter_id": 3, "updated_at": "2014-10-18 23:01:23", "owner": "admin@huawei.com", "os_id": 2, - "distributed_system_name": "openstack", "distributed_system_installed": false, "flavor": { "display_name": "All-In-One", @@ -2048,12 +2038,10 @@ mediaType: application/json "os_name": "CentOS-6.5-x86_64", "name": "cluster_new", "reinstall_distributed_system": true, - "distributed_system_id": 1, "adapter_id": 3, "updated_at": "2014-10-18 23:16:39", "owner": "admin@huawei.com", "os_id": 2, - "distributed_system_name": "openstack", "distributed_system_installed": false, "flavor": { "display_name": "All-In-One", @@ -2100,12 +2088,10 @@ mediaType: application/json "os_name": "CentOS-6.5-x86_64", "name": "cluster1", "reinstall_distributed_system": true, - "distributed_system_id": 1, "adapter_id": 3, "updated_at": "2014-10-18 23:01:23", "owner": "admin@huawei.com", "os_id": 2, - "distributed_system_name": "openstack", "distributed_system_installed": false, "flavor": { "display_name": "All-In-One", @@ -2454,7 +2440,6 @@ mediaType: application/json "owner": "admin@huawei.com", "port": "4", "location": {}, - "distributed_system_name": "openstack", "os_name": "CentOS-6.5-x86_64", "reinstall_distributed_system": true, "mac": "00:0c:29:2b:c9:d4", @@ -2568,7 +2553,6 @@ mediaType: application/json "owner": "admin@huawei.com", "port": "4", "location": {}, - "distributed_system_name": "openstack", "os_name": "CentOS-6.5-x86_64", "reinstall_distributed_system": true, "mac": "00:0c:29:2b:c9:d4", @@ -2650,7 +2634,6 @@ mediaType: application/json "owner": "admin@huawei.com", "port": "4", "location": {}, - "distributed_system_name": "openstack", "os_name": 
"CentOS-6.5-x86_64", "reinstall_distributed_system": true, "mac": "00:0c:29:2b:c9:d4", @@ -3336,7 +3319,6 @@ mediaType: application/json "created_at": "2014-10-18 23:16:02", "adapter_id": 3, "updated_at": "2014-10-18 23:16:39", - "distributed_system_name": "openstack", "owner": "admin@huawei.com", "distributed_system_installed": false, "id": 2 diff --git a/compass/apiclient/example.py b/compass/apiclient/example.py index 967c7b42..4c01b982 100755 --- a/compass/apiclient/example.py +++ b/compass/apiclient/example.py @@ -77,7 +77,7 @@ PRESET_VALUES = { 'GATEWAY': '10.145.88.1', 'PROXY': 'http://10.145.89.100:3128', 'OS_NAME_PATTERN': 'CentOS.*', - 'DISTRIBUTED_SYSTEM_NAME_PATTERN': 'openstack.*', + 'ADAPTER_NAME': 'openstack_icehouse', 'FLAVOR_PATTERN': 'allinone.*', 'ROLES_LIST': ['allinone-compute'], 'MACHINES_TO_ADD': ['00:0c:29:a7:ea:4b'], @@ -185,14 +185,11 @@ adapters = response adapter_id = None os_id = None flavor_id = None -adapter_pattern = re.compile(PRESET_VALUES['DISTRIBUTED_SYSTEM_NAME_PATTERN']) +adapter_name = PRESET_VALUES['ADPATER_NAME'] os_pattern = re.compile(PRESET_VALUES['OS_NAME_PATTERN']) flavor_pattern = re.compile(PRESET_VALUES['FLAVOR_PATTERN']) for adapter in adapters: - if ( - 'distributed_system_name' in adapter and - adapter_pattern.match(adapter['distributed_system_name']) - ): + if adapter_name == adapter['name']: adapter_id = adapter['id'] for supported_os in adapter['supported_oses']: if os_pattern.match(supported_os['name']): @@ -201,7 +198,6 @@ for adapter in adapters: for flavor in adapter['flavors']: if flavor_pattern.match(flavor['name']): flavor_id = flavor['id'] - if adapter_id and os_id and flavor_id: break diff --git a/compass/apiclient/restful.py b/compass/apiclient/restful.py index 57d2590f..27a02ead 100644 --- a/compass/apiclient/restful.py +++ b/compass/apiclient/restful.py @@ -490,21 +490,11 @@ class Client(object): def delete_subnet(self, subnet_id): return self._delete('/subnets/%s' % subnet_id) - def 
list_adapters(self, name=None, distributed_system_name=None, - os_installer_name=None, package_installer_name=None): + def list_adapters(self, name=None): data = {} if name: data['name'] = name - if distributed_system_name: - data['distributed_system_name'] = distributed_system_name - - if os_installer_name: - data['os_installer_name'] = os_installer_name - - if package_installer_name: - data['package_installer_name'] = package_installer_name - return self._get('/adapters', data=data) def get_adapter(self, adapter_id): @@ -520,7 +510,7 @@ class Client(object): return self._get('/oses/%s/metadata' % os_id) def list_clusters(self, name=None, os_name=None, - distributed_system_name=None, owner=None, + owner=None, adapter_id=None): data = {} if name: @@ -529,9 +519,6 @@ class Client(object): if os_name: data['os_name'] = os_name - if distributed_system_name: - data['distributed_system_name'] = distributed_system_name - if owner: data['owner'] = owner diff --git a/compass/db/api/adapter.py b/compass/db/api/adapter.py index 8a380f57..c3ad48d2 100644 --- a/compass/db/api/adapter.py +++ b/compass/db/api/adapter.py @@ -25,174 +25,289 @@ from compass.utils import setting_wrapper as setting from compass.utils import util -def _add_system(session, model, configs, exception_when_existing=True): - parents = {} - for config in configs: - logging.info( - 'add config %s to %s', - config, model - ) - object = utils.add_db_object( - session, model, - exception_when_existing, config['NAME'], - deployable=config.get('DEPLOYABLE', False) - ) - parents[config['NAME']] = ( - object, config.get('PARENT', None) - ) - for name, (object, parent_name) in parents.items(): - if parent_name: - parent, _ = parents[parent_name] - else: - parent = None - utils.update_db_object(session, object, parent=parent) +OSES = None +OS_INSTALLERS = None +PACKAGE_INSTALLERS = None +ADAPTERS = None +ADAPTERS_FLAVORS = None +ADAPTERS_ROLES = None -def add_oses_internal(session, exception_when_existing=True): +def 
_get_oses_from_configuration(): + """Get all os configs from os configuration dir. + + Example: { + : { + 'name': , + 'id': , + 'os_id': , + 'deployable': True + } + } + """ configs = util.load_configs(setting.OS_DIR) - _add_system( - session, models.OperatingSystem, configs, - exception_when_existing=exception_when_existing - ) - - -def add_distributed_systems_internal(session, exception_when_existing=True): - configs = util.load_configs(setting.DISTRIBUTED_SYSTEM_DIR) - _add_system( - session, models.DistributedSystem, configs, - exception_when_existing=exception_when_existing - ) - - -def add_adapters_internal(session, exception_when_existing=True): + systems = {} + for config in configs: + logging.info('get config %s', config) + system_name = config['NAME'] + parent_name = config.get('PARENT', None) + system = { + 'name': system_name, + 'id': system_name, + 'os_id': system_name, + 'parent': parent_name, + 'parent_id': parent_name, + 'deployable': config.get('DEPLOYABLE', False) + } + systems[system_name] = system parents = {} + for name, system in systems.items(): + parent = system.get('parent', None) + parents[name] = parent + for name, system in systems.items(): + util.recursive_merge_dict(name, systems, parents) + return systems + + +def _get_installers_from_configuration(configs): + """Get installers from configurations. 
+ + Example: { + : { + 'alias': , + 'id': , + 'name': , + 'settings': + } + } + """ + installers = {} + for config in configs: + name = config['NAME'] + instance_name = config.get('INSTANCE_NAME', name) + installers[instance_name] = { + 'alias': instance_name, + 'id': instance_name, + 'name': name, + 'settings': config.get('SETTINGS', {}) + } + return installers + + +def _get_os_installers_from_configuration(): + """Get os installers from os installer config dir.""" + configs = util.load_configs(setting.OS_INSTALLER_DIR) + return _get_installers_from_configuration(configs) + + +def _get_package_installers_from_configuration(): + """Get package installers from package installer config dir.""" + configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR) + return _get_installers_from_configuration(configs) + + +def _get_adapters_from_configuration(): + """Get adapters from adapter config dir.""" configs = util.load_configs(setting.ADAPTER_DIR) + adapters = {} for config in configs: logging.info('add config %s to adapter', config) - if 'DISTRIBUTED_SYSTEM' in config: - distributed_system = utils.get_db_object( - session, models.DistributedSystem, - name=config['DISTRIBUTED_SYSTEM'] - ) - else: - distributed_system = None if 'OS_INSTALLER' in config: - os_installer = utils.get_db_object( - session, models.OSInstaller, - alias=config['OS_INSTALLER'] - ) + os_installer = OS_INSTALLERS[config['OS_INSTALLER']] else: os_installer = None + if 'PACKAGE_INSTALLER' in config: - package_installer = utils.get_db_object( - session, models.PackageInstaller, - alias=config['PACKAGE_INSTALLER'] - ) + package_installer = PACKAGE_INSTALLERS[ + config['PACKAGE_INSTALLER'] + ] else: package_installer = None - adapter = utils.add_db_object( - session, models.Adapter, - exception_when_existing, - config['NAME'], - display_name=config.get('DISPLAY_NAME', None), - distributed_system=distributed_system, - os_installer=os_installer, - package_installer=package_installer, - 
deployable=config.get('DEPLOYABLE', False), - health_check_cmd=config.get('HEALTH_CHECK_COMMAND', None) - ) + + adapter_name = config['NAME'] + parent_name = config.get('PARENT', None) + adapter = { + 'name': adapter_name, + 'id': adapter_name, + 'parent': parent_name, + 'parent_id': parent_name, + 'display_name': config.get('DISPLAY_NAME', adapter_name), + 'os_installer': os_installer, + 'package_installer': package_installer, + 'deployable': config.get('DEPLOYABLE', False), + 'health_check_cmd': config.get('HEALTH_CHECK_COMMAND', None), + 'supported_oses': [], + 'roles': [], + 'flavors': [] + } supported_os_patterns = [ re.compile(supported_os_pattern) for supported_os_pattern in config.get('SUPPORTED_OS_PATTERNS', []) ] - oses = utils.list_db_objects( - session, models.OperatingSystem - ) - for os in oses: - if not os.deployable: + for os_name, os in OSES.items(): + if not os.get('deployable', False): continue - os_name = os.name for supported_os_pattern in supported_os_patterns: if supported_os_pattern.match(os_name): - utils.add_db_object( - session, models.AdapterOS, - exception_when_existing, - os.id, adapter.id - ) + adapter['supported_oses'].append(os) break - parents[config['NAME']] = (adapter, config.get('PARENT', None)) + adapters[adapter_name] = adapter - for name, (adapter, parent_name) in parents.items(): - if parent_name: - parent, _ = parents[parent_name] - else: - parent = None - utils.update_db_object(session, adapter, parent=parent) + parents = {} + for name, adapter in adapters.items(): + parent = adapter.get('parent', None) + parents[name] = parent + for name, adapter in adapters.items(): + util.recursive_merge_dict(name, adapters, parents) + return adapters -def add_roles_internal(session, exception_when_existing=True): +def _add_roles_from_configuration(): + """Get roles from roles config dir and update to adapters.""" configs = util.load_configs(setting.ADAPTER_ROLE_DIR) for config in configs: logging.info( 'add config %s to role', config ) 
- adapter = utils.get_db_object( - session, models.Adapter, - name=config['ADAPTER_NAME'] - ) + adapter_name = config['ADAPTER_NAME'] + adapter = ADAPTERS[adapter_name] + adapter_roles = ADAPTERS_ROLES.setdefault(adapter_name, {}) for role_dict in config['ROLES']: - utils.add_db_object( - session, models.AdapterRole, - exception_when_existing, role_dict['role'], adapter.id, - display_name=role_dict.get('display_name', None), - description=role_dict.get('description', None), - optional=role_dict.get('optional', False) - ) + role_name = role_dict['role'] + display_name = role_dict.get('display_name', role_name) + adapter_roles[role_name] = { + 'name': role_name, + 'id': '%s:%s' % (adapter_name, role_name), + 'adapter_id': adapter_name, + 'adapter_name': adapter_name, + 'display_name': display_name, + 'description': role_dict.get('description', display_name), + 'optional': role_dict.get('optional', False) + } + parents = {} + for name, adapter in ADAPTERS.items(): + parent = adapter.get('parent', None) + parents[name] = parent + for adapter_name, adapter_roles in ADAPTERS_ROLES.items(): + util.recursive_merge_dict(adapter_name, ADAPTERS_ROLES, parents) + for adapter_name, adapter_roles in ADAPTERS_ROLES.items(): + adapter = ADAPTERS[adapter_name] + adapter['roles'] = adapter_roles.values() -def add_flavors_internal(session, exception_when_existing=True): +def _add_flavors_from_configuration(): + """Get flavors from flavor config dir and update to adapters.""" configs = util.load_configs(setting.ADAPTER_FLAVOR_DIR) for config in configs: logging.info('add config %s to flavor', config) - adapter = utils.get_db_object( - session, models.Adapter, - name=config['ADAPTER_NAME'] - ) + adapter_name = config['ADAPTER_NAME'] + adapter = ADAPTERS[adapter_name] + adapter_flavors = ADAPTERS_FLAVORS.setdefault(adapter_name, {}) + adapter_roles = ADAPTERS_ROLES[adapter_name] for flavor_dict in config['FLAVORS']: - flavor = utils.add_db_object( - session, models.AdapterFlavor, - 
exception_when_existing, flavor_dict['flavor'], adapter.id, - display_name=flavor_dict.get('display_name', None), - template=flavor_dict.get('template', None) - ) - role_names = flavor_dict.get('roles', []) - for role_name in role_names: - role = utils.get_db_object( - session, models.AdapterRole, - name=role_name, adapter_id=adapter.id - ) - utils.add_db_object( - session, models.AdapterFlavorRole, - exception_when_existing, flavor.id, role.id - ) - utils.update_db_object( - session, flavor, - patched_ordered_flavor_roles=[role_name] - ) + flavor_name = flavor_dict['flavor'] + flavor_id = '%s:%s' % (adapter_name, flavor_name) + flavor = { + 'name': flavor_name, + 'id': flavor_id, + 'adapter_id': adapter_name, + 'adapter_name': adapter_name, + 'display_name': flavor_dict.get('display_name', flavor_name), + 'template': flavor_dict.get('template', None) + } + flavor_roles = flavor_dict.get('roles', []) + roles_in_flavor = [] + for flavor_role in flavor_roles: + if isinstance(flavor_role, basestring): + role_name = flavor_role + role_in_flavor = { + 'name': role_name, + 'flavor_id': flavor_id + } + else: + role_in_flavor = flavor_role + role_in_flavor['flavor_id'] = flavor_id + if 'role' in role_in_flavor: + role_in_flavor['name'] = role_in_flavor['role'] + del role_in_flavor['role'] + role_name = role_in_flavor['name'] + role = adapter_roles[role_name] + util.merge_dict(role_in_flavor, role, override=False) + roles_in_flavor.append(role_in_flavor) + flavor['roles'] = roles_in_flavor + adapter_flavors[flavor_name] = flavor + parents = {} + for name, adapter in ADAPTERS.items(): + parent = adapter.get('parent', None) + parents[name] = parent + for adapter_name, adapter_roles in ADAPTERS_FLAVORS.items(): + util.recursive_merge_dict(adapter_name, ADAPTERS_FLAVORS, parents) + for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items(): + adapter = ADAPTERS[adapter_name] + adapter['flavors'] = adapter_flavors.values() -def get_adapters_internal(session): +def 
load_adapters_internal(force_reload=False): + """Load adapter related configurations into memory. + + If force_reload, reload all configurations even it is loaded already. + """ + global OSES + if force_reload or OSES is None: + OSES = _get_oses_from_configuration() + global OS_INSTALLERS + if force_reload or OS_INSTALLERS is None: + OS_INSTALLERS = _get_os_installers_from_configuration() + global PACKAGE_INSTALLERS + if force_reload or PACKAGE_INSTALLERS is None: + PACKAGE_INSTALLERS = _get_package_installers_from_configuration() + global ADAPTERS + if force_reload or ADAPTERS is None: + ADAPTERS = _get_adapters_from_configuration() + global ADAPTERS_ROLES + if force_reload or ADAPTERS_ROLES is None: + ADAPTERS_ROLES = {} + _add_roles_from_configuration() + global ADAPTERS_FLAVORS + if force_reload or ADAPTERS_FLAVORS is None: + ADAPTERS_FLAVORS = {} + _add_flavors_from_configuration() + + +def get_adapters_internal(force_reload=False): + """Get all deployable adapters.""" + load_adapters_internal(force_reload=force_reload) adapter_mapping = {} - adapters = utils.list_db_objects( - session, models.Adapter - ) - for adapter in adapters: - if adapter.deployable: - adapter_dict = adapter.to_dict() - adapter_mapping[adapter.id] = adapter_dict + for adapter_name, adapter in ADAPTERS.items(): + if adapter.get('deployable'): + # TODO(xicheng): adapter should be filtered before + # return to caller. 
+ adapter_mapping[adapter_name] = adapter else: logging.info( 'ignore adapter %s since it is not deployable', - adapter.to_dict() + adapter_name ) return adapter_mapping + + +def get_flavors_internal(force_reload=False): + """Get all deployable flavors.""" + load_adapters_internal(force_reload=force_reload) + adapter_flavor_mapping = {} + for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items(): + adapter = ADAPTERS.get(adapter_name, {}) + for flavor_name, flavor in adapter_flavors.items(): + if adapter.get('deployable'): + # TODO(xicheng): flavor dict should be filtered before + # return to caller. + adapter_flavor_mapping.setdefault( + adapter_name, {} + )[flavor_name] = flavor + else: + logging.info( + 'ignore adapter %s since it is not deployable', + adapter_name + ) + + return adapter_flavor_mapping diff --git a/compass/db/api/adapter_holder.py b/compass/db/api/adapter_holder.py index 1125d0d1..91c65c44 100644 --- a/compass/db/api/adapter_holder.py +++ b/compass/db/api/adapter_holder.py @@ -25,38 +25,48 @@ from compass.db import exception SUPPORTED_FIELDS = [ 'name', - 'distributed_system_name', ] RESP_FIELDS = [ 'id', 'name', 'roles', 'flavors', 'os_installer', 'package_installer', - 'distributed_system_id', - 'distributed_system_name', 'supported_oses', 'display_name', 'health_check_cmd' ] RESP_OS_FIELDS = [ - 'id', 'os_id', 'name' + 'id', 'name', 'os_id' ] RESP_ROLES_FIELDS = [ 'id', 'name', 'display_name', 'description', 'optional' ] RESP_FLAVORS_FIELDS = [ - 'id', 'name', 'display_name', 'template', 'roles' + 'id', 'adapter_id', 'adapter_name', 'name', 'display_name', + 'template', 'roles' ] -@database.run_in_session() -def load_adapters(session): - load_adapters_internal(session) +ADAPTER_MAPPING = None +FLAVOR_MAPPING = None -def load_adapters_internal(session): +def load_adapters(force_reload=False): global ADAPTER_MAPPING - logging.info('load adapters into memory') - ADAPTER_MAPPING = adapter_api.get_adapters_internal(session) + if force_reload 
or ADAPTER_MAPPING is None: + logging.info('load adapters into memory') + ADAPTER_MAPPING = adapter_api.get_adapters_internal( + force_reload=force_reload + ) -ADAPTER_MAPPING = {} +def load_flavors(force_reload=False): + global FLAVOR_MAPPING + if force_reload or FLAVOR_MAPPING is None: + logging.info('load flavors into memory') + FLAVOR_MAPPING = {} + adapters_flavors = adapter_api.get_flavors_internal( + force_reload=force_reload + ) + for adapter_name, adapter_flavors in adapters_flavors.items(): + for flavor_name, flavor in adapter_flavors.items(): + FLAVOR_MAPPING['%s:%s' % (adapter_name, flavor_name)] = flavor def _filter_adapters(adapter_config, filter_name, filter_value): @@ -80,15 +90,10 @@ def _filter_adapters(adapter_config, filter_name, filter_value): @utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_ADAPTERS ) -@utils.output_filters( - name=utils.general_filter_callback, - distributed_system_name=utils.general_filter_callback, - os_installer_name=utils.general_filter_callback, - package_installer_name=utils.general_filter_callback -) +@utils.output_filters(name=utils.general_filter_callback) @utils.wrap_to_dict( RESP_FIELDS, supported_oses=RESP_OS_FIELDS, @@ -97,26 +102,13 @@ def _filter_adapters(adapter_config, filter_name, filter_value): ) def list_adapters(user=None, session=None, **filters): """list adapters.""" - if not ADAPTER_MAPPING: - load_adapters_internal(session) + load_adapters() return ADAPTER_MAPPING.values() -def get_adapter_internal(session, adapter_id): - """get adapter.""" - if not ADAPTER_MAPPING: - load_adapters_internal(session) - - if adapter_id not in ADAPTER_MAPPING: - raise exception.RecordNotExists( - 'adpater %s does not exist' % adapter_id - ) - return ADAPTER_MAPPING[adapter_id] - - @utils.supported_filters([]) @database.run_in_session() 
-@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_ADAPTERS ) @utils.wrap_to_dict( @@ -127,4 +119,37 @@ def get_adapter_internal(session, adapter_id): ) def get_adapter(adapter_id, user=None, session=None, **kwargs): """get adapter.""" - return get_adapter_internal(session, adapter_id) + load_adapters() + if adapter_id not in ADAPTER_MAPPING: + raise exception.RecordNotExists( + 'adpater %s does not exist' % adapter_id + ) + return ADAPTER_MAPPING[adapter_id] + + +@utils.supported_filters([]) +@database.run_in_session() +@user_api.check_user_permission( + permission.PERMISSION_LIST_METADATAS +) +@utils.wrap_to_dict(RESP_FLAVORS_FIELDS) +def list_flavors(user=None, session=None, **filters): + """List flavors.""" + load_flavors() + return FLAVOR_MAPPING.values() + + +@utils.supported_filters([]) +@database.run_in_session() +@user_api.check_user_permission( + permission.PERMISSION_LIST_METADATAS +) +@utils.wrap_to_dict(RESP_FLAVORS_FIELDS) +def get_flavor(flavor_id, user=None, session=None, **kwargs): + """Get flavor.""" + load_flavors() + if flavor_id not in FLAVOR_MAPPING: + raise exception.RecordNotExists( + 'flavor %s does not exist' % flavor_id + ) + return FLAVOR_MAPPING[flavor_id] diff --git a/compass/db/api/cluster.py b/compass/db/api/cluster.py index 55277b59..1a675692 100644 --- a/compass/db/api/cluster.py +++ b/compass/db/api/cluster.py @@ -17,7 +17,9 @@ import copy import functools import logging +import re +from compass.db.api import adapter_holder as adapter_api from compass.db.api import database from compass.db.api import metadata_holder as metadata_api from compass.db.api import permission @@ -29,15 +31,15 @@ from compass.utils import util SUPPORTED_FIELDS = [ - 'name', 'os_name', 'distributed_system_name', 'owner', + 'name', 'os_name', 'owner', 'adapter_name', 'flavor_name' ] SUPPORTED_CLUSTERHOST_FIELDS = [] RESP_FIELDS = [ - 'id', 'name', 'os_name', 'os_id', 'distributed_system_id', + 'id', 
'name', 'os_name', 'os_id', 'adapter_id', 'flavor_id', 'reinstall_distributed_system', 'flavor', - 'distributed_system_name', 'distributed_system_installed', - 'owner', 'adapter_id', 'adapter_name', 'flavor_name', + 'distributed_system_installed', + 'owner', 'adapter_name', 'flavor_name', 'created_at', 'updated_at' ] RESP_CLUSTERHOST_FIELDS = [ @@ -46,7 +48,7 @@ RESP_CLUSTERHOST_FIELDS = [ 'cluster_id', 'clustername', 'location', 'tag', 'networks', 'mac', 'switch_ip', 'port', 'switches', 'os_installed', 'distributed_system_installed', - 'os_name', 'distributed_system_name', 'ip', + 'os_name', 'os_id', 'ip', 'reinstall_os', 'reinstall_distributed_system', 'owner', 'cluster_id', 'created_at', 'updated_at' @@ -153,7 +155,7 @@ UPDATED_CLUSTERHOST_LOG_FIELDS = [ @utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_CLUSTERS ) @utils.wrap_to_dict(RESP_FIELDS) @@ -164,9 +166,56 @@ def list_clusters(user=None, session=None, **filters): ) +def _get_cluster(cluster_id, session=None, **kwargs): + """Get cluster by id.""" + if isinstance(cluster_id, (int, long)): + return utils.get_db_object( + session, models.Cluster, id=cluster_id, **kwargs + ) + raise exception.InvalidParameter( + 'cluster id %s type is not int compatible' % cluster_id + ) + + +def get_cluster_internal(cluster_id, session=None, **kwargs): + """Helper function to get cluster. + + Should be only used by other files under db/api. 
+ """ + return _get_cluster(cluster_id, session=session, **kwargs) + + +def _get_cluster_host( + cluster_id, host_id, session=None, **kwargs +): + """Get clusterhost by cluster id and host id.""" + cluster = _get_cluster(cluster_id, session=session, **kwargs) + from compass.db.api import host as host_api + host = host_api.get_host_internal(host_id, session=session, **kwargs) + return utils.get_db_object( + session, models.ClusterHost, + cluster_id=cluster.id, + host_id=host.id, + **kwargs + ) + + +def _get_clusterhost(clusterhost_id, session=None, **kwargs): + """Get clusterhost by clusterhost id.""" + if isinstance(clusterhost_id, (int, long)): + return utils.get_db_object( + session, models.ClusterHost, + clusterhost_id=clusterhost_id, + **kwargs + ) + raise exception.InvalidParameter( + 'clusterhost id %s type is not int compatible' % clusterhost_id + ) + + @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_CLUSTERS ) @utils.wrap_to_dict(RESP_FIELDS) @@ -175,63 +224,78 @@ def get_cluster( user=None, session=None, **kwargs ): """Get cluster info.""" - return utils.get_db_object( - session, models.Cluster, exception_when_missing, id=cluster_id + return _get_cluster( + cluster_id, + session=session, + exception_when_missing=exception_when_missing ) -def _conditional_exception(cluster, exception_when_not_editable): - if exception_when_not_editable: - raise exception.Forbidden( - 'cluster %s is not editable' % cluster.name - ) - else: - return False - - -def is_cluster_validated( - session, cluster -): +def check_cluster_validated(cluster): + """Check cluster is validated.""" if not cluster.config_validated: raise exception.Forbidden( 'cluster %s is not validated' % cluster.name ) -def is_clusterhost_validated( - session, clusterhost -): +def check_clusterhost_validated(clusterhost): + """Check clusterhost is validated.""" if not 
clusterhost.config_validated: raise exception.Forbidden( 'clusterhost %s is not validated' % clusterhost.name ) -def is_cluster_editable( - session, cluster, user, - reinstall_distributed_system_set=False, - exception_when_not_editable=True +def check_cluster_editable( + cluster, user=None, + check_in_installing=False ): - if reinstall_distributed_system_set: + """Check if cluster is editable. + + If we try to set cluster + reinstall_distributed_system attribute or any + checking to make sure the cluster is not in installing state, + we can set check_in_installing to True. + Otherwise we will make sure the cluster is not in deploying or + deployed. + If user is not admin or not the owner of the cluster, the check + will fail to make sure he can not update the cluster attributes. + """ + if check_in_installing: if cluster.state.state == 'INSTALLING': - return _conditional_exception( - cluster, exception_when_not_editable + raise exception.Forbidden( + 'cluster %s is not editable ' + 'when state is installing' % cluster.name ) elif ( - cluster.distributed_system and + cluster.flavor_name and not cluster.reinstall_distributed_system ): - logging.debug( - 'cluster is not editable when not reinstall_distributed_system' + raise exception.Forbidden( + 'cluster %s is not editable ' + 'when not to be reinstalled' % cluster.name ) - return _conditional_exception( - cluster, exception_when_not_editable + if user and not user.is_admin and cluster.creator_id != user.id: + raise exception.Forbidden( + 'cluster %s is not editable ' + 'when user is not admin or cluster owner' % cluster.name ) - if not user.is_admin and cluster.creator_id != user.id: - return _conditional_exception( - cluster, exception_when_not_editable + + +def is_cluster_editable( + cluster, user=None, + check_in_installing=False +): + """Get if cluster is editble.""" + try: + check_cluster_editable( + cluster, user=user, + check_in_installing=check_in_installing ) - return True + return True + except 
exception.Forbidden: + return False @utils.supported_filters( @@ -241,20 +305,42 @@ def is_cluster_editable( ) @utils.input_validates(name=utils.check_name) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_CLUSTER ) @utils.wrap_to_dict(RESP_FIELDS) def add_cluster( exception_when_existing=True, - name=None, user=None, session=None, **kwargs + name=None, adapter_id=None, flavor_id=None, + user=None, session=None, **kwargs ): """Create a cluster.""" - return utils.add_db_object( - session, models.Cluster, exception_when_existing, - name, creator_id=user.id, - **kwargs + adapter = adapter_api.get_adapter( + adapter_id, user=user, session=session ) + # if flavor_id is not None, also set flavor field. + # In future maybe we can move the use of flavor from + # models.py to db/api and explictly get flavor when + # needed instead of setting flavor into cluster record. + flavor = {} + if flavor_id: + flavor = adapter_api.get_flavor( + flavor_id, + user=user, session=session + ) + if flavor['adapter_id'] != adapter['id']: + raise exception.InvalidParameter( + 'flavor %s is not of adapter %s' % ( + flavor_id, adapter_id + ) + ) + + cluster = utils.add_db_object( + session, models.Cluster, exception_when_existing, + name, creator_id=user.id, adapter_id=adapter_id, + flavor_id=flavor_id, flavor=flavor, **kwargs + ) + return cluster @utils.supported_filters( @@ -263,18 +349,18 @@ def add_cluster( ) @utils.input_validates(name=utils.check_name) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_CLUSTER ) @utils.wrap_to_dict(RESP_FIELDS) def update_cluster(cluster_id, user=None, session=None, **kwargs): """Update a cluster.""" - cluster = utils.get_db_object( - session, models.Cluster, id=cluster_id + cluster = _get_cluster( + cluster_id, session=session ) - is_cluster_editable( - session, cluster, user, - 
reinstall_distributed_system_set=( + check_cluster_editable( + cluster, user=user, + check_in_installing=( kwargs.get('reinstall_distributed_system', False) ) ) @@ -283,7 +369,7 @@ def update_cluster(cluster_id, user=None, session=None, **kwargs): @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEL_CLUSTER ) @utils.wrap_to_dict( @@ -295,15 +381,34 @@ def del_cluster( cluster_id, force=False, from_database_only=False, delete_underlying_host=False, user=None, session=None, **kwargs ): - """Delete a cluster.""" - cluster = utils.get_db_object( - session, models.Cluster, id=cluster_id + """Delete a cluster. + + If force, the cluster will be deleted anyway. It is used by cli to + force clean a cluster in any case. + If from_database_only, the cluster recored will only be removed from + database. Otherwise, a del task is sent to celery to do clean deletion. + If delete_underlying_host, all hosts under this cluster will also be + deleted. + The backend will call del_cluster again with from_database_only set + when it has done the deletion work on os installer/package installer. + """ + cluster = _get_cluster( + cluster_id, session=session ) logging.debug( 'delete cluster %s with force=%s ' 'from_database_only=%s delete_underlying_host=%s', cluster.id, force, from_database_only, delete_underlying_host ) + # force set cluster state to ERROR and the state of any clusterhost + # in the cluster to ERROR when we want to delete the cluster anyway + # even the cluster is in installing or already installed. + # It let the api know the deleting is in doing when backend is doing + # the real deleting. + # In future we may import a new state like INDELETE to indicate + # the deleting is processing. + # We need discuss about if we can delete a cluster when it is already + # installed by api. 
for clusterhost in cluster.clusterhosts: if clusterhost.state.state != 'UNINITIALIZED' and force: clusterhost.state.state = 'ERROR' @@ -314,109 +419,145 @@ def del_cluster( if cluster.state.state != 'UNINITIALIZED' and force: cluster.state.state = 'ERROR' - is_cluster_editable( - session, cluster, user, - reinstall_distributed_system_set=True + check_cluster_editable( + cluster, user=user, + check_in_installing=True ) - for clusterhost in cluster.clusterhosts: - from compass.db.api import host as host_api - host = clusterhost.host - host_api.is_host_editable( - session, host, user, reinstall_os_set=True - ) - if host.state.state == 'UNINITIALIZED' or from_database_only: - utils.del_db_object( - session, host - ) + # delete underlying host if delete_underlying_host is set. + if delete_underlying_host: + for clusterhost in cluster.clusterhosts: + # delete underlying host only user has permission. + from compass.db.api import host as host_api + host = clusterhost.host + if host_api.is_host_editable( + host, user=user, check_in_installing=True + ): + # Delete host record directly in database when there is no need + # to do the deletion in backend or from_database_only is set. + if host.state.state == 'UNINITIALIZED' or from_database_only: + utils.del_db_object( + session, host + ) + + # Delete cluster record directly in database when there + # is no need to do the deletion in backend or from_database_only is set. 
if cluster.state.state == 'UNINITIALIZED' or from_database_only: return utils.del_db_object( session, cluster ) else: from compass.tasks import client as celery_client - clusterhosts = [] - for clusterhost in cluster.clusterhosts: - clusterhosts.append(clusterhost) - logging.info('send del cluster %s task to celery', cluster_id) celery_client.celery.send_task( 'compass.tasks.delete_cluster', ( - user.email, cluster_id, - [clusterhost.host_id for clusterhost in clusterhosts], + user.email, cluster.id, + [ + clusterhost.host_id + for clusterhost in cluster.clusterhosts + ], delete_underlying_host ) ) return { - 'status': 'delete action sent', + 'status': 'delete action is sent', 'cluster': cluster, - 'hosts': clusterhosts + 'hosts': cluster.clusterhosts } @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_CLUSTER_CONFIG ) @utils.wrap_to_dict(RESP_CONFIG_FIELDS) def get_cluster_config(cluster_id, user=None, session=None, **kwargs): """Get cluster config.""" - return utils.get_db_object( - session, models.Cluster, id=cluster_id - ) + return _get_cluster(cluster_id, session=session) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_CLUSTER_CONFIG ) @utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS) def get_cluster_deployed_config(cluster_id, user=None, session=None, **kwargs): """Get cluster deployed config.""" - return utils.get_db_object( - session, models.Cluster, id=cluster_id - ) + return _get_cluster(cluster_id, session=session) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_METADATAS ) @utils.wrap_to_dict(RESP_METADATA_FIELDS) def get_cluster_metadata(cluster_id, user=None, session=None, **kwargs): - """Get cluster metadata.""" - 
cluster = utils.get_db_object( - session, models.Cluster, id=cluster_id - ) + """Get cluster metadata. + + If no flavor in the cluster, it means this is a os only cluster. + We ignore package metadata for os only cluster. + """ + cluster = _get_cluster(cluster_id, session=session) metadatas = {} - os = cluster.os - if os: - metadatas['os_config'] = metadata_api.get_os_metadata_internal( - session, os.id + os_name = cluster.os_name + if os_name: + metadatas.update( + metadata_api.get_os_metadata( + os_name, session=session + ) ) - adapter = cluster.adapter - if adapter: - metadatas['package_config'] = ( - metadata_api.get_package_metadata_internal( - session, adapter.id + flavor_id = cluster.flavor_id + if flavor_id: + metadatas.update( + metadata_api.get_flavor_metadata( + flavor_id, + user=user, session=session ) ) return metadatas +def _cluster_os_config_validates( + config, cluster, session=None, user=None, **kwargs +): + """Check cluster os config validation.""" + metadata_api.validate_os_config( + config, cluster.os_id + ) + + +def _cluster_package_config_validates( + config, cluster, session=None, user=None, **kwargs +): + """Check cluster package config validation.""" + metadata_api.validate_flavor_config( + config, cluster.flavor_id + ) + + +@utils.input_validates_with_args( + put_os_config=_cluster_os_config_validates, + put_package_config=_cluster_package_config_validates +) +@utils.output_validates_with_args( + os_config=_cluster_os_config_validates, + package_config=_cluster_package_config_validates +) @utils.wrap_to_dict(RESP_CONFIG_FIELDS) -def _update_cluster_config(session, user, cluster, **kwargs): +def _update_cluster_config(cluster, session=None, user=None, **kwargs): """Update a cluster config.""" - is_cluster_editable(session, cluster, user) + check_cluster_editable(cluster, user=user) return utils.update_db_object( session, cluster, **kwargs ) +# replace os_config to deployed_os_config, +# package_config to deployed_package_config 
@utils.replace_filters( os_config='deployed_os_config', package_config='deployed_package_config' @@ -426,7 +567,7 @@ def _update_cluster_config(session, user, cluster, **kwargs): ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_CLUSTER_CONFIG ) @utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS) @@ -434,16 +575,17 @@ def update_cluster_deployed_config( cluster_id, user=None, session=None, **kwargs ): """Update cluster deployed config.""" - cluster = utils.get_db_object( - session, models.Cluster, id=cluster_id - ) - is_cluster_editable(session, cluster, user) - is_cluster_validated(session, cluster) + cluster = _get_cluster(cluster_id, session=session) + check_cluster_editable(cluster, user=user) + check_cluster_validated(cluster) return utils.update_db_object( session, cluster, **kwargs ) +# replace os_config to put_os_config, +# package_config to put_package_config in kwargs. +# It tells db these fields will be updated not patched. 
@utils.replace_filters( os_config='put_os_config', package_config='put_package_config' @@ -453,41 +595,20 @@ def update_cluster_deployed_config( ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_CLUSTER_CONFIG ) def update_cluster_config(cluster_id, user=None, session=None, **kwargs): """Update cluster config.""" - cluster = utils.get_db_object( - session, models.Cluster, id=cluster_id - ) - - def os_config_validates(config): - metadata_api.validate_os_config( - session, config, os_id=cluster.os_id - ) - - def package_config_validates(config): - metadata_api.validate_flavor_config( - session, config, flavor_id=cluster.flavor.id - ) - - @utils.input_validates( - put_os_config=os_config_validates, - put_package_config=package_config_validates - ) - def update_config_internal( - cluster, **in_kwargs - ): - return _update_cluster_config( - session, user, cluster, **in_kwargs - ) - - return update_config_internal( - cluster, **kwargs + cluster = _get_cluster(cluster_id, session=session) + return _update_cluster_config( + cluster, session=session, user=user, **kwargs ) +# replace os_config to patched_os_config and +# package_config to patched_package_config in kwargs. +# It tells db these fields will be patched not updated. 
@utils.replace_filters( os_config='patched_os_config', package_config='patched_package_config' @@ -497,165 +618,241 @@ def update_cluster_config(cluster_id, user=None, session=None, **kwargs): ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_CLUSTER_CONFIG ) def patch_cluster_config(cluster_id, user=None, session=None, **kwargs): """patch cluster config.""" - cluster = utils.get_db_object( - session, models.Cluster, id=cluster_id - ) - - def os_config_validates(config): - metadata_api.validate_os_config( - session, config, os_id=cluster.os_id - ) - - def package_config_validates(config): - metadata_api.validate_flavor_config( - session, config, flavor_id=cluster.flavor.id - ) - - @utils.output_validates( - os_config=os_config_validates, - package_config=package_config_validates - ) - def update_config_internal(cluster, **in_kwargs): - return _update_cluster_config( - session, user, cluster, **in_kwargs - ) - - return update_config_internal( - cluster, **kwargs + cluster = _get_cluster(cluster_id, session=session) + return _update_cluster_config( + cluster, session=session, user=user, **kwargs ) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEL_CLUSTER_CONFIG ) @utils.wrap_to_dict(RESP_CONFIG_FIELDS) def del_cluster_config(cluster_id, user=None, session=None): """Delete a cluster config.""" - cluster = utils.get_db_object( - session, models.Cluster, id=cluster_id + cluster = _get_cluster( + cluster_id, session=session ) - is_cluster_editable(session, cluster, user) + check_cluster_editable(cluster, user=user) return utils.update_db_object( session, cluster, os_config={}, package_config={}, config_validated=False ) -@utils.supported_filters( - ADDED_HOST_FIELDS, - optional_support_keys=(UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS), - 
ignore_support_keys=IGNORE_FIELDS -) -@utils.input_validates(name=utils.check_name) -def add_clusterhost_internal( - session, cluster, - exception_when_existing=False, - machine_id=None, **kwargs -): - from compass.db.api import host as host_api - clusterhost_dict = {} - host_dict = {} - for key, value in kwargs.items(): - if key in UPDATED_CLUSTERHOST_FIELDS: - clusterhost_dict[key] = value - else: - host_dict[key] = value - host = utils.get_db_object( - session, models.Host, False, id=machine_id - ) - if host: - if ( - host_dict and - host_api.is_host_editable( - session, host, cluster.creator, - reinstall_os_set=kwargs.get('reinstall_os', False), - exception_when_not_editable=False +def _roles_validates(roles, cluster, session=None, user=None): + """Check roles is validated to a cluster's roles.""" + if roles: + if not cluster.flavor_name: + raise exception.InvalidParameter( + 'not flavor in cluster %s' % cluster.name ) - ): - utils.update_db_object( - session, host, - **host_dict - ) - else: - logging.info('host %s is not editable', host.name) - else: - host = utils.add_db_object( - session, models.Host, False, machine_id, - os=cluster.os, - os_installer=cluster.adapter.adapter_os_installer, - creator=cluster.creator, - **host_dict - ) - - if 'roles' in kwargs: - roles = kwargs['roles'] - if not roles: - flavor = cluster.flavor - if flavor and flavor.flavor_roles: + cluster_roles = [role['name'] for role in cluster.flavor['roles']] + for role in roles: + if role not in cluster_roles: raise exception.InvalidParameter( - 'roles %s is empty' % roles + 'role %s is not in cluster roles %s' % ( + role, cluster_roles + ) ) - return utils.add_db_object( - session, models.ClusterHost, exception_when_existing, - cluster.id, host.id, **clusterhost_dict + +def _cluster_host_roles_validates( + value, cluster, host, session=None, user=None, **kwargs +): + """Check clusterhost roles is validated by cluster and host.""" + _roles_validates(value, cluster, session=session, 
user=user) + + +def _clusterhost_roles_validates( + value, clusterhost, session=None, user=None, **kwargs +): + """Check clusterhost roles is validated by clusterhost.""" + _roles_validates( + value, clusterhost.cluster, session=session, user=user ) -def _add_clusterhosts(session, cluster, machines): +@utils.supported_filters( + optional_support_keys=UPDATED_HOST_FIELDS, + ignore_support_keys=UPDATED_CLUSTERHOST_FIELDS +) +@utils.input_validates(name=utils.check_name) +def _add_host_if_not_exist( + machine_id, cluster, session=None, user=None, **kwargs +): + """Add underlying host if it does not exist.""" + from compass.db.api import host as host_api + host = host_api.get_host_internal( + machine_id, session=session, exception_when_missing=False + ) + if host: + if kwargs: + # ignore update underlying host if host is not editable. + from compass.db.api import host as host_api + if host_api.is_host_editable( + host, user=cluster.creator, + check_in_installing=kwargs.get('reinstall_os', False), + ): + utils.update_db_object( + session, host, + **kwargs + ) + else: + logging.debug( + 'ignore update host host %s ' + 'since it is not editable' % host.name + ) + else: + logging.debug('nothing to update for host %s', host.name) + else: + from compass.db.api import adapter_holder as adapter_api + adapter = adapter_api.get_adapter( + cluster.adapter_name, user=user, session=session + ) + host = utils.add_db_object( + session, models.Host, False, machine_id, + os_name=cluster.os_name, + os_installer=adapter['os_installer'], + creator=cluster.creator, + **kwargs + ) + return host + + +@utils.supported_filters( + optional_support_keys=UPDATED_CLUSTERHOST_FIELDS, + ignore_support_keys=UPDATED_HOST_FIELDS +) +@utils.input_validates_with_args( + roles=_cluster_host_roles_validates +) +def _add_clusterhost_only( + cluster, host, + exception_when_existing=False, + session=None, user=None, + **kwargs +): + """Get clusterhost only.""" + return utils.add_db_object( + session, 
models.ClusterHost, exception_when_existing, + cluster.id, host.id, **kwargs + ) + + +@utils.supported_filters( + ADDED_HOST_FIELDS, + optional_support_keys=UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS, + ignore_support_keys=IGNORE_FIELDS +) +def _add_clusterhost( + cluster, + exception_when_existing=False, + session=None, user=None, machine_id=None, **kwargs +): + """Add clusterhost and add underlying host if it does not exist.""" + host = _add_host_if_not_exist( + machine_id, cluster, session=session, + user=user, **kwargs + ) + return _add_clusterhost_only( + cluster, host, exception_when_existing=exception_when_existing, + session=session, user=user, **kwargs + ) + + +def _add_clusterhosts(cluster, machines, session=None, user=None): + """Add machines to cluster. + + Args: + machines: list of dict which contains clusterost attr to update. + + Examples: + [{'machine_id': 1, 'name': 'host1'}] + """ + check_cluster_editable( + cluster, user=user, + check_in_installing=True + ) + if cluster.state.state == 'SUCCESSFUL': + cluster.state.state == 'UPDATE_PREPARING' for machine_dict in machines: - add_clusterhost_internal( - session, cluster, **machine_dict + _add_clusterhost( + cluster, session=session, user=user, **machine_dict ) -def _remove_clusterhosts(session, cluster, hosts): +def _remove_clusterhosts(cluster, hosts, session=None, user=None): + """Remove hosts from cluster. + + Args: + hosts: list of host id. + """ + check_cluster_editable( + cluster, user=user, + check_in_installing=True + ) utils.del_db_objects( session, models.ClusterHost, cluster_id=cluster.id, host_id=hosts ) -def _set_clusterhosts(session, cluster, machines): +def _set_clusterhosts(cluster, machines, session=None, user=None): + """set machines to cluster. + + Args: + machines: list of dict which contains clusterost attr to update. 
+ + Examples: + [{'machine_id': 1, 'name': 'host1'}] + """ + check_cluster_editable( + cluster, user=user, + check_in_installing=True + ) utils.del_db_objects( session, models.ClusterHost, cluster_id=cluster.id ) + if cluster.state.state == 'SUCCESSFUL': + cluster.state.state = 'UPDATE_PREPARING' for machine_dict in machines: - add_clusterhost_internal( - session, cluster, True, **machine_dict + _add_clusterhost( + cluster, True, session=session, user=user, **machine_dict ) @utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_CLUSTERHOSTS ) @utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS) def list_cluster_hosts(cluster_id, user=None, session=None, **filters): - """Get cluster host info.""" + """List clusterhosts of a cluster.""" + cluster = _get_cluster(cluster_id, session=session) return utils.list_db_objects( - session, models.ClusterHost, cluster_id=cluster_id, + session, models.ClusterHost, cluster_id=cluster.id, **filters ) @utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_CLUSTERHOSTS ) @utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS) def list_clusterhosts(user=None, session=None, **filters): - """Get cluster host info.""" + """List all clusterhosts.""" return utils.list_db_objects( session, models.ClusterHost, **filters ) @@ -663,7 +860,7 @@ def list_clusterhosts(user=None, session=None, **filters): @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_CLUSTERHOSTS ) @utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS) @@ -671,17 +868,16 @@ def get_cluster_host( cluster_id, host_id, exception_when_missing=True, user=None, session=None, **kwargs ): - """Get 
clusterhost info.""" - return utils.get_db_object( - session, models.ClusterHost, - exception_when_missing, - cluster_id=cluster_id, host_id=host_id + """Get clusterhost info by cluster id and host id.""" + return _get_cluster_host( + cluster_id, host_id, session=session, + exception_when_missing=exception_when_missing, ) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_CLUSTERHOSTS ) @utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS) @@ -689,16 +885,16 @@ def get_clusterhost( clusterhost_id, exception_when_missing=True, user=None, session=None, **kwargs ): - """Get clusterhost info.""" - return utils.get_db_object( - session, models.ClusterHost, - exception_when_missing, - clusterhost_id=clusterhost_id + """Get clusterhost info by clusterhost id.""" + return _get_clusterhost( + clusterhost_id, session=session, + exception_when_missing=exception_when_missing, + user=user ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_CLUSTER_HOSTS ) @utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS) @@ -706,84 +902,83 @@ def add_cluster_host( cluster_id, exception_when_existing=True, user=None, session=None, **kwargs ): - """Add cluster host.""" - cluster = utils.get_db_object( - session, models.Cluster, id=cluster_id + """Add a host to a cluster.""" + cluster = _get_cluster(cluster_id, session=session) + check_cluster_editable( + cluster, user=user, + check_in_installing=True ) - is_cluster_editable(session, cluster, user) - return add_clusterhost_internal( - session, cluster, exception_when_existing, - **kwargs + if cluster.state.state == 'SUCCESSFUL': + cluster.state.state = 'UPDATE_PREPARING' + return _add_clusterhost( + cluster, exception_when_existing, + session=session, user=user, **kwargs + ) + + +@utils.supported_filters( + optional_support_keys=UPDATED_HOST_FIELDS, + 
ignore_support_keys=( + UPDATED_CLUSTERHOST_FIELDS + + PATCHED_CLUSTERHOST_FIELDS + ) +) +def _update_host_if_necessary( + clusterhost, session=None, user=None, **kwargs +): + """Update underlying host if there is something to update.""" + host = clusterhost.host + if kwargs: + # ignore update underlying host if the host is not editable. + from compass.db.api import host as host_api + if host_api.is_host_editable( + host, user=clusterhost.cluster.creator, + check_in_installing=kwargs.get('reinstall_os', False), + ): + utils.update_db_object( + session, host, + **kwargs + ) + else: + logging.debug( + 'ignore update host %s since it is not editable' % host.name + ) + else: + logging.debug( + 'nothing to update for host %s', host.name + ) + return host + + +@utils.supported_filters( + optional_support_keys=( + UPDATED_CLUSTERHOST_FIELDS + + PATCHED_CLUSTERHOST_FIELDS + ), + ignore_support_keys=UPDATED_HOST_FIELDS +) +@utils.input_validates_with_args( + roles=_clusterhost_roles_validates, + patched_roles=_clusterhost_roles_validates +) +def _update_clusterhost_only( + clusterhost, session=None, user=None, **kwargs +): + """Update clusterhost only.""" + check_cluster_editable(clusterhost.cluster, user=user) + return utils.update_db_object( + session, clusterhost, **kwargs ) @utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS) -def _update_clusterhost(session, user, clusterhost, **kwargs): - clusterhost_dict = {} - host_dict = {} - for key, value in kwargs.items(): - if key in UPDATED_HOST_FIELDS: - host_dict[key] = value - else: - clusterhost_dict[key] = value - - host = clusterhost.host - if host_dict: - from compass.db.api import host as host_api - if host_api.is_host_editable( - session, host, clusterhost.cluster.creator, - reinstall_os_set=kwargs.get('reinstall_os', False), - exception_when_not_editable=False - ): - utils.update_db_object( - session, host, - **host_dict - ) - else: - logging.debug( - 'ignore no editable host %s', host.id - ) - else: - logging.debug( - 
'nothing to update for host %s', host.id - ) - - def roles_validates(roles): - cluster_roles = [] - cluster = clusterhost.cluster - flavor = cluster.flavor - if not roles: - if flavor: - raise exception.InvalidParameter( - 'roles %s is empty' % roles - ) - else: - if not flavor: - raise exception.InvalidParameter( - 'not flavor in cluster %s' % cluster.name - ) - for flavor_roles in flavor.flavor_roles: - cluster_roles.append(flavor_roles.role.name) - for role in roles: - if role not in cluster_roles: - raise exception.InvalidParameter( - 'role %s is not in cluster roles %s' % ( - role, cluster_roles - ) - ) - - @utils.input_validates( - roles=roles_validates, - patched_roles=roles_validates +def _update_clusterhost(clusterhost, session=None, user=None, **kwargs): + """Update clusterhost and underlying host if necessary.""" + _update_host_if_necessary( + clusterhost, session=session, user=user, **kwargs ) - def update_internal(clusterhost, **in_kwargs): - return utils.update_db_object( - session, clusterhost, **in_kwargs - ) - - is_cluster_editable(session, clusterhost.cluster, user) - return update_internal( - clusterhost, **clusterhost_dict + return _update_clusterhost_only( + clusterhost, session=session, user=user, **kwargs ) @@ -792,18 +987,20 @@ def _update_clusterhost(session, user, clusterhost, **kwargs): ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_CLUSTER_HOSTS ) def update_cluster_host( cluster_id, host_id, user=None, session=None, **kwargs ): - """Update cluster host.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, cluster_id=cluster_id, host_id=host_id + """Update clusterhost by cluster id and host id.""" + clusterhost = _get_cluster_host( + cluster_id, host_id, session=session + ) + return _update_clusterhost( + clusterhost, session=session, user=user, **kwargs ) - return _update_clusterhost(session, 
user, clusterhost, **kwargs) @utils.supported_filters( @@ -811,20 +1008,24 @@ def update_cluster_host( ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_CLUSTER_HOSTS ) def update_clusterhost( clusterhost_id, user=None, session=None, **kwargs ): - """Update cluster host.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, clusterhost_id=clusterhost_id + """Update clusterhost by clusterhost id.""" + clusterhost = _get_clusterhost( + clusterhost_id, session=session + ) + return _update_clusterhost( + clusterhost, session=session, user=user, **kwargs ) - return _update_clusterhost(session, user, clusterhost, **kwargs) +# replace roles to patched_roles in kwargs. +# It tells db roles field will be patched. @utils.replace_filters( roles='patched_roles' ) @@ -833,20 +1034,24 @@ def update_clusterhost( ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_CLUSTER_HOSTS ) def patch_cluster_host( cluster_id, host_id, user=None, session=None, **kwargs ): - """Update cluster host.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, cluster_id=cluster_id, host_id=host_id + """Patch clusterhost by cluster id and host id.""" + clusterhost = _get_cluster_host( + cluster_id, host_id, session=session + ) + return _update_clusterhost( + clusterhost, session=session, user=user, **kwargs ) - return _update_clusterhost(session, user, clusterhost, **kwargs) +# replace roles to patched_roles in kwargs. +# It tells db roles field will be patched. 
@utils.replace_filters( roles='patched_roles' ) @@ -855,129 +1060,81 @@ def patch_cluster_host( ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_CLUSTER_HOSTS ) def patch_clusterhost( clusterhost_id, user=None, session=None, **kwargs ): - """Update cluster host.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, clusterhost_id=clusterhost_id + """Patch clusterhost by clusterhost id.""" + clusterhost = _get_clusterhost( + clusterhost_id, session=session + ) + return _update_clusterhost( + clusterhost, session=session, user=user, **kwargs ) - return _update_clusterhost(session, user, clusterhost, **kwargs) -@utils.supported_filters([]) -@database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEL_CLUSTER_HOST ) @utils.wrap_to_dict( RESP_CLUSTERHOST_FIELDS + ['status', 'host'], host=RESP_CLUSTERHOST_FIELDS ) -def del_cluster_host( - cluster_id, host_id, +def _del_cluster_host( + clusterhost, force=False, from_database_only=False, delete_underlying_host=False, user=None, session=None, **kwargs ): - """Delete cluster host.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, - cluster_id=cluster_id, host_id=host_id - ) + """delete clusterhost. + + If force, the cluster host will be deleted anyway. + If from_database_only, the cluster host recored will only be + deleted from database. Otherwise a celery task sent to do + clean deletion. + If delete_underlying_host, the underlying host will also be deleted. + The backend will call _del_cluster_host again when the clusterhost is + deleted from os installer/package installer with from_database_only + set. + """ + # force set clusterhost state to ERROR when we want to delete the + # clusterhost anyway even the clusterhost is in installing or already + # installed. 
It let the api know the deleting is in doing when backend + # is doing the real deleting. In future we may import a new state like + # INDELETE to indicate the deleting is processing. + # We need discuss about if we can delete a clusterhost when it is already + # installed by api. if clusterhost.state.state != 'UNINITIALIZED' and force: clusterhost.state.state = 'ERROR' if not force: - is_cluster_editable( - session, clusterhost.cluster, user, - reinstall_distributed_system_set=True - ) - else: - raise Exception( - 'cluster is not editable: %s', clusterhost.cluster.state.state + check_cluster_editable( + clusterhost.cluster, user=user, + check_in_installing=True ) + # delete underlying host if delete_underlying_host is set. if delete_underlying_host: host = clusterhost.host if host.state.state != 'UNINITIALIZED' and force: host.state.state = 'ERROR' + # only delete the host when user have the permission to delete it. import compass.db.api.host as host_api - host_api.is_host_editable( - session, host, user, - reinstall_os_set=True - ) - if host.state.state == 'UNINITIALIZED' or from_database_only: - utils.del_db_object( - session, host - ) - - if clusterhost.state.state == 'UNINITIALIZED' or from_database_only: - return utils.del_db_object( - session, clusterhost - ) - else: - logging.info( - 'send del cluster %s host %s task to celery', - cluster_id, host_id - ) - from compass.tasks import client as celery_client - celery_client.celery.send_task( - 'compass.tasks.delete_cluster_host', - ( - user.email, cluster_id, host_id, - delete_underlying_host - ) - ) - return { - 'status': 'delete action sent', - 'host': clusterhost, - } - - -@utils.supported_filters([]) -@database.run_in_session() -@user_api.check_user_permission_in_session( - permission.PERMISSION_DEL_CLUSTER_HOST -) -@utils.wrap_to_dict( - RESP_CLUSTERHOST_FIELDS + ['status', 'host'], - host=RESP_CLUSTERHOST_FIELDS -) -def del_clusterhost( - clusterhost_id, - force=False, from_database_only=False, - 
delete_underlying_host=False, user=None, - session=None, **kwargs -): - """Delete cluster host.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, - clusterhost_id=clusterhost_id - ) - if clusterhost.state.state != 'UNINITIALIZED' and force: - clusterhost.state.state = 'ERROR' - if not force: - is_cluster_editable( - session, clusterhost.cluster, user, - reinstall_distributed_system_set=True - ) - if delete_underlying_host: - host = clusterhost.host - if host.state.state != 'UNINITIALIZED' and force: - host.state.state = 'ERROR' - import compass.db.api.host as host_api - host_api.is_host_editable( - session, host, user, - reinstall_os_set=True - ) - if host.state.state == 'UNINITIALIZED' or from_database_only: - utils.del_db_object( - session, host - ) + if host_api.is_host_editable( + host, user=user, + check_in_installing=True + ): + # if there is no need to do the deletion by backend or + # from_database_only is set, we only delete the record + # in database. + if host.state.state == 'UNINITIALIZED' or from_database_only: + utils.del_db_object( + session, host + ) + # if there is no need to do the deletion by backend or + # from_database_only is set, we only delete the record in database. 
if clusterhost.state.state == 'UNINITIALIZED' or from_database_only: return utils.del_db_object( session, clusterhost @@ -991,20 +1148,57 @@ def del_clusterhost( celery_client.celery.send_task( 'compass.tasks.delete_cluster_host', ( - user.email, clusterhost.cluster_id, - clusterhost.host_id, + user.email, clusterhost.cluster_id, clusterhost.host_id, delete_underlying_host ) ) return { 'status': 'delete action sent', - 'host': clusterhost + 'host': clusterhost, } @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +def del_cluster_host( + cluster_id, host_id, + force=False, from_database_only=False, + delete_underlying_host=False, user=None, + session=None, **kwargs +): + """Delete clusterhost by cluster id and host id.""" + clusterhost = _get_cluster_host( + cluster_id, host_id, session=session + ) + return _del_cluster_host( + clusterhost, force=force, from_database_only=from_database_only, + delete_underlying_host=delete_underlying_host, user=user, + session=session, **kwargs + ) + + +@utils.supported_filters([]) +@database.run_in_session() +def del_clusterhost( + clusterhost_id, + force=False, from_database_only=False, + delete_underlying_host=False, user=None, + session=None, **kwargs +): + """Delete clusterhost by clusterhost id.""" + clusterhost = _get_clusterhost( + clusterhost_id, session=session + ) + return _del_cluster_host( + clusterhost, force=force, from_database_only=from_database_only, + delete_underlying_host=delete_underlying_host, user=user, + session=session, **kwargs + ) + + +@utils.supported_filters([]) +@database.run_in_session() +@user_api.check_user_permission( permission.PERMISSION_LIST_CLUSTERHOST_CONFIG ) @utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS) @@ -1012,45 +1206,43 @@ def get_cluster_host_config( cluster_id, host_id, user=None, session=None, **kwargs ): - """Get clusterhost config.""" - return utils.get_db_object( - session, models.ClusterHost, - cluster_id=cluster_id, 
host_id=host_id + """Get clusterhost config by cluster id and host id.""" + return _get_cluster_host( + cluster_id, host_id, session=session ) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_CLUSTERHOST_CONFIG ) @utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS) def get_cluster_host_deployed_config( cluster_id, host_id, user=None, session=None, **kwargs ): - """Get clusterhost deployed config.""" - return utils.get_db_object( - session, models.ClusterHost, - cluster_id=cluster_id, host_id=host_id + """Get clusterhost deployed config by cluster id and host id.""" + return _get_cluster_host( + cluster_id, host_id, session=session ) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_CLUSTERHOST_CONFIG ) @utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS) def get_clusterhost_config(clusterhost_id, user=None, session=None, **kwargs): - """Get clusterhost config.""" - return utils.get_db_object( - session, models.ClusterHost, clusterhost_id=clusterhost_id + """Get clusterhost config by clusterhost id.""" + return _get_clusterhost( + clusterhost_id, session=session ) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_CLUSTERHOST_CONFIG ) @utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS) @@ -1058,318 +1250,321 @@ def get_clusterhost_deployed_config( clusterhost_id, user=None, session=None, **kwargs ): - """Get clusterhost deployed config.""" - return utils.get_db_object( - session, models.ClusterHost, clusterhost_id=clusterhost_id + """Get clusterhost deployed config by clusterhost id.""" + return _get_clusterhost( + clusterhost_id, session=session ) -@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS) -def 
_update_clusterhost_config(session, user, clusterhost, **kwargs): +def _clusterhost_os_config_validates( + config, clusterhost, session=None, user=None, **kwargs +): + """Validate clusterhost's underlying host os config.""" from compass.db.api import host as host_api - ignore_keys = [] - if not host_api.is_host_editable( - session, clusterhost.host, user, - exception_when_not_editable=False - ): - ignore_keys.append('put_os_config') - - def os_config_validates(os_config): - host = clusterhost.host - metadata_api.validate_os_config( - session, os_config, host.os_id) - - def package_config_validates(package_config): - cluster = clusterhost.cluster - is_cluster_editable(session, cluster, user) - metadata_api.validate_flavor_config( - session, package_config, cluster.flavor.id - ) - - @utils.supported_filters( - optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS, - ignore_support_keys=ignore_keys - ) - @utils.input_validates( - put_os_config=os_config_validates, - put_package_config=package_config_validates - ) - def update_config_internal(clusterihost, **in_kwargs): - return utils.update_db_object( - session, clusterhost, **in_kwargs - ) - - return update_config_internal( - clusterhost, **kwargs + host = clusterhost.host + host_api.check_host_editable(host, user=user) + metadata_api.validate_os_config( + config, host.os_id ) +def _clusterhost_package_config_validates( + config, clusterhost, session=None, user=None, **kwargs +): + """Validate clusterhost's cluster package config.""" + cluster = clusterhost.cluster + check_cluster_editable(cluster, user=user) + metadata_api.validate_flavor_config( + config, cluster.flavor_id + ) + + +def _filter_clusterhost_host_editable( + config, clusterhost, session=None, user=None, **kwargs +): + """Filter fields if the underlying host is not editable.""" + from compass.db.api import host as host_api + host = clusterhost.host + return host_api.is_host_editable(host, user=user) + + +@utils.input_filters( + 
put_os_config=_filter_clusterhost_host_editable, + patched_os_config=_filter_clusterhost_host_editable +) +@utils.input_validates_with_args( + put_os_config=_clusterhost_os_config_validates, + put_package_config=_clusterhost_package_config_validates +) +@utils.output_validates_with_args( + os_config=_clusterhost_os_config_validates, + package_config=_clusterhost_package_config_validates +) +@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS) +def _update_clusterhost_config(clusterhost, session=None, user=None, **kwargs): + """Update clusterhost config.""" + return utils.update_db_object( + session, clusterhost, **kwargs + ) + + +def _clusterhost_host_validated( + config, clusterhost, session=None, user=None, **kwargs +): + """Check clusterhost's underlying host is validated.""" + from compass.db.api import host as host_api + host = clusterhost.host + host_api.check_host_editable(host, user=user) + host_api.check_host_validated(host) + + +def _clusterhost_cluster_validated( + config, clusterhost, session=None, user=None, **kwargs +): + """Check clusterhost's cluster is validated.""" + cluster = clusterhost.cluster + check_cluster_editable(cluster, user=user) + check_clusterhost_validated(clusterhost) + + +@utils.input_filters( + deployed_os_config=_filter_clusterhost_host_editable, +) +@utils.input_validates_with_args( + deployed_os_config=_clusterhost_host_validated, + deployed_package_config=_clusterhost_cluster_validated +) @utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS) def _update_clusterhost_deployed_config( - session, user, clusterhost, **kwargs + clusterhost, session=None, user=None, **kwargs ): - from compass.db.api import host as host_api - ignore_keys = [] - if not host_api.is_host_editable( - session, clusterhost.host, user, - exception_when_not_editable=False - ): - ignore_keys.append('deployed_os_config') - - def os_config_validates(os_config): - host = clusterhost.host - host_api.is_host_validated(session, host) - - def 
package_config_validates(package_config): - cluster = clusterhost.cluster - is_cluster_editable(session, cluster, user) - is_clusterhost_validated(session, clusterhost) - - @utils.supported_filters( - optional_support_keys=UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS, - ignore_support_keys=ignore_keys - ) - @utils.input_validates( - deployed_os_config=os_config_validates, - deployed_package_config=package_config_validates - ) - def update_config_internal(clusterhost, **in_kwargs): - return utils.update_db_object( - session, clusterhost, **in_kwargs - ) - - return update_config_internal( - clusterhost, **kwargs + """Update clusterhost deployed config.""" + return utils.update_db_object( + session, clusterhost, **kwargs ) +# replace os_config to put_os_config and +# package_config to put_package_config in kwargs. +# It tells db these fields will be updated not patched. @utils.replace_filters( os_config='put_os_config', package_config='put_package_config' ) +@utils.supported_filters( + optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS, +) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_CLUSTERHOST_CONFIG ) def update_cluster_host_config( cluster_id, host_id, user=None, session=None, **kwargs ): - """Update clusterhost config.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, - cluster_id=cluster_id, host_id=host_id + """Update clusterhost config by cluster id and host id.""" + clusterhost = _get_cluster_host( + cluster_id, host_id, session=session ) return _update_clusterhost_config( - session, user, clusterhost, **kwargs + clusterhost, user=user, session=session, **kwargs ) +# replace os_config to deployed_os_config and +# package_config to deployed_package_config in kwargs. 
@utils.replace_filters( os_config='deployed_os_config', package_config='deployed_package_config' ) +@utils.supported_filters( + optional_support_keys=UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS +) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_CLUSTERHOST_CONFIG ) def update_cluster_host_deployed_config( cluster_id, host_id, user=None, session=None, **kwargs ): - """Update clusterhost deployed config.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, - cluster_id=cluster_id, host_id=host_id + """Update clusterhost deployed config by cluster id and host id.""" + clusterhost = _get_cluster_host( + cluster_id, host_id, session=session ) return _update_clusterhost_deployed_config( - session, user, clusterhost, **kwargs + clusterhost, session=session, user=user, **kwargs ) +# replace os_config to put_os_config and +# package_config to put_package_config in kwargs. +# It tells db these fields will be updated not patched. @utils.replace_filters( os_config='put_os_config', package_config='put_package_config' ) +@utils.supported_filters( + optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS, +) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_CLUSTERHOST_CONFIG ) def update_clusterhost_config( clusterhost_id, user=None, session=None, **kwargs ): - """Update clusterhost config.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, clusterhost_id=clusterhost_id + """Update clusterhost config by clusterhost id.""" + clusterhost = _get_clusterhost( + clusterhost_id, session=session ) return _update_clusterhost_config( - session, user, clusterhost, **kwargs + clusterhost, session=session, user=user, **kwargs ) +# replace os_config to deployed_os_config and +# package_config to deployed_package_config in kwargs. 
@utils.replace_filters( os_config='deployed_os_config', package_config='deployed_package_config' ) +@utils.supported_filters( + optional_support_keys=UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS +) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_CLUSTERHOST_CONFIG ) def update_clusterhost_deployed_config( clusterhost_id, user=None, session=None, **kwargs ): - """Update clusterhost deployed config.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, clusterhost_id=clusterhost_id + """Update clusterhost deployed config by clusterhost id.""" + clusterhost = _get_clusterhost( + clusterhost_id, session=session ) return _update_clusterhost_deployed_config( - session, user, clusterhost, **kwargs - ) - - -@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS) -def _patch_clusterhost_config(session, user, clusterhost, **kwargs): - from compass.db.api import host as host_api - ignore_keys = [] - if not host_api.is_host_editable( - session, clusterhost.host, user, - exception_when_not_editable=False - ): - ignore_keys.append('patched_os_config') - - def os_config_validates(os_config): - host = clusterhost.host - metadata_api.validate_os_config(session, os_config, host.os_id) - - def package_config_validates(package_config): - cluster = clusterhost.cluster - is_cluster_editable(session, cluster, user) - metadata_api.validate_flavor_config( - session, package_config, cluster.flavor.id - ) - - @utils.supported_filters( - optional_support_keys=PATCHED_CLUSTERHOST_CONFIG_FIELDS, - ignore_support_keys=ignore_keys - ) - @utils.output_validates( - os_config=os_config_validates, - package_config=package_config_validates - ) - def patch_config_internal(clusterhost, **in_kwargs): - return utils.update_db_object( - session, clusterhost, **in_kwargs - ) - - return patch_config_internal( - clusterhost, **kwargs + clusterhost, session=session, user=user, **kwargs ) +# replace os_config to 
patched_os_config and +# package_config to patched_package_config in kwargs +# It tells db these fields will be patched not updated. @utils.replace_filters( os_config='patched_os_config', package_config='patched_package_config' ) +@utils.supported_filters( + optional_support_keys=PATCHED_CLUSTERHOST_CONFIG_FIELDS, +) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_CLUSTERHOST_CONFIG ) def patch_cluster_host_config( cluster_id, host_id, user=None, session=None, **kwargs ): - """patch clusterhost config.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, - cluster_id=cluster_id, host_id=host_id + """patch clusterhost config by cluster id and host id.""" + clusterhost = _get_cluster_host( + cluster_id, host_id, session=session ) - return _patch_clusterhost_config( - session, user, clusterhost, **kwargs + return _update_clusterhost_config( + clusterhost, session=session, user=user, **kwargs ) +# replace os_config to patched_os_config and +# package_config to patched_package_config in kwargs +# It tells db these fields will be patched not updated. 
@utils.replace_filters( os_config='patched_os_config', package_config='patched_package_config' ) +@utils.supported_filters( + optional_support_keys=PATCHED_CLUSTERHOST_CONFIG_FIELDS, +) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_CLUSTERHOST_CONFIG ) def patch_clusterhost_config( clusterhost_id, user=None, session=None, **kwargs ): - """patch clusterhost config.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, clusterhost_id=clusterhost_id + """patch clusterhost config by clusterhost id.""" + clusterhost = _get_clusterhost( + clusterhost_id, session=session ) - return _patch_clusterhost_config( - session, user, clusterhost, **kwargs + return _update_clusterhost_config( + clusterhost, session=session, user=user, **kwargs ) +def _clusterhost_host_editable( + config, clusterhost, session=None, user=None, **kwargs +): + """Check clusterhost underlying host is editable.""" + from compass.db.api import host as host_api + host_api.check_host_editable(clusterhost.host, user=user) + + +def _clusterhost_cluster_editable( + config, clusterhost, session=None, user=None, **kwargs +): + """Check clusterhost's cluster is editable.""" + check_cluster_editable(clusterhost.cluster, user=user) + + +@utils.supported_filters( + optional_support_keys=['os_config', 'package_config'] +) +@utils.input_filters( + os_config=_filter_clusterhost_host_editable, +) +@utils.output_validates_with_args( + package_config=_clusterhost_cluster_editable +) @utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS) def _delete_clusterhost_config( - session, user, clusterhost + clusterhost, session=None, user=None, **kwargs ): - from compass.db.api import host as host_api - ignore_keys = [] - if not host_api.is_host_editable( - session, clusterhost.host, user, - exception_when_not_editable=False - ): - ignore_keys.append('os_config') - - def package_config_validates(package_config): - 
is_cluster_editable(session, clusterhost.cluster, user) - - @utils.supported_filters( - optional_support_keys=['os_config', 'package_config'], - ignore_support_keys=ignore_keys - ) - @utils.output_validates( - package_config=package_config_validates - ) - def delete_config_internal(clusterhost, **in_kwargs): - return utils.update_db_object( - session, clusterhost, config_validated=False, - **in_kwargs - ) - - return delete_config_internal( - clusterhost, os_config={}, - package_config={} + """delete clusterhost config.""" + return utils.update_db_object( + session, clusterhost, config_validated=False, + **kwargs ) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEL_CLUSTERHOST_CONFIG ) def delete_cluster_host_config( cluster_id, host_id, user=None, session=None ): - """Delete a clusterhost config.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, - cluster_id=cluster_id, host_id=host_id + """Delete a clusterhost config by cluster id and host id.""" + clusterhost = _get_cluster_host( + cluster_id, host_id, session=session ) return _delete_clusterhost_config( - session, user, clusterhost + clusterhost, session=session, user=user, + os_config={}, package_config={} ) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEL_CLUSTERHOST_CONFIG ) @utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS) def delete_clusterhost_config(clusterhost_id, user=None, session=None): - """Delet a clusterhost config.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, clusterhost_id=clusterhost_id + """Delet a clusterhost config by clusterhost id.""" + clusterhost = _get_clusterhost( + clusterhost_id, session=session ) return _delete_clusterhost_config( - session, user, clusterhost + clusterhost, session=session, user=user, + os_config={}, 
package_config={} ) @@ -1377,7 +1572,7 @@ def delete_clusterhost_config(clusterhost_id, user=None, session=None): optional_support_keys=['add_hosts', 'remove_hosts', 'set_hosts'] ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_CLUSTER_HOSTS ) @utils.wrap_to_dict( @@ -1389,59 +1584,53 @@ def update_cluster_hosts( remove_hosts={}, user=None, session=None ): """Update cluster hosts.""" - cluster = utils.get_db_object( - session, models.Cluster, id=cluster_id - ) - is_cluster_editable(session, cluster, user) + cluster = _get_cluster(cluster_id, session=session) if remove_hosts: - _remove_clusterhosts(session, cluster, **remove_hosts) + _remove_clusterhosts( + cluster, session=session, user=user, **remove_hosts + ) if add_hosts: - _add_clusterhosts(session, cluster, **add_hosts) + _add_clusterhosts( + cluster, session=session, user=user, **add_hosts + ) if set_hosts is not None: - _set_clusterhosts(session, cluster, **set_hosts) - clusterhosts = utils.list_db_objects( - session, models.ClusterHost, cluster_id=cluster.id - ) - logging.info('updated clusterhosts: %s', clusterhosts) - for clusterhost in clusterhosts: - logging.info('clusterhost state: %s', clusterhost.state) + _set_clusterhosts( + cluster, session=session, user=user, **set_hosts + ) return { - 'hosts': clusterhosts + 'hosts': cluster.clusterhosts } -def validate_clusterhost(session, clusterhost): +def validate_clusterhost(clusterhost, session=None): + """validate clusterhost.""" roles = clusterhost.roles if not roles: - flavor = clusterhost.cluster.flavor - if flavor and flavor.flavor_roles: + if clusterhost.cluster.flavor_name: raise exception.InvalidParameter( 'empty roles for clusterhost %s' % clusterhost.name ) -def validate_cluster(session, cluster): +def validate_cluster(cluster, session=None): + """Validate cluster.""" if not cluster.clusterhosts: raise exception.InvalidParameter( 'cluster %s does not have any 
hosts' % cluster.name ) - flavor = cluster.flavor - if flavor: - cluster_roles = [ - flavor_role.role - for flavor_role in flavor.flavor_roles - ] + if cluster.flavor_name: + cluster_roles = cluster.flavor['roles'] else: cluster_roles = [] necessary_roles = set([ - role.name for role in cluster_roles if not role.optional + role['name'] for role in cluster_roles if not role.get('optional') ]) clusterhost_roles = set([]) interface_subnets = {} for clusterhost in cluster.clusterhosts: roles = clusterhost.roles for role in roles: - clusterhost_roles.add(role.name) + clusterhost_roles.add(role['name']) host = clusterhost.host for host_network in host.host_networks: interface_subnets.setdefault( @@ -1465,7 +1654,7 @@ def validate_cluster(session, cluster): @utils.supported_filters(optional_support_keys=['review']) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_REVIEW_CLUSTER ) @utils.wrap_to_dict( @@ -1474,86 +1663,98 @@ def validate_cluster(session, cluster): hosts=RESP_CLUSTERHOST_CONFIG_FIELDS ) def review_cluster(cluster_id, review={}, user=None, session=None, **kwargs): - """review cluster.""" + """review cluster. + + Args: + cluster_id: the cluster id. + review: dict contains hosts to be reviewed. either contains key + hosts or clusterhosts. where hosts is a list of host id, + clusterhosts is a list of clusterhost id. + """ from compass.db.api import host as host_api - cluster = utils.get_db_object( - session, models.Cluster, id=cluster_id - ) - is_cluster_editable(session, cluster, user) + cluster = _get_cluster(cluster_id, session=session) + check_cluster_editable(cluster, user=user) host_ids = review.get('hosts', []) clusterhost_ids = review.get('clusterhosts', []) clusterhosts = [] + # Get clusterhosts need to be reviewed. 
for clusterhost in cluster.clusterhosts: if ( clusterhost.clusterhost_id in clusterhost_ids or clusterhost.host_id in host_ids ): clusterhosts.append(clusterhost) + os_config = copy.deepcopy(cluster.os_config) os_config = metadata_api.autofill_os_config( - session, os_config, cluster.os_id, - cluster=cluster + os_config, cluster.os_id, cluster=cluster ) - if os_config: - metadata_api.validate_os_config( - session, os_config, cluster.os_id, True + metadata_api.validate_os_config( + os_config, cluster.os_id, True + ) + for clusterhost in clusterhosts: + host = clusterhost.host + # ignore underlying host os config validation + # since the host is not editable + if not host_api.is_host_editable( + host, user=user, check_in_installing=False + ): + logging.info( + 'ignore update host %s config ' + 'since it is not editable' % host.name + ) + continue + host_os_config = copy.deepcopy(host.os_config) + host_os_config = metadata_api.autofill_os_config( + host_os_config, host.os_id, + host=host ) - for clusterhost in clusterhosts: - host = clusterhost.host - if not host_api.is_host_editable( - session, host, user, False - ): - logging.info( - 'ignore update host %s config ' - 'since it is not editable' % host.name - ) - continue - host_os_config = copy.deepcopy(host.os_config) - host_os_config = metadata_api.autofill_os_config( - session, host_os_config, host.os_id, - host=host - ) - deployed_os_config = util.merge_dict( - os_config, host_os_config - ) - metadata_api.validate_os_config( - session, deployed_os_config, host.os_id, True - ) - host_api.validate_host(session, host) - utils.update_db_object( - session, host, os_config=host_os_config, config_validated=True - ) + deployed_os_config = util.merge_dict( + os_config, host_os_config + ) + metadata_api.validate_os_config( + deployed_os_config, host.os_id, True + ) + host_api.validate_host(host) + utils.update_db_object( + session, host, os_config=host_os_config, config_validated=True + ) + package_config = 
copy.deepcopy(cluster.package_config) - package_config = metadata_api.autofill_package_config( - session, package_config, cluster.adapter_id, - cluster=cluster - ) - if package_config: + if cluster.flavor_name: + package_config = metadata_api.autofill_flavor_config( + package_config, cluster.flavor_id, + cluster=cluster + ) metadata_api.validate_flavor_config( - session, package_config, cluster.flavor.id, True + package_config, cluster.flavor_id, True ) for clusterhost in clusterhosts: clusterhost_package_config = copy.deepcopy( clusterhost.package_config ) - clusterhost_package_config = metadata_api.autofill_package_config( - session, clusterhost_package_config, - cluster.adapter_id, clusterhost=clusterhost + clusterhost_package_config = ( + metadata_api.autofill_flavor_config( + clusterhost_package_config, + cluster.flavor_id, + clusterhost=clusterhost + ) ) deployed_package_config = util.merge_dict( package_config, clusterhost_package_config ) metadata_api.validate_flavor_config( - session, deployed_package_config, - cluster.flavor.id, True + deployed_package_config, + cluster.flavor_id, True ) - validate_clusterhost(session, clusterhost) + validate_clusterhost(clusterhost, session=session) utils.update_db_object( session, clusterhost, package_config=clusterhost_package_config, config_validated=True ) - validate_cluster(session, cluster) + + validate_cluster(cluster, session=session) utils.update_db_object( session, cluster, os_config=os_config, package_config=package_config, config_validated=True @@ -1566,7 +1767,7 @@ def review_cluster(cluster_id, review={}, user=None, session=None, **kwargs): @utils.supported_filters(optional_support_keys=['deploy']) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEPLOY_CLUSTER ) @utils.wrap_to_dict( @@ -1577,36 +1778,39 @@ def review_cluster(cluster_id, review={}, user=None, session=None, **kwargs): def deploy_cluster( cluster_id, deploy={}, 
user=None, session=None, **kwargs ): - """deploy cluster.""" + """deploy cluster. + + Args: + cluster_id: cluster id. + deploy: dict contains key either hosts or clusterhosts. + deploy['hosts'] is a list of host id, + deploy['clusterhosts'] is a list of clusterhost id. + """ from compass.db.api import host as host_api from compass.tasks import client as celery_client - cluster = utils.get_db_object( - session, models.Cluster, id=cluster_id - ) + cluster = _get_cluster(cluster_id, session=session) host_ids = deploy.get('hosts', []) clusterhost_ids = deploy.get('clusterhosts', []) clusterhosts = [] + # get clusterhost to deploy. for clusterhost in cluster.clusterhosts: if ( clusterhost.clusterhost_id in clusterhost_ids or clusterhost.host_id in host_ids ): clusterhosts.append(clusterhost) - is_cluster_editable(session, cluster, user) - is_cluster_validated(session, cluster) + check_cluster_editable(cluster, user=user) + check_cluster_validated(cluster) utils.update_db_object(session, cluster.state, state='INITIALIZED') for clusterhost in clusterhosts: host = clusterhost.host - if host_api.is_host_editable( - session, host, user, - exception_when_not_editable=False - ): - host_api.is_host_validated( - session, host - ) + # ignore checking if underlying host is validated if + # the host is not editable. 
+ if host_api.is_host_editable(host, user=user): + host_api.check_host_validated(host) utils.update_db_object(session, host.state, state='INITIALIZED') - if cluster.distributed_system: - is_clusterhost_validated(session, clusterhost) + if cluster.flavor_name: + check_clusterhost_validated(clusterhost) utils.update_db_object( session, clusterhost.state, state='INITIALIZED' ) @@ -1627,82 +1831,72 @@ def deploy_cluster( @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_GET_CLUSTER_STATE ) @utils.wrap_to_dict(RESP_STATE_FIELDS) def get_cluster_state(cluster_id, user=None, session=None, **kwargs): """Get cluster state info.""" - return utils.get_db_object( - session, models.Cluster, id=cluster_id - ).state_dict() + return _get_cluster(cluster_id, session=session).state_dict() @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_GET_CLUSTERHOST_STATE ) @utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS) def get_cluster_host_state( cluster_id, host_id, user=None, session=None, **kwargs ): - """Get clusterhost state info.""" - return utils.get_db_object( - session, models.ClusterHost, - cluster_id=cluster_id, host_id=host_id + """Get clusterhost state merged with underlying host state.""" + return _get_cluster_host( + cluster_id, host_id, session=session ).state_dict() @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_GET_CLUSTERHOST_STATE ) @utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS) def get_cluster_host_self_state( cluster_id, host_id, user=None, session=None, **kwargs ): - """Get clusterhost state info.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, - cluster_id=cluster_id, host_id=host_id - ) - return utils.get_db_object( - 
session, models.ClusterHostState, - id=clusterhost.clusterhost_id - ) + """Get clusterhost itself state.""" + return _get_cluster_host( + cluster_id, host_id, session=session + ).state @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_GET_CLUSTERHOST_STATE ) @utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS) def get_clusterhost_state( clusterhost_id, user=None, session=None, **kwargs ): - """Get clusterhost state info.""" - return utils.get_db_object( - session, models.ClusterHost, - clusterhost_id=clusterhost_id + """Get clusterhost state merged with underlying host state.""" + return _get_clusterhost( + clusterhost_id, session=session ).state_dict() @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_GET_CLUSTERHOST_STATE ) @utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS) def get_clusterhost_self_state( clusterhost_id, user=None, session=None, **kwargs ): - """Get clusterhost state info.""" - return utils.get_db_object( - session, models.ClusterHost, - clusterhost_id=clusterhost_id + """Get clusterhost itself state.""" + return _get_clusterhost( + clusterhost_id, session=session ).state @@ -1711,61 +1905,38 @@ def get_clusterhost_self_state( ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_CLUSTERHOST_STATE ) @utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS) def update_cluster_host_state( cluster_id, host_id, user=None, session=None, **kwargs ): - """Update a clusterhost state.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, - cluster_id=cluster_id, host_id=host_id + """Update a clusterhost itself state.""" + clusterhost = _get_cluster_host( + cluster_id, host_id, session=session ) 
utils.update_db_object(session, clusterhost.state, **kwargs) return clusterhost.state_dict() -@utils.supported_filters( - optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS, - ignore_support_keys=IGNORE_FIELDS -) -@database.run_in_session() -@user_api.check_user_permission_in_session( - permission.PERMISSION_UPDATE_CLUSTERHOST_STATE -) -@utils.wrap_to_dict(['status', 'clusterhost']) -def update_cluster_host_state_internal( - clustername, hostname, from_database_only=False, - user=None, session=None, **kwargs +def _update_clusterhost_state( + clusterhost, from_database_only=False, + session=None, **kwargs ): - """Update a clusterhost state.""" - if isinstance(clustername, (int, long)): - cluster = utils.get_db_object( - session, models.Cluster, id=clustername - ) - else: - cluster = utils.get_db_object( - session, models.Cluster, name=clustername - ) - if isinstance(hostname, (int, long)): - host = utils.get_db_object( - session, models.Host, id=hostname - ) - else: - host = utils.get_db_object( - session, models.Host, name=hostname - ) - clusterhost = utils.get_db_object( - session, models.ClusterHost, - cluster_id=cluster.id, host_id=host.id - ) + """Update clusterhost state. + + If from_database_only, the state will only be updated in database. + Otherwise a task sent to celery and os installer/package installer + will also update its state if needed. 
+ """ if 'ready' in kwargs and kwargs['ready'] and not clusterhost.state.ready: ready_triggered = True else: ready_triggered = False cluster_ready = False + host = clusterhost.host + cluster = clusterhost.cluster host_ready = not host.state.ready if ready_triggered: cluster_ready = True @@ -1779,8 +1950,8 @@ def update_cluster_host_state_internal( cluster_ready = False logging.info( - 'cluster %s host %s ready: %s', - clustername, hostname, ready_triggered + 'clusterhost %s ready: %s', + clusterhost.name, ready_triggered ) logging.info('cluster ready: %s', cluster_ready) logging.info('host ready: %s', host_ready) @@ -1810,107 +1981,72 @@ def update_cluster_host_state_internal( } +@util.deprecated +@utils.supported_filters( + optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS, + ignore_support_keys=IGNORE_FIELDS +) +@database.run_in_session() +@user_api.check_user_permission( + permission.PERMISSION_UPDATE_CLUSTERHOST_STATE +) +@utils.wrap_to_dict(['status', 'clusterhost']) +def update_cluster_host_state_internal( + cluster_id, host_id, from_database_only=False, + user=None, session=None, **kwargs +): + """Update a clusterhost state by installation process.""" + # TODO(xicheng): it should be merged into update_cluster_host_state + clusterhost = _get_cluster_host( + cluster_id, host_id, session=session + ) + return _update_clusterhost_state( + clusterhost, from_database_only=from_database_only, + session=None, **kwargs + ) + + @utils.supported_filters( optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS, ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_CLUSTERHOST_STATE ) @utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS) def update_clusterhost_state( clusterhost_id, user=None, session=None, **kwargs ): - """Update a clusterhost state.""" - clusterhost = utils.get_db_object( - session, models.ClusterHost, - 
clusterhost_id=clusterhost_id + """Update a clusterhost itself state.""" + clusterhost = _get_clusterhost( + clusterhost_id, session=session ) utils.update_db_object(session, clusterhost.state, **kwargs) return clusterhost.state_dict() +@util.deprecated @utils.supported_filters( optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS, ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_CLUSTERHOST_STATE ) @utils.wrap_to_dict(['status', 'clusterhost']) def update_clusterhost_state_internal( - clusterhost_name, from_database_only=False, + clusterhost_id, from_database_only=False, user=None, session=None, **kwargs ): - """Update a clusterhost state.""" - if isinstance(clusterhost_name, (int, long)): - clusterhost = utils.get_db_object( - session, models.ClusterHost, - clusterhost_id=clusterhost_name - ) - cluster = clusterhost.cluster - host = clusterhost.host - else: - hostname, clustername = clusterhost_name.split('.', 1) - cluster = utils.get_db_object( - session, models.Cluster, name=clustername - ) - host = utils.get_db_object( - session, models.Host, name=hostname - ) - clusterhost = utils.get_db_object( - session, models.ClusterHost, - cluster_id=cluster.id, host_id=host.id - ) - if 'ready' in kwargs and kwargs['ready'] and not clusterhost.state.ready: - ready_triggered = True - else: - ready_triggered = False - cluster_ready = False - host_ready = not host.state.ready - if ready_triggered: - cluster_ready = True - for clusterhost_in_cluster in cluster.clusterhosts: - if ( - clusterhost_in_cluster.clusterhost_id - == clusterhost.clusterhost_id - ): - continue - if not clusterhost_in_cluster.state.ready: - cluster_ready = False - - logging.info( - 'clusterhost %s ready: %s', - clusterhost_name, ready_triggered + """Update a clusterhost state by installation process.""" + # TODO(xicheng): it should be merged into update_clusterhost_state 
+ clusterhost = _get_clusterhost(clusterhost_id, session=session) + return _update_clusterhost_state( + clusterhost, from_database_only=from_database_only, + session=None, **kwargs ) - logging.info('cluster ready: %s', cluster_ready) - logging.info('host ready: %s', host_ready) - if not ready_triggered or from_database_only: - logging.info('%s set state to %s', clusterhost.name, kwargs) - utils.update_db_object(session, clusterhost.state, **kwargs) - if not clusterhost.state.ready: - logging.info('%s state ready is to False', cluster.name) - utils.update_db_object(session, cluster.state, ready=False) - status = '%s state is updated' % clusterhost.name - else: - from compass.tasks import client as celery_client - celery_client.celery.send_task( - 'compass.tasks.package_installed', - ( - clusterhost.cluster_id, clusterhost.host_id, - cluster_ready, host_ready - ) - ) - status = '%s: cluster ready %s host ready %s' % ( - clusterhost.name, cluster_ready, host_ready - ) - logging.info('action status: %s', status) - return { - 'status': status, - 'clusterhost': clusterhost.state_dict() - } @utils.supported_filters( @@ -1918,7 +2054,7 @@ def update_clusterhost_state_internal( ignore_support_keys=(IGNORE_FIELDS + IGNORE_UPDATED_CLUSTER_STATE_FIELDS) ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_CLUSTER_STATE ) @utils.wrap_to_dict(RESP_STATE_FIELDS) @@ -1926,35 +2062,35 @@ def update_cluster_state( cluster_id, user=None, session=None, **kwargs ): """Update a cluster state.""" - cluster = utils.get_db_object( - session, models.Cluster, id=cluster_id + cluster = _get_cluster( + cluster_id, session=session ) utils.update_db_object(session, cluster.state, **kwargs) return cluster.state_dict() +@util.deprecated @utils.supported_filters( optional_support_keys=UPDATED_CLUSTER_STATE_INTERNAL_FIELDS, ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() 
-@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_CLUSTER_STATE ) @utils.wrap_to_dict(['status', 'cluster']) def update_cluster_state_internal( - clustername, from_database_only=False, + cluster_id, from_database_only=False, user=None, session=None, **kwargs ): - """Update a cluster state.""" - if isinstance(clustername, (int, long)): - cluster = utils.get_db_object( - session, models.Cluster, id=clustername - ) - else: - cluster = utils.get_db_object( - session, models.Cluster, name=clustername - ) + """Update a cluster state by installation process. + + If from_database_only, the state will only be updated in database. + Otherwise a task sent to do state update in os installer and + package installer. + """ + # TODO(xicheng): it should be merged into update_cluster_state + cluster = _get_cluster(cluster_id, session=session) if 'ready' in kwargs and kwargs['ready'] and not cluster.state.ready: ready_triggered = True else: @@ -1966,7 +2102,7 @@ def update_cluster_state_internal( not clusterhost.state.ready ) - logging.info('cluster %s ready: %s', clustername, ready_triggered) + logging.info('cluster %s ready: %s', cluster_id, ready_triggered) logging.info('clusterhost ready: %s', clusterhost_ready) if not ready_triggered or from_database_only: @@ -2001,11 +2137,10 @@ def update_cluster_state_internal( def get_cluster_host_log_histories( cluster_id, host_id, user=None, session=None, **kwargs ): - """Get clusterhost log history.""" - return utils.list_db_objects( - session, models.ClusterHostLogHistory, - cluster_id=cluster_id, host_id=host_id - ) + """Get clusterhost log history by cluster id and host id.""" + return _get_cluster_host( + cluster_id, host_id, session=session + ).log_histories @utils.supported_filters([]) @@ -2015,9 +2150,33 @@ def get_clusterhost_log_histories( clusterhost_id, user=None, session=None, **kwargs ): - """Get clusterhost log history.""" - return utils.list_db_objects( - session, 
models.ClusterHostLogHistory, clusterhost_id=clusterhost_id + """Get clusterhost log history by clusterhost id.""" + return _get_clusterhost( + clusterhost_id, session=session + ).log_histories + + +def _get_cluster_host_log_history( + cluster_id, host_id, filename, session=None, **kwargs +): + """Get clusterhost log history by cluster id, host id and filename.""" + clusterhost = _get_cluster_host(cluster_id, host_id, session=session) + return utils.get_db_object( + session, models.ClusterHostLogHistory, + clusterhost_id=clusterhost.clusterhost_id, filename=filename, + **kwargs + ) + + +def _get_clusterhost_log_history( + clusterhost_id, filename, session=None, **kwargs +): + """Get clusterhost log history by clusterhost id and filename.""" + clusterhost = _get_clusterhost(clusterhost_id, session=session) + return utils.get_db_object( + session, models.ClusterHostLogHistory, + clusterhost_id=clusterhost.clusterhost_id, filename=filename, + **kwargs ) @@ -2027,10 +2186,9 @@ def get_clusterhost_log_histories( def get_cluster_host_log_history( cluster_id, host_id, filename, user=None, session=None, **kwargs ): - """Get clusterhost log history.""" - return utils.get_db_object( - session, models.ClusterHostLogHistory, - cluster_id=cluster_id, host_id=host_id, filename=filename + """Get clusterhost log history by cluster id, host id and filename.""" + return _get_cluster_host_log_history( + cluster_id, host_id, filename, session=session ) @@ -2040,10 +2198,9 @@ def get_cluster_host_log_history( def get_clusterhost_log_history( clusterhost_id, filename, user=None, session=None, **kwargs ): - """Get host log history.""" - return utils.get_db_object( - session, models.ClusterHostLogHistory, - clusterhost_id=clusterhost_id, filename=filename + """Get host log history by clusterhost id and filename.""" + return _get_clusterhost_log_history( + clusterhost_id, filename, session=session ) @@ -2056,12 +2213,13 @@ def get_clusterhost_log_history( def 
update_cluster_host_log_history( cluster_id, host_id, filename, user=None, session=None, **kwargs ): - """Update a host log history.""" - cluster_host_log_history = utils.get_db_object( - session, models.ClusterHostLogHistory, - cluster_id=cluster_id, host_id=host_id, filename=filename + """Update a host log history by cluster id, host id and filename.""" + cluster_host_log_history = _get_cluster_host_log_history( + cluster_id, host_id, filename, session=session + ) + return utils.update_db_object( + session, cluster_host_log_history, **kwargs ) - return utils.update_db_object(session, cluster_host_log_history, **kwargs) @utils.supported_filters( @@ -2073,10 +2231,9 @@ def update_cluster_host_log_history( def update_clusterhost_log_history( clusterhost_id, filename, user=None, session=None, **kwargs ): - """Update a host log history.""" - clusterhost_log_history = utils.get_db_object( - session, models.ClusterHostLogHistory, - clusterhost_id=clusterhost_id, filename=filename + """Update a host log history by clusterhost id and filename.""" + clusterhost_log_history = _get_clusterhost_log_history( + clusterhost_id, filename, session=session ) return utils.update_db_object(session, clusterhost_log_history, **kwargs) @@ -2092,10 +2249,12 @@ def add_clusterhost_log_history( clusterhost_id, exception_when_existing=False, filename=None, user=None, session=None, **kwargs ): - """add a host log history.""" + """add a host log history by clusterhost id and filename.""" + clusterhost = _get_clusterhost(clusterhost_id, session=session) return utils.add_db_object( - session, models.ClusterHostLogHistory, exception_when_existing, - clusterhost_id, filename, **kwargs + session, models.ClusterHostLogHistory, + exception_when_existing, + clusterhost.clusterhost_id, filename, **kwargs ) @@ -2110,10 +2269,9 @@ def add_cluster_host_log_history( cluster_id, host_id, exception_when_existing=False, filename=None, user=None, session=None, **kwargs ): - """add a host log history.""" - 
clusterhost = utils.get_db_object( - session, models.ClusterHost, - cluster_id=cluster_id, host_id=host_id + """add a host log history by cluster id, host id and filename.""" + clusterhost = _get_cluster_host( + cluster_id, host_id, session=session ) return utils.add_db_object( session, models.ClusterHostLogHistory, exception_when_existing, diff --git a/compass/db/api/database.py b/compass/db/api/database.py index bba22f74..49769d7e 100644 --- a/compass/db/api/database.py +++ b/compass/db/api/database.py @@ -51,6 +51,8 @@ POOL_MAPPING = { def init(database_url=None): """Initialize database. + Adjust sqlalchemy logging if necessary. + :param database_url: string, database url. """ global ENGINE @@ -81,35 +83,48 @@ def init(database_url=None): def in_session(): """check if in database session scope.""" - if hasattr(SESSION_HOLDER, 'session'): - return True - else: - return False + bool(hasattr(SESSION_HOLDER, 'session')) @contextmanager -def session(): +def session(exception_when_in_session=True): """database session scope. - .. note:: - To operate database, it should be called in database session. + To operate database, it should be called in database session. + If not exception_when_in_session, the with session statement support + nested session and only the out most session commit/rollback the + transaction. 
""" if not ENGINE: init() + nested_session = False if hasattr(SESSION_HOLDER, 'session'): - logging.error('we are already in session') - raise exception.DatabaseException('session already exist') + if exception_when_in_session: + logging.error('we are already in session') + raise exception.DatabaseException('session already exist') + else: + new_session = SESSION_HOLDER.session + nested_session = True + logging.log( + logsetting.getLevelByName('fine'), + 'reuse session %s', nested_session + ) else: new_session = SCOPED_SESSION() setattr(SESSION_HOLDER, 'session', new_session) - + logging.log( + logsetting.getLevelByName('fine'), + 'enter session %s', new_session + ) try: yield new_session - new_session.commit() + if not nested_session: + new_session.commit() except Exception as error: - new_session.rollback() - logging.error('failed to commit session') + if not nested_session: + new_session.rollback() + logging.error('failed to commit session') logging.exception(error) if isinstance(error, IntegrityError): for item in error.statement.split(): @@ -128,15 +143,21 @@ def session(): else: raise exception.DatabaseException(str(error)) finally: - new_session.close() - SCOPED_SESSION.remove() - delattr(SESSION_HOLDER, 'session') + if not nested_session: + new_session.close() + SCOPED_SESSION.remove() + delattr(SESSION_HOLDER, 'session') + logging.log( + logsetting.getLevelByName('fine'), + 'exit session %s', new_session + ) def current_session(): """Get the current session scope when it is called. :return: database session. + :raises: DatabaseException when it is not in session. """ try: return SESSION_HOLDER.session @@ -149,26 +170,42 @@ def current_session(): raise exception.DatabaseException(str(error)) -def run_in_session(): +def run_in_session(exception_when_in_session=True): + """Decorator to make sure the decorated function run in session. + + When not exception_when_in_session, the run_in_session can be + decorated several times. 
+ """ def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): - if 'session' in kwargs.keys(): - return func(*args, **kwargs) - else: - with session() as my_session: - kwargs['session'] = my_session + try: + my_session = kwargs.get('session') + if my_session is not None: return func(*args, **kwargs) + else: + with session( + exception_when_in_session=exception_when_in_session + ) as my_session: + kwargs['session'] = my_session + return func(*args, **kwargs) + except Exception as error: + logging.error( + 'got exception with func %s args %s kwargs %s', + func, args, kwargs + ) + logging.exception(error) + raise error return wrapper return decorator def _setup_user_table(user_session): - """Initialize default user.""" + """Initialize user table with default user.""" logging.info('setup user table') from compass.db.api import user - user.add_user_internal( - user_session, + user.add_user( + session=user_session, email=setting.COMPASS_ADMIN_EMAIL, password=setting.COMPASS_ADMIN_PASSWORD, is_admin=True @@ -180,120 +217,22 @@ def _setup_permission_table(permission_session): logging.info('setup permission table.') from compass.db.api import permission permission.add_permissions_internal( - permission_session + session=permission_session ) def _setup_switch_table(switch_session): """Initialize switch table.""" + # TODO(xicheng): deprecate setup default switch. 
logging.info('setup switch table') from compass.db.api import switch - switch.add_switch_internal( - switch_session, long(netaddr.IPAddress(setting.DEFAULT_SWITCH_IP)), - True, filters=['allow ports all'] + switch.add_switch( + True, setting.DEFAULT_SWITCH_IP, + session=switch_session, + machine_filters=['allow ports all'] ) -def _setup_os_installers(installer_session): - """Initialize os_installer table.""" - logging.info('setup os installer table') - from compass.db.api import installer - installer.add_os_installers_internal( - installer_session - ) - - -def _setup_package_installers(installer_session): - """Initialize package_installer table.""" - logging.info('setup package installer table') - from compass.db.api import installer - installer.add_package_installers_internal( - installer_session - ) - - -def _setup_oses(os_session): - """Initialize os table.""" - logging.info('setup os table') - from compass.db.api import adapter - adapter.add_oses_internal( - os_session - ) - - -def _setup_distributed_systems(distributed_system_session): - """Initialize distributed system table.""" - logging.info('setup distributed system table') - from compass.db.api import adapter - adapter.add_distributed_systems_internal( - distributed_system_session - ) - - -def _setup_adapters(adapter_session): - """Initialize package adapter table.""" - logging.info('setup adapter table') - from compass.db.api import adapter - adapter.add_adapters_internal( - adapter_session) - - -def _setup_os_fields(field_session): - """Initialize os field table.""" - logging.info('setup os field table') - from compass.db.api import metadata - metadata.add_os_field_internal(field_session) - - -def _setup_package_fields(field_session): - """Initialize package field table.""" - logging.info('setup package field table') - from compass.db.api import metadata - metadata.add_package_field_internal(field_session) - - -def _setup_flavor_fields(field_session): - """Initialize flavor field table.""" - 
logging.info('setup flavor field table') - from compass.db.api import metadata - metadata.add_flavor_field_internal(field_session) - - -def _setup_os_metadatas(metadata_session): - """Initialize os metadata table.""" - logging.info('setup os metadata table') - from compass.db.api import metadata - metadata.add_os_metadata_internal(metadata_session) - - -def _setup_package_metadatas(metadata_session): - """Initialize package metadata table.""" - logging.info('setup package metadata table') - from compass.db.api import metadata - metadata.add_package_metadata_internal(metadata_session) - - -def _setup_flavor_metadatas(metadata_session): - """Initialize flavor metadata table.""" - logging.info('setup flavor metadata table') - from compass.db.api import metadata - metadata.add_flavor_metadata_internal(metadata_session) - - -def _setup_adapter_roles(role_session): - """Initialize adapter role table.""" - logging.info('setup adapter role table') - from compass.db.api import adapter - adapter.add_roles_internal(role_session) - - -def _setup_adapter_flavors(flavor_session): - """Initialize adapter flavor table.""" - logging.info('setup adapter flavor table') - from compass.db.api import adapter - adapter.add_flavors_internal(flavor_session) - - def _update_others(other_session): """Update other tables.""" logging.info('update other tables') @@ -311,25 +250,12 @@ def _update_others(other_session): @run_in_session() -def create_db(session): +def create_db(session=None): """Create database.""" models.BASE.metadata.create_all(bind=ENGINE) _setup_permission_table(session) _setup_user_table(session) _setup_switch_table(session) - _setup_os_installers(session) - _setup_package_installers(session) - _setup_oses(session) - _setup_distributed_systems(session) - _setup_adapters(session) - _setup_adapter_roles(session) - _setup_adapter_flavors(session) - _setup_os_fields(session) - _setup_package_fields(session) - _setup_flavor_fields(session) - _setup_os_metadatas(session) - 
_setup_package_metadatas(session) - _setup_flavor_metadatas(session) _update_others(session) diff --git a/compass/db/api/health_check_report.py b/compass/db/api/health_check_report.py index 8fd34572..562008c9 100644 --- a/compass/db/api/health_check_report.py +++ b/compass/db/api/health_check_report.py @@ -16,7 +16,9 @@ """Cluster health check report.""" import logging +from compass.db.api import cluster as cluster_api from compass.db.api import database +from compass.db.api import host as host_api from compass.db.api import permission from compass.db.api import user as user_api from compass.db.api import utils @@ -39,27 +41,32 @@ RESP_ACTION_FIELDS = ['cluster_id', 'status'] @utils.supported_filters(REQUIRED_INSERT_FIELDS, OPTIONAL_INSERT_FIELDS) @database.run_in_session() @utils.wrap_to_dict(RESP_FIELDS) -def add_report_record(cluster_id, name, report={}, +def add_report_record(cluster_id, name=None, report={}, state='verifying', session=None, **kwargs): """Create a health check report record.""" # Replace any white space into '-' words = name.split() name = '-'.join(words) - + cluster = cluster_api.get_cluster_internal(cluster_id, session=session) return utils.add_db_object( - session, models.HealthCheckReport, True, cluster_id, name, + session, models.HealthCheckReport, True, cluster.id, name, report=report, state=state, **kwargs ) +def _get_report(cluster_id, name, session=None): + cluster = cluster_api.get_cluster_internal(cluster_id, session=session) + return utils.get_db_object( + session, models.HealthCheckReport, cluster_id=cluster.id, name=name + ) + + @utils.supported_filters(UPDATE_FIELDS) @database.run_in_session() @utils.wrap_to_dict(RESP_FIELDS) def update_report(cluster_id, name, session=None, **kwargs): """Update health check report.""" - report = utils.get_db_object( - session, models.HealthCheckReport, cluster_id=cluster_id, name=name - ) + report = _get_report(cluster_id, name, session=session) if report.state == 'finished': err_msg = 'Report 
cannot be updated if state is in "finished"' raise exception.Forbidden(err_msg) @@ -72,106 +79,109 @@ def update_report(cluster_id, name, session=None, **kwargs): @utils.wrap_to_dict(RESP_FIELDS) def update_multi_reports(cluster_id, session=None, **kwargs): """Bulk update reports.""" + # TODO(grace): rename the fuction if needed to reflect the fact. return set_error(cluster_id, session=session, **kwargs) def set_error(cluster_id, report={}, session=None, state='error', error_message=None): - with session.begin(subtransactions=True): - logging.debug( - "session %s updates all reports as %s in cluster %s", - id(session), state, cluster_id - ) - session.query( - models.HealthCheckReport - ).filter_by(cluster_id=cluster_id).update( - {"report": {}, 'state': 'error', 'error_message': error_message} - ) - - reports = session.query( - models.HealthCheckReport - ).filter_by(cluster_id=cluster_id).all() - - return reports + cluster = cluster_api.get_cluster_internal(cluster_id, session=session) + logging.debug( + "updates all reports as %s in cluster %s", + state, cluster_id + ) + return utils.update_db_objects( + session, models.HealthCheckReport, + updates={ + 'report': {}, + 'state': 'error', + 'error_message': error_message + }, cluster_id=cluster.id + ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_HEALTH_REPORT ) @utils.wrap_to_dict(RESP_FIELDS) -def list_health_reports(user, cluster_id, session=None): +def list_health_reports(cluster_id, user=None, session=None): """List all reports in the specified cluster.""" + cluster = cluster_api.get_cluster_internal(cluster_id, session=session) return utils.list_db_objects( - session, models.HealthCheckReport, cluster_id=cluster_id + session, models.HealthCheckReport, cluster_id=cluster.id ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_GET_HEALTH_REPORT ) 
@utils.wrap_to_dict(RESP_FIELDS) -def get_health_report(user, cluster_id, name, session=None): - return utils.get_db_object( - session, models.HealthCheckReport, cluster_id=cluster_id, name=name +def get_health_report(cluster_id, name, user=None, session=None): + return _get_report( + cluster_id, name, session=session ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DELETE_REPORT ) @utils.wrap_to_dict(RESP_FIELDS) -def delete_reports(user, cluster_id, name=None, session=None): - if not name: - report = utils.get_db_object( - session, models.HealthCheckReport, cluster_id=cluster_id, name=name - ) +def delete_reports(cluster_id, name=None, user=None, session=None): + # TODO(grace): better to separate this function into two. + # One is to delete a report of a cluster, the other to delete all + # reports under a cluster. + if name: + report = _get_report(cluster_id, name, session=session) return utils.del_db_object(session, report) - - return utils.del_db_objects( - session, models.HealthCheckReport, cluster_id=cluster_id - ) + else: + cluster = cluster_api.get_cluster_internal( + cluster_id, session=session + ) + return utils.del_db_objects( + session, models.HealthCheckReport, cluster_id=cluster.id + ) @utils.supported_filters(optional_support_keys=['check_health']) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_CHECK_CLUSTER_HEALTH ) @utils.wrap_to_dict(RESP_ACTION_FIELDS) -def start_check_cluster_health(user, cluster_id, send_report_url, - session=None, check_health={}): +def start_check_cluster_health(cluster_id, send_report_url, + user=None, session=None, check_health={}): """Start to check cluster health.""" - cluster_state = utils.get_db_object( - session, models.Cluster, True, id=cluster_id - ).state_dict() + cluster = cluster_api.get_cluster_internal(cluster_id, session=session) - if 
cluster_state['state'] != 'SUCCESSFUL': - logging.debug("state is %s" % cluster_state['state']) + if cluster.state.state != 'SUCCESSFUL': + logging.debug("state is %s" % cluster.state.state) err_msg = "Healthcheck starts only after cluster finished deployment!" raise exception.Forbidden(err_msg) reports = utils.list_db_objects( session, models.HealthCheckReport, - cluster_id=cluster_id, state='verifying' + cluster_id=cluster.id, state='verifying' ) if reports: err_msg = 'Healthcheck in progress, please wait for it to complete!' raise exception.Forbidden(err_msg) # Clear all preivous report + # TODO(grace): the delete should be moved into celery task. + # We should consider the case that celery task is down. utils.del_db_objects( - session, models.HealthCheckReport, cluster_id=cluster_id + session, models.HealthCheckReport, cluster_id=cluster.id ) from compass.tasks import client as celery_client celery_client.celery.send_task( 'compass.tasks.cluster_health', - (cluster_id, send_report_url, user.email) + (cluster.id, send_report_url, user.email) ) return { - "cluster_id": cluster_id, + "cluster_id": cluster.id, "status": "start to check cluster health." 
} diff --git a/compass/db/api/host.py b/compass/db/api/host.py index a454ad49..aa8c2fe6 100644 --- a/compass/db/api/host.py +++ b/compass/db/api/host.py @@ -16,6 +16,7 @@ import functools import logging import netaddr +import re from compass.db.api import database from compass.db.api import metadata_holder as metadata_api @@ -24,24 +25,26 @@ from compass.db.api import user as user_api from compass.db.api import utils from compass.db import exception from compass.db import models +from compass.utils import util SUPPORTED_FIELDS = ['name', 'os_name', 'owner', 'mac'] -SUPPORTED_MACHINE_HOST_FIELDS = ['mac', 'tag', 'location', 'os_name', 'os_id'] +SUPPORTED_MACHINE_HOST_FIELDS = [ + 'mac', 'tag', 'location', 'os_name', 'os_id' +] SUPPORTED_NETOWORK_FIELDS = [ 'interface', 'ip', 'is_mgmt', 'is_promiscuous' ] RESP_FIELDS = [ - 'id', 'name', 'hostname', 'os_name', 'os_id', 'owner', 'mac', - 'switch_ip', 'port', 'switches', 'os_installer', 'ip', + 'id', 'name', 'hostname', 'os_name', 'owner', 'mac', + 'switch_ip', 'port', 'switches', 'os_installer', 'os_id', 'ip', 'reinstall_os', 'os_installed', 'tag', 'location', 'networks', 'created_at', 'updated_at' ] RESP_CLUSTER_FIELDS = [ 'id', 'name', 'os_name', 'reinstall_distributed_system', - 'distributed_system_name', 'owner', 'adapter_id', - 'distributed_system_installed', - 'adapter_id', 'created_at', 'updated_at' + 'owner', 'adapter_name', 'flavor_name', + 'distributed_system_installed', 'created_at', 'updated_at' ] RESP_NETWORK_FIELDS = [ 'id', 'ip', 'interface', 'netmask', 'is_mgmt', 'is_promiscuous', @@ -61,7 +64,7 @@ RESP_DEPLOYED_CONFIG_FIELDS = [ RESP_DEPLOY_FIELDS = [ 'status', 'host' ] -UPDATED_FIELDS = ['host_id', 'name', 'reinstall_os'] +UPDATED_FIELDS = ['name', 'reinstall_os'] UPDATED_CONFIG_FIELDS = [ 'put_os_config' ] @@ -106,7 +109,7 @@ UPDATED_LOG_FIELDS = [ @utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS) @database.run_in_session() -@user_api.check_user_permission_in_session( 
+@user_api.check_user_permission( permission.PERMISSION_LIST_HOSTS ) @utils.wrap_to_dict(RESP_FIELDS) @@ -120,7 +123,7 @@ def list_hosts(user=None, session=None, **filters): @utils.supported_filters( optional_support_keys=SUPPORTED_MACHINE_HOST_FIELDS) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_HOSTS ) @utils.output_filters( @@ -132,7 +135,7 @@ def list_hosts(user=None, session=None, **filters): ) @utils.wrap_to_dict(RESP_FIELDS) def list_machines_or_hosts(user=None, session=None, **filters): - """List hosts.""" + """List machines or hosts if possible.""" machines = utils.list_db_objects( session, models.Machine, **filters ) @@ -146,9 +149,30 @@ def list_machines_or_hosts(user=None, session=None, **filters): return machines_or_hosts +def _get_host(host_id, session=None, **kwargs): + """Get host by id.""" + if isinstance(host_id, (int, long)): + return utils.get_db_object( + session, models.Host, + id=host_id, **kwargs + ) + else: + raise exception.InvalidParameter( + 'host id %s type is not int compatible' % host_id + ) + + +def get_host_internal(host_id, session=None, **kwargs): + """Helper function to get host. + + Used by other files under db/api. 
+ """ + return _get_host(host_id, session=session, **kwargs) + + @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_HOSTS ) @utils.wrap_to_dict(RESP_FIELDS) @@ -157,15 +181,16 @@ def get_host( user=None, session=None, **kwargs ): """get host info.""" - return utils.get_db_object( - session, models.Host, - exception_when_missing, id=host_id + return _get_host( + host_id, + exception_when_missing=exception_when_missing, + session=session ) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_HOSTS ) @utils.wrap_to_dict(RESP_FIELDS) @@ -173,78 +198,90 @@ def get_machine_or_host( host_id, exception_when_missing=True, user=None, session=None, **kwargs ): - """get host info.""" - machine = utils.get_db_object( - session, models.Machine, - exception_when_missing, id=host_id + """get machine or host if possible.""" + from compass.db.api import machine as machine_api + machine = machine_api.get_machine_internal( + host_id, + exception_when_missing=exception_when_missing, + session=session ) - if not machine: - return None - host = machine.host - if host: - return host + if machine.host: + return machine.host else: return machine @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_HOST_CLUSTERS ) @utils.wrap_to_dict(RESP_CLUSTER_FIELDS) def get_host_clusters(host_id, user=None, session=None, **kwargs): """get host clusters.""" - host = utils.get_db_object( - session, models.Host, id=host_id - ) + host = _get_host(host_id, session=session) return [clusterhost.cluster for clusterhost in host.clusterhosts] -def _conditional_exception(host, exception_when_not_editable): - if exception_when_not_editable: - raise exception.Forbidden( - 'host %s is not 
editable' % host.name - ) - else: - return False - - -def is_host_validated(session, host): +def check_host_validated(host): + """Check host is validated.""" if not host.config_validated: raise exception.Forbidden( 'host %s is not validated' % host.name ) -def is_host_editable( - session, host, user, - reinstall_os_set=False, exception_when_not_editable=True +def check_host_editable( + host, user=None, + check_in_installing=False ): - if reinstall_os_set: + """Check host is editable. + + If we try to set reinstall_os or check the host is not in installing + state, we should set check_in_installing to True. + Otherwise we will check the host is not in installing or installed. + We also make sure the user is admin or the owner of the host to avoid + unauthorized user to update host attributes. + """ + if check_in_installing: if host.state.state == 'INSTALLING': - logging.debug('installing host is not editable') - return _conditional_exception( - host, exception_when_not_editable + raise exception.Forbidden( + 'host %s is not editable ' + 'when state is in installing' % host.name ) elif not host.reinstall_os: - logging.debug( - 'host is not editable when not reinstall os' + raise exception.Forbidden( + 'host %s is not editable ' + 'when not to be reinstalled' % host.name ) - return _conditional_exception( - host, exception_when_not_editable + if user and not user.is_admin and host.creator_id != user.id: + raise exception.Forbidden( + 'host %s is not editable ' + 'when user is not admin or the owner of the host' % host.name ) - if not user.is_admin and host.creator_id != user.id: - logging.debug( - 'user do not have permission to edit host' - ) - return _conditional_exception( - host, exception_when_not_editable - ) - return True -def validate_host(session, host): +def is_host_editable( + host, user=None, + check_in_installing=False +): + """Get if host is editable.""" + try: + check_host_editable( + host, user=user, + check_in_installing=check_in_installing + ) + 
return True + except exception.Forbidden: + return False + + +def validate_host(host): + """Validate host. + + Makesure hostname is not empty, there is only one mgmt network, + The mgmt network is not in promiscuous mode. + """ if not host.hostname: raise exception.Invalidparameter( 'host %s does not set hostname' % host.name @@ -279,41 +316,43 @@ def validate_host(session, host): ) @utils.input_validates(name=utils.check_name) @utils.wrap_to_dict(RESP_FIELDS) -def _update_host(session, user, host_id, **kwargs): +def _update_host(host_id, session=None, user=None, **kwargs): """Update a host internal.""" - host = utils.get_db_object( - session, models.Host, id=host_id - ) - is_host_editable( - session, host, user, - reinstall_os_set=kwargs.get('reinstall_os', False) + host = _get_host(host_id, session=session) + check_host_editable( + host, user=user, + check_in_installing=kwargs.get('reinstall_os', False) ) return utils.update_db_object(session, host, **kwargs) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_HOST ) def update_host(host_id, user=None, session=None, **kwargs): """Update a host.""" - return _update_host(session, user, host_id=host_id, **kwargs) + return _update_host(host_id, session=session, user=user, **kwargs) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_HOST ) def update_hosts(data=[], user=None, session=None): + """Update hosts.""" + # TODO(xicheng): this batch function is not similar as others. + # try to make it similar output as others and batch update should + # tolerate partial failure. 
hosts = [] for host_data in data: - hosts.append(_update_host(session, user, **host_data)) + hosts.append(_update_host(session=session, user=user, **host_data)) return hosts @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEL_HOST ) @utils.wrap_to_dict( @@ -324,27 +363,48 @@ def del_host( host_id, force=False, from_database_only=False, user=None, session=None, **kwargs ): - """Delete a host.""" + """Delete a host. + + If force, we delete the host anyway. + If from_database_only, we only delete the host record in databaes. + Otherwise we send to del host task to celery to delete the host + record in os installer and package installer, clean installation logs + and at last clean database record. + The backend will call this function again after it deletes the record + in os installer and package installer with from_database_only set. + """ from compass.db.api import cluster as cluster_api - host = utils.get_db_object( - session, models.Host, id=host_id - ) + host = _get_host(host_id, session=session) + # force set host state to ERROR when we want to delete the + # host anyway even the host is in installing or already + # installed. It let the api know the deleting is in doing when backend + # is doing the real deleting. In future we may import a new state like + # INDELETE to indicate the deleting is processing. + # We need discuss about if we can delete a host when it is already + # installed by api. 
if host.state.state != 'UNINITIALIZED' and force: host.state.state = 'ERROR' - is_host_editable( - session, host, user, - reinstall_os_set=True + check_host_editable( + host, user=user, + check_in_installing=True ) cluster_ids = [] for clusterhost in host.clusterhosts: if clusterhost.state.state != 'UNINITIALIZED' and force: clusterhost.state.state = 'ERROR' - cluster_api.is_cluster_editable( - session, clusterhost.cluster, user, - reinstall_distributed_system_set=True + # TODO(grace): here we check all clusters which use this host editable. + # Because in backend we do not have functions to delete host without + # reference its cluster. After deleting pure host supported in backend, + # we should change code here to is_cluster_editable. + # Here delete a host may fail even we set force flag. + cluster_api.check_cluster_editable( + clusterhost.cluster, user=user, + check_in_installing=True ) cluster_ids.append(clusterhost.cluster_id) + # Delete host record directly if there is no need to delete it + # in backend or from_database_only is set. 
if host.state.state == 'UNINITIALIZED' or from_database_only: return utils.del_db_object(session, host) else: @@ -355,7 +415,7 @@ def del_host( celery_client.celery.send_task( 'compass.tasks.delete_host', ( - user.email, host_id, cluster_ids + user.email, host.id, cluster_ids ) ) return { @@ -366,30 +426,27 @@ def del_host( @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_HOST_CONFIG ) @utils.wrap_to_dict(RESP_CONFIG_FIELDS) def get_host_config(host_id, user=None, session=None, **kwargs): """Get host config.""" - return utils.get_db_object( - session, models.Host, id=host_id - ) + return _get_host(host_id, session=session) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_HOST_CONFIG ) @utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS) def get_host_deployed_config(host_id, user=None, session=None, **kwargs): """Get host deployed config.""" - return utils.get_db_object( - session, models.Host, id=host_id - ) + return _get_host(host_id, session=session) +# replace os_config to deployed_os_config in kwargs. 
@utils.replace_filters( os_config='deployed_os_config' ) @@ -398,27 +455,42 @@ def get_host_deployed_config(host_id, user=None, session=None, **kwargs): ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_HOST_CONFIG ) @utils.wrap_to_dict(RESP_CONFIG_FIELDS) def update_host_deployed_config(host_id, user=None, session=None, **kwargs): """Update host deployed config.""" - host = utils.get_db_object( - session, models.Host, id=host_id + host = _get_host(host_id, session=session) + check_host_editable(host, user=user) + check_host_validated(host) + return utils.update_db_object(session, host, **kwargs) + + +def _host_os_config_validates( + config, host, session=None, user=None, **kwargs +): + """Check host os config's validation.""" + metadata_api.validate_os_config( + config, host.os_id ) - is_host_editable(session, host, user) - is_host_validated(session, host) - return utils.update_db_object(session, host, **kwargs) +@utils.input_validates_with_args( + put_os_config=_host_os_config_validates +) +@utils.output_validates_with_args( + os_config=_host_os_config_validates +) @utils.wrap_to_dict(RESP_CONFIG_FIELDS) -def _update_host_config(session, user, host, **kwargs): +def _update_host_config(host, session=None, user=None, **kwargs): """Update host config.""" - is_host_editable(session, host, user) + check_host_editable(host, user=user) return utils.update_db_object(session, host, **kwargs) +# replace os_config to put_os_config in kwargs. +# It tells db the os_config will be updated not patched. 
@utils.replace_filters( os_config='put_os_config' ) @@ -427,32 +499,19 @@ def _update_host_config(session, user, host, **kwargs): ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_HOST_CONFIG ) def update_host_config(host_id, user=None, session=None, **kwargs): - host = utils.get_db_object( - session, models.Host, id=host_id - ) - - def os_config_validates(config): - metadata_api.validate_os_config( - session, config, os_id=host.os_id - ) - - @utils.input_validates( - put_os_config=os_config_validates, - ) - def update_config_internal(host, **in_kwargs): - return _update_host_config( - session, user, host, **kwargs - ) - - return update_config_internal( - host, **kwargs + """Update host config.""" + host = _get_host(host_id, session=session) + return _update_host_config( + host, session=session, user=user, **kwargs ) +# replace os_config to patched_os_config in kwargs. +# It tells db os_config will be patched not be updated. 
@utils.replace_filters( os_config='patched_os_config' ) @@ -461,44 +520,27 @@ def update_host_config(host_id, user=None, session=None, **kwargs): ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_HOST_CONFIG ) def patch_host_config(host_id, user=None, session=None, **kwargs): - host = utils.get_db_object( - session, models.Host, id=host_id - ) - - def os_config_validates(config): - metadata_api.validate_os_config( - session, config, os_id=host.os_id - ) - - @utils.output_validates( - os_config=os_config_validates, - ) - def patch_config_internal(host, **in_kwargs): - return _update_host_config( - session, user, host, **in_kwargs - ) - - return patch_config_internal( - host, **kwargs + """Patch host config.""" + host = _get_host(host_id, session=session) + return _update_host_config( + host, session=session, user=user, **kwargs ) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEL_HOST_CONFIG ) @utils.wrap_to_dict(RESP_CONFIG_FIELDS) def del_host_config(host_id, user=None, session=None): """delete a host config.""" - host = utils.get_db_object( - session, models.Host, id=host_id - ) - is_host_editable(session, host, user) + host = _get_host(host_id, session=session) + check_host_editable(host, user=user) return utils.update_db_object( session, host, os_config={}, config_validated=False ) @@ -508,15 +550,16 @@ def del_host_config(host_id, user=None, session=None): optional_support_keys=SUPPORTED_NETOWORK_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_HOST_NETWORKS ) @utils.wrap_to_dict(RESP_NETWORK_FIELDS) def list_host_networks(host_id, user=None, session=None, **filters): - """Get host networks.""" + """Get host networks for a host.""" + host = 
_get_host(host_id, session=session) return utils.list_db_objects( session, models.HostNetwork, - host_id=host_id, **filters + host_id=host.id, **filters ) @@ -524,7 +567,7 @@ def list_host_networks(host_id, user=None, session=None, **filters): optional_support_keys=SUPPORTED_NETOWORK_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_HOST_NETWORKS ) @utils.wrap_to_dict(RESP_NETWORK_FIELDS) @@ -535,9 +578,34 @@ def list_hostnetworks(user=None, session=None, **filters): ) +def _get_hostnetwork(host_network_id, session=None, **kwargs): + """Get hostnetwork by hostnetwork id.""" + if isinstance(host_network_id, (int, long)): + return utils.get_db_object( + session, models.HostNetwork, + id=host_network_id, **kwargs + ) + raise exception.InvalidParameter( + 'host network id %s type is not int compatible' % host_network_id + ) + + +def _get_host_network(host_id, host_network_id, session=None, **kwargs): + """Get hostnetwork by host id and hostnetwork id.""" + host = _get_host(host_id, session=session) + host_network = _get_hostnetwork(host_network_id, session=session, **kwargs) + if host_network.host_id != host.id: + raise exception.RecordNotExists( + 'host %s does not own host network %s' % ( + host.id, host_network.id + ) + ) + return host_network + + @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_HOST_NETWORKS ) @utils.wrap_to_dict(RESP_NETWORK_FIELDS) @@ -546,31 +614,20 @@ def get_host_network( user=None, session=None, **kwargs ): """Get host network.""" - host_network = utils.get_db_object( - session, models.HostNetwork, - id=host_network_id + return _get_host_network( + host_id, host_network_id, session=session ) - if host_network.host_id != host_id: - raise exception.RecordNotExists( - 'host %s does not own host network %s' % ( - host_id, host_network_id - ) - ) 
- return host_network @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_HOST_NETWORKS ) @utils.wrap_to_dict(RESP_NETWORK_FIELDS) def get_hostnetwork(host_network_id, user=None, session=None, **kwargs): """Get host network.""" - return utils.get_db_object( - session, models.HostNetwork, - id=host_network_id - ) + return _get_hostnetwork(host_network_id, session=session) @utils.supported_filters( @@ -583,37 +640,47 @@ def get_hostnetwork(host_network_id, user=None, session=None, **kwargs): ) @utils.wrap_to_dict(RESP_NETWORK_FIELDS) def _add_host_network( - session, user, host_id, exception_when_existing=True, - interface=None, ip=None, **kwargs + host_id, exception_when_existing=True, + session=None, user=None, interface=None, ip=None, **kwargs ): - host = utils.get_db_object( - session, models.Host, id=host_id - ) - is_host_editable(session, host, user) + """Add hostnetwork to a host.""" + host = _get_host(host_id, session=session) + check_host_editable(host, user=user) return utils.add_db_object( session, models.HostNetwork, exception_when_existing, - host_id, interface, ip=ip, **kwargs + host.id, interface, ip=ip, **kwargs ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_HOST_NETWORK ) def add_host_network( host_id, exception_when_existing=True, interface=None, user=None, session=None, **kwargs ): - """Create a host network.""" + """Create a hostnetwork to a host.""" return _add_host_network( - session, user, host_id, exception_when_existing, - interface=interface, **kwargs + host_id, + exception_when_existing, + interface=interface, session=session, user=user, **kwargs + ) + + +def _get_hostnetwork_by_ip( + ip, session=None, **kwargs +): + ip_int = long(netaddr.IPAddress(ip)) + return utils.get_db_object( + session, models.HostNetwork, + ip_int=ip_int, **kwargs ) 
@database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_HOST_NETWORK ) def add_host_networks( @@ -625,18 +692,18 @@ def add_host_networks( failed_hosts = [] for host_data in data: host_id = host_data['host_id'] + host = _get_host(host_id, session=session) networks = host_data['networks'] host_networks = [] failed_host_networks = [] for network in networks: - ip_int = long(netaddr.IPAddress(network['ip'])) - host_network = utils.get_db_object( - session, models.HostNetwork, False, - ip_int=ip_int + host_network = _get_hostnetwork_by_ip( + network['ip'], session=session, + exception_when_missing=False ) if ( host_network and not ( - host_network.host_id == host_id and + host_network.host_id == host.id and host_network.interface == network['interface'] ) ): @@ -646,14 +713,14 @@ def add_host_networks( failed_host_networks.append(network) else: host_networks.append(_add_host_network( - session, user, host_id, exception_when_existing, - **network + host.id, exception_when_existing, + session=session, user=user, **network )) if host_networks: - hosts.append({'host_id': host_id, 'networks': host_networks}) + hosts.append({'host_id': host.id, 'networks': host_networks}) if failed_host_networks: failed_hosts.append({ - 'host_id': host_id, 'networks': failed_host_networks + 'host_id': host.id, 'networks': failed_host_networks }) return { 'hosts': hosts, @@ -663,9 +730,10 @@ def add_host_networks( @utils.wrap_to_dict(RESP_NETWORK_FIELDS) def _update_host_network( - session, user, host_network, **kwargs + host_network, session=None, user=None, **kwargs ): - is_host_editable(session, host_network.host, user) + """Update host network.""" + check_host_editable(host_network.host, user=user) return utils.update_db_object(session, host_network, **kwargs) @@ -677,25 +745,18 @@ def _update_host_network( ip=utils.check_ip ) @database.run_in_session() -@user_api.check_user_permission_in_session( 
+@user_api.check_user_permission( permission.PERMISSION_ADD_HOST_NETWORK ) def update_host_network( host_id, host_network_id, user=None, session=None, **kwargs ): - """Update a host network.""" - host_network = utils.get_db_object( - session, models.HostNetwork, - id=host_network_id + """Update a host network by host id and host network id.""" + host_network = _get_host_network( + host_id, host_network_id, session=session ) - if host_network.host_id != host_id: - raise exception.RecordNotExists( - 'host %s does not own host network %s' % ( - host_id, host_network_id - ) - ) return _update_host_network( - session, user, host_network, **kwargs + host_network, session=session, user=user, **kwargs ) @@ -707,22 +768,22 @@ def update_host_network( ip=utils.check_ip ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_HOST_NETWORK ) def update_hostnetwork(host_network_id, user=None, session=None, **kwargs): - """Update a host network.""" - host_network = utils.get_db_object( - session, models.HostNetwork, id=host_network_id + """Update a host network by host network id.""" + host_network = _get_hostnetwork( + host_network_id, session=session ) return _update_host_network( - session, user, host_network, **kwargs + host_network, session=session, user=user, **kwargs ) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEL_HOST_NETWORK ) @utils.wrap_to_dict(RESP_NETWORK_FIELDS) @@ -730,47 +791,38 @@ def del_host_network( host_id, host_network_id, user=None, session=None, **kwargs ): - """Delete a host network.""" - host_network = utils.get_db_object( - session, models.HostNetwork, - id=host_network_id + """Delete a host network by host id and host network id.""" + host_network = _get_host_network( + host_id, host_network_id, session=session ) - if host_network.host_id != host_id: - raise 
exception.RecordNotExists( - 'host %s does not own host network %s' % ( - host_id, host_network_id - ) - ) - is_host_editable(session, host_network.host, user) + check_host_editable(host_network.host, user=user) return utils.del_db_object(session, host_network) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEL_HOST_NETWORK ) @utils.wrap_to_dict(RESP_NETWORK_FIELDS) def del_hostnetwork(host_network_id, user=None, session=None, **kwargs): - """Delete a host network.""" - host_network = utils.get_db_object( - session, models.HostNetwork, id=host_network_id + """Delete a host network by host network id.""" + host_network = _get_hostnetwork( + host_network_id, session=session ) - is_host_editable(session, host_network.host, user) + check_host_editable(host_network.host, user=user) return utils.del_db_object(session, host_network) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_GET_HOST_STATE ) @utils.wrap_to_dict(RESP_STATE_FIELDS) def get_host_state(host_id, user=None, session=None, **kwargs): """Get host state info.""" - return utils.get_db_object( - session, models.Host, id=host_id - ).state_dict() + return _get_host(host_id, session=session).state @utils.supported_filters( @@ -778,54 +830,53 @@ def get_host_state(host_id, user=None, session=None, **kwargs): ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_HOST_STATE ) @utils.wrap_to_dict(RESP_STATE_FIELDS) def update_host_state(host_id, user=None, session=None, **kwargs): """Update a host state.""" - host = utils.get_db_object( - session, models.Host, id=host_id - ) + host = _get_host(host_id, session=session) utils.update_db_object(session, host.state, **kwargs) - return 
host.state_dict() + return host.state +@util.deprecated @utils.supported_filters( optional_support_keys=UPDATED_STATE_INTERNAL_FIELDS, ignore_support_keys=IGNORE_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_HOST_STATE ) @utils.wrap_to_dict(['status', 'host']) def update_host_state_internal( - hostname, from_database_only=False, + host_id, from_database_only=False, user=None, session=None, **kwargs ): - """Update a host state.""" - if isinstance(hostname, (int, long)): - host = utils.get_db_object( - session, models.Host, id=hostname - ) - else: - host = utils.get_db_object( - session, models.Host, name=hostname - ) + """Update a host state. + + This function is called when host os is installed. + If from_database_only, the state is updated in database. + Otherwise a celery task sent to os installer and package installer + to do some future actions. + """ + # TODO(xicheng): should be merged into update_host_state + host = _get_host(host_id, session=session) if 'ready' in kwargs and kwargs['ready'] and not host.state.ready: ready_triggered = True else: ready_triggered = False - clusterhost_ready = {} - cluster_os_ready = {} + clusterhosts_ready = {} + clusters_os_ready = {} if ready_triggered: for clusterhost in host.clusterhosts: cluster = clusterhost.cluster - if cluster.distributed_system: - clusterhost_ready[cluster.id] = False + if cluster.flavor_name: + clusterhosts_ready[cluster.id] = False else: - clusterhost_ready[cluster.id] = True + clusterhosts_ready[cluster.id] = True all_os_ready = True for clusterhost_in_cluster in cluster.clusterhosts: host_in_cluster = clusterhost_in_cluster.host @@ -833,13 +884,13 @@ def update_host_state_internal( continue if not host_in_cluster.state.ready: all_os_ready = False - cluster_os_ready[cluster.id] = all_os_ready - logging.info('host %s ready: %s', hostname, ready_triggered) - logging.info("clusterhost_ready is: %s", 
clusterhost_ready) - logging.info("cluster_os_ready is %s", cluster_os_ready) + clusters_os_ready[cluster.id] = all_os_ready + logging.debug('host %s ready: %s', host_id, ready_triggered) + logging.debug("clusterhosts_ready is: %s", clusterhosts_ready) + logging.debug("clusters_os_ready is %s", clusters_os_ready) if not ready_triggered or from_database_only: - logging.info('%s state is set to %s', host.name, kwargs) + logging.debug('%s state is set to %s', host.name, kwargs) utils.update_db_object(session, host.state, **kwargs) if not host.state.ready: for clusterhost in host.clusterhosts: @@ -855,17 +906,17 @@ def update_host_state_internal( celery_client.celery.send_task( 'compass.tasks.os_installed', ( - host.id, clusterhost_ready, - cluster_os_ready + host.id, clusterhosts_ready, + clusters_os_ready ) ) - status = '%s: clusterhost ready %s cluster os ready %s' % ( - host.name, clusterhost_ready, cluster_os_ready + status = '%s: clusterhosts ready %s clusters os ready %s' % ( + host.name, clusterhosts_ready, clusters_os_ready ) logging.info('action status: %s', status) return { 'status': status, - 'host': host.state_dict() + 'host': host.state } @@ -874,8 +925,17 @@ def update_host_state_internal( @utils.wrap_to_dict(RESP_LOG_FIELDS) def get_host_log_histories(host_id, user=None, session=None, **kwargs): """Get host log history.""" + host = _get_host(host_id, session=session) return utils.list_db_objects( - session, models.HostLogHistory, id=host_id + session, models.HostLogHistory, id=host.id, **kwargs + ) + + +def _get_host_log_history(host_id, filename, session=None, **kwargs): + host = _get_host(host_id, session=session) + return utils.get_db_object( + session, models.HostLogHistory, id=host.id, + filename=filename, **kwargs ) @@ -884,8 +944,8 @@ def get_host_log_histories(host_id, user=None, session=None, **kwargs): @utils.wrap_to_dict(RESP_LOG_FIELDS) def get_host_log_history(host_id, filename, user=None, session=None, **kwargs): """Get host log 
history.""" - return utils.get_db_object( - session, models.HostLogHistory, id=host_id, filename=filename + return _get_host_log_history( + host_id, filename, session=session ) @@ -900,8 +960,8 @@ def update_host_log_history( session=None, **kwargs ): """Update a host log history.""" - host_log_history = utils.get_db_object( - session, models.HostLogHistory, id=host_id, filename=filename + host_log_history = _get_host_log_history( + host_id, filename, session=session ) return utils.update_db_object(session, host_log_history, **kwargs) @@ -918,15 +978,16 @@ def add_host_log_history( filename=None, user=None, session=None, **kwargs ): """add a host log history.""" + host = _get_host(host_id, session=session) return utils.add_db_object( session, models.HostLogHistory, exception_when_existing, - host_id, filename, **kwargs + host.id, filename, **kwargs ) @utils.supported_filters(optional_support_keys=['poweron']) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEPLOY_HOST ) @utils.wrap_to_dict( @@ -938,13 +999,11 @@ def poweron_host( ): """power on host.""" from compass.tasks import client as celery_client - host = utils.get_db_object( - session, models.Host, id=host_id - ) - is_host_validated(session, host) + host = _get_host(host_id, session=session) + check_host_validated(host) celery_client.celery.send_task( 'compass.tasks.poweron_host', - (host_id,) + (host.id,) ) return { 'status': 'poweron %s action sent' % host.name, @@ -954,7 +1013,7 @@ def poweron_host( @utils.supported_filters(optional_support_keys=['poweroff']) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEPLOY_HOST ) @utils.wrap_to_dict( @@ -966,13 +1025,11 @@ def poweroff_host( ): """power off host.""" from compass.tasks import client as celery_client - host = utils.get_db_object( - session, models.Host, id=host_id - ) - 
is_host_validated(session, host) + host = _get_host(host_id, session=session) + check_host_validated(host) celery_client.celery.send_task( 'compass.tasks.poweroff_host', - (host_id,) + (host.id,) ) return { 'status': 'poweroff %s action sent' % host.name, @@ -982,7 +1039,7 @@ def poweroff_host( @utils.supported_filters(optional_support_keys=['reset']) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEPLOY_HOST ) @utils.wrap_to_dict( @@ -994,13 +1051,11 @@ def reset_host( ): """reset host.""" from compass.tasks import client as celery_client - host = utils.get_db_object( - session, models.Host, id=host_id - ) - is_host_validated(session, host) + host = _get_host(host_id, session=session) + check_host_validated(host) celery_client.celery.send_task( 'compass.tasks.reset_host', - (host_id,) + (host.id,) ) return { 'status': 'reset %s action sent' % host.name, diff --git a/compass/db/api/installer.py b/compass/db/api/installer.py deleted file mode 100644 index 59ab19b9..00000000 --- a/compass/db/api/installer.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2014 Huawei Technologies Co. Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Adapter database operations.""" -import logging -import os - -from compass.db.api import database -from compass.db.api import utils -from compass.db import exception -from compass.db import models - -from compass.utils import setting_wrapper as setting -from compass.utils import util - - -def _add_installers(session, model, configs, exception_when_existing=True): - installers = [] - for config in configs: - installers.append(utils.add_db_object( - session, model, - exception_when_existing, config['INSTANCE_NAME'], - name=config['NAME'], - settings=config.get('SETTINGS', {}) - )) - return installers - - -def add_os_installers_internal(session, exception_when_existing=True): - configs = util.load_configs(setting.OS_INSTALLER_DIR) - return _add_installers( - session, models.OSInstaller, configs, - exception_when_existing=exception_when_existing - ) - - -def add_package_installers_internal(session, exception_when_existing=True): - configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR) - return _add_installers( - session, models.PackageInstaller, configs, - exception_when_existing=exception_when_existing - ) diff --git a/compass/db/api/machine.py b/compass/db/api/machine.py index 6bf066a7..ef8f5c21 100644 --- a/compass/db/api/machine.py +++ b/compass/db/api/machine.py @@ -14,6 +14,7 @@ """Switch database operations.""" import logging +import re from compass.db.api import database from compass.db.api import permission @@ -43,9 +44,26 @@ RESP_DEPLOY_FIELDS = [ ] +def _get_machine(machine_id, session=None, **kwargs): + """Get machine by id.""" + if isinstance(machine_id, (int, long)): + return utils.get_db_object( + session, models.Machine, + id=machine_id, **kwargs + ) + raise exception.InvalidParameter( + 'machine id %s type is not int compatible' % machine_id + ) + + +def get_machine_internal(machine_id, session=None, **kwargs): + """Helper function to other files under db/api.""" + return _get_machine(machine_id, session=session, **kwargs) + + 
@utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_MACHINES ) @utils.wrap_to_dict(RESP_FIELDS) @@ -53,10 +71,10 @@ def get_machine( machine_id, exception_when_missing=True, user=None, session=None, **kwargs ): - """get field dict of a machine.""" - return utils.get_db_object( - session, models.Machine, - exception_when_missing, id=machine_id + """get a machine.""" + return _get_machine( + machine_id, session=session, + exception_when_missing=exception_when_missing ) @@ -64,7 +82,7 @@ def get_machine( optional_support_keys=SUPPORTED_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_MACHINES ) @utils.output_filters( @@ -80,9 +98,9 @@ def list_machines(user=None, session=None, **filters): @utils.wrap_to_dict(RESP_FIELDS) -def _update_machine(session, machine_id, **kwargs): +def _update_machine(machine_id, session=None, **kwargs): """Update a machine.""" - machine = utils.get_db_object(session, models.Machine, id=machine_id) + machine = _get_machine(machine_id, session=session) return utils.update_db_object(session, machine, **kwargs) @@ -92,15 +110,19 @@ def _update_machine(session, machine_id, **kwargs): ) @utils.input_validates(ipmi_credentials=utils.check_ipmi_credentials) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_MACHINE ) def update_machine(machine_id, user=None, session=None, **kwargs): + """Update a machine.""" return _update_machine( - session, machine_id, **kwargs + machine_id, session=session, **kwargs ) +# replace [ipmi_credentials, tag, location] to +# [patched_ipmi_credentials, patched_tag, patched_location] +# in kwargs. It tells db these fields will be patched. 
@utils.replace_filters( ipmi_credentials='patched_ipmi_credentials', tag='patched_tag', @@ -112,24 +134,18 @@ def update_machine(machine_id, user=None, session=None, **kwargs): ) @database.run_in_session() @utils.output_validates(ipmi_credentials=utils.check_ipmi_credentials) -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_MACHINE ) def patch_machine(machine_id, user=None, session=None, **kwargs): + """Patch a machine.""" return _update_machine( - session, machine_id, **kwargs + machine_id, session=session, **kwargs ) -@utils.supported_filters() -@database.run_in_session() -@user_api.check_user_permission_in_session( - permission.PERMISSION_DEL_MACHINE -) -@utils.wrap_to_dict(RESP_FIELDS) -def del_machine(machine_id, user=None, session=None, **kwargs): - """Delete a machine.""" - machine = utils.get_db_object(session, models.Machine, id=machine_id) +def _check_machine_deletable(machine): + """Check a machine deletable.""" if machine.host: host = machine.host raise exception.NotAcceptable( @@ -137,12 +153,24 @@ def del_machine(machine_id, user=None, session=None, **kwargs): machine.mac, host.name ) ) + + +@utils.supported_filters() +@database.run_in_session() +@user_api.check_user_permission( + permission.PERMISSION_DEL_MACHINE +) +@utils.wrap_to_dict(RESP_FIELDS) +def del_machine(machine_id, user=None, session=None, **kwargs): + """Delete a machine.""" + machine = _get_machine(machine_id, session=session) + _check_machine_deletable(machine) return utils.del_db_object(session, machine) @utils.supported_filters(optional_support_keys=['poweron']) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEPLOY_HOST ) @utils.wrap_to_dict( @@ -154,8 +182,8 @@ def poweron_machine( ): """power on machine.""" from compass.tasks import client as celery_client - machine = utils.get_db_object( - session, models.Machine, id=machine_id + machine = 
_get_machine( + machine_id, session=session ) celery_client.celery.send_task( 'compass.tasks.poweron_machine', @@ -169,7 +197,7 @@ def poweron_machine( @utils.supported_filters(optional_support_keys=['poweroff']) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEPLOY_HOST ) @utils.wrap_to_dict( @@ -181,8 +209,8 @@ def poweroff_machine( ): """power off machine.""" from compass.tasks import client as celery_client - machine = utils.get_db_object( - session, models.Machine, id=machine_id + machine = _get_machine( + machine_id, session=session ) celery_client.celery.send_task( 'compass.tasks.poweroff_machine', @@ -196,7 +224,7 @@ def poweroff_machine( @utils.supported_filters(optional_support_keys=['reset']) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEPLOY_HOST ) @utils.wrap_to_dict( @@ -208,8 +236,8 @@ def reset_machine( ): """reset machine.""" from compass.tasks import client as celery_client - machine = utils.get_db_object( - session, models.Machine, id=machine_id + machine = _get_machine( + machine_id, session=session ) celery_client.celery.send_task( 'compass.tasks.reset_machine', diff --git a/compass/db/api/metadata.py b/compass/db/api/metadata.py index bbcf9e21..16310c8c 100644 --- a/compass/db/api/metadata.py +++ b/compass/db/api/metadata.py @@ -17,6 +17,7 @@ import copy import logging import string +from compass.db.api import adapter as adapter_api from compass.db.api import database from compass.db.api import utils from compass.db import callback as metadata_callback @@ -29,26 +30,39 @@ from compass.utils import setting_wrapper as setting from compass.utils import util -def _add_field_internal(session, model, configs): - fields = [] +OS_FIELDS = None +PACKAGE_FIELDS = None +FLAVOR_FIELDS = None +OSES_METADATA = None +PACKAGES_METADATA = None +FLAVORS_METADATA = None +OSES_METADATA_UI_CONVERTERS = 
None +FLAVORS_METADATA_UI_CONVERTERS = None + + +def _get_field_from_configuration(configs): + """Get fields from configurations.""" + fields = {} for config in configs: if not isinstance(config, dict): raise exception.InvalidParameter( 'config %s is not dict' % config ) - fields.append(utils.add_db_object( - session, model, False, - config['NAME'], - field_type=config.get('FIELD_TYPE', basestring), - display_type=config.get('DISPLAY_TYPE', 'text'), - validator=config.get('VALIDATOR', None), - js_validator=config.get('JS_VALIDATOR', None), - description=config.get('DESCRIPTION', None) - )) + field_name = config['NAME'] + fields[field_name] = { + 'name': field_name, + 'id': field_name, + 'field_type': config.get('FIELD_TYPE', basestring), + 'display_type': config.get('DISPLAY_TYPE', 'text'), + 'validator': config.get('VALIDATOR', None), + 'js_validator': config.get('JS_VALIDATOR', None), + 'description': config.get('DESCRIPTION', field_name) + } return fields -def add_os_field_internal(session): +def _get_os_fields_from_configuration(): + """Get os fields from os field config dir.""" env_locals = {} env_locals.update(metadata_validator.VALIDATOR_LOCALS) env_locals.update(metadata_callback.CALLBACK_LOCALS) @@ -56,12 +70,13 @@ def add_os_field_internal(session): setting.OS_FIELD_DIR, env_locals=env_locals ) - return _add_field_internal( - session, models.OSConfigField, configs + return _get_field_from_configuration( + configs ) -def add_package_field_internal(session): +def _get_package_fields_from_configuration(): + """Get package fields from package field config dir.""" env_locals = {} env_locals.update(metadata_validator.VALIDATOR_LOCALS) env_locals.update(metadata_callback.CALLBACK_LOCALS) @@ -69,12 +84,13 @@ def add_package_field_internal(session): setting.PACKAGE_FIELD_DIR, env_locals=env_locals ) - return _add_field_internal( - session, models.PackageConfigField, configs + return _get_field_from_configuration( + configs ) -def 
add_flavor_field_internal(session): +def _get_flavor_fields_from_configuration(): + """Get flavor fields from flavor field config dir.""" env_locals = {} env_locals.update(metadata_validator.VALIDATOR_LOCALS) env_locals.update(metadata_callback.CALLBACK_LOCALS) @@ -82,26 +98,38 @@ def add_flavor_field_internal(session): setting.FLAVOR_FIELD_DIR, env_locals=env_locals ) - return _add_field_internal( - session, models.FlavorConfigField, configs + return _get_field_from_configuration( + configs ) -def _add_metadata( - session, field_model, metadata_model, id, path, name, config, - exception_when_existing=True, parent=None, **kwargs +def _get_metadata_from_configuration( + path, name, config, + fields, **kwargs ): + """Recursively get metadata from configuration. + + Args: + path: used to indicate the path to the root element. + mainly for trouble shooting. + name: the key of the metadata section. + config: the value of the metadata section. + fields: all fields defined in os fields or package fields dir. + """ if not isinstance(config, dict): raise exception.InvalidParameter( '%s config %s is not dict' % (path, config) ) metadata_self = config.get('_self', {}) if 'field' in metadata_self: - field = utils.get_db_object( - session, field_model, field=metadata_self['field'] - ) + field_name = metadata_self['field'] + field = fields[field_name] else: - field = None + field = {} + # mapping to may contain $ like $partition. Here we replace the + # $partition to the key of the correspendent config. The backend then + # can use this kind of feature to support multi partitions when we + # only declare the partition metadata in one place. 
mapping_to_template = metadata_self.get('mapping_to', None) if mapping_to_template: mapping_to = string.Template( @@ -111,33 +139,54 @@ def _add_metadata( ) else: mapping_to = None - metadata = utils.add_db_object( - session, metadata_model, exception_when_existing, - id, path, name=name, parent=parent, field=field, - display_name=metadata_self.get('display_name', name), - description=metadata_self.get('description', None), - is_required=metadata_self.get('is_required', False), - required_in_whole_config=metadata_self.get( + self_metadata = { + 'name': name, + 'display_name': metadata_self.get('display_name', name), + 'field_type': field.get('field_type', dict), + 'display_type': field.get('display_type', None), + 'description': metadata_self.get( + 'description', field.get('description', None) + ), + 'is_required': metadata_self.get('is_required', False), + 'required_in_whole_config': metadata_self.get( 'required_in_whole_config', False), - mapping_to=mapping_to, - validator=metadata_self.get('validator', None), - js_validator=metadata_self.get('js_validator', None), - default_value=metadata_self.get('default_value', None), - default_callback=metadata_self.get('default_callback', None), - default_callback_params=metadata_self.get( + 'mapping_to': mapping_to, + 'validator': metadata_self.get( + 'validator', field.get('validator', None) + ), + 'js_validator': metadata_self.get( + 'js_validator', field.get('js_validator', None) + ), + 'default_value': metadata_self.get('default_value', None), + 'default_callback': metadata_self.get('default_callback', None), + 'default_callback_params': metadata_self.get( 'default_callback_params', {}), - options=metadata_self.get('options', None), - options_callback=metadata_self.get('options_callback', None), - options_callback_params=metadata_self.get( + 'options': metadata_self.get('options', None), + 'options_callback': metadata_self.get('options_callback', None), + 'options_callback_params': metadata_self.get( 
'options_callback_params', {}), - autofill_callback=metadata_self.get( + 'autofill_callback': metadata_self.get( 'autofill_callback', None), - autofill_callback_params=metadata_self.get( + 'autofill_callback_params': metadata_self.get( 'autofill_callback_params', {}), - required_in_options=metadata_self.get( - 'required_in_options', False), - **kwargs - ) + 'required_in_options': metadata_self.get( + 'required_in_options', False) + } + self_metadata.update(kwargs) + metadata = {'_self': self_metadata} + # Key extension used to do two things: + # one is to return the extended metadata that $ + # will be replace to possible extensions. + # The other is to record the $ to extended value + # and used in future mapping_to subsititution. + # TODO(grace): select proper name instead of key_extensions if + # you think it is better. + # Suppose key_extension is {'$partition': ['/var', '/']} for $partition + # the metadata for $partition will be mapped to { + # '/var': ..., '/': ...} and kwargs={'partition': '/var'} and + # kwargs={'partition': '/'} will be parsed to recursive metadata parsing + # for sub metadata under '/var' and '/'. Then in the metadata parsing + # for the sub metadata, this kwargs will be used to substitute mapping_to. 
key_extensions = metadata_self.get('key_extensions', {}) general_keys = [] for key, value in config.items(): @@ -160,20 +209,16 @@ def _add_metadata( ) sub_kwargs = dict(kwargs) sub_kwargs[key[1:]] = extended_key - _add_metadata( - session, field_model, metadata_model, - id, '%s/%s' % (path, extended_key), extended_key, value, - exception_when_existing=exception_when_existing, - parent=metadata, **sub_kwargs + metadata[extended_key] = _get_metadata_from_configuration( + '%s/%s' % (path, extended_key), extended_key, value, + fields, **sub_kwargs ) else: if key.startswith('$'): general_keys.append(key) - _add_metadata( - session, field_model, metadata_model, - id, '%s/%s' % (path, key), key, value, - exception_when_existing=exception_when_existing, - parent=metadata, **kwargs + metadata[key] = _get_metadata_from_configuration( + '%s/%s' % (path, key), key, value, + fields, **kwargs ) if len(general_keys) > 1: raise exception.InvalidParameter( @@ -184,8 +229,9 @@ def _add_metadata( return metadata -def add_os_metadata_internal(session, exception_when_existing=True): - os_metadatas = [] +def _get_oses_metadata_from_configuration(): + """Get os metadata from os metadata config dir.""" + oses_metadata = {} env_locals = {} env_locals.update(metadata_validator.VALIDATOR_LOCALS) env_locals.update(metadata_callback.CALLBACK_LOCALS) @@ -194,22 +240,28 @@ def add_os_metadata_internal(session, exception_when_existing=True): env_locals=env_locals ) for config in configs: - os = utils.get_db_object( - session, models.OperatingSystem, name=config['OS'] - ) + os_name = config['OS'] + os_metadata = oses_metadata.setdefault(os_name, {}) for key, value in config['METADATA'].items(): - os_metadatas.append(_add_metadata( - session, models.OSConfigField, - models.OSConfigMetadata, - os.id, key, key, value, - exception_when_existing=exception_when_existing, - parent=None - )) - return os_metadatas + os_metadata[key] = _get_metadata_from_configuration( + key, key, value, OS_FIELDS + ) + + 
oses = adapter_api.OSES + parents = {} + for os_name, os in oses.items(): + parent = os.get('parent', None) + parents[os_name] = parent + for os_name, os in oses.items(): + oses_metadata[os_name] = util.recursive_merge_dict( + os_name, oses_metadata, parents + ) + return oses_metadata -def add_package_metadata_internal(session, exception_when_existing=True): - package_metadatas = [] +def _get_packages_metadata_from_configuration(): + """Get package metadata from package metadata config dir.""" + packages_metadata = {} env_locals = {} env_locals.update(metadata_validator.VALIDATOR_LOCALS) env_locals.update(metadata_callback.CALLBACK_LOCALS) @@ -218,22 +270,27 @@ def add_package_metadata_internal(session, exception_when_existing=True): env_locals=env_locals ) for config in configs: - adapter = utils.get_db_object( - session, models.Adapter, name=config['ADAPTER'] - ) + adapter_name = config['ADAPTER'] + package_metadata = packages_metadata.setdefault(adapter_name, {}) for key, value in config['METADATA'].items(): - package_metadatas.append(_add_metadata( - session, models.PackageConfigField, - models.PackageConfigMetadata, - adapter.id, key, key, value, - exception_when_existing=exception_when_existing, - parent=None - )) - return package_metadatas + package_metadata[key] = _get_metadata_from_configuration( + key, key, value, PACKAGE_FIELDS + ) + adapters = adapter_api.ADAPTERS + parents = {} + for adapter_name, adapter in adapters.items(): + parent = adapter.get('parent', None) + parents[adapter_name] = parent + for adapter_name, adapter in adapters.items(): + packages_metadata[adapter_name] = util.recursive_merge_dict( + adapter_name, packages_metadata, parents + ) + return packages_metadata -def add_flavor_metadata_internal(session, exception_when_existing=True): - flavor_metadatas = [] +def _get_flavors_metadata_from_configuration(): + """Get flavor metadata from flavor metadata config dir.""" + flavors_metadata = {} env_locals = {} 
env_locals.update(metadata_validator.VALIDATOR_LOCALS) env_locals.update(metadata_callback.CALLBACK_LOCALS) @@ -242,18 +299,26 @@ def add_flavor_metadata_internal(session, exception_when_existing=True): env_locals=env_locals ) for config in configs: - flavor = utils.get_db_object( - session, models.AdapterFlavor, name=config['FLAVOR'] - ) + adapter_name = config['ADAPTER'] + flavor_name = config['FLAVOR'] + flavor_metadata = flavors_metadata.setdefault( + adapter_name, {} + ).setdefault(flavor_name, {}) for key, value in config['METADATA'].items(): - flavor_metadatas.append(_add_metadata( - session, models.FlavorConfigField, - models.FlavorConfigMetadata, - flavor.id, key, key, value, - exception_when_existing=exception_when_existing, - parent=None - )) - return flavor_metadatas + flavor_metadata[key] = _get_metadata_from_configuration( + key, key, value, FLAVOR_FIELDS + ) + + packages_metadata = PACKAGES_METADATA + adapters_flavors = adapter_api.ADAPTERS_FLAVORS + for adapter_name, adapter_flavors in adapters_flavors.items(): + package_metadata = packages_metadata.get(adapter_name, {}) + for flavor_name, flavor in adapter_flavors.items(): + flavor_metadata = flavors_metadata.setdefault( + adapter_name, {} + ).setdefault(flavor_name, {}) + util.merge_dict(flavor_metadata, package_metadata, override=False) + return flavors_metadata def _filter_metadata(metadata, **kwargs): @@ -295,282 +360,158 @@ def _filter_metadata(metadata, **kwargs): return filtered_metadata -def get_package_metadatas_internal(session): +def _load_metadata(force_reload=False): + """Load metadata information into memory. + + If force_reload, the metadata information will be reloaded + even if the metadata is already loaded. 
+ """ + adapter_api.load_adapters_internal(force_reload=force_reload) + global OS_FIELDS + if force_reload or OS_FIELDS is None: + OS_FIELDS = _get_os_fields_from_configuration() + global PACKAGE_FIELDS + if force_reload or PACKAGE_FIELDS is None: + PACKAGE_FIELDS = _get_package_fields_from_configuration() + global FLAVOR_FIELDS + if force_reload or FLAVOR_FIELDS is None: + FLAVOR_FIELDS = _get_flavor_fields_from_configuration() + global OSES_METADATA + if force_reload or OSES_METADATA is None: + OSES_METADATA = _get_oses_metadata_from_configuration() + global PACKAGES_METADATA + if force_reload or PACKAGES_METADATA is None: + PACKAGES_METADATA = _get_packages_metadata_from_configuration() + global FLAVORS_METADATA + if force_reload or FLAVORS_METADATA is None: + FLAVORS_METADATA = _get_flavors_metadata_from_configuration() + global OSES_METADATA_UI_CONVERTERS + if force_reload or OSES_METADATA_UI_CONVERTERS is None: + OSES_METADATA_UI_CONVERTERS = ( + _get_oses_metadata_ui_converters_from_configuration() + ) + global FLAVORS_METADATA_UI_CONVERTERS + if force_reload or FLAVORS_METADATA_UI_CONVERTERS is None: + FLAVORS_METADATA_UI_CONVERTERS = ( + _get_flavors_metadata_ui_converters_from_configuration() + ) + + +def _get_oses_metadata_ui_converters_from_configuration(): + """Get os metadata ui converters from os metadata mapping config dir. + + os metadata ui converter is used to convert os metadata to + the format UI can understand and show. 
+ """ + oses_metadata_ui_converters = {} + configs = util.load_configs(setting.OS_MAPPING_DIR) + for config in configs: + os_name = config['OS'] + oses_metadata_ui_converters[os_name] = config.get('CONFIG_MAPPING', {}) + + oses = adapter_api.OSES + parents = {} + for os_name, os in oses.items(): + parent = os.get('parent', None) + parents[os_name] = parent + for os_name, os in oses.items(): + oses_metadata_ui_converters[os_name] = util.recursive_merge_dict( + os_name, oses_metadata_ui_converters, parents + ) + return oses_metadata_ui_converters + + +def _get_flavors_metadata_ui_converters_from_configuration(): + """Get flavor metadata ui converters from flavor mapping config dir.""" + flavors_metadata_ui_converters = {} + configs = util.load_configs(setting.FLAVOR_MAPPING_DIR) + for config in configs: + adapter_name = config['ADAPTER'] + flavor_name = config['FLAVOR'] + flavors_metadata_ui_converters.setdefault( + adapter_name, {} + )[flavor_name] = config.get('CONFIG_MAPPING', {}) + adapters = adapter_api.ADAPTERS + parents = {} + for adapter_name, adapter in adapters.items(): + parent = adapter.get('parent', None) + parents[adapter_name] = parent + for adapter_name, adapter in adapters.items(): + flavors_metadata_ui_converters[adapter_name] = ( + util.recursive_merge_dict( + adapter_name, flavors_metadata_ui_converters, parents + ) + ) + return flavors_metadata_ui_converters + + +def get_packages_metadata_internal(force_reload=False): + """Get deployable package metadata.""" + _load_metadata(force_reload=force_reload) metadata_mapping = {} - adapters = utils.list_db_objects( - session, models.Adapter - ) - for adapter in adapters: - if adapter.deployable: - metadata_dict = adapter.metadata_dict() - metadata_mapping[adapter.id] = _filter_metadata( - metadata_dict, session=session + adapters = adapter_api.ADAPTERS + for adapter_name, adapter in adapters.items(): + if adapter.get('deployable'): + metadata_mapping[adapter_name] = _filter_metadata( + 
PACKAGES_METADATA.get(adapter_name, {}) ) else: logging.info( 'ignore metadata since its adapter %s is not deployable', - adapter.id + adapter_name ) return metadata_mapping -def get_flavor_metadatas_internal(session): +def get_flavors_metadata_internal(force_reload=False): + """Get deployable flavor metadata.""" + _load_metadata(force_reload=force_reload) metadata_mapping = {} - flavors = utils.list_db_objects( - session, models.AdapterFlavor - ) - for flavor in flavors: - flavor_metadata_dict = flavor.metadata_dict() - metadata_mapping[flavor.id] = _filter_metadata( - flavor_metadata_dict, session=session - ) - adapters = utils.list_db_objects( - session, models.Adapter, id=flavor.adapter_id - ) - for adapter in adapters: - package_metadata_dict = adapter.metadata_dict() - metadata_mapping[flavor.id].update(_filter_metadata( - package_metadata_dict, session=session - )) + adapters_flavors = adapter_api.ADAPTERS_FLAVORS + for adapter_name, adapter_flavors in adapters_flavors.items(): + adapter = adapter_api.ADAPTERS[adapter_name] + if not adapter.get('deployable'): + logging.info( + 'ignore metadata since its adapter %s is not deployable', + adapter_name + ) + continue + for flavor_name, flavor in adapter_flavors.items(): + flavor_metadata = FLAVORS_METADATA.get( + adapter_name, {} + ).get(flavor_name, {}) + metadata = _filter_metadata(flavor_metadata) + metadata_mapping.setdefault( + adapter_name, {} + )[flavor_name] = metadata return metadata_mapping -def get_os_metadatas_internal(session): +def get_flavors_metadata_ui_converters_internal(force_reload=False): + """Get usable flavor metadata ui converters.""" + _load_metadata(force_reload=force_reload) + return FLAVORS_METADATA_UI_CONVERTERS + + +def get_oses_metadata_internal(force_reload=False): + """Get deployable os metadata.""" + _load_metadata(force_reload=force_reload) metadata_mapping = {} - oses = utils.list_db_objects( - session, models.OperatingSystem - ) - for os in oses: - if os.deployable: - 
metadata_dict = os.metadata_dict() - metadata_mapping[os.id] = _filter_metadata( - metadata_dict, session=session + oses = adapter_api.OSES + for os_name, os in oses.items(): + if os.get('deployable'): + metadata_mapping[os_name] = _filter_metadata( + OSES_METADATA.get(os_name, {}) ) else: logging.info( 'ignore metadata since its os %s is not deployable', - os.id + os_name ) return metadata_mapping -def _validate_self( - config_path, config_key, config, - metadata, whole_check, - **kwargs -): - logging.debug('validate config self %s', config_path) - if '_self' not in metadata: - if isinstance(config, dict): - _validate_config( - config_path, config, metadata, whole_check, **kwargs - ) - return - field_type = metadata['_self'].get('field_type', basestring) - if not isinstance(config, field_type): - raise exception.InvalidParameter( - '%s config type is not %s' % (config_path, field_type) - ) - is_required = metadata['_self'].get( - 'is_required', False - ) - required_in_whole_config = metadata['_self'].get( - 'required_in_whole_config', False - ) - if isinstance(config, basestring): - if config == '' and not is_required and not required_in_whole_config: - # ignore empty config when it is optional - return - required_in_options = metadata['_self'].get( - 'required_in_options', False - ) - options = metadata['_self'].get('options', None) - if required_in_options: - if field_type in [int, basestring, float, bool]: - if options and config not in options: - raise exception.InvalidParameter( - '%s config is not in %s' % (config_path, options) - ) - elif field_type in [list, tuple]: - if options and not set(config).issubset(set(options)): - raise exception.InvalidParameter( - '%s config is not in %s' % (config_path, options) - ) - elif field_type == dict: - if options and not set(config.keys()).issubset(set(options)): - raise exception.InvalidParameter( - '%s config is not in %s' % (config_path, options) - ) - validator = metadata['_self'].get('validator', None) - 
logging.debug('validate by validator %s', validator) - if validator: - if not validator(config_key, config, **kwargs): - raise exception.InvalidParameter( - '%s config is invalid' % config_path - ) - if isinstance(config, dict): - _validate_config( - config_path, config, metadata, whole_check, **kwargs - ) - - -def _validate_config( - config_path, config, metadata, whole_check, - **kwargs -): - logging.debug('validate config %s', config_path) - generals = {} - specified = {} - for key, value in metadata.items(): - if key.startswith('$'): - generals[key] = value - elif key.startswith('_'): - pass - else: - specified[key] = value - config_keys = set(config.keys()) - specified_keys = set(specified.keys()) - intersect_keys = config_keys & specified_keys - not_found_keys = config_keys - specified_keys - redundant_keys = specified_keys - config_keys - for key in redundant_keys: - if '_self' not in specified[key]: - continue - if specified[key]['_self'].get('is_required', False): - raise exception.InvalidParameter( - '%s/%s does not find but it is required' % ( - config_path, key - ) - ) - if ( - whole_check and - specified[key]['_self'].get( - 'required_in_whole_config', False - ) - ): - raise exception.InvalidParameter( - '%s/%s does not find but it is required in whole config' % ( - config_path, key - ) - ) - for key in intersect_keys: - _validate_self( - '%s/%s' % (config_path, key), - key, config[key], specified[key], whole_check, - **kwargs - ) - for key in not_found_keys: - if not generals: - raise exception.InvalidParameter( - 'key %s missing in metadata %s' % ( - key, config_path - ) - ) - for general_key, general_value in generals.items(): - _validate_self( - '%s/%s' % (config_path, key), - key, config[key], general_value, whole_check, - **kwargs - ) - - -def _autofill_self_config( - config_path, config_key, config, - metadata, - **kwargs -): - if '_self' not in metadata: - if isinstance(config, dict): - _autofill_config( - config_path, config, metadata, 
**kwargs - ) - return config - logging.debug( - 'autofill %s by metadata %s', config_path, metadata['_self'] - ) - autofill_callback = metadata['_self'].get( - 'autofill_callback', None - ) - autofill_callback_params = metadata['_self'].get( - 'autofill_callback_params', {} - ) - callback_params = dict(kwargs) - if autofill_callback_params: - callback_params.update(autofill_callback_params) - default_value = metadata['_self'].get( - 'default_value', None - ) - if default_value is not None: - callback_params['default_value'] = default_value - options = metadata['_self'].get( - 'options', None - ) - if options is not None: - callback_params['options'] = options - if autofill_callback: - config = autofill_callback( - config_key, config, **callback_params - ) - if config is None: - new_config = {} - else: - new_config = config - if isinstance(new_config, dict): - _autofill_config( - config_path, new_config, metadata, **kwargs - ) - if new_config: - config = new_config - return config - - -def _autofill_config( - config_path, config, metadata, **kwargs -): - generals = {} - specified = {} - for key, value in metadata.items(): - if key.startswith('$'): - generals[key] = value - elif key.startswith('_'): - pass - else: - specified[key] = value - config_keys = set(config.keys()) - specified_keys = set(specified.keys()) - intersect_keys = config_keys & specified_keys - not_found_keys = config_keys - specified_keys - redundant_keys = specified_keys - config_keys - for key in redundant_keys: - self_config = _autofill_self_config( - '%s/%s' % (config_path, key), - key, None, specified[key], **kwargs - ) - if self_config is not None: - config[key] = self_config - for key in intersect_keys: - config[key] = _autofill_self_config( - '%s/%s' % (config_path, key), - key, config[key], specified[key], - **kwargs - ) - for key in not_found_keys: - for general_key, general_value in generals.items(): - config[key] = _autofill_self_config( - '%s/%s' % (config_path, key), - key, 
config[key], general_value, - **kwargs - ) - return config - - -def validate_config_internal( - config, metadata, whole_check, **kwargs -): - _validate_config('', config, metadata, whole_check, **kwargs) - - -def autofill_config_internal( - config, metadata, **kwargs -): - return _autofill_config('', config, metadata, **kwargs) +def get_oses_metadata_ui_converters_internal(force_reload=False): + """Get usable os metadata ui converters.""" + _load_metadata(force_reload=force_reload) + return OSES_METADATA_UI_CONVERTERS diff --git a/compass/db/api/metadata_holder.py b/compass/db/api/metadata_holder.py index a91900a0..44245347 100644 --- a/compass/db/api/metadata_holder.py +++ b/compass/db/api/metadata_holder.py @@ -15,6 +15,8 @@ """Metadata related object holder.""" import logging +from compass.db.api import adapter as adapter_api +from compass.db.api import adapter_holder as adapter_holder_api from compass.db.api import database from compass.db.api import metadata as metadata_api from compass.db.api import permission @@ -27,94 +29,172 @@ from compass.utils import util RESP_METADATA_FIELDS = [ - 'os_config', 'package_config', 'flavor_config' + 'os_config', 'package_config' ] -RESP_FLAVORS_FIELDS = [ - 'id', 'name', 'display_name', 'template', 'roles' +RESP_UI_METADATA_FIELDS = [ + 'os_global_config', 'flavor_config' ] -@database.run_in_session() -def load_metadatas(session): - load_os_metadatas_internal(session) - load_package_metadatas_internal(session) - load_flavor_metadatas_internal(session) +def load_metadatas(force_reload=False): + """Load metadatas.""" + # TODO(xicheng): today we load metadata in memory as its original + # format in files in metadata.py. We get these inmemory metadata + # and do some translation, store the translated metadata into memory + # too in metadata_holder.py. api can only access the global inmemory + # data in metadata_holder.py. 
+ _load_os_metadatas(force_reload=force_reload) + _load_package_metadatas(force_reload=force_reload) + _load_flavor_metadatas(force_reload=force_reload) + _load_os_metadata_ui_converters(force_reload=force_reload) + _load_flavor_metadata_ui_converters(force_reload=force_reload) -def load_os_metadatas_internal(session): - global OS_METADATA_MAPPING - logging.info('load os metadatas into memory') - OS_METADATA_MAPPING = metadata_api.get_os_metadatas_internal(session) - - -def load_package_metadatas_internal(session): - global PACKAGE_METADATA_MAPPING - logging.info('load package metadatas into memory') - PACKAGE_METADATA_MAPPING = ( - metadata_api.get_package_metadatas_internal(session) - ) - - -def load_flavor_metadatas_internal(session): - global FLAVOR_METADATA_MAPPING - logging.info('load flavor metadatas into memory') - FLAVOR_METADATA_MAPPING = ( - metadata_api.get_flavor_metadatas_internal(session) - ) - - -OS_METADATA_MAPPING = {} -PACKAGE_METADATA_MAPPING = {} -FLAVOR_METADATA_MAPPING = {} - - -def _validate_config( - config, id, id_name, metadata_mapping, whole_check, **kwargs -): - if id not in metadata_mapping: - raise exception.InvalidParameter( - '%s id %s is not found in metadata mapping' % (id_name, id) +def _load_os_metadata_ui_converters(force_reload=False): + global OS_METADATA_UI_CONVERTERS + if force_reload or OS_METADATA_UI_CONVERTERS is None: + logging.info('load os metadatas ui converters into memory') + OS_METADATA_UI_CONVERTERS = ( + metadata_api.get_oses_metadata_ui_converters_internal( + force_reload=force_reload + ) ) - metadatas = metadata_mapping[id] - metadata_api.validate_config_internal( - config, metadatas, whole_check, **kwargs - ) + + +def _load_os_metadatas(force_reload=False): + """Load os metadata from inmemory db and map it by os_id.""" + global OS_METADATA_MAPPING + if force_reload or OS_METADATA_MAPPING is None: + logging.info('load os metadatas into memory') + OS_METADATA_MAPPING = metadata_api.get_oses_metadata_internal( + 
force_reload=force_reload + ) + + +def _load_flavor_metadata_ui_converters(force_reload=False): + """Load flavor metadata ui converters from inmemory db. + + The loaded metadata is mapped by flavor id. + """ + global FLAVOR_METADATA_UI_CONVERTERS + if force_reload or FLAVOR_METADATA_UI_CONVERTERS is None: + logging.info('load flavor metadata ui converters into memory') + FLAVOR_METADATA_UI_CONVERTERS = {} + adapters_flavors_metadata_ui_converters = ( + metadata_api.get_flavors_metadata_ui_converters_internal( + force_reload=force_reload + ) + ) + for adapter_name, adapter_flavors_metadata_ui_converters in ( + adapters_flavors_metadata_ui_converters.items() + ): + for flavor_name, flavor_metadata_ui_converter in ( + adapter_flavors_metadata_ui_converters.items() + ): + FLAVOR_METADATA_UI_CONVERTERS[ + '%s:%s' % (adapter_name, flavor_name) + ] = flavor_metadata_ui_converter + + +@util.deprecated +def _load_package_metadatas(force_reload=False): + """Load deployable package metadata from inmemory db.""" + global PACKAGE_METADATA_MAPPING + if force_reload or PACKAGE_METADATA_MAPPING is None: + logging.info('load package metadatas into memory') + PACKAGE_METADATA_MAPPING = ( + metadata_api.get_packages_metadata_internal( + force_reload=force_reload + ) + ) + + +def _load_flavor_metadatas(force_reload=False): + """Load flavor metadata from inmemory db. + + The loaded metadata are mapped by flavor id. 
+ """ + global FLAVOR_METADATA_MAPPING + if force_reload or FLAVOR_METADATA_MAPPING is None: + logging.info('load flavor metadatas into memory') + FLAVOR_METADATA_MAPPING = {} + adapters_flavors_metadata = ( + metadata_api.get_flavors_metadata_internal( + force_reload=force_reload + ) + ) + for adapter_name, adapter_flavors_metadata in ( + adapters_flavors_metadata.items() + ): + for flavor_name, flavor_metadata in ( + adapter_flavors_metadata.items() + ): + FLAVOR_METADATA_MAPPING[ + '%s:%s' % (adapter_name, flavor_name) + ] = flavor_metadata + + +OS_METADATA_MAPPING = None +PACKAGE_METADATA_MAPPING = None +FLAVOR_METADATA_MAPPING = None +OS_METADATA_UI_CONVERTERS = None +FLAVOR_METADATA_UI_CONVERTERS = None def validate_os_config( - session, config, os_id, whole_check=False, **kwargs + config, os_id, whole_check=False, **kwargs ): - if not OS_METADATA_MAPPING: - load_os_metadatas_internal(session) + """Validate os config.""" + load_metadatas() + if os_id not in OS_METADATA_MAPPING: + raise exception.InvalidParameter( + 'os %s is not found in os metadata mapping' % os_id + ) _validate_config( - config, os_id, 'os', OS_METADATA_MAPPING, - whole_check, session=session, **kwargs + '', config, OS_METADATA_MAPPING[os_id], + whole_check, **kwargs ) +@util.deprecated def validate_package_config( - session, config, adapter_id, whole_check=False, **kwargs + config, adapter_id, whole_check=False, **kwargs ): - if not PACKAGE_METADATA_MAPPING: - load_package_metadatas_internal(session) + """Validate package config.""" + load_metadatas() + if adapter_id not in PACKAGE_METADATA_MAPPING: + raise exception.InvalidParameter( + 'adapter %s is not found in package metedata mapping' % adapter_id + ) _validate_config( - config, adapter_id, 'adapter', PACKAGE_METADATA_MAPPING, - whole_check, session=session, **kwargs + '', config, PACKAGE_METADATA_MAPPING[adapter_id], + whole_check, **kwargs ) def validate_flavor_config( - session, config, flavor_id, whole_check=False, **kwargs + 
config, flavor_id, whole_check=False, **kwargs ): - if not FLAVOR_METADATA_MAPPING: - load_flavor_metadatas_internal(session) + """Validate flavor config.""" + load_metadatas() + if flavor_id not in FLAVOR_METADATA_MAPPING: + raise exception.InvalidParameter( + 'flavor %s is not found in flavor metadata mapping' % flavor_id + ) _validate_config( - config, flavor_id, 'flavor', FLAVOR_METADATA_MAPPING, - whole_check, session=session, **kwargs + '', config, FLAVOR_METADATA_MAPPING[flavor_id], + whole_check, **kwargs ) def _filter_metadata(metadata, **kwargs): + """Filter metadata before return it to api. + + + Some metadata fields are not json compatible or + only used in db/api internally. + We should strip these fields out before return to api. + """ if not isinstance(metadata, dict): return metadata filtered_metadata = {} @@ -141,132 +221,162 @@ def _filter_metadata(metadata, **kwargs): return filtered_metadata -def get_package_metadata_internal(session, adapter_id): - """get package metadata internal.""" - if not PACKAGE_METADATA_MAPPING: - load_package_metadatas_internal(session) +@util.deprecated +def _get_package_metadata(adapter_id): + """get package metadata.""" + load_metadatas() if adapter_id not in PACKAGE_METADATA_MAPPING: raise exception.RecordNotExists( 'adpater %s does not exist' % adapter_id ) return _filter_metadata( - PACKAGE_METADATA_MAPPING[adapter_id], session=session + PACKAGE_METADATA_MAPPING[adapter_id] ) +@util.deprecated @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_METADATAS ) @utils.wrap_to_dict(RESP_METADATA_FIELDS) def get_package_metadata(adapter_id, user=None, session=None, **kwargs): + """Get package metadata from adapter.""" return { - 'package_config': get_package_metadata_internal(session, adapter_id) + 'package_config': _get_package_metadata(adapter_id) } -def get_flavor_metadata_internal(session, flavor_id): - 
"""get flavor metadata internal.""" - if not FLAVOR_METADATA_MAPPING: - load_flavor_metadatas_internal(session) +def _get_flavor_metadata(flavor_id): + """get flavor metadata.""" + load_metadatas() if flavor_id not in FLAVOR_METADATA_MAPPING: raise exception.RecordNotExists( 'flavor %s does not exist' % flavor_id ) - return _filter_metadata( - FLAVOR_METADATA_MAPPING[flavor_id], session=session - ) + return _filter_metadata(FLAVOR_METADATA_MAPPING[flavor_id]) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_METADATAS ) @utils.wrap_to_dict(RESP_METADATA_FIELDS) def get_flavor_metadata(flavor_id, user=None, session=None, **kwargs): + """Get flavor metadata by flavor.""" return { - 'flavor_config': get_flavor_metadata_internal(session, flavor_id) + 'package_config': _get_flavor_metadata(flavor_id) } -@utils.supported_filters([]) -@database.run_in_session() -@user_api.check_user_permission_in_session( - permission.PERMISSION_LIST_METADATAS -) -@utils.wrap_to_dict(RESP_FLAVORS_FIELDS) -def list_flavors(user=None, session=None, **filters): - """List flavors.""" - return utils.list_db_objects( - session, models.AdapterFlavor, **filters - ) - - -@utils.supported_filters([]) -@database.run_in_session() -@user_api.check_user_permission_in_session( - permission.PERMISSION_LIST_METADATAS -) -@utils.wrap_to_dict(RESP_FLAVORS_FIELDS) -def get_flavor(flavor_id, user=None, session=None, **kwargs): - """Get flavor.""" - return utils.get_db_object( - session, models.AdapterFlavor, id=flavor_id - ) - - -def get_os_metadata_internal(session, os_id): - """get os metadata internal.""" - if not OS_METADATA_MAPPING: - load_os_metadatas_internal(session) +def _get_os_metadata(os_id): + """get os metadata.""" + load_metadatas() if os_id not in OS_METADATA_MAPPING: raise exception.RecordNotExists( 'os %s does not exist' % os_id ) - return _filter_metadata( - 
OS_METADATA_MAPPING[os_id], session=session - ) + return _filter_metadata(OS_METADATA_MAPPING[os_id]) + + +def _get_os_metadata_ui_converter(os_id): + """get os metadata ui converter.""" + load_metadatas() + if os_id not in OS_METADATA_UI_CONVERTERS: + raise exception.RecordNotExists( + 'os %s does not exist' % os_id + ) + return OS_METADATA_UI_CONVERTERS[os_id] + + +def _get_flavor_metadata_ui_converter(flavor_id): + """get flavor metadata ui converter.""" + load_metadatas() + if flavor_id not in FLAVOR_METADATA_UI_CONVERTERS: + raise exception.RecordNotExists( + 'flavor %s does not exist' % flavor_id + ) + return FLAVOR_METADATA_UI_CONVERTERS[flavor_id] @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_METADATAS ) @utils.wrap_to_dict(RESP_METADATA_FIELDS) def get_os_metadata(os_id, user=None, session=None, **kwargs): """get os metadatas.""" - return {'os_config': get_os_metadata_internal(session, os_id)} + return {'os_config': _get_os_metadata(os_id)} -def get_ui_metadata(metadata, config): - """convert os_metadata to ui os_metadata.""" - result_config = {} - result_config[config['mapped_name']] = [] - for mapped_child in config['mapped_children']: +@utils.supported_filters([]) +@database.run_in_session() +@user_api.check_user_permission( + permission.PERMISSION_LIST_METADATAS +) +@utils.wrap_to_dict(RESP_UI_METADATA_FIELDS) +def get_os_ui_metadata(os_id, user=None, session=None, **kwargs): + """Get os metadata ui converter by os.""" + metadata = _get_os_metadata(os_id) + metadata_ui_converter = _get_os_metadata_ui_converter(os_id) + return _get_ui_metadata(metadata, metadata_ui_converter) + + +@utils.supported_filters([]) +@database.run_in_session() +@user_api.check_user_permission( + permission.PERMISSION_LIST_METADATAS +) +@utils.wrap_to_dict(RESP_UI_METADATA_FIELDS) +def get_flavor_ui_metadata(flavor_id, user=None, session=None, **kwargs): + """Get 
flavor ui metadata by flavor.""" + metadata = _get_flavor_metadata(flavor_id) + metadata_ui_converter = _get_flavor_metadata_ui_converter(flavor_id) + return _get_ui_metadata(metadata, metadata_ui_converter) + + +def _get_ui_metadata(metadata, metadata_ui_converter): + """convert metadata to ui metadata. + + Args: + metadata: metadata we defined in metadata files. + metadata_ui_converter: metadata ui converter defined in metadata + mapping files. Used to convert orignal + metadata to ui understandable metadata. + + Returns: + ui understandable metadata. + """ + ui_metadata = {} + ui_metadata[metadata_ui_converter['mapped_name']] = [] + for mapped_child in metadata_ui_converter['mapped_children']: data_dict = {} - for config_key, config_value in mapped_child.items(): - for key, value in config_value.items(): + for ui_key, ui_value in mapped_child.items(): + for key, value in ui_value.items(): if 'data' == key: result_data = [] - _get_data(metadata[config_key], value, result_data) + _get_ui_metadata_data( + metadata[ui_key], value, result_data + ) data_dict['data'] = result_data else: data_dict[key] = value - result_config[config['mapped_name']].append(data_dict) - return result_config + ui_metadata[metadata_ui_converter['mapped_name']].append(data_dict) + return ui_metadata -def _get_data(metadata, config, result_data): +def _get_ui_metadata_data(metadata, config, result_data): + """Get ui metadata data and fill to result.""" data_dict = {} for key, config_value in config.items(): if isinstance(config_value, dict) and key != 'content_data': if key in metadata.keys(): - _get_data(metadata[key], config_value, result_data) + _get_ui_metadata_data(metadata[key], config_value, result_data) else: - _get_data(metadata, config_value, result_data) + _get_ui_metadata_data(metadata, config_value, result_data) elif isinstance(config_value, list): option_list = [] for item in config_value: @@ -285,9 +395,10 @@ def _get_data(metadata, config, result_data): return result_data 
+@util.deprecated @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_METADATAS ) @utils.wrap_to_dict(RESP_METADATA_FIELDS) @@ -295,9 +406,11 @@ def get_package_os_metadata( adapter_id, os_id, user=None, session=None, **kwargs ): - from compass.db.api import adapter_holder as adapter_api - adapter = adapter_api.get_adapter_internal(session, adapter_id) - os_ids = [os['os_id'] for os in adapter['supported_oses']] + """Get metadata by adapter and os.""" + adapter = adapter_holder_api.get_adapter( + adapter_id, user=user, session=session + ) + os_ids = [os['id'] for os in adapter['supported_oses']] if os_id not in os_ids: raise exception.InvalidParameter( 'os %s is not in the supported os list of adapter %s' % ( @@ -305,48 +418,307 @@ def get_package_os_metadata( ) ) metadatas = {} - metadatas['os_config'] = get_os_metadata_internal( - session, os_id + metadatas['os_config'] = _get_os_metadata( + os_id ) - metadatas['package_config'] = get_package_metadata_internal( - session, adapter_id + metadatas['package_config'] = _get_package_metadata( + adapter_id ) return metadatas -def _autofill_config( - config, id, id_name, metadata_mapping, **kwargs +@utils.supported_filters([]) +@database.run_in_session() +@user_api.check_user_permission( + permission.PERMISSION_LIST_METADATAS +) +@utils.wrap_to_dict(RESP_METADATA_FIELDS) +def get_flavor_os_metadata( + flavor_id, os_id, + user=None, session=None, **kwargs ): - if id not in metadata_mapping: + """Get metadata by flavor and os.""" + flavor = adapter_holder_api.get_flavor( + flavor_id, user=user, session=session + ) + adapter_id = flavor['adapter_id'] + adapter = adapter_holder_api.get_adapter( + adapter_id, user=user, session=session + ) + os_ids = [os['id'] for os in adapter['supported_oses']] + if os_id not in os_ids: raise exception.InvalidParameter( - '%s id %s is not found in metadata mapping' % (id_name, id) + 
'os %s is not in the supported os list of adapter %s' % ( + os_id, adapter_id + ) ) - metadatas = metadata_mapping[id] + metadatas = {} + metadatas['os_config'] = _get_os_metadata( + os_id + ) + metadatas['package_config'] = _get_flavor_metadata( + flavor_id + ) + return metadatas + + +def _validate_self( + config_path, config_key, config, + metadata, whole_check, + **kwargs +): + """validate config by metadata self section.""" + logging.debug('validate config self %s', config_path) + if '_self' not in metadata: + if isinstance(config, dict): + _validate_config( + config_path, config, metadata, whole_check, **kwargs + ) + return + field_type = metadata['_self'].get('field_type', basestring) + if not isinstance(config, field_type): + raise exception.InvalidParameter( + '%s config type is not %s: %s' % (config_path, field_type, config) + ) + is_required = metadata['_self'].get( + 'is_required', False + ) + required_in_whole_config = metadata['_self'].get( + 'required_in_whole_config', False + ) + if isinstance(config, basestring): + if config == '' and not is_required and not required_in_whole_config: + # ignore empty config when it is optional + return + required_in_options = metadata['_self'].get( + 'required_in_options', False + ) + options = metadata['_self'].get('options', None) + if required_in_options: + if field_type in [int, basestring, float, bool]: + if options and config not in options: + raise exception.InvalidParameter( + '%s config is not in %s: %s' % ( + config_path, options, config + ) + ) + elif field_type in [list, tuple]: + if options and not set(config).issubset(set(options)): + raise exception.InvalidParameter( + '%s config is not in %s: %s' % ( + config_path, options, config + ) + ) + elif field_type == dict: + if options and not set(config.keys()).issubset(set(options)): + raise exception.InvalidParameter( + '%s config is not in %s: %s' % ( + config_path, options, config + ) + ) + validator = 
metadata['_self'].get('validator', None) + logging.debug('validate by validator %s', validator) + if validator: + if not validator(config_key, config, **kwargs): + raise exception.InvalidParameter( + '%s config is invalid' % config_path + ) + if isinstance(config, dict): + _validate_config( + config_path, config, metadata, whole_check, **kwargs + ) + + +def _validate_config( + config_path, config, metadata, whole_check, + **kwargs +): + """validate config by metadata.""" + logging.debug('validate config %s', config_path) + generals = {} + specified = {} + for key, value in metadata.items(): + if key.startswith('$'): + generals[key] = value + elif key.startswith('_'): + pass + else: + specified[key] = value + config_keys = set(config.keys()) + specified_keys = set(specified.keys()) + intersect_keys = config_keys & specified_keys + not_found_keys = config_keys - specified_keys + redundant_keys = specified_keys - config_keys + for key in redundant_keys: + if '_self' not in specified[key]: + continue + if specified[key]['_self'].get('is_required', False): + raise exception.InvalidParameter( + '%s/%s does not find but it is required' % ( + config_path, key + ) + ) + if ( + whole_check and + specified[key]['_self'].get( + 'required_in_whole_config', False + ) + ): + raise exception.InvalidParameter( + '%s/%s does not find but it is required in whole config' % ( + config_path, key + ) + ) + for key in intersect_keys: + _validate_self( + '%s/%s' % (config_path, key), + key, config[key], specified[key], whole_check, + **kwargs + ) + for key in not_found_keys: + if not generals: + raise exception.InvalidParameter( + 'key %s missing in metadata %s' % ( + key, config_path + ) + ) + for general_key, general_value in generals.items(): + _validate_self( + '%s/%s' % (config_path, key), + key, config[key], general_value, whole_check, + **kwargs + ) + + +def _autofill_self_config( + config_path, config_key, config, + metadata, + **kwargs +): + """Autofill config by metadata self 
section.""" + if '_self' not in metadata: + if isinstance(config, dict): + _autofill_config( + config_path, config, metadata, **kwargs + ) + return config logging.debug( - 'auto fill %s config %s by params %s', - id_name, config, kwargs + 'autofill %s by metadata %s', config_path, metadata['_self'] ) - return metadata_api.autofill_config_internal( - config, metadatas, **kwargs + autofill_callback = metadata['_self'].get( + 'autofill_callback', None ) + autofill_callback_params = metadata['_self'].get( + 'autofill_callback_params', {} + ) + callback_params = dict(kwargs) + if autofill_callback_params: + callback_params.update(autofill_callback_params) + default_value = metadata['_self'].get( + 'default_value', None + ) + if default_value is not None: + callback_params['default_value'] = default_value + options = metadata['_self'].get( + 'options', None + ) + if options is not None: + callback_params['options'] = options + if autofill_callback: + config = autofill_callback( + config_key, config, **callback_params + ) + if config is None: + new_config = {} + else: + new_config = config + if isinstance(new_config, dict): + _autofill_config( + config_path, new_config, metadata, **kwargs + ) + if new_config: + config = new_config + return config + + +def _autofill_config( + config_path, config, metadata, **kwargs +): + """autofill config by metadata.""" + generals = {} + specified = {} + for key, value in metadata.items(): + if key.startswith('$'): + generals[key] = value + elif key.startswith('_'): + pass + else: + specified[key] = value + config_keys = set(config.keys()) + specified_keys = set(specified.keys()) + intersect_keys = config_keys & specified_keys + not_found_keys = config_keys - specified_keys + redundant_keys = specified_keys - config_keys + for key in redundant_keys: + self_config = _autofill_self_config( + '%s/%s' % (config_path, key), + key, None, specified[key], **kwargs + ) + if self_config is not None: + config[key] = self_config + for key in 
intersect_keys: + config[key] = _autofill_self_config( + '%s/%s' % (config_path, key), + key, config[key], specified[key], + **kwargs + ) + for key in not_found_keys: + for general_key, general_value in generals.items(): + config[key] = _autofill_self_config( + '%s/%s' % (config_path, key), + key, config[key], general_value, + **kwargs + ) + return config def autofill_os_config( - session, config, os_id, **kwargs + config, os_id, **kwargs ): - if not OS_METADATA_MAPPING: - load_os_metadatas_internal(session) + load_metadatas() + if os_id not in OS_METADATA_MAPPING: + raise exception.InvalidParameter( + 'os %s is not found in os metadata mapping' % os_id + ) + return _autofill_config( - config, os_id, 'os', OS_METADATA_MAPPING, session=session, **kwargs + '', config, OS_METADATA_MAPPING[os_id], **kwargs ) def autofill_package_config( - session, config, adapter_id, **kwargs + config, adapter_id, **kwargs ): - if not PACKAGE_METADATA_MAPPING: - load_package_metadatas_internal(session) + load_metadatas() + if adapter_id not in PACKAGE_METADATA_MAPPING: + raise exception.InvalidParameter( + 'adapter %s is not found in package metadata mapping' % adapter_id + ) + return _autofill_config( - config, adapter_id, 'adapter', PACKAGE_METADATA_MAPPING, - session=session, **kwargs + '', config, PACKAGE_METADATA_MAPPING[adapter_id], **kwargs + ) + + +def autofill_flavor_config( + config, flavor_id, **kwargs +): + load_metadatas() + if flavor_id not in FLAVOR_METADATA_MAPPING: + raise exception.InvalidParameter( + 'flavor %s is not found in flavor metadata mapping' % flavor_id + ) + + return _autofill_config( + '', config, FLAVOR_METADATA_MAPPING[flavor_id], **kwargs ) diff --git a/compass/db/api/network.py b/compass/db/api/network.py index ec1765b1..e2bf7d31 100644 --- a/compass/db/api/network.py +++ b/compass/db/api/network.py @@ -15,6 +15,7 @@ """Network related database operations.""" import logging import netaddr +import re from compass.db.api import database from 
compass.db.api import permission @@ -37,6 +38,7 @@ UPDATED_FIELDS = ['subnet', 'name'] def _check_subnet(subnet): + """Check subnet format is correct.""" try: netaddr.IPNetwork(subnet) except Exception as error: @@ -47,7 +49,7 @@ def _check_subnet(subnet): @utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_SUBNETS ) @utils.wrap_to_dict(RESP_FIELDS) @@ -58,9 +60,21 @@ def list_subnets(user=None, session=None, **filters): ) +def _get_subnet(subnet_id, session=None, **kwargs): + """Get subnet by subnet id.""" + if isinstance(subnet_id, (int, long)): + return utils.get_db_object( + session, models.Subnet, + id=subnet_id, **kwargs + ) + raise exception.InvalidParameter( + 'subnet id %s type is not int compatible' % subnet_id + ) + + @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_SUBNETS ) @utils.wrap_to_dict(RESP_FIELDS) @@ -69,9 +83,9 @@ def get_subnet( user=None, session=None, **kwargs ): """Get subnet info.""" - return utils.get_db_object( - session, models.Subnet, - exception_when_missing, id=subnet_id + return _get_subnet( + subnet_id, session=session, + exception_when_missing=exception_when_missing ) @@ -81,7 +95,7 @@ def get_subnet( ) @utils.input_validates(subnet=_check_subnet) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_SUBNET ) @utils.wrap_to_dict(RESP_FIELDS) @@ -102,29 +116,20 @@ def add_subnet( ) @utils.input_validates(subnet=_check_subnet) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_SUBNET ) @utils.wrap_to_dict(RESP_FIELDS) def update_subnet(subnet_id, user=None, session=None, **kwargs): """Update a subnet.""" - subnet = 
utils.get_db_object( - session, models.Subnet, id=subnet_id + subnet = _get_subnet( + subnet_id, session=session ) return utils.update_db_object(session, subnet, **kwargs) -@utils.supported_filters([]) -@database.run_in_session() -@user_api.check_user_permission_in_session( - permission.PERMISSION_DEL_SUBNET -) -@utils.wrap_to_dict(RESP_FIELDS) -def del_subnet(subnet_id, user=None, session=None, **kwargs): - """Delete a subnet.""" - subnet = utils.get_db_object( - session, models.Subnet, id=subnet_id - ) +def _check_subnet_deletable(subnet): + """Check a subnet deletable.""" if subnet.host_networks: host_networks = [ '%s:%s=%s' % ( @@ -139,4 +144,17 @@ def del_subnet(subnet_id, user=None, session=None, **kwargs): ) ) + +@utils.supported_filters([]) +@database.run_in_session() +@user_api.check_user_permission( + permission.PERMISSION_DEL_SUBNET +) +@utils.wrap_to_dict(RESP_FIELDS) +def del_subnet(subnet_id, user=None, session=None, **kwargs): + """Delete a subnet.""" + subnet = _get_subnet( + subnet_id, session=session + ) + _check_subnet_deletable(subnet) return utils.del_db_object(session, subnet) diff --git a/compass/db/api/permission.py b/compass/db/api/permission.py index e3ddf69d..f4d777a2 100644 --- a/compass/db/api/permission.py +++ b/compass/db/api/permission.py @@ -13,14 +13,17 @@ # limitations under the License. 
"""Permission database operations.""" +import re + from compass.db.api import database from compass.db.api import user as user_api from compass.db.api import utils from compass.db import exception from compass.db import models +from compass.utils import util -SUPPORTED_FIELDS = ['name', 'alias', 'description'] +SUPPORTED_FIELDS = ['id', 'name', 'alias', 'description'] RESP_FIELDS = ['id', 'name', 'alias', 'description'] @@ -291,6 +294,7 @@ PERMISSIONS = [ ] +@util.deprecated def list_permissions_internal(session, **filters): """internal functions used only by other db.api modules.""" return utils.list_db_objects(session, models.Permission, **filters) @@ -298,7 +302,7 @@ def list_permissions_internal(session, **filters): @utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS) @database.run_in_session() -@user_api.check_user_permission_in_session(PERMISSION_LIST_PERMISSIONS) +@user_api.check_user_permission(PERMISSION_LIST_PERMISSIONS) @utils.wrap_to_dict(RESP_FIELDS) def list_permissions(user=None, session=None, **filters): """list permissions.""" @@ -307,22 +311,36 @@ def list_permissions(user=None, session=None, **filters): ) +def _get_permission(permission_id, session=None, **kwargs): + """Get permission object by the unique key of Permission table.""" + if isinstance(permission_id, (int, long)): + return utils.get_db_object( + session, models.Permission, id=permission_id, **kwargs) + raise exception.InvalidParameter( + 'permission id %s type is not int compatible' % permission_id + ) + + +def get_permission_internal(permission_id, session=None, **kwargs): + return _get_permission(permission_id, session=session, **kwargs) + + @utils.supported_filters() @database.run_in_session() -@user_api.check_user_permission_in_session(PERMISSION_LIST_PERMISSIONS) +@user_api.check_user_permission(PERMISSION_LIST_PERMISSIONS) @utils.wrap_to_dict(RESP_FIELDS) def get_permission( permission_id, exception_when_missing=True, user=None, session=None, **kwargs ): """get 
permissions.""" - return utils.get_db_object( - session, models.Permission, - exception_when_missing, id=permission_id + return _get_permission( + permission_id, session=session, + exception_when_missing=exception_when_missing ) -def add_permissions_internal(session): +def add_permissions_internal(session=None): """internal functions used by other db.api modules only.""" permissions = [] for permission in PERMISSIONS: diff --git a/compass/db/api/switch.py b/compass/db/api/switch.py index 6d6d1dda..4c7c8e59 100644 --- a/compass/db/api/switch.py +++ b/compass/db/api/switch.py @@ -24,6 +24,7 @@ from compass.db.api import utils from compass.db import exception from compass.db import models from compass.utils import setting_wrapper as setting +from compass.utils import util SUPPORTED_FIELDS = ['ip_int', 'vendor', 'state'] @@ -36,36 +37,37 @@ SUPPORTED_MACHINES_FIELDS = [ ] SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS = [ 'switch_ip_int', 'port', 'vlans', 'mac', - 'tag', 'location', 'os_name', 'os_id' + 'tag', 'location', 'os_name' ] SUPPORTED_MACHINES_HOSTS_FIELDS = [ 'port', 'vlans', 'mac', 'tag', 'location', - 'os_name', 'os_id' + 'os_name' ] IGNORE_FIELDS = ['id', 'created_at', 'updated_at'] ADDED_FIELDS = ['ip'] OPTIONAL_ADDED_FIELDS = [ - 'credentials', 'vendor', 'state', 'err_msg', 'filters' + 'credentials', 'vendor', 'state', 'err_msg', 'machine_filters' ] UPDATED_FIELDS = [ 'ip', 'credentials', 'vendor', 'state', - 'err_msg', 'put_filters' + 'err_msg', 'put_machine_filters' ] -PATCHED_FIELDS = ['patched_credentials', 'patched_filters'] -UPDATED_FILTERS_FIELDS = ['put_filters'] -PATCHED_FILTERS_FIELDS = ['patched_filters'] -ADDED_MACHINES_FIELDS = ['mac', 'port'] +PATCHED_FIELDS = ['patched_credentials', 'patched_machine_filters'] +UPDATED_FILTERS_FIELDS = ['put_machine_filters'] +PATCHED_FILTERS_FIELDS = ['patched_machine_filters'] +ADDED_MACHINES_FIELDS = ['mac'] OPTIONAL_ADDED_MACHINES_FIELDS = [ - 'vlans', 'ipmi_credentials', 'tag', 'location' + 'ipmi_credentials', 
'tag', 'location' ] -ADDED_SWITCH_MACHINES_FIELDS = ['port', 'vlans'] +ADDED_SWITCH_MACHINES_FIELDS = ['port'] +OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS = ['vlans'] UPDATED_MACHINES_FIELDS = [ - 'port', 'vlans', 'ipmi_credentials', + 'ipmi_credentials', 'tag', 'location' ] UPDATED_SWITCH_MACHINES_FIELDS = ['port', 'vlans'] PATCHED_MACHINES_FIELDS = [ - 'patched_vlans', 'patched_ipmi_credentials', + 'patched_ipmi_credentials', 'patched_tag', 'patched_location' ] PATCHED_SWITCH_MACHINES_FIELDS = ['patched_vlans'] @@ -89,7 +91,7 @@ RESP_MACHINES_HOSTS_FIELDS = [ 'id', 'switch_id', 'switch_ip', 'machine_id', 'switch_machine_id', 'port', 'vlans', 'mac', 'ipmi_credentials', 'tag', 'location', 'ip', - 'name', 'hostname', 'os_name', 'os_id', 'owner', + 'name', 'hostname', 'os_name', 'owner', 'os_installer', 'reinstall_os', 'os_installed', 'clusters', 'created_at', 'updated_at' ] @@ -98,12 +100,14 @@ RESP_CLUSTER_FIELDS = [ ] -def _check_filters(switch_filters): - logging.debug('check filters: %s', switch_filters) - models.Switch.parse_filters(switch_filters) +def _check_machine_filters(machine_filters): + """Check if machine filters format is acceptable.""" + logging.debug('check machine filters: %s', machine_filters) + models.Switch.parse_filters(machine_filters) def _check_vlans(vlans): + """Check vlans format is acceptable.""" for vlan in vlans: if not isinstance(vlan, int): raise exception.InvalidParameter( @@ -119,34 +123,80 @@ def _check_vlans(vlans): @utils.input_validates( ip=utils.check_ip, credentials=utils.check_switch_credentials, - filters=_check_filters + machine_filters=_check_machine_filters ) @utils.wrap_to_dict(RESP_FIELDS) -def add_switch_internal( - session, ip, exception_when_existing=True, - filters=setting.SWITCHES_DEFAULT_FILTERS, **kwargs +def _add_switch( + ip, exception_when_existing=True, + machine_filters=setting.SWITCHES_DEFAULT_FILTERS, + session=None, **kwargs ): - with session.begin(subtransactions=True): - return utils.add_db_object( - 
session, models.Switch, exception_when_existing, ip, - filters=filters, **kwargs - ) + """Add switch by switch ip.""" + ip_int = long(netaddr.IPAddress(ip)) + return utils.add_db_object( + session, models.Switch, exception_when_existing, ip_int, + machine_filters=machine_filters, **kwargs + ) def get_switch_internal( - session, exception_when_missing=True, **kwargs + switch_id, session=None, **kwargs ): - """Get switch.""" - with session.begin(subtransactions=True): + """Get switch by switch id. + + Should only be used by other files under db/api + """ + return _get_switch(switch_id, session=session, **kwargs) + + +def _get_switch(switch_id, session=None, **kwargs): + """Get Switch object switch id.""" + if isinstance(switch_id, (int, long)): return utils.get_db_object( - session, models.Switch, exception_when_missing, - **kwargs + session, models.Switch, + id=switch_id, **kwargs ) + raise exception.InvalidParameter( + 'switch id %s type is not int compatible' % switch_id) + + +def _get_switch_by_ip(switch_ip, session=None, **kwargs): + """Get switch by switch ip.""" + switch_ip_int = long(netaddr.IPAddress(switch_ip)) + return utils.get_db_object( + session, models.Switch, + ip_int=switch_ip_int, **kwargs + ) + + +def _get_switch_machine(switch_id, machine_id, session=None, **kwargs): + """Get switch machine by switch id and machine id.""" + switch = _get_switch(switch_id, session=session) + from compass.db.api import machine as machine_api + machine = machine_api.get_machine_internal(machine_id, session=session) + return utils.get_db_object( + session, models.SwitchMachine, + switch_id=switch.id, machine_id=machine.id, **kwargs + ) + + +def _get_switchmachine(switch_machine_id, session=None, **kwargs): + """Get switch machine by switch_machine_id.""" + if not isinstance(switch_machine_id, (int, long)): + raise exception.InvalidParameter( + 'switch machine id %s type is not int compatible' % ( + switch_machine_id + ) + ) + return utils.get_db_object( + session, 
models.SwitchMachine, + switch_machine_id=switch_machine_id, **kwargs + ) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_SWITCHES ) @utils.wrap_to_dict(RESP_FIELDS) @@ -154,21 +204,27 @@ def get_switch( switch_id, exception_when_missing=True, user=None, session=None, **kwargs ): - """get field dict of a switch.""" - return utils.get_db_object( - session, models.Switch, - exception_when_missing, id=switch_id + """get a switch by switch id.""" + return _get_switch( + switch_id, session=session, + exception_when_missing=exception_when_missing ) @utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_SWITCHES ) @utils.wrap_to_dict(RESP_FIELDS) def list_switches(user=None, session=None, **filters): """List switches.""" + # TODO(xicheng): should discuss with weidong. + # If we can deprecate the use of DEFAULT_SWITCH_IP, + # The code will be simpler. + # The UI should use /machines-hosts instead of + # /switches-machines-hosts and can show multi switch ip/port + # under one row of machine info. switches = utils.list_db_objects( session, models.Switch, **filters ) @@ -183,32 +239,40 @@ def list_switches(user=None, session=None, **filters): @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEL_SWITCH ) @utils.wrap_to_dict(RESP_FIELDS) def del_switch(switch_id, user=None, session=None, **kwargs): - """Delete a switch.""" - switch = utils.get_db_object(session, models.Switch, id=switch_id) - default_switch_ip_int = long(netaddr.IPAddress(setting.DEFAULT_SWITCH_IP)) - default_switch = utils.get_db_object( - session, models.Switch, - ip_int=default_switch_ip_int + """Delete a switch. 
+ + If switch is not the default switch, and the machine under this switch + is only connected to this switch, the machine will be moved to connect + to default switch. Otherwise we can only simply delete the switch + machine. The purpose here to make sure there is no machine not + connecting to any switch. + """ + # TODO(xicheng): Simplify the logic if the default switch feature + # can be deprecated. + switch = _get_switch(switch_id, session=session) + default_switch = _get_switch_by_ip( + setting.DEFAULT_SWITCH_IP, session=session ) - for switch_machine in switch.switch_machines: - machine = switch_machine.machine - if len(machine.switch_machines) <= 1: - utils.add_db_object( - session, models.SwitchMachine, - False, - default_switch.id, machine.id, - port=switch_machine.port - ) + if switch.id != default_switch.id: + for switch_machine in switch.switch_machines: + machine = switch_machine.machine + if len(machine.switch_machines) <= 1: + utils.add_db_object( + session, models.SwitchMachine, + False, + default_switch.id, machine.id, + port=switch_machine.port + ) return utils.del_db_object(session, switch) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_SWITCH ) def add_switch( @@ -216,14 +280,15 @@ def add_switch( user=None, session=None, **kwargs ): """Create a switch.""" - ip_int = long(netaddr.IPAddress(ip)) - return add_switch_internal( - session, ip_int, exception_when_existing, **kwargs + return _add_switch( + ip, + exception_when_existing=exception_when_existing, + session=session, **kwargs ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_SWITCH ) def add_switches( @@ -231,24 +296,24 @@ def add_switches( data=[], user=None, session=None ): """Create switches.""" + # TODO(xicheng): simplify the batch api. 
switches = [] fail_switches = [] for switch_data in data: - switch_ip = long(netaddr.IPAddress(switch_data['ip'])) - switch_object = utils.get_db_object( - session, models.Switch, False, - ip_int=switch_ip + switch_object = _get_switch_by_ip( + switch_data['ip'], session=session, + exception_when_missing=False ) if switch_object: logging.error('ip %s exists in switch %s' % ( - switch_ip, switch_object.id + switch_data['ip'], switch_object.id )) fail_switches.append(switch_data) else: - switch_data.pop('ip') switches.append( - add_switch_internal( - session, switch_ip, exception_when_existing, + _add_switch( + exception_when_existing=exception_when_existing, + session=session, **switch_data ) ) @@ -258,25 +323,17 @@ def add_switches( } -def update_switch_internal(session, switch, **kwargs): - """update switch.""" - return utils.update_db_object( - session, switch, - **kwargs - ) - - @utils.wrap_to_dict(RESP_FIELDS) -def _update_switch(session, switch_id, **kwargs): +def _update_switch(switch_id, session=None, **kwargs): """Update a switch.""" - switch = utils.get_db_object( - session, models.Switch, id=switch_id - ) + switch = _get_switch(switch_id, session=session) return utils.update_db_object(session, switch, **kwargs) +# replace machine_filters in kwargs to put_machine_filters, +# which is used to tell db this is a put action for the field. 
@utils.replace_filters( - filters='put_filters' + machine_filters='put_machine_filters' ) @utils.supported_filters( optional_support_keys=UPDATED_FIELDS, @@ -284,106 +341,116 @@ def _update_switch(session, switch_id, **kwargs): ) @utils.input_validates( credentials=utils.check_switch_credentials, - put_filters=_check_filters + put_machine_filters=_check_machine_filters ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_SWITCH ) def update_switch(switch_id, user=None, session=None, **kwargs): """Update fields of a switch.""" - return _update_switch(session, switch_id, **kwargs) + return _update_switch(switch_id, session=session, **kwargs) +# replace credentials to patched_credentials, +# machine_filters to patched_machine_filters in kwargs. +# This is to tell db they are patch action to the above fields. @utils.replace_filters( credentials='patched_credentials', - filters='patched_filters' + machine_filters='patched_machine_filters' ) @utils.supported_filters( optional_support_keys=PATCHED_FIELDS, ignore_support_keys=IGNORE_FIELDS ) @utils.input_validates( - patched_filters=_check_filters + patched_machine_filters=_check_machine_filters ) @database.run_in_session() @utils.output_validates( credentials=utils.check_switch_credentials ) -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_SWITCH ) def patch_switch(switch_id, user=None, session=None, **kwargs): """Patch fields of a switch.""" - return _update_switch(session, switch_id, **kwargs) + return _update_switch(switch_id, session=session, **kwargs) +@util.deprecated @utils.supported_filters(optional_support_keys=SUPPORTED_FILTER_FIELDS) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_SWITCH_FILTERS ) @utils.wrap_to_dict(RESP_FILTERS_FIELDS) def list_switch_filters(user=None, session=None, 
**filters): - """List switch filters.""" + """List all switches' filters.""" return utils.list_db_objects( session, models.Switch, **filters ) +@util.deprecated @utils.supported_filters() @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_SWITCH_FILTERS ) @utils.wrap_to_dict(RESP_FILTERS_FIELDS) def get_switch_filters( - switch_id, user=None, session=None, **kwargs + switch_id, exception_when_missing=True, + user=None, session=None, **kwargs ): - """get switch filter.""" - return utils.get_db_object( - session, models.Switch, id=switch_id + """get filters of a switch.""" + return _get_switch( + switch_id, session=session, + exception_when_missing=exception_when_missing ) +@util.deprecated @utils.replace_filters( - filters='put_filters' + machine_filters='put_machine_filters' ) @utils.supported_filters( optional_support_keys=UPDATED_FILTERS_FIELDS, ignore_support_keys=IGNORE_FIELDS ) -@utils.input_validates(put_filters=_check_filters) +@utils.input_validates(put_machine_filters=_check_machine_filters) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_SWITCH_FILTERS ) @utils.wrap_to_dict(RESP_FILTERS_FIELDS) def update_switch_filters(switch_id, user=None, session=None, **kwargs): - """Update a switch filter.""" - switch = utils.get_db_object(session, models.Switch, id=switch_id) + """Update filters of a switch.""" + switch = _get_switch(switch_id, session=session) return utils.update_db_object(session, switch, **kwargs) +@util.deprecated @utils.replace_filters( - filters='patched_filters' + machine_filters='patched_machine_filters' ) @utils.supported_filters( optional_support_keys=PATCHED_FILTERS_FIELDS, ignore_support_keys=IGNORE_FIELDS ) -@utils.input_validates(patched_filters=_check_filters) +@utils.input_validates(patched_machine_filters=_check_machine_filters) @database.run_in_session() 
-@user_api.check_user_permission_in_session(
+@user_api.check_user_permission(
     permission.PERMISSION_UPDATE_SWITCH_FILTERS
 )
 @utils.wrap_to_dict(RESP_FILTERS_FIELDS)
 def patch_switch_filter(switch_id, user=None, session=None, **kwargs):
-    """Patch a switch filter."""
-    switch = utils.get_db_object(session, models.Switch, id=switch_id)
+    """Patch filters to a switch."""
+    switch = _get_switch(switch_id, session=session)
     return utils.update_db_object(session, switch, **kwargs)


+@util.deprecated
 def get_switch_machines_internal(session, **filters):
     return utils.list_db_objects(
         session, models.SwitchMachine, **filters
@@ -391,6 +458,19 @@ def get_switch_machines_internal(session, **filters):


 def _filter_port(port_filter, obj):
+    """filter switch machines by port.
+
+    supported port_filter keys: [
+        'startswith', 'endswith', 'resp_lt',
+        'resp_le', 'resp_gt', 'resp_ge', 'resp_range'
+    ]
+
+    port_filter examples:
+    {
+        'startswith': 'ae', 'endswith': '',
+        'resp_ge': 20, 'resp_le': 30,
+    }
+    """
     port_prefix = port_filter.get('startswith', '')
     port_suffix = port_filter.get('endswith', '')
     pattern = re.compile(r'%s(\d+)%s' % (port_prefix, port_suffix))
@@ -419,8 +499,11 @@ def _filter_port(port_filter, obj):
     ):
         return False
     if 'resp_range' in port_filter:
+        resp_range = port_filter['resp_range']
+        if not isinstance(resp_range, list):
+            resp_range = [resp_range]
         in_range = False
-        for port_start, port_end in port_filter['resp_range']:
+        for port_start, port_end in resp_range:
             if port_start <= port_number <= port_end:
                 in_range = True
                 break
@@ -430,6 +513,11 @@ def _filter_port(port_filter, obj):


 def _filter_vlans(vlan_filter, obj):
+    """Filter switch machines by vlan.
+ + supported keys in vlan_filter: + ['resp_in'] + """ vlans = set(obj) if 'resp_in' in vlan_filter: resp_vlans = set(vlan_filter['resp_in']) @@ -444,7 +532,11 @@ def _filter_vlans(vlan_filter, obj): location=utils.general_filter_callback ) @utils.wrap_to_dict(RESP_MACHINES_FIELDS) -def _filter_switch_machines(session, switch_machines): +def _filter_switch_machines(switch_machines): + """Get filtered switch machines. + + The filters are defined in each switch. + """ return [ switch_machine for switch_machine in switch_machines if not switch_machine.filtered @@ -457,13 +549,13 @@ def _filter_switch_machines(session, switch_machines): tag=utils.general_filter_callback, location=utils.general_filter_callback, os_name=utils.general_filter_callback, - os_id=utils.general_filter_callback ) @utils.wrap_to_dict( RESP_MACHINES_HOSTS_FIELDS, clusters=RESP_CLUSTER_FIELDS ) -def _filter_switch_machines_hosts(session, switch_machines): +def _filter_switch_machines_hosts(switch_machines): + """Similar as _filter_switch_machines, but also return host info.""" filtered_switch_machines = [ switch_machine for switch_machine in switch_machines if not switch_machine.filtered @@ -487,17 +579,21 @@ def _filter_switch_machines_hosts(session, switch_machines): optional_support_keys=SUPPORTED_MACHINES_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_SWITCH_MACHINES ) -def list_switch_machines(switch_id, user=None, session=None, **filters): - """Get switch machines.""" - switch_machines = get_switch_machines_internal( - session, switch_id=switch_id, **filters +def list_switch_machines( + switch_id, user=None, session=None, **filters +): + """Get switch machines of a switch.""" + switch = _get_switch(switch_id, session=session) + switch_machines = utils.list_db_objects( + session, models.SwitchMachine, switch_id=switch.id, **filters ) - return _filter_switch_machines(session, switch_machines) + 
return _filter_switch_machines(switch_machines) +# replace ip_int to switch_ip_int in kwargs @utils.replace_filters( ip_int='switch_ip_int' ) @@ -505,16 +601,16 @@ def list_switch_machines(switch_id, user=None, session=None, **filters): optional_support_keys=SUPPORTED_SWITCH_MACHINES_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_SWITCH_MACHINES ) def list_switchmachines(user=None, session=None, **filters): """List switch machines.""" - switch_machines = get_switch_machines_internal( - session, **filters + switch_machines = utils.list_db_objects( + session, models.SwitchMachine, **filters ) return _filter_switch_machines( - session, switch_machines + switch_machines ) @@ -522,19 +618,23 @@ def list_switchmachines(user=None, session=None, **filters): optional_support_keys=SUPPORTED_MACHINES_HOSTS_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_SWITCH_MACHINES ) -def list_switch_machines_hosts(switch_id, user=None, session=None, **filters): - """Get switch machines hosts.""" - switch_machines = get_switch_machines_internal( - session, switch_id=switch_id, **filters +def list_switch_machines_hosts( + switch_id, user=None, session=None, **filters +): + """Get switch machines and possible hosts of a switch.""" + switch = _get_switch(switch_id, session=session) + switch_machines = utils.list_db_objects( + session, models.SwitchMachine, switch_id=switch.id, **filters ) return _filter_switch_machines_hosts( - session, switch_machines + switch_machines ) +# replace ip_int to switch_ip_int in kwargs @utils.replace_filters( ip_int='switch_ip_int' ) @@ -542,76 +642,109 @@ def list_switch_machines_hosts(switch_id, user=None, session=None, **filters): optional_support_keys=SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS ) @database.run_in_session() -@user_api.check_user_permission_in_session( 
+user_api.check_user_permission(
     permission.PERMISSION_LIST_SWITCH_MACHINES
 )
 def list_switchmachines_hosts(user=None, session=None, **filters):
-    """List switch machines hosts."""
-    switch_machines = get_switch_machines_internal(
-        session, **filters
+    """List switch machines and possible hosts."""
+    switch_machines = utils.list_db_objects(
+        session, models.SwitchMachine, **filters
     )
-    if 'ip_int' in filters:
-        filtered_switch_machines = switch_machines
-    else:
-        filtered_switch_machines = [
-            switch_machine for switch_machine in switch_machines
-        ]
     return _filter_switch_machines_hosts(
-        session, filtered_switch_machines
+        switch_machines
     )


 @utils.supported_filters(
     ADDED_MACHINES_FIELDS,
     optional_support_keys=OPTIONAL_ADDED_MACHINES_FIELDS,
-    ignore_support_keys=IGNORE_FIELDS
+    ignore_support_keys=OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS
 )
-@utils.input_validates(mac=utils.check_mac, vlans=_check_vlans)
-@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
-def _add_switch_machine(
-    session, user, switch_id, exception_when_existing=True,
-    mac=None, **kwargs
-):
-    switch = utils.get_db_object(
-        session, models.Switch, id=switch_id)
-    switch_machine_dict = {}
-    machine_dict = {}
-    for key, value in kwargs.items():
-        if key in ADDED_SWITCH_MACHINES_FIELDS:
-            switch_machine_dict[key] = value
-        else:
-            machine_dict[key] = value
-    machine = utils.add_db_object(
+@utils.input_validates(mac=utils.check_mac)
+def _add_machine_if_not_exist(mac=None, session=None, **kwargs):
+    """Add machine if the mac does not exist in any machine."""
+    return utils.add_db_object(
         session, models.Machine, False,
-        mac, **machine_dict)
+        mac, **kwargs)
+
+
+@utils.supported_filters(
+    ADDED_SWITCH_MACHINES_FIELDS,
+    optional_support_keys=OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS,
+    ignore_support_keys=OPTIONAL_ADDED_MACHINES_FIELDS
+)
+@utils.input_validates(vlans=_check_vlans)
+def _add_switch_machine_only(
+    switch, machine, exception_when_existing=True,
+    session=None, port=None, **kwargs
+):
+    """add 
a switch machine.""" return utils.add_db_object( session, models.SwitchMachine, exception_when_existing, - switch.id, machine.id, - **switch_machine_dict + switch.id, machine.id, port=port, + **kwargs + ) + + +@utils.supported_filters( + ADDED_MACHINES_FIELDS + ADDED_SWITCH_MACHINES_FIELDS, + optional_support_keys=( + OPTIONAL_ADDED_MACHINES_FIELDS + + OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS + ), + ignore_support_keys=IGNORE_FIELDS +) +@utils.wrap_to_dict(RESP_MACHINES_FIELDS) +def _add_switch_machine( + switch_id, exception_when_existing=True, + mac=None, port=None, session=None, **kwargs +): + """Add switch machine. + + If underlying machine does not exist, also create the underlying + machine. + """ + switch = _get_switch(switch_id, session=session) + machine = _add_machine_if_not_exist( + mac=mac, session=session, **kwargs + ) + return _add_switch_machine_only( + switch, machine, + exception_when_existing, + port=port, session=session, **kwargs ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_SWITCH_MACHINE ) def add_switch_machine( switch_id, exception_when_existing=True, mac=None, user=None, session=None, **kwargs ): - """Add switch machine.""" + """Add switch machine to a switch.""" return _add_switch_machine( - session, user, switch_id, - exception_when_existing, mac=mac, **kwargs + switch_id, + exception_when_existing=exception_when_existing, + mac=mac, session=session, **kwargs ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_SWITCH_MACHINE ) +@utils.wrap_to_dict( + [ + 'switches_machines', + 'duplicate_switches_machines', + 'fail_switches_machines' + ], + switches_machines=RESP_MACHINES_FIELDS, + duplicate_switches_machines=RESP_MACHINES_FIELDS +) def add_switch_machines( exception_when_existing=False, data=[], user=None, session=None @@ -620,78 +753,71 @@ def add_switch_machines( 
switch_machines = [] duplicate_switch_machines = [] failed_switch_machines = [] - switch_ip_list = [] - switch_datas = [] + switches_mapping = {} + switch_machines_mapping = {} + switch_ips = [] for item_data in data: switch_ip = item_data['switch_ip'] - switch_ip_int = long(netaddr.IPAddress(item_data['switch_ip'])) - if switch_ip not in switch_ip_list: - switch_object = utils.get_db_object( - session, models.Switch, False, - ip_int=switch_ip_int + if switch_ip not in switches_mapping: + switch_object = _get_switch_by_ip( + switch_ip, session=session, + exception_when_missing=False ) if switch_object: - switch_ip_list.append(switch_ip) - item_data.pop('switch_ip') - switch_datas.append({ - 'switch_id': switch_object.id, - 'switch_ip': switch_ip, - 'machines': [item_data] - }) + switch_ips.append(switch_ip) + switches_mapping[switch_ip] = switch_object else: logging.error( - 'switch ip %s is not existed in switch table' % switch_ip + 'switch %s does not exist' % switch_ip ) item_data.pop('switch_ip') failed_switch_machines.append(item_data) else: - for item in switch_datas: - if switch_ip == item['switch_ip']: - item_data.pop('switch_ip') - item['machines'].append(item_data) - for switch_data in switch_datas: - switch_id = switch_data['switch_id'] - machines = switch_data['machines'] + switch_object = switches_mapping[switch_ip] + if switch_object: + item_data.pop('switch_ip') + switch_machines_mapping.setdefault( + switch_object.id, [] + ).append(item_data) + + for switch_ip in switch_ips: + switch_object = switches_mapping[switch_ip] + switch_id = switch_object.id + machines = switch_machines_mapping[switch_id] for machine in machines: mac = machine['mac'] - machine_object = utils.get_db_object( - session, models.Machine, False, - mac=mac + machine_object = _add_machine_if_not_exist( + mac=mac, session=session ) - if machine_object: - switch_machine_object = utils.get_db_object( - session, models.SwitchMachine, False, - machine_id=machine_object.id - ) - if ( - 
switch_machine_object and not( - switch_machine_object.switch_id == switch_id and - switch_machine_object.port == machine['port'] - ) - ): - logging.error('machine %s exists in switch machine %s' % ( - machine['mac'], switch_machine_object.switch_machine_id - )) - failed_switch_machines.append(machine) - elif ( - switch_machine_object and - switch_machine_object.switch_id == switch_id and - switch_machine_object.port == machine['port'] - ): + switch_machine_object = _get_switch_machine( + switch_id, machine_object.id, session=session, + exception_when_missing=False + ) + if switch_machine_object: + port = machine['port'] + switch_machine_id = switch_machine_object.switch_machine_id + exist_port = switch_machine_object.port + if exist_port != port: logging.error( - 'machine %s is dulicate, will not be override' % - machine['mac'] + 'switch machine %s exist port %s is ' + 'different from added port %s' % ( + switch_machine_id, + exist_port, port + ) + ) + failed_switch_machines.append(machine) + else: + logging.error( + 'iswitch machine %s is dulicate, ' + 'will not be override' % switch_machine_id ) duplicate_switch_machines.append(machine) - else: - switch_machines.append(_add_switch_machine( - session, user, switch_id, exception_when_existing, - **machine - )) else: - switch_machines.append(_add_switch_machine( - session, user, switch_id, exception_when_existing, - **machine + del machine['mac'] + switch_machines.append(_add_switch_machine_only( + switch_object, machine_object, + exception_when_existing, + session=session, **machine )) return { 'switches_machines': switch_machines, @@ -702,14 +828,14 @@ def add_switch_machines( @utils.supported_filters(optional_support_keys=['find_machines']) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_SWITCH_MACHINES ) @utils.wrap_to_dict(RESP_ACTION_FIELDS) -def poll_switch_machines(switch_id, user=None, session=None, **kwargs): - """poll 
switch machines.""" +def poll_switch(switch_id, user=None, session=None, **kwargs): + """poll switch to get machines.""" from compass.tasks import client as celery_client - switch = utils.get_db_object(session, models.Switch, id=switch_id) + switch = _get_switch(switch_id, session=session) celery_client.celery.send_task( 'compass.tasks.pollswitch', (user.email, switch.ip, switch.credentials) @@ -723,7 +849,7 @@ def poll_switch_machines(switch_id, user=None, session=None, **kwargs): @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_SWITCH_MACHINES ) @utils.wrap_to_dict(RESP_MACHINES_FIELDS) @@ -731,17 +857,16 @@ def get_switch_machine( switch_id, machine_id, exception_when_missing=True, user=None, session=None, **kwargs ): - """get field dict of a switch machine.""" - return utils.get_db_object( - session, models.SwitchMachine, - exception_when_missing, - switch_id=switch_id, machine_id=machine_id + """get a switch machine by switch id and machine id.""" + return _get_switch_machine( + switch_id, machine_id, session=session, + exception_when_missing=exception_when_missing ) @utils.supported_filters([]) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_LIST_SWITCH_MACHINES ) @utils.wrap_to_dict(RESP_MACHINES_FIELDS) @@ -749,40 +874,70 @@ def get_switchmachine( switch_machine_id, exception_when_missing=True, user=None, session=None, **kwargs ): - """get field dict of a switch machine.""" - return utils.get_db_object( - session, models.SwitchMachine, - exception_when_missing, switch_machine_id=switch_machine_id - ) - - -def update_switch_machine_internal( - session, switch_machine, switch_machines_fields, **kwargs -): - """Update switch machine internal.""" - switch_machine_dict = {} - machine_dict = {} - for key, value in kwargs.items(): - if key in switch_machines_fields: - 
switch_machine_dict[key] = value - else: - machine_dict[key] = value - if machine_dict: - utils.update_db_object( - session, switch_machine.machine, **machine_dict - ) - return utils.update_db_object( - session, switch_machine, **switch_machine_dict + """get a switch machine by switch_machine_id.""" + return _get_switchmachine( + switch_machine_id, session=session, + exception_when_missing=exception_when_missing ) @utils.supported_filters( - optional_support_keys=UPDATED_MACHINES_FIELDS, + optional_support_keys=( + UPDATED_MACHINES_FIELDS + PATCHED_MACHINES_FIELDS + ), + ignore_support_keys=( + UPDATED_SWITCH_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS + ) +) +def _update_machine_if_necessary( + machine, session=None, **kwargs +): + """Update machine is there is something to update.""" + utils.update_db_object( + session, machine, **kwargs + ) + + +@utils.supported_filters( + optional_support_keys=( + UPDATED_SWITCH_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS + ), + ignore_support_keys=( + UPDATED_MACHINES_FIELDS + PATCHED_MACHINES_FIELDS + ) +) +def _update_switch_machine_only(switch_machine, session=None, **kwargs): + """Update switch machine.""" + return utils.update_db_object( + session, switch_machine, **kwargs + ) + + +def _update_switch_machine( + switch_machine, session=None, **kwargs +): + """Update switch machine. + + If there are some attributes of underlying machine need to update, + also update them in underlying machine. 
+ """ + _update_machine_if_necessary( + switch_machine.machine, session=session, **kwargs + ) + return _update_switch_machine_only( + switch_machine, session=session, **kwargs + ) + + +@utils.supported_filters( + optional_support_keys=( + UPDATED_MACHINES_FIELDS + UPDATED_SWITCH_MACHINES_FIELDS + ), ignore_support_keys=IGNORE_FIELDS ) @utils.input_validates(vlans=_check_vlans) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_SWITCH_MACHINE ) @utils.wrap_to_dict(RESP_MACHINES_FIELDS) @@ -790,39 +945,43 @@ def update_switch_machine( switch_id, machine_id, user=None, session=None, **kwargs ): - """Update switch machine.""" - switch_machine = utils.get_db_object( - session, models.SwitchMachine, - switch_id=switch_id, machine_id=machine_id + """Update switch machine by switch id and machine id.""" + switch_machine = _get_switch_machine( + switch_id, machine_id, session=session ) - return update_switch_machine_internal( - session, switch_machine, - UPDATED_SWITCH_MACHINES_FIELDS, **kwargs + return _update_switch_machine( + switch_machine, + session=session, **kwargs ) @utils.supported_filters( - optional_support_keys=UPDATED_MACHINES_FIELDS, + optional_support_keys=( + UPDATED_MACHINES_FIELDS + UPDATED_SWITCH_MACHINES_FIELDS + ), ignore_support_keys=IGNORE_FIELDS ) @utils.input_validates(vlans=_check_vlans) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_SWITCH_MACHINE ) @utils.wrap_to_dict(RESP_MACHINES_FIELDS) def update_switchmachine(switch_machine_id, user=None, session=None, **kwargs): - """Update switch machine.""" - switch_machine = utils.get_db_object( - session, models.SwitchMachine, - switch_machine_id=switch_machine_id + """Update switch machine by switch_machine_id.""" + switch_machine = _get_switchmachine( + switch_machine_id, session=session ) - return update_switch_machine_internal( - 
session, switch_machine, - UPDATED_SWITCH_MACHINES_FIELDS, **kwargs + return _update_switch_machine( + switch_machine, + session=session, **kwargs ) +# replace [vlans, ipmi_credentials, tag, location] to +# [patched_vlans, patched_ipmi_credentials, patched_tag, +# patched_location] in kwargs. It tells db these fields will +# be patched. @utils.replace_filters( vlans='patched_vlans', ipmi_credentials='patched_ipmi_credentials', @@ -830,12 +989,14 @@ def update_switchmachine(switch_machine_id, user=None, session=None, **kwargs): location='patched_location' ) @utils.supported_filters( - optional_support_keys=PATCHED_MACHINES_FIELDS, + optional_support_keys=( + PATCHED_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS + ), ignore_support_keys=IGNORE_FIELDS ) @utils.input_validates(patched_vlans=_check_vlans) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_SWITCH_MACHINE ) @utils.wrap_to_dict(RESP_MACHINES_FIELDS) @@ -843,17 +1004,20 @@ def patch_switch_machine( switch_id, machine_id, user=None, session=None, **kwargs ): - """Patch switch machine.""" - switch_machine = utils.get_db_object( - session, models.SwitchMachine, - switch_id=switch_id, machine_id=machine_id + """Patch switch machine by switch_id and machine_id.""" + switch_machine = _get_switch_machine( + switch_id, machine_id, session=session ) - return update_switch_machine_internal( - session, switch_machine, - PATCHED_SWITCH_MACHINES_FIELDS, **kwargs + return _update_switch_machine( + switch_machine, + session=session, **kwargs ) +# replace [vlans, ipmi_credentials, tag, location] to +# [patched_vlans, patched_ipmi_credentials, patched_tag, +# patched_location] in kwargs. It tells db these fields will +# be patched. 
@utils.replace_filters( vlans='patched_vlans', ipmi_credentials='patched_ipmi_credentials', @@ -861,30 +1025,54 @@ def patch_switch_machine( location='patched_location' ) @utils.supported_filters( - optional_support_keys=PATCHED_MACHINES_FIELDS, + optional_support_keys=( + PATCHED_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS + ), ignore_support_keys=IGNORE_FIELDS ) @utils.input_validates(patched_vlans=_check_vlans) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_ADD_SWITCH_MACHINE ) @utils.wrap_to_dict(RESP_MACHINES_FIELDS) def patch_switchmachine(switch_machine_id, user=None, session=None, **kwargs): - """Patch switch machine.""" - switch_machine = utils.get_db_object( - session, models.SwitchMachine, - switch_machine_id=switch_machine_id + """Patch switch machine by switch_machine_id.""" + switch_machine = _get_switchmachine( + switch_machine_id, session=session ) - return update_switch_machine_internal( - session, switch_machine, - PATCHED_SWITCH_MACHINES_FIELDS, **kwargs + return _update_switch_machine( + switch_machine, + session=session, **kwargs ) +def _del_switch_machine( + switch_machine, session=None +): + """Delete switch machine. + + If this is the last switch machine associated to underlying machine, + add a switch machine record to default switch to make the machine + searchable. 
+ """ + default_switch = _get_switch_by_ip( + setting.DEFAULT_SWITCH_IP, session=session + ) + machine = switch_machine.machine + if len(machine.switch_machines) <= 1: + utils.add_db_object( + session, models.SwitchMachine, + False, + default_switch.id, machine.id, + port=switch_machine.port + ) + return utils.del_db_object(session, switch_machine) + + @utils.supported_filters() @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEL_SWITCH_MACHINE ) @utils.wrap_to_dict(RESP_MACHINES_FIELDS) @@ -893,52 +1081,24 @@ def del_switch_machine( session=None, **kwargs ): """Delete switch machine by switch id and machine id.""" - switch_machine = utils.get_db_object( - session, models.SwitchMachine, - switch_id=switch_id, machine_id=machine_id + switch_machine = _get_switch_machine( + switch_id, machine_id, session=session ) - default_switch_ip_int = long(netaddr.IPAddress(setting.DEFAULT_SWITCH_IP)) - default_switch = utils.get_db_object( - session, models.Switch, - ip_int=default_switch_ip_int - ) - machine = switch_machine.machine - if len(machine.switch_machines) <= 1: - utils.add_db_object( - session, models.SwitchMachine, - False, - default_switch.id, machine.id, - port=switch_machine.port - ) - return utils.del_db_object(session, switch_machine) + return _del_switch_machine(switch_machine, session=session) @utils.supported_filters() @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_DEL_SWITCH_MACHINE ) @utils.wrap_to_dict(RESP_MACHINES_FIELDS) def del_switchmachine(switch_machine_id, user=None, session=None, **kwargs): """Delete switch machine by switch_machine_id.""" - switch_machine = utils.get_db_object( - session, models.SwitchMachine, - switch_machine_id=switch_machine_id + switch_machine = _get_switchmachine( + switch_machine_id, session=session ) - default_switch_ip_int = 
long(netaddr.IPAddress(setting.DEFAULT_SWITCH_IP)) - default_switch = utils.get_db_object( - session, models.Switch, - ip_int=default_switch_ip_int - ) - machine = switch_machine.machine - if len(machine.switch_machines) <= 1: - utils.add_db_object( - session, models.SwitchMachine, - False, - default_switch.id, machine.id, - port=switch_machine.port - ) - return utils.del_db_object(session, switch_machine) + return _del_switch_machine(switch_machine, session=session) @utils.supported_filters( @@ -946,35 +1106,68 @@ def del_switchmachine(switch_machine_id, user=None, session=None, **kwargs): optional_support_keys=UPDATED_SWITCH_MACHINES_FIELDS, ignore_support_keys=IGNORE_FIELDS ) -def _update_machine_internal(session, switch_id, machine_id, **kwargs): - utils.add_db_object( - session, models.SwitchMachine, False, - switch_id, machine_id, **kwargs +def _add_machine_to_switch( + switch_id, machine_id, session=None, **kwargs +): + """Add machine to switch.""" + switch = _get_switch(switch_id, session=session) + from compass.db.api import machine as machine_api + machine = machine_api.get_machine_internal( + machine_id, session=session + ) + _add_switch_machine_only( + switch, machine, False, **kwargs ) -def _add_machines(session, switch, machines): +def _add_machines(switch, machines, session=None): + """Add machines to switch. + + Args: + machines: list of dict which contains attributes to + add machine to switch. + + machines example: + {{'machine_id': 1, 'port': 'ae20'}] + """ for machine in machines: - _update_machine_internal( - session, switch.id, **machine + _add_machine_to_switch( + switch.id, session=session, **machine ) -def _remove_machines(session, switch, machines): +def _remove_machines(switch, machines, session=None): + """Remove machines from switch. + + Args: + machines: list of machine id. 
+ + machines example: + [1,2] + """ utils.del_db_objects( session, models.SwitchMachine, switch_id=switch.id, machine_id=machines ) -def _set_machines(session, switch, machines): +def _set_machines(switch, machines, session=None): + """Reset machines to a switch. + + Args: + machines: list of dict which contains attributes to + add machine to switch. + + machines example: + {{'machine_id': 1, 'port': 'ae20'}] + """ utils.del_db_objects( session, models.SwitchMachine, switch_id=switch.id ) for switch_machine in machines: - _update_machine_internal( - session, switch.id, **switch_machine + _add_machine_to_switch( + switch.id, session=session, **switch_machine ) @@ -984,7 +1177,7 @@ def _set_machines(session, switch, machines): ] ) @database.run_in_session() -@user_api.check_user_permission_in_session( +@user_api.check_user_permission( permission.PERMISSION_UPDATE_SWITCH_MACHINES ) @utils.wrap_to_dict(RESP_MACHINES_FIELDS) @@ -992,21 +1185,18 @@ def update_switch_machines( switch_id, add_machines=[], remove_machines=[], set_machines=None, user=None, session=None, **kwargs ): - """update switch machines.""" - switch = utils.get_db_object( - session, models.Switch, id=switch_id - ) + """update switch's machines""" + switch = _get_switch(switch_id, session=session) if remove_machines: _remove_machines( - session, switch, remove_machines + switch, remove_machines, session=session ) if add_machines: _add_machines( - session, switch, add_machines + switch, add_machines, session=session ) if set_machines is not None: _set_machines( - session, switch, - set_machines + switch, set_machines, session=session ) return switch.switch_machines diff --git a/compass/db/api/user.py b/compass/db/api/user.py index 40b3bfb4..db039eba 100644 --- a/compass/db/api/user.py +++ b/compass/db/api/user.py @@ -16,6 +16,7 @@ import datetime import functools import logging +import re from flask.ext.login import UserMixin @@ -53,36 +54,14 @@ PERMISSION_RESP_FIELDS = [ def _check_email(email): + 
"""Check email is email format.""" if '@' not in email: raise exception.InvalidParameter( 'there is no @ in email address %s.' % email ) -def get_user_internal(session, exception_when_missing=True, **kwargs): - """internal function used only by other db.api modules.""" - return utils.get_db_object( - session, models.User, exception_when_missing, **kwargs - ) - - -def add_user_internal( - session, exception_when_existing=True, - email=None, **kwargs -): - """internal function used only by other db.api modules.""" - user = utils.add_db_object( - session, models.User, - exception_when_existing, email, - **kwargs) - _add_user_permissions( - session, user, - name=setting.COMPASS_DEFAULT_PERMISSIONS - ) - return user - - -def _check_user_permission(session, user, permission): +def _check_user_permission(user, permission, session=None): """Check user has permission.""" if not user: logging.info('empty user means the call is from internal') @@ -102,14 +81,19 @@ def _check_user_permission(session, user, permission): ) -def check_user_permission_in_session(permission): +def check_user_permission(permission): + """Decorator to check user having permission.""" def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): - if 'user' in kwargs.keys() and 'session' in kwargs.keys(): - session = kwargs['session'] - user = kwargs['user'] - _check_user_permission(session, user, permission) + user = kwargs.get('user') + if user is not None: + session = kwargs.get('session') + if session is None: + raise exception.DatabaseException( + 'wrapper check_user_permission does not run in session' + ) + _check_user_permission(user, permission, session=session) return func(*args, **kwargs) else: return func(*args, **kwargs) @@ -118,11 +102,12 @@ def check_user_permission_in_session(permission): def check_user_admin(): + """Decorator to check user is admin.""" def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): - if 'user' in kwargs.keys(): - user = 
kwargs['user'] + user = kwargs.get('user') + if user is not None: if not user.is_admin: raise exception.Forbidden( 'User %s is not admin.' % ( @@ -137,48 +122,56 @@ def check_user_admin(): def check_user_admin_or_owner(): + """Decorator to check user is admin or the owner of the resource.""" def decorator(func): @functools.wraps(func) def wrapper(user_id, *args, **kwargs): - if 'user' in kwargs.keys(): - user = kwargs['user'] - if not user.is_admin and user.id != user_id: + user = kwargs.get('user') + if user is not None: + session = kwargs.get('session') + if session is None: + raise exception.DatabaseException( + 'wrapper check_user_admin_or_owner is ' + 'not called in session' + ) + check_user = _get_user(user_id, session=session) + if not user.is_admin and user.id != check_user.id: raise exception.Forbidden( - 'User %s is not admin or the owner of user id %s.' % ( - user.email, user_id + 'User %s is not admin or the owner of user %s.' % ( + user.email, check_user.email ) ) - return func(user_id, *args, **kwargs) + + return func( + user_id, *args, **kwargs + ) else: - return func(user_id, *args, **kwargs) + return func( + user_id, *args, **kwargs + ) return wrapper return decorator -def check_user_permission_internal(session, user, permission): - """internal function only used by other db.api modules.""" - _check_user_permission(session, user, permission) - - -def _add_user_permissions(session, user, **permission_filters): +def _add_user_permissions(user, session=None, **permission_filters): """add permissions to a user.""" from compass.db.api import permission as permission_api - for api_permission in permission_api.list_permissions_internal( - session, **permission_filters + for api_permission in permission_api.list_permissions( + session=session, **permission_filters ): utils.add_db_object( session, models.UserPermission, False, - user.id, api_permission.id + user.id, api_permission['id'] ) -def _remove_user_permissions(session, user, **permission_filters): - 
"""remove permissions to a user.""" +def _remove_user_permissions(user, session=None, **permission_filters): + """remove permissions from a user.""" from compass.db.api import permission as permission_api permission_ids = [ - api_permission.id - for api_permission in permission_api.list_permissions_internal( - session, **permission_filters + api_permission['id'] + for api_permission in permission_api.list_permissions( + session=session, **permission_filters ) ] utils.del_db_objects( @@ -187,7 +180,7 @@ def _remove_user_permissions(session, user, **permission_filters): ) -def _set_user_permissions(session, user, **permission_filters): +def _set_user_permissions(user, session=None, **permission_filters): """set permissions to a user.""" utils.del_db_objects( session, models.UserPermission, @@ -197,6 +190,8 @@ def _set_user_permissions(session, user, **permission_filters): class UserWrapper(UserMixin): + """Wrapper class provided to flask.""" + def __init__( self, id, email, crypted_password, active=True, is_admin=False, @@ -241,6 +236,7 @@ class UserWrapper(UserMixin): @database.run_in_session() def get_user_object(email, session=None, **kwargs): + """get user and convert to UserWrapper object.""" user = utils.get_db_object( session, models.User, False, email=email ) @@ -253,8 +249,13 @@ def get_user_object(email, session=None, **kwargs): return UserWrapper(**user_dict) -@database.run_in_session() +@database.run_in_session(exception_when_in_session=False) def get_user_object_from_token(token, session=None): + """Get user from token and convert to UserWrapper object. + + ::note: + get_user_object_from_token may be called in session. 
+ """ expire_timestamp = { 'ge': datetime.datetime.now() } @@ -266,8 +267,8 @@ def get_user_object_from_token(token, session=None): raise exception.Unauthorized( 'invalid user token: %s' % token ) - user_dict = utils.get_db_object( - session, models.User, id=user_token.user_id + user_dict = _get_user( + user_token.user_id, session=session ).to_dict() user_dict['token'] = token expire_timestamp = user_token.expire_timestamp @@ -310,17 +311,29 @@ def clean_user_token(token, user=None, session=None): ) +def _get_user(user_id, session=None, **kwargs): + """Get user object by user id.""" + if isinstance(user_id, (int, long)): + return utils.get_db_object( + session, models.User, id=user_id, **kwargs + ) + raise exception.InvalidParameter( + 'user id %s type is not int compatible' % user_id + ) + + @utils.supported_filters() -@check_user_admin_or_owner() @database.run_in_session() +@check_user_admin_or_owner() @utils.wrap_to_dict(RESP_FIELDS) def get_user( user_id, exception_when_missing=True, user=None, session=None, **kwargs ): - """get field dict of a user.""" - return utils.get_db_object( - session, models.User, exception_when_missing, id=user_id + """get a user.""" + return _get_user( + user_id, session=session, + exception_when_missing=exception_when_missing ) @@ -331,20 +344,21 @@ def get_current_user( exception_when_missing=True, user=None, session=None, **kwargs ): - """get field dict of a user.""" - return utils.get_db_object( - session, models.User, exception_when_missing, id=user.id + """get current user.""" + return _get_user( + user.id, session=session, + exception_when_missing=exception_when_missing ) @utils.supported_filters( optional_support_keys=SUPPORTED_FIELDS ) -@check_user_admin() @database.run_in_session() +@check_user_admin() @utils.wrap_to_dict(RESP_FIELDS) def list_users(user=None, session=None, **filters): - """List fields of all users by some fields.""" + """List all users.""" return utils.list_db_objects( session, models.User, **filters ) @@ 
-356,27 +370,34 @@ def list_users(user=None, session=None, **filters): optional_support_keys=OPTIONAL_ADDED_FIELDS, ignore_support_keys=IGNORE_FIELDS ) -@check_user_admin() @database.run_in_session() +@check_user_admin() @utils.wrap_to_dict(RESP_FIELDS) def add_user( exception_when_existing=True, user=None, - session=None, **kwargs + session=None, email=None, **kwargs ): """Create a user and return created user object.""" - return add_user_internal( - session, exception_when_existing, **kwargs + add_user = utils.add_db_object( + session, models.User, + exception_when_existing, email, + **kwargs) + _add_user_permissions( + add_user, + session=session, + name=setting.COMPASS_DEFAULT_PERMISSIONS ) + return add_user @utils.supported_filters() -@check_user_admin() @database.run_in_session() +@check_user_admin() @utils.wrap_to_dict(RESP_FIELDS) def del_user(user_id, user=None, session=None, **kwargs): """delete a user and return the deleted user object.""" - user = utils.get_db_object(session, models.User, id=user_id) - return utils.del_db_object(session, user) + del_user = _get_user(user_id, session=session) + return utils.del_db_object(session, del_user) @utils.supported_filters( @@ -388,13 +409,13 @@ def del_user(user_id, user=None, session=None, **kwargs): @utils.wrap_to_dict(RESP_FIELDS) def update_user(user_id, user=None, session=None, **kwargs): """Update a user and return the updated user object.""" - user = utils.get_db_object( - session, models.User, id=user_id + update_user = _get_user( + user_id, session=session, ) allowed_fields = set() if user.is_admin: allowed_fields |= set(ADMIN_UPDATED_FIELDS) - if user.id == user_id: + if user.id == update_user.id: allowed_fields |= set(SELF_UPDATED_FIELDS) unsupported_fields = set(kwargs) - allowed_fields if unsupported_fields: @@ -404,47 +425,67 @@ def update_user(user_id, user=None, session=None, **kwargs): user.email, user.email, unsupported_fields ) ) - return utils.update_db_object(session, user, **kwargs) + 
return utils.update_db_object(session, update_user, **kwargs) @utils.supported_filters(optional_support_keys=PERMISSION_SUPPORTED_FIELDS) -@check_user_admin_or_owner() @database.run_in_session() +@check_user_admin_or_owner() @utils.wrap_to_dict(PERMISSION_RESP_FIELDS) -def get_permissions(user_id, user=None, session=None, **kwargs): +def get_permissions( + user_id, user=None, exception_when_missing=True, + session=None, **kwargs +): """List permissions of a user.""" + get_user = _get_user( + user_id, session=session, + exception_when_missing=exception_when_missing + ) return utils.list_db_objects( - session, models.UserPermission, user_id=user_id, **kwargs + session, models.UserPermission, user_id=get_user.id, **kwargs + ) + + +def _get_permission(user_id, permission_id, session=None, **kwargs): + """Get user permission by user id and permission id.""" + user = _get_user(user_id, session=session) + from compass.db.api import permission as permission_api + permission = permission_api.get_permission_internal( + permission_id, session=session + ) + return utils.get_db_object( + session, models.UserPermission, + user_id=user.id, permission_id=permission.id, + **kwargs ) @utils.supported_filters() -@check_user_admin_or_owner() @database.run_in_session() +@check_user_admin_or_owner() @utils.wrap_to_dict(PERMISSION_RESP_FIELDS) def get_permission( user_id, permission_id, exception_when_missing=True, user=None, session=None, **kwargs ): - """Get a specific user permission.""" - return utils.get_db_object( - session, models.UserPermission, - exception_when_missing, - user_id=user_id, permission_id=permission_id, + """Get a permission of a user.""" + return _get_permission( + user_id, permission_id, + exception_when_missing=exception_when_missing, + session=session, **kwargs ) @utils.supported_filters() -@check_user_admin_or_owner() @database.run_in_session() +@check_user_admin_or_owner() @utils.wrap_to_dict(PERMISSION_RESP_FIELDS) def del_permission(user_id, permission_id, 
user=None, session=None, **kwargs): - """Delete a specific user permission.""" - user_permission = utils.get_db_object( - session, models.UserPermission, - user_id=user_id, permission_id=permission_id, - **kwargs + """Delete a permission from a user.""" + user_permission = _get_permission( + user_id, permission_id, + session=session, **kwargs ) return utils.del_db_object(session, user_permission) @@ -453,21 +494,27 @@ def del_permission(user_id, permission_id, user=None, session=None, **kwargs): PERMISSION_ADDED_FIELDS, ignore_support_keys=IGNORE_FIELDS ) -@check_user_admin() @database.run_in_session() +@check_user_admin() @utils.wrap_to_dict(PERMISSION_RESP_FIELDS) def add_permission( - user_id, exception_when_missing=True, - permission_id=None, user=None, session=None + user_id, permission_id=None, exception_when_existing=True, + user=None, session=None ): - """Add an user permission.""" + """Add a permission to a user.""" + get_user = _get_user(user_id, session=session) + from compass.db.api import permission as permission_api + get_permission = permission_api.get_permission_internal( + permission_id, session=session + ) return utils.add_db_object( - session, models.UserPermission, exception_when_missing, - user_id, permission_id + session, models.UserPermission, exception_when_existing, + get_user.id, get_permission.id ) def _get_permission_filters(permission_ids): + """Helper function to filter permissions.""" if permission_ids == 'all': return {} else: @@ -479,28 +526,28 @@ def _get_permission_filters(permission_ids): 'add_permissions', 'remove_permissions', 'set_permissions' ] ) -@check_user_admin() @database.run_in_session() +@check_user_admin() @utils.wrap_to_dict(PERMISSION_RESP_FIELDS) def update_permissions( user_id, add_permissions=[], remove_permissions=[], set_permissions=None, user=None, session=None, **kwargs ): """update user permissions.""" - user = utils.get_db_object(session, models.User, id=user_id) + update_user = _get_user(user_id, 
session=session) if remove_permissions: _remove_user_permissions( - session, user, + update_user, session=session, **_get_permission_filters(remove_permissions) ) if add_permissions: _add_user_permissions( - session, user, + update_user, session=session, **_get_permission_filters(add_permissions) ) if set_permissions is not None: _set_user_permissions( - session, user, + update_user, session=session, **_get_permission_filters(set_permissions) ) - return user.user_permissions + return update_user.user_permissions diff --git a/compass/db/api/user_log.py b/compass/db/api/user_log.py index f58693b0..70de9db4 100644 --- a/compass/db/api/user_log.py +++ b/compass/db/api/user_log.py @@ -36,14 +36,15 @@ def log_user_action(user_id, action, session=None): @utils.supported_filters(optional_support_keys=USER_SUPPORTED_FIELDS) -@user_api.check_user_admin_or_owner() @database.run_in_session() +@user_api.check_user_admin_or_owner() @utils.wrap_to_dict(RESP_FIELDS) def list_user_actions(user_id, user=None, session=None, **filters): - """list user actions.""" + """list user actions of a user.""" + list_user = user_api.get_user(user_id, user=user, session=session) return utils.list_db_objects( session, models.UserLog, order_by=['timestamp'], - user_id=user_id, **filters + user_id=list_user['id'], **filters ) @@ -52,29 +53,30 @@ def list_user_actions(user_id, user=None, session=None, **filters): @database.run_in_session() @utils.wrap_to_dict(RESP_FIELDS) def list_actions(user=None, session=None, **filters): - """list actions.""" + """list actions of all users.""" return utils.list_db_objects( session, models.UserLog, order_by=['timestamp'], **filters ) @utils.supported_filters() -@user_api.check_user_admin_or_owner() @database.run_in_session() +@user_api.check_user_admin_or_owner() @utils.wrap_to_dict(RESP_FIELDS) def del_user_actions(user_id, user=None, session=None, **filters): - """delete user actions.""" + """delete actions of a user.""" + del_user = user_api.get_user(user_id, 
user=user, session=session) return utils.del_db_objects( - session, models.UserLog, user_id=user_id, **filters + session, models.UserLog, user_id=del_user['id'], **filters ) @utils.supported_filters() -@user_api.check_user_admin() @database.run_in_session() +@user_api.check_user_admin() @utils.wrap_to_dict(RESP_FIELDS) def del_actions(user=None, session=None, **filters): - """delete actions.""" + """delete actions of all users.""" return utils.del_db_objects( session, models.UserLog, **filters ) diff --git a/compass/db/api/utils.py b/compass/db/api/utils.py index 38fa6b88..a44f26ea 100644 --- a/compass/db/api/utils.py +++ b/compass/db/api/utils.py @@ -30,7 +30,10 @@ from compass.utils import util def model_query(session, model): - """model query.""" + """model query. + + Return sqlalchemy query object. + """ if not issubclass(model, models.BASE): raise exception.DatabaseException("model should be sublass of BASE!") @@ -38,6 +41,23 @@ def model_query(session, model): def _default_list_condition_func(col_attr, value, condition_func): + """The default condition func for a list of data. + + Given the condition func for single item of data, this function + wrap the condition_func and return another condition func using + or_ to merge the conditions of each single item to deal with a + list of data item. + + Args: + col_attr: the colomn name + value: the column value need to be compared. + condition_func: the sqlalchemy condition object like == + + Examples: + col_attr is name, value is ['a', 'b', 'c'] and + condition_func is ==, the returned condition is + name == 'a' or name == 'b' or name == 'c' + """ conditions = [] for sub_value in value: condition = condition_func(col_attr, sub_value) @@ -50,6 +70,11 @@ def _default_list_condition_func(col_attr, value, condition_func): def _one_item_list_condition_func(col_attr, value, condition_func): + """The wrapper condition func to deal with one item data list. 
+ + For simplification, it is used to reduce generating too complex + sql conditions. + """ if value: return condition_func(col_attr, value[0]) else: @@ -61,6 +86,7 @@ def _model_condition_func( item_condition_func, list_condition_func=_default_list_condition_func ): + """Return sql condition based on value type.""" if isinstance(value, list): if not value: return None @@ -74,6 +100,7 @@ def _model_condition_func( def _between_condition(col_attr, value): + """Return sql range condition.""" if value[0] is not None and value[1] is not None: return col_attr.between(value[0], value[1]) if value[0] is not None: @@ -84,6 +111,7 @@ def _between_condition(col_attr, value): def model_order_by(query, model, order_by): + """append order by into sql query model.""" if not order_by: return query order_by_cols = [] @@ -107,11 +135,39 @@ def model_order_by(query, model, order_by): def _model_condition(col_attr, value): + """Generate condition for one column. + + Example for col_attr is name: + value is 'a': name == 'a' + value is ['a']: name == 'a' + value is ['a', 'b']: name == 'a' or name == 'b' + value is {'eq': 'a'}: name == 'a' + value is {'lt': 'a'}: name < 'a' + value is {'le': 'a'}: name <= 'a' + value is {'gt': 'a'}: name > 'a' + value is {'ge': 'a'}: name >= 'a' + value is {'ne': 'a'}: name != 'a' + value is {'in': ['a', 'b']}: name in ['a', 'b'] + value is {'notin': ['a', 'b']}: name not in ['a', 'b'] + value is {'startswith': 'abc'}: name like 'abc%' + value is {'endswith': 'abc'}: name like '%abc' + value is {'like': 'abc'}: name like '%abc%' + value is {'between': ('a', 'c')}: name >= 'a' and name <= 'c' + value is [{'lt': 'a'}]: name < 'a' + value is [{'lt': 'a'}, {'gt': c'}]: name < 'a' or name > 'c' + value is {'lt': 'c', 'gt': 'a'}: name > 'a' and name < 'c' + + If value is a list, the condition is the or relationship among + conditions of each item. + If value is dict and there are multi keys in the dict, the relationship + is and conditions of each key. 
+ Otherwise the condition is to compare the column with the value. + """ if isinstance(value, list): basetype_values = [] composite_values = [] for item in value: - if util.is_instance(item, [list, dict]): + if isinstance(item, (list, dict)): composite_values.append(item) else: basetype_values.append(item) @@ -209,6 +265,7 @@ def _model_condition(col_attr, value): def model_filter(query, model, **filters): + """Append conditons to query for each possible column.""" for key, value in filters.items(): if isinstance(key, basestring): if hasattr(model, key): @@ -224,6 +281,10 @@ def model_filter(query, model, **filters): def replace_output(**output_mapping): + """Decorator to recursively relace output by output mapping. + + The replacement detail is described in _replace_output. + """ def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): @@ -235,12 +296,34 @@ def replace_output(**output_mapping): def _replace_output(data, **output_mapping): - """Helper to replace output data.""" + """Helper to replace output data. + + Example: + data = {'a': 'hello'} + output_mapping = {'a': 'b'} + returns: {'b': 'hello'} + + data = {'a': {'b': 'hello'}} + output_mapping = {'a': 'b'} + returns: {'b': {'b': 'hello'}} + + data = {'a': {'b': 'hello'}} + output_mapping = {'a': {'b': 'c'}} + returns: {'a': {'c': 'hello'}} + + data = [{'a': 'hello'}, {'a': 'hi'}] + output_mapping = {'a': 'b'} + returns: [{'b': 'hello'}, {'b': 'hi'}] + """ if isinstance(data, list): return [ _replace_output(item, **output_mapping) for item in data ] + if not isinstance(data, dict): + raise exception.InvalidResponse( + '%s type is not dict' % data + ) info = {} for key, value in data.items(): if key in output_mapping: @@ -257,7 +340,23 @@ def _replace_output(data, **output_mapping): def get_wrapped_func(func): - """Get wrapped function instance.""" + """Get wrapped function instance. 
+ + Example: + @dec1 + @dec2 + myfunc(*args, **kwargs) + + get_wrapped_func(myfunc) returns function object with + following attributes: + __name__: 'myfunc' + args: args + kwargs: kwargs + otherwise myfunc is function object with following attributes: + __name__: partial object ... + args: ... + kwargs: ... + """ if func.func_closure: for closure in func.func_closure: if isfunction(closure.cell_contents): @@ -268,6 +367,10 @@ def get_wrapped_func(func): def wrap_to_dict(support_keys=[], **filters): + """Decrator to convert returned object to dict. + + The details is decribed in _wrapper_dict. + """ def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): @@ -279,7 +382,31 @@ def wrap_to_dict(support_keys=[], **filters): def _wrapper_dict(data, support_keys, **filters): - """Helper for warpping db object into dictionary.""" + """Helper for warpping db object into dictionary. + + If data is list, convert it to a list of dict + If data is Base model, convert it to dict + for the data as a dict, filter it with the supported keys. + For each filter_key, filter_value in filters, also filter + data[filter_key] by filter_value recursively if it exists. 
+ + Example: + data is models.Switch, it will be converted to + { + 'id': 1, 'ip': '10.0.0.1', 'ip_int': 123456, + 'credentials': {'version': 2, 'password': 'abc'} + } + Then if support_keys are ['id', 'ip', 'credentials'], + it will be filtered to { + 'id': 1, 'ip': '10.0.0.1', + 'credentials': {'version': 2, 'password': 'abc'} + } + Then if filters is {'credentials': ['version']}, + it will be filtered to { + 'id': 1, 'ip': '10.0.0.1', + 'credentials': {'version': 2} + } + """ logging.debug( 'wrap dict %s by support_keys=%s filters=%s', data, support_keys, filters @@ -296,32 +423,46 @@ def _wrapper_dict(data, support_keys, **filters): 'response %s type is not dict' % data ) info = {} - for key in support_keys: - if key in data: - if key in filters: - filter_keys = filters[key] - if isinstance(filter_keys, dict): - info[key] = _wrapper_dict( - data[key], filter_keys.keys(), - **filter_keys - ) + try: + for key in support_keys: + if key in data and data[key] is not None: + if key in filters: + filter_keys = filters[key] + if isinstance(filter_keys, dict): + info[key] = _wrapper_dict( + data[key], filter_keys.keys(), + **filter_keys + ) + else: + info[key] = _wrapper_dict( + data[key], filter_keys + ) else: - info[key] = _wrapper_dict( - data[key], filter_keys - ) - else: - info[key] = data[key] - return info + info[key] = data[key] + return info + except Exception as error: + logging.exception(error) + raise error -def replace_input_types(**kwarg_mapping): +def replace_filters(**kwarg_mapping): + """Decorator to replace kwargs. + + Examples: + kwargs: {'a': 'b'}, kwarg_mapping: {'a': 'c'} + replaced kwargs to decorated func: + {'c': 'b'} + + replace_filters is used to replace caller's input + to make it understandable by models.py. 
+ """ def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): replaced_kwargs = {} for key, value in kwargs.items(): if key in kwarg_mapping: - replaced_kwargs[key] = kwarg_mapping[key](value) + replaced_kwargs[kwarg_mapping[key]] = value else: replaced_kwargs[key] = value return func(*args, **replaced_kwargs) @@ -329,52 +470,115 @@ def replace_input_types(**kwarg_mapping): return decorator -def replace_filters(**filter_mapping): - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **filters): - replaced_filters = {} - for key, value in filters.items(): - if key in filter_mapping: - replaced_filters[filter_mapping[key]] = value - else: - replaced_filters[key] = value - return func(*args, **replaced_filters) - return wrapper - return decorator - - def supported_filters( support_keys=[], optional_support_keys=[], ignore_support_keys=[], ): + """Decorator to check kwargs keys. + + keys in kwargs and in ignore_support_keys will be removed. + If any unsupported keys found, a InvalidParameter + exception raises. + + Args: + support_keys: keys that must exist. + optional_support_keys: keys that may exist. + ignore_support_keys: keys should be ignored. + + Assumption: args without default value is supposed to exist. + You can add them in support_keys or not but we will make sure + it appears when we call the decorated function. + We do best match on both args and kwargs to make sure if the + key appears or not. 
+ + Examples: + decorated func: func(a, b, c=3, d=4, **kwargs) + + support_keys=['e'] and call func(e=5): + raises: InvalidParameter: missing declared arg + support_keys=['e'] and call func(1,2,3,4,5,e=6): + raises: InvalidParameter: caller sending more args + support_keys=['e'] and call func(1,2): + raises: InvalidParameter: supported keys ['e'] missing + support_keys=['d', 'e'] and call func(1,2,e=3): + raises: InvalidParameter: supported keys ['d'] missing + support_keys=['d', 'e'] and call func(1,2,d=4, e=3): + passed + support_keys=['d'], optional_support_keys=['e'] + and call func(1,2, d=3): + passed + support_keys=['d'], optional_support_keys=['e'] + and call func(1,2, d=3, e=4, f=5): + raises: InvalidParameter: unsupported keys ['f'] + support_keys=['d'], optional_support_keys=['e'], + ignore_support_keys=['f'] + and call func(1,2, d=3, e=4, f=5): + passed to decorated keys: func(1,2, d=3, e=4) + """ def decorator(func): @functools.wraps(func) def wrapper(*args, **filters): wrapped_func = get_wrapped_func(func) argspec = inspect.getargspec(wrapped_func) wrapped_args = argspec.args + args_defaults = argspec.defaults + # wrapped_must_args are positional args caller must pass in. + if args_defaults: + wrapped_must_args = wrapped_args[:-len(args_defaults)] + else: + wrapped_must_args = wrapped_args[:] + # make sure any positional args without default value in + # decorated function should appear in args or filters. + if len(args) < len(wrapped_must_args): + remain_args = wrapped_must_args[len(args):] + for remain_arg in remain_args: + if remain_arg not in filters: + raise exception.InvalidParameter( + 'function missing declared arg %s ' + 'while caller sends args %s' % ( + remain_arg, args + ) + ) + # make sure args should be no more than positional args + # declared in decorated function. 
+ if len(args) > len(wrapped_args): + raise exception.InvalidParameter( + 'function definition args %s while the caller ' + 'sends args %s' % ( + wrapped_args, args + ) + ) + # exist_args are positional args caller has given. + exist_args = dict(zip(wrapped_args, args)).keys() must_support_keys = set(support_keys) all_support_keys = must_support_keys | set(optional_support_keys) - filter_keys = set(filters) - set(wrapped_args) - wrapped_support_keys = set(filters) | set(wrapped_args) + wrapped_supported_keys = set(filters) | set(exist_args) unsupported_keys = ( - filter_keys - all_support_keys - set(ignore_support_keys) + set(filters) - set(wrapped_args) - + all_support_keys - set(ignore_support_keys) ) + # unsupported_keys are the keys that are not in support_keys, + # optional_support_keys, ignore_support_keys and are not passed in + # by positional args. It means the decorated function may + # not understand these parameters. if unsupported_keys: raise exception.InvalidParameter( - 'filter keys %s are not supported' % str( - list(unsupported_keys) + 'filter keys %s are not supported for %s' % ( + list(unsupported_keys), wrapped_func ) ) - missing_keys = must_support_keys - wrapped_support_keys + # missing_keys are the keys that must exist but missing in + # both positional args or kwargs. + missing_keys = must_support_keys - wrapped_supported_keys if missing_keys: raise exception.InvalidParameter( - 'filter keys %s not found' % str( - list(missing_keys) + 'filter keys %s not found for %s' % ( + list(missing_keys), wrapped_func ) ) + # We filter kwargs to eliminate ignore_support_keys in kwargs + # passed to decorated function. filtered_filters = dict([ (key, value) for key, value in filters.items() @@ -385,61 +589,198 @@ def supported_filters( return decorator -def _obj_equal(check, obj): +def input_filters( + **filters +): + """Decorator to filter kwargs. 
+ + For key in kwargs, if the key exists and filters + and the return of call filters[key] is False, the key + will be removed from kwargs. + + The function definition of filters[key] is + func(value, *args, **kwargs) compared with decorated + function func(*args, **kwargs) + + The function is used to filter kwargs in case some + kwargs should be removed conditionally depends on the + related filters. + + Examples: + filters={'a': func(value, *args, **kwargs)} + @input_filters(**filters) + decorated_func(*args, **kwargs) + func returns False. + Then when call decorated_func(a=1, b=2) + it will be actually called the decorated func with + b=2. a=1 will be removed since it does not pass filtering. + """ + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + filtered_kwargs = {} + for key, value in kwargs.items(): + if key in filters: + if filters[key](value, *args, **kwargs): + filtered_kwargs[key] = value + else: + logging.debug( + 'ignore filtered input key %s' % key + ) + else: + filtered_kwargs[key] = value + return func(*args, **filtered_kwargs) + return wrapper + return decorator + + +def _obj_equal_or_subset(check, obj): + """Used by output filter to check if obj is in check.""" if check == obj: return True if not issubclass(obj.__class__, check.__class__): return False if isinstance(obj, dict): - return _dict_equal(check, obj) + return _dict_equal_or_subset(check, obj) elif isinstance(obj, list): - return _list_equal(check, obj) + return _list_equal_or_subset(check, obj) else: return False -def _list_equal(check_list, obj_list): +def _list_equal_or_subset(check_list, obj_list): + """Used by output filter to check if obj_list is in check_list""" + if not isinstance(check_list, list): + return False return set(check_list).issubset(set(obj_list)) -def _dict_equal(check_dict, obj_dict): +def _dict_equal_or_subset(check_dict, obj_dict): + """Used by output filter to check if obj_dict in check_dict.""" + if not isinstance(check_dict, 
dict): + return False for key, value in check_dict.items(): if ( key not in obj_dict or - not _obj_equal(check_dict[key], obj_dict[key]) + not _obj_equal_or_subset(check_dict[key], obj_dict[key]) ): return False return True def general_filter_callback(general_filter, obj): - if 'resp_eq' in general_filter: - return _obj_equal(general_filter['resp_eq'], obj) - elif 'resp_in' in general_filter: - in_filters = general_filter['resp_in'] - if not in_filters: + """General filter function to filter output. + + Since some fields stored in database is json encoded and + we want to do the deep match for the json encoded field to + do the filtering in some cases, we introduces the output_filters + and general_filter_callback to deal with this kind of cases. + + We do special treatment for key 'resp_eq' to check if + obj is the recursively subset of general_filter['resp_eq'] + + + Example: + obj: 'b' + general_filter: {} + returns: True + + obj: 'b' + general_filter: {'resp_in': ['a', 'b']} + returns: True + + obj: 'b' + general_filter: {'resp_in': ['a']} + returns: False + + obj: 'b' + general_filter: {'resp_eq': 'b'} + returns: True + + obj: 'b' + general_filter: {'resp_eq': 'a'} + returns: False + + obj: 'b' + general_filter: {'resp_range': ('a', 'c')} + returns: True + + obj: 'd' + general_filter: {'resp_range': ('a', 'c')} + returns: False + + If there are multi keys in dict, the output is filtered + by and relationship. + + If the general_filter is a list, the output is filtered + by or relationship. 
+ + Supported general filters: [ + 'resp_eq', 'resp_in', 'resp_lt', + 'resp_le', 'resp_gt', 'resp_ge', + 'resp_match', 'resp_range' + ] + """ + if isinstance(general_filter, list): + if not general_filter: return True - for in_filer in in_filters: - if _obj_equal(in_filer, obj): - return True - return False - elif 'resp_lt' in general_filter: - return obj < general_filter['resp_lt'] - elif 'resp_le' in general_filter: - return obj <= general_filter['resp_le'] - elif 'resp_gt' in general_filter: - return obj > general_filter['resp_gt'] - elif 'resp_ge' in general_filter: - return obj >= general_filter['resp_gt'] - elif 'resp_match' in general_filter: - return bool(re.match(general_filter['resp_match'], obj)) + return any([ + general_filter_callback(item, obj) + for item in general_filter + ]) + elif isinstance(general_filter, dict): + if 'resp_eq' in general_filter: + if not _obj_equal_or_subset( + general_filter['resp_eq'], obj + ): + return False + if 'resp_in' in general_filter: + in_filters = general_filter['resp_in'] + if not any([ + _obj_equal_or_subset(in_filer, obj) + for in_filer in in_filters + ]): + return False + if 'resp_lt' in general_filter: + if obj >= general_filter['resp_lt']: + return False + if 'resp_le' in general_filter: + if obj > general_filter['resp_le']: + return False + if 'resp_gt' in general_filter: + if obj <= general_filter['resp_gt']: + return False + if 'resp_ge' in general_filter: + if obj < general_filter['resp_gt']: + return False + if 'resp_match' in general_filter: + if not re.match(general_filter['resp_match'], obj): + return False + if 'resp_range' in general_filter: + resp_range = general_filter['resp_range'] + if not isinstance(resp_range, list): + resp_range = [resp_range] + in_range = False + for range_start, range_end in resp_range: + if range_start <= obj <= range_end: + in_range = True + if not in_range: + return False + return True else: return True -def filter_output(filter_callbacks, filters, obj, missing_ok=False): 
+def filter_output(filter_callbacks, kwargs, obj, missing_ok=False): + """Filter ouput. + + For each key in filter_callbacks, if it exists in kwargs, + kwargs[key] tells what we need to filter. If the call of + filter_callbacks[key] returns False, it tells the obj should be + filtered out of output. + """ for callback_key, callback_value in filter_callbacks.items(): - if callback_key not in filters: + if callback_key not in kwargs: continue if callback_key not in obj: if missing_ok: @@ -449,21 +790,26 @@ def filter_output(filter_callbacks, filters, obj, missing_ok=False): '%s is not in %s' % (callback_key, obj) ) if not callback_value( - filters[callback_key], obj[callback_key] + kwargs[callback_key], obj[callback_key] ): return False return True def output_filters(missing_ok=False, **filter_callbacks): + """Decorator to filter output list. + + Each filter_callback should have the definition like: + func({'resp_eq': 'a'}, 'a') + """ def decorator(func): @functools.wraps(func) - def wrapper(*args, **filters): + def wrapper(*args, **kwargs): filtered_obj_list = [] - obj_list = func(*args, **filters) + obj_list = func(*args, **kwargs) for obj in obj_list: if filter_output( - filter_callbacks, filters, obj, missing_ok + filter_callbacks, kwargs, obj, missing_ok ): filtered_obj_list.append(obj) return filtered_obj_list @@ -472,6 +818,7 @@ def output_filters(missing_ok=False, **filter_callbacks): def _input_validates(args_validators, kwargs_validators, *args, **kwargs): + """Used by input_validators to validate inputs.""" for i, value in enumerate(args): if i < len(args_validators) and args_validators[i]: args_validators[i](value) @@ -481,6 +828,11 @@ def _input_validates(args_validators, kwargs_validators, *args, **kwargs): def input_validates(*args_validators, **kwargs_validators): + """Decorator to validate input. 
+ + Each validator should have definition like: + func('00:01:02:03:04:05') + """ def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): @@ -493,7 +845,103 @@ def input_validates(*args_validators, **kwargs_validators): return decorator +def _input_validates_with_args( + args_validators, kwargs_validators, *args, **kwargs +): + """Validate input with validators. + + Each validator takes the arguments of the decorated function + as its arguments. The function definition is like: + func(value, *args, **kwargs) compared with the decorated + function func(*args, **kwargs). + """ + for i, value in enumerate(args): + if i < len(args_validators) and args_validators[i]: + args_validators[i](value, *args, **kwargs) + for key, value in kwargs.items(): + if kwargs_validators.get(key): + kwargs_validators[key](value, *args, **kwargs) + + +def input_validates_with_args( + *args_validators, **kwargs_validators +): + """Decorator to validate input.""" + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + _input_validates_with_args( + args_validators, kwargs_validators, + *args, **kwargs + ) + return func(*args, **kwargs) + return wrapper + return decorator + + +def _output_validates_with_args( + kwargs_validators, obj, *args, **kwargs +): + """Validate output with validators. + + Each validator takes the arguments of the decorated function + as its arguments. The function definition is like: + func(value, *args, **kwargs) compared with the decorated + function func(*args, **kwargs). 
+ """ + if isinstance(obj, list): + for item in obj: + _output_validates_with_args( + kwargs_validators, item, *args, **kwargs + ) + return + if isinstance(obj, models.HelperMixin): + obj = obj.to_dict() + if not isinstance(obj, dict): + raise exception.InvalidResponse( + 'response %s type is not dict' % str(obj) + ) + try: + for key, value in obj.items(): + if key in kwargs_validators: + kwargs_validators[key](value, *args, **kwargs) + except Exception as error: + logging.exception(error) + raise error + + +def output_validates_with_args(**kwargs_validators): + """Decorator to validate output. + + The validator can take the arguments of the decorated + function as its arguments. + """ + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + obj = func(*args, **kwargs) + if isinstance(obj, list): + for obj_item in obj: + _output_validates_with_args( + kwargs_validators, obj_item, + *args, **kwargs + ) + else: + _output_validates_with_args( + kwargs_validators, obj, + *args, **kwargs + ) + return obj + return wrapper + return decorator + + def _output_validates(kwargs_validators, obj): + """Validate output. 
+ + Each validator has following signature: + func(value) + """ if isinstance(obj, list): for item in obj: _output_validates(kwargs_validators, item) @@ -504,12 +952,17 @@ def _output_validates(kwargs_validators, obj): raise exception.InvalidResponse( 'response %s type is not dict' % str(obj) ) - for key, value in obj.items(): - if key in kwargs_validators: - kwargs_validators[key](value) + try: + for key, value in obj.items(): + if key in kwargs_validators: + kwargs_validators[key](value) + except Exception as error: + logging.exception(error) + raise error def output_validates(**kwargs_validators): + """Decorator to validate output.""" def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): @@ -525,7 +978,13 @@ def output_validates(**kwargs_validators): def get_db_object(session, table, exception_when_missing=True, **kwargs): - """Get db object.""" + """Get db object. + + If not exception_when_missing and the db object can not be found, + return None instead of raising exception. + """ + if not session: + raise exception.DatabaseException('session param is None') with session.begin(subtransactions=True): logging.debug( 'session %s get db object %s from table %s', @@ -551,7 +1010,13 @@ def get_db_object(session, table, exception_when_missing=True, **kwargs): def add_db_object(session, table, exception_when_existing=True, *args, **kwargs): - """Create db object.""" + """Create db object. + + If not exception_when_existing and the db object exists, + Instead of raising exception, updating the existing db object. + """ + if not session: + raise exception.DatabaseException('session param is None') with session.begin(subtransactions=True): logging.debug( 'session %s add object %s atributes %s to table %s', @@ -602,7 +1067,12 @@ def add_db_object(session, table, exception_when_existing=True, def list_db_objects(session, table, order_by=[], **filters): - """List db objects.""" + """List db objects. 
+ + If order by given, the db objects should be sorted by the ordered keys. + """ + if not session: + raise exception.DatabaseException('session param is None') with session.begin(subtransactions=True): logging.debug( 'session %s list db objects by filters %s in table %s', @@ -626,6 +1096,8 @@ def list_db_objects(session, table, order_by=[], **filters): def del_db_objects(session, table, **filters): """delete db objects.""" + if not session: + raise exception.DatabaseException('session param is None') with session.begin(subtransactions=True): logging.debug( 'session %s delete db objects by filters %s in table %s', @@ -642,8 +1114,10 @@ def del_db_objects(session, table, **filters): return db_objects -def update_db_objects(session, table, **filters): +def update_db_objects(session, table, updates={}, **filters): """Update db objects.""" + if not session: + raise exception.DatabaseException('session param is None') with session.begin(subtransactions=True): logging.debug( 'session %s update db objects by filters %s in table %s', @@ -652,10 +1126,8 @@ def update_db_objects(session, table, **filters): model_query(session, table), table, **filters ).all() for db_object in db_objects: - logging.debug('update db object %s', db_object) - session.flush() - db_object.update() - db_object.validate() + logging.debug('update db object %s: %s', db_object, updates) + update_db_object(session, db_object, **updates) logging.debug( 'session %s db objects %s updated', id(session), db_objects @@ -665,6 +1137,8 @@ def update_db_objects(session, table, **filters): def update_db_object(session, db_object, **kwargs): """Update db object.""" + if not session: + raise exception.DatabaseException('session param is None') with session.begin(subtransactions=True): logging.debug( 'session %s update db object %s by value %s', @@ -684,6 +1158,8 @@ def update_db_object(session, db_object, **kwargs): def del_db_object(session, db_object): """Delete db object.""" + if not session: + raise 
exception.DatabaseException('session param is None') with session.begin(subtransactions=True): logging.debug( 'session %s delete db object %s', @@ -698,6 +1174,7 @@ def del_db_object(session, db_object): def check_ip(ip): + """Check ip is ip address formatted.""" try: netaddr.IPAddress(ip) except Exception as error: @@ -708,6 +1185,7 @@ def check_ip(ip): def check_mac(mac): + """Check mac is mac address formatted.""" try: netaddr.EUI(mac) except Exception as error: @@ -721,6 +1199,7 @@ NAME_PATTERN = re.compile(r'[a-zA-Z0-9][a-zA-Z0-9_-]*') def check_name(name): + """Check name meeting name format requirement.""" if not NAME_PATTERN.match(name): raise exception.InvalidParameter( 'name %s does not match the pattern %s' % ( @@ -734,6 +1213,7 @@ def _check_ipmi_credentials_ip(ip): def check_ipmi_credentials(ipmi_credentials): + """Check ipmi credentials format is correct.""" if not ipmi_credentials: return if not isinstance(ipmi_credentials, dict): @@ -775,6 +1255,7 @@ def _check_switch_credentials_version(version): def check_switch_credentials(credentials): + """Check switch credentials format is correct.""" if not credentials: return if not isinstance(credentials, dict): diff --git a/compass/db/exception.py b/compass/db/exception.py index 5823d9e4..44556c9b 100644 --- a/compass/db/exception.py +++ b/compass/db/exception.py @@ -17,6 +17,7 @@ import traceback class DatabaseException(Exception): + """Base class for all database exceptions.""" def __init__(self, message): super(DatabaseException, self).__init__(message) self.traceback = traceback.format_exc() diff --git a/compass/db/models.py b/compass/db/models.py index 332d5a02..3a7dcf49 100644 --- a/compass/db/models.py +++ b/compass/db/models.py @@ -38,9 +38,7 @@ from sqlalchemy import Text from sqlalchemy.types import TypeDecorator from sqlalchemy import UniqueConstraint -from compass.db import callback as metadata_callback from compass.db import exception -from compass.db import validator as metadata_validator 
from compass.utils import util @@ -64,12 +62,15 @@ class JSONEncoded(TypeDecorator): class TimestampMixin(object): + """Provides table fields for each row created/updated timestamp.""" created_at = Column(DateTime, default=lambda: datetime.datetime.now()) updated_at = Column(DateTime, default=lambda: datetime.datetime.now(), onupdate=lambda: datetime.datetime.now()) class HelperMixin(object): + """Provides general fuctions for all compass table models.""" + def initialize(self): self.update() @@ -78,6 +79,7 @@ class HelperMixin(object): @staticmethod def type_compatible(value, column_type): + """Check if value type is compatible with the column type.""" if value is None: return True if not hasattr(column_type, 'python_type'): @@ -96,6 +98,7 @@ class HelperMixin(object): return False def validate(self): + """Generate validate function to make sure the record is legal.""" columns = self.__mapper__.columns for key, column in columns.items(): value = getattr(self, key) @@ -107,6 +110,11 @@ class HelperMixin(object): ) def to_dict(self): + """General function to convert record to dict. 
+ + Convert all columns not starting with '_' to + {: } + """ keys = self.__mapper__.columns.keys() dict_info = {} for key in keys: @@ -120,297 +128,12 @@ class HelperMixin(object): return dict_info -class MetadataMixin(HelperMixin): - name = Column(String(80), nullable=False) - display_name = Column(String(80)) - path = Column(String(256)) - description = Column(Text) - is_required = Column(Boolean, default=False) - required_in_whole_config = Column(Boolean, default=False) - mapping_to = Column(String(80), default='') - _validator = Column('validator', Text) - js_validator = Column(Text) - default_value = Column(JSONEncoded) - _default_callback = Column('default_callback', Text) - default_callback_params = Column( - 'default_callback_params', JSONEncoded, default={} - ) - options = Column(JSONEncoded) - _options_callback = Column('options_callback', Text) - options_callback_params = Column( - 'options_callback_params', JSONEncoded, default={} - ) - _autofill_callback = Column('autofill_callback', Text) - autofill_callback_params = Column( - 'autofill_callback_params', JSONEncoded, default={} - ) - required_in_options = Column(Boolean, default=False) - - def initialize(self): - if not self.display_name: - if self.name: - self.display_name = self.name - super(MetadataMixin, self).initialize() - - def validate(self): - super(MetadataMixin, self).validate() - if not self.name: - raise exception.InvalidParamter( - 'name is not set in os metadata %s' % self.id - ) - - @property - def validator(self): - if not self._validator: - return None - func = eval( - self._validator, - metadata_validator.VALIDATOR_GLOBALS, - metadata_validator.VALIDATOR_LOCALS - ) - if not callable(func): - raise Exception( - 'validator %s is not callable' % self._validator - ) - return func - - @validator.setter - def validator(self, value): - if not value: - self._validator = None - elif isinstance(value, basestring): - self._validator = value - elif callable(value): - self._validator = 
value.func_name - else: - raise Exception( - 'validator %s is not callable' % value - ) - - @property - def default_callback(self): - if not self._default_callback: - return None - func = eval( - self._default_callback, - metadata_callback.CALLBACK_GLOBALS, - metadata_callback.CALLBACK_LOCALS - ) - if not callable(func): - raise Exception( - 'default callback %s is not callable' % self._default_callback - ) - return func - - @default_callback.setter - def default_callback(self, value): - if not value: - self._default_callback = None - elif isinstance(value, basestring): - self._default_callback = value - elif callable(value): - self._default_callback = value.func_name - else: - raise Exception( - 'default callback %s is not callable' % value - ) - - @property - def options_callback(self): - if not self._options_callback: - return None - func = eval( - self._options_callback, - metadata_callback.CALLBACK_GLOBALS, - metadata_callback.CALLBACK_LOCALS - ) - if not callable(func): - raise Exception( - 'options callback %s is not callable' % self._options_callback - ) - return func - - @options_callback.setter - def options_callback(self, value): - if not value: - self._options_callback = None - elif isinstance(value, basestring): - self._options_callback = value - elif callable(value): - self._options_callback = value.func_name - else: - raise Exception( - 'options callback %s is not callable' % value - ) - - @property - def autofill_callback(self): - if not self._autofill_callback: - return None - func = eval( - self._autofill_callback, - metadata_callback.CALLBACK_GLOBALS, - metadata_callback.CALLBACK_LOCALS - ) - if not callable(func): - raise Exception( - 'autofill callback %s is not callable' % ( - self._autofill_callback - ) - ) - return func - - @autofill_callback.setter - def autofill_callback(self, value): - if not value: - self._autofill_callback = None - elif isinstance(value, basestring): - self._autofill_callback = value - elif callable(value): - 
self._autofill_callback = value.func_name - else: - raise Exception( - 'autofill callback %s is not callable' % value - ) - - def to_dict(self): - self_dict_info = {} - if self.field: - self_dict_info.update(self.field.to_dict()) - else: - self_dict_info['field_type_data'] = 'dict' - self_dict_info['field_type'] = dict - self_dict_info.update(super(MetadataMixin, self).to_dict()) - validator = self.validator - if validator: - self_dict_info['validator'] = validator - default_callback = self.default_callback - if default_callback: - self_dict_info['default_callback'] = default_callback - options_callback = self.options_callback - if options_callback: - self_dict_info['options_callback'] = options_callback - autofill_callback = self.autofill_callback - if autofill_callback: - self_dict_info['autofill_callback'] = autofill_callback - js_validator = self.js_validator - if js_validator: - self_dict_info['js_validator'] = js_validator - dict_info = { - '_self': self_dict_info - } - for child in self.children: - dict_info.update(child.to_dict()) - return { - self.name: dict_info - } - return dict_info - - -class FieldMixin(HelperMixin): - id = Column(Integer, primary_key=True) - field = Column(String(80), unique=True, nullable=False) - field_type_data = Column( - 'field_type', - Enum( - 'basestring', 'int', 'float', 'list', 'bool', - 'dict', 'object' - ), - ColumnDefault('basestring') - ) - display_type = Column( - Enum( - 'checkbox', 'radio', 'select', - 'multiselect', 'combobox', 'text', - 'multitext', 'password', 'dropdown' - ), - ColumnDefault('text') - ) - _validator = Column('validator', Text) - js_validator = Column(Text) - description = Column(Text) - - @property - def field_type(self): - if not self.field_type_data: - return None - field_type = eval(self.field_type_data) - if not type(field_type) == type: - raise Exception( - '%s is not type' % self.field_type_data - ) - return field_type - - @field_type.setter - def field_type(self, value): - if not value: - 
self.field_type_data = None - elif isinstance(value, basestring): - self.field_type_data = value - elif type(value) == type: - self.field_type_data = value.__name__ - else: - raise Exception( - '%s is not type' % value - ) - - @property - def validator(self): - if not self._validator: - return None - func = eval( - self._validator, - metadata_validator.VALIDATOR_GLOBALS, - metadata_validator.VALIDATOR_LOCALS - ) - if not callable(func): - raise Exception( - '%s is not callable' % self._validator - ) - return func - - @validator.setter - def validator(self, value): - if not value: - self._validator = None - elif isinstance(value, basestring): - self._validator = value - elif callable(value): - self._validator = value.func_name - else: - raise Exception( - '%s is not callable' % value - ) - - def to_dict(self): - dict_info = super(FieldMixin, self).to_dict() - dict_info['field_type'] = self.field_type - validator = self.validator - if validator: - dict_info['validator'] = self.validator - js_validator = self.js_validator - if js_validator: - dict_info['js_validator'] = self.js_validator - return dict_info - - -class InstallerMixin(HelperMixin): - name = Column(String(80), nullable=False) - alias = Column(String(80), unique=True, nullable=False) - settings = Column(JSONEncoded, default={}) - - def validate(self): - super(InstallerMixin, self).validate() - if not self.name: - raise exception.InvalidParameter( - 'name is not set in installer %s' % self.name - ) - - class StateMixin(TimestampMixin, HelperMixin): + """Provides general fields and functions for state related table.""" + state = Column( Enum( - 'UNINITIALIZED', 'INITIALIZED', + 'UNINITIALIZED', 'INITIALIZED', 'UPDATE_PREPARING', 'INSTALLING', 'SUCCESSFUL', 'ERROR' ), ColumnDefault('UNINITIALIZED') @@ -424,6 +147,9 @@ class StateMixin(TimestampMixin, HelperMixin): ready = Column(Boolean, default=False) def update(self): + # In state table, some field information is redundant. 
+ # The update function to make sure all related fields + # are set to correct state. if self.ready: self.state = 'SUCCESSFUL' if self.state in ['UNINITIALIZED', 'INITIALIZED']: @@ -442,6 +168,7 @@ class StateMixin(TimestampMixin, HelperMixin): class LogHistoryMixin(TimestampMixin, HelperMixin): + """Provides general fields and functions for LogHistory related tables.""" position = Column(Integer, default=0) partial_line = Column(Text, default='') percentage = Column(Float, default=0.0) @@ -455,6 +182,7 @@ class LogHistoryMixin(TimestampMixin, HelperMixin): ) def validate(self): + # TODO(xicheng): some validation can be moved to column. if not self.filename: raise exception.InvalidParameter( 'filename is not set in %s' % self.id @@ -508,6 +236,7 @@ class HostNetwork(BASE, TimestampMixin, HelperMixin): self.host.config_validated = False def validate(self): + # TODO(xicheng): some validation can be moved to column. super(HostNetwork, self).validate() if not self.subnet: raise exception.InvalidParameter( @@ -616,14 +345,21 @@ class ClusterHostState(BASE, StateMixin): ) def update(self): + """Update clusterhost state. + + When clusterhost state is updated, the underlying host state + may be updated accordingly. + """ super(ClusterHostState, self).update() host_state = self.clusterhost.host.state if self.state == 'INITIALIZED': - if host_state.state in ['UNINITIALIZED']: + if host_state.state in ['UNINITIALIZED', 'UPDATE_PREPARING']: host_state.state = 'INITIALIZED' host_state.update() elif self.state == 'INSTALLING': - if host_state.state in ['UNINITIALIZED', 'INITIALIZED']: + if host_state.state in [ + 'UNINITIALIZED', 'UPDATE_PREPARING', 'INITIALIZED' + ]: host_state.state = 'INSTALLING' host_state.update() elif self.state == 'SUCCESSFUL': @@ -645,13 +381,14 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin): Integer, ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE') ) + # the list of role names. 
_roles = Column('roles', JSONEncoded, default=[]) config_step = Column(String(80), default='') package_config = Column(JSONEncoded, default={}) config_validated = Column(Boolean, default=False) deployed_package_config = Column(JSONEncoded, default={}) - log_history = relationship( + log_histories = relationship( ClusterHostLogHistory, passive_deletes=True, passive_updates=True, cascade='all, delete-orphan', @@ -751,14 +488,6 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin): host = self.host host.deployed_os_config = value - @hybrid_property - def distributed_system_name(self): - return self.cluster.distributed_system_name - - @distributed_system_name.expression - def distributed_system_name(cls): - return cls.cluster.distributed_system_name - @hybrid_property def os_name(self): return self.host.os_name @@ -797,26 +526,31 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin): @property def os_installed(self): - logging.debug('os installed: %s' % self.host.os_installed) return self.host.os_installed @property def roles(self): + # only the role exists in flavor roles will be returned. + # the role will be sorted as the order defined in flavor + # roles. + # duplicate role names will be removed. 
+ # The returned value is a list of dict like + # [{'name': 'allinone', 'optional': False}] role_names = list(self._roles) if not role_names: return [] - flavor = self.cluster.flavor - if not flavor: + cluster_roles = self.cluster.flavor['roles'] + if not cluster_roles: return [] roles = [] - for flavor_role in flavor.ordered_flavor_roles: - role = flavor_role.role - if role.name in role_names: - roles.append(role) + for cluster_role in cluster_roles: + if cluster_role['name'] in role_names: + roles.append(cluster_role) return roles @roles.setter def roles(self, value): + """value should be a list of role name.""" self._roles = list(value) self.config_validated = False @@ -826,6 +560,7 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin): @patched_roles.setter def patched_roles(self, value): + """value should be a list of role name.""" roles = list(self._roles) roles.extend(value) self._roles = roles @@ -840,10 +575,19 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin): return cls.cluster.owner def state_dict(self): + """Get clusterhost state dict. + + The clusterhost state_dict is different from + clusterhost.state.to_dict. The main difference is state_dict + show the progress of both installing os on host and installing + distributed system on clusterhost. While clusterhost.state.to_dict + only shows the progress of installing distributed system on + clusterhost. 
+ """ cluster = self.cluster host = self.host host_state = host.state_dict() - if not cluster.distributed_system: + if not cluster.flavor_name: return host_state clusterhost_state = self.state.to_dict() if clusterhost_state['state'] in ['ERROR', 'SUCCESSFUL']: @@ -869,7 +613,6 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin): dict_info.update(super(ClusterHost, self).to_dict()) state_dict = self.state_dict() dict_info.update({ - 'distributed_system_name': self.distributed_system_name, 'distributed_system_installed': self.distributed_system_installed, 'reinstall_distributed_system': self.reinstall_distributed_system, 'owner': self.owner, @@ -877,10 +620,7 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin): 'name': self.name, 'state': state_dict['state'] }) - roles = self.roles - dict_info['roles'] = [ - role.to_dict() for role in roles - ] + dict_info['roles'] = self.roles return dict_info @@ -900,6 +640,11 @@ class HostState(BASE, StateMixin): ) def update(self): + """Update host state. + + When host state is updated, all clusterhosts on the + host will update their state if necessary. 
+ """ super(HostState, self).update() host = self.host if self.state == 'INSTALLING': @@ -917,6 +662,13 @@ class HostState(BASE, StateMixin): ]: clusterhost.state = 'UNINITIALIZED' clusterhost.state.update() + elif self.state == 'UPDATE_PREPARING': + for clusterhost in self.host.clusterhosts: + if clusterhost.state in [ + 'INITIALIZED', 'INSTALLING', 'SUCCESSFUL', 'ERROR' + ]: + clusterhost.state = 'UPDATE_PREPARING' + clusterhost.state.update() elif self.state == 'INITIALIZED': for clusterhost in self.host.clusterhosts: if clusterhost.state in [ @@ -931,7 +683,6 @@ class Host(BASE, TimestampMixin, HelperMixin): __tablename__ = 'host' name = Column(String(80), unique=True, nullable=True) - os_id = Column(Integer, ForeignKey('os.id')) config_step = Column(String(80), default='') os_config = Column(JSONEncoded, default={}) config_validated = Column(Boolean, default=False) @@ -939,10 +690,7 @@ class Host(BASE, TimestampMixin, HelperMixin): os_name = Column(String(80)) creator_id = Column(Integer, ForeignKey('user.id')) owner = Column(String(80)) - os_installer_id = Column( - Integer, - ForeignKey('os_installer.id') - ) + os_installer = Column(JSONEncoded, default={}) id = Column( Integer, @@ -970,7 +718,7 @@ class Host(BASE, TimestampMixin, HelperMixin): cascade='all, delete-orphan', backref=backref('host') ) - log_history = relationship( + log_histories = relationship( HostLogHistory, passive_deletes=True, passive_updates=True, cascade='all, delete-orphan', @@ -988,6 +736,14 @@ class Host(BASE, TimestampMixin, HelperMixin): else: return None + @property + def os_id(self): + return self.os_name + + @os_id.setter + def os_id(self, value): + self.os_name = value + @hybrid_property def hostname(self): return self.name @@ -1035,23 +791,19 @@ class Host(BASE, TimestampMixin, HelperMixin): else: self.state.state = 'UNINITIALIZED' self.state.update() - os = self.os - if os: - self.os_name = os.name - else: - self.os_name = None self.state.update() super(Host, self).update() 
def validate(self): + # TODO(xicheng): some validation can be moved to the column in future. super(Host, self).validate() creator = self.creator if not creator: raise exception.InvalidParameter( 'creator is not set in host %s' % self.id ) - os = self.os - if not os: + os_name = self.os_name + if not os_name: raise exception.InvalidParameter( 'os is not set in host %s' % self.id ) @@ -1060,10 +812,6 @@ class Host(BASE, TimestampMixin, HelperMixin): raise exception.Invalidparameter( 'os_installer is not set in host %s' % self.id ) - if not os.deployable: - raise exception.InvalidParameter( - 'os %s is not deployable in host %s' % (os.name, self.id) - ) @property def os_installed(self): @@ -1077,6 +825,7 @@ class Host(BASE, TimestampMixin, HelperMixin): return self.state.to_dict() def to_dict(self): + """Host dict contains its underlying machine dict.""" dict_info = self.machine.to_dict() dict_info.update(super(Host, self).to_dict()) state_dict = self.state_dict() @@ -1093,7 +842,7 @@ class Host(BASE, TimestampMixin, HelperMixin): host_network.to_dict() for host_network in self.host_networks ], - 'os_installer': self.os_installer.to_dict(), + 'os_id': self.os_id, 'clusters': [cluster.to_dict() for cluster in self.clusters], 'state': state_dict['state'] }) @@ -1145,17 +894,15 @@ class ClusterState(BASE, StateMixin): return dict_info def update(self): + # all fields of cluster state should be calculated by + # its each underlying clusterhost state. 
cluster = self.cluster clusterhosts = cluster.clusterhosts self.total_hosts = len(clusterhosts) - if self.state in ['UNINITIALIZED', 'INITIALIZED', 'INSTALLING']: - self.installing_hosts = 0 - self.failed_hosts = 0 - self.completed_hosts = 0 - if self.state == 'INSTALLING': - cluster.reinstall_distributed_system = False - - if not cluster.distributed_system: + self.installing_hosts = 0 + self.failed_hosts = 0 + self.completed_hosts = 0 + if not cluster.flavor_name: for clusterhost in clusterhosts: host = clusterhost.host host_state = host.state.state @@ -1183,6 +930,9 @@ class ClusterState(BASE, StateMixin): / float(self.total_hosts) ) + if self.state == 'SUCCESSFUL': + self.state = 'INSTALLING' + self.ready = False self.message = ( 'total %s, installing %s, completed: %s, error %s' ) % ( @@ -1193,14 +943,8 @@ class ClusterState(BASE, StateMixin): self.severity = 'ERROR' super(ClusterState, self).update() - - if self.state == 'SUCCESSFUL': - self.completed_hosts = self.total_hosts - for clusterhost in clusterhosts: - clusterhost_state = clusterhost.state - if clusterhost_state.state != 'SUCCESSFUL': - clusterhost_state.state = 'SUCCESSFUL' - clusterhost.state.update() + if self.state == 'INSTALLING': + cluster.reinstall_distributed_system = False class Cluster(BASE, TimestampMixin, HelperMixin): @@ -1211,27 +955,15 @@ class Cluster(BASE, TimestampMixin, HelperMixin): name = Column(String(80), unique=True, nullable=False) reinstall_distributed_system = Column(Boolean, default=True) config_step = Column(String(80), default='') - os_id = Column(Integer, ForeignKey('os.id')) os_name = Column(String(80)) - flavor_id = Column( - Integer, - ForeignKey('adapter_flavor.id'), - nullable=True - ) flavor_name = Column(String(80), nullable=True) - distributed_system_id = Column( - Integer, ForeignKey('distributed_system.id'), - nullable=True - ) - distributed_system_name = Column( - String(80), nullable=True - ) + # flavor dict got from flavor id. 
+ flavor = Column(JSONEncoded, default={}) os_config = Column(JSONEncoded, default={}) package_config = Column(JSONEncoded, default={}) deployed_os_config = Column(JSONEncoded, default={}) deployed_package_config = Column(JSONEncoded, default={}) config_validated = Column(Boolean, default=False) - adapter_id = Column(Integer, ForeignKey('adapter.id')) adapter_name = Column(String(80)) creator_id = Column(Integer, ForeignKey('user.id')) owner = Column(String(80)) @@ -1268,85 +1000,79 @@ class Cluster(BASE, TimestampMixin, HelperMixin): else: self.state.state = 'UNINITIALIZED' self.state.update() - os = self.os - if os: - self.os_name = os.name - else: - self.os_name = None - self.os_config = {} - adapter = self.adapter - if adapter: - self.adapter_name = adapter.name - distributed_system = adapter.adapter_distributed_system - self.distributed_system = distributed_system - if distributed_system: - self.distributed_system_name = distributed_system.name - else: - self.distributed_system_name = None - flavor = self.flavor - if flavor: - self.flavor_name = flavor.name - else: - self.flavor_name = None self.state.update() super(Cluster, self).update() def validate(self): + # TODO(xicheng): some validation can be moved to column. 
super(Cluster, self).validate() creator = self.creator if not creator: raise exception.InvalidParameter( 'creator is not set in cluster %s' % self.id ) - os = self.os - if not os: + os_name = self.os_name + if not os_name: raise exception.InvalidParameter( 'os is not set in cluster %s' % self.id ) - if not os.deployable: - raise exception.InvalidParameter( - 'os %s is not deployable' % os.name - ) - adapter = self.adapter - if not adapter: + adapter_name = self.adapter_name + if not adapter_name: raise exception.InvalidParameter( 'adapter is not set in cluster %s' % self.id ) - if not adapter.deployable: - raise exception.InvalidParameter( - 'adapter %s is not deployable' % adapter.name - ) - supported_os_ids = [ - adapter_os.os.id for adapter_os in adapter.supported_oses - ] - if os.id not in supported_os_ids: - raise exception.InvalidParameter( - 'os %s is not supported' % os.name - ) - distributed_system = self.distributed_system - if distributed_system: - if not distributed_system.deployable: - raise exception.InvalidParamerter( - 'distributed system %s is not deployable' % ( - distributed_system.name + flavor_name = self.flavor_name + if flavor_name: + if 'name' not in self.flavor: + raise exception.InvalidParameter( + 'key name does not exist in flavor %s' % ( + self.flavor ) ) - flavor = self.flavor - if not flavor: - if distributed_system: + if flavor_name != self.flavor['name']: raise exception.InvalidParameter( - 'flavor is not set in cluster %s' % self.id + 'flavor name %s is not match ' + 'the name key in flavor %s' % ( + flavor_name, self.flavor + ) ) else: - flavor_adapter_id = flavor.adapter_id - adapter_id = self.adapter_id - if flavor_adapter_id != adapter_id: + if self.flavor: raise exception.InvalidParameter( - 'flavor adapter id %s does not match adapter id %s' % ( - flavor_adapter_id, adapter_id - ) + 'flavor %s is not empty' % self.flavor ) + @property + def os_id(self): + return self.os_name + + @os_id.setter + def os_id(self, value): + 
self.os_name = value + + @property + def adapter_id(self): + return self.adapter_name + + @adapter_id.setter + def adapter_id(self, value): + self.adapter_name = value + + @property + def flavor_id(self): + if self.flavor_name: + return '%s:%s' % (self.adapter_name, self.flavor_name) + else: + return None + + @flavor_id.setter + def flavor_id(self, value): + if value: + _, flavor_name = value.split(':', 1) + self.flavor_name = flavor_name + else: + self.flavor_name = value + @property def patched_os_config(self): return self.os_config @@ -1405,8 +1131,9 @@ class Cluster(BASE, TimestampMixin, HelperMixin): dict_info['distributed_system_installed'] = ( self.distributed_system_installed ) - if self.flavor: - dict_info['flavor'] = self.flavor.to_dict() + dict_info['os_id'] = self.os_id + dict_info['adapter_id'] = self.adapter_id + dict_info['flavor_id'] = self.flavor_id return dict_info @@ -1484,6 +1211,7 @@ class UserToken(BASE, HelperMixin): super(UserToken, self).__init__(**kwargs) def validate(self): + # TODO(xicheng): some validation can be moved to column. super(UserToken, self).validate() if not self.user: raise exception.InvalidParameter( @@ -1508,6 +1236,7 @@ class UserLog(BASE, HelperMixin): return self.user.email def validate(self): + # TODO(xicheng): some validation can be moved to column. super(UserLog, self).validate() if not self.user: raise exception.InvalidParameter( @@ -1561,6 +1290,7 @@ class User(BASE, HelperMixin, TimestampMixin): return 'User[%s]' % self.email def validate(self): + # TODO(xicheng): some validation can be moved to column. super(User, self).validate() if not self.crypted_password: raise exception.InvalidParameter( @@ -1573,6 +1303,7 @@ class User(BASE, HelperMixin, TimestampMixin): @password.setter def password(self, password): + # password stored in database is crypted. 
        self.crypted_password = util.encrypt(password)
 
     @hybrid_property
@@ -1623,6 +1354,7 @@ class SwitchMachine(BASE, HelperMixin, TimestampMixin):
     )
 
     def validate(self):
+        # TODO(xicheng): some validation can be moved to column.
         super(SwitchMachine, self).validate()
         if not self.switch:
             raise exception.InvalidParameter(
@@ -1681,7 +1413,26 @@ class SwitchMachine(BASE, HelperMixin, TimestampMixin):
 
     @property
     def filtered(self):
-        filters = self.switch.filters
+        """Check if switch machine should be filtered.
+
+        port should be composed with <port_prefix><port_number><port_suffix>
+        For each filter in switch machine filters,
+        if filter_type is allow and port match the pattern, the switch
+        machine is allowed to be got by api. If filter_type is deny and
+        port match the pattern, the switch machine is not allowed to be got
+        by api.
+        If no filter is matched, if the last filter is allow, deny all
+        unmatched switch machines, if the last filter is deny, allow all
+        unmatched switch machines.
+        If no filter defined, allow all switch machines.
+        if ports defined in filter and 'all' in ports, the switch machine is
+        matched. if ports defined in filter and 'all' not in ports,
+        the switch machine with the port name in ports will be matched.
+        If the port pattern matches
+        <port_prefix><port_number><port_suffix> and port number is in the
+        range of [port_start, port_end], the switch machine is matched.
+        """
+        filters = self.switch.machine_filters
         port = self.port
         unmatched_allowed = True
         ports_pattern = re.compile(r'(\D*)(\d+)-(\d+)(\D*)')
@@ -1775,6 +1526,7 @@ class Machine(BASE, HelperMixin, TimestampMixin):
         return 'Machine[%s:%s]' % (self.id, self.mac)
 
     def validate(self):
+        # TODO(xicheng): some validation can be moved to column.
         super(Machine, self).validate()
         try:
             netaddr.EUI(self.mac)
@@ -1819,6 +1571,8 @@ class Machine(BASE, HelperMixin, TimestampMixin):
         self.location = location
 
     def to_dict(self):
+        # TODO(xicheng): move the filling of switches
+        # to db/api.
        dict_info = {}
         dict_info['switches'] = [
             {
@@ -1846,6 +1600,16 @@ class Switch(BASE, HelperMixin, TimestampMixin):
             'repolling', 'error', 'under_monitoring',
             name='switch_state'),
         ColumnDefault('initialized'))
+    # filters is json formatted list, each element has following format:
+    # keys: ['filter_type', 'ports', 'port_prefix', 'port_suffix',
+    # 'port_start', 'port_end'].
+    # each port name is divided into <port_prefix><port_number><port_suffix>
+    # filter_type is one of ['allow', 'deny'], default is 'allow'
+    # ports is a list of port name.
+    # port_prefix is the prefix that filtered port should start with.
+    # port_suffix is the suffix that filtered port should end with.
+    # port_start is the integer that the port number should start with.
+    # port_end is the integer that the port number should end with.
     _filters = Column('filters', JSONEncoded, default=[])
     switch_machines = relationship(
         SwitchMachine,
@@ -1859,22 +1623,47 @@ class Switch(BASE, HelperMixin, TimestampMixin):
 
     @classmethod
     def parse_filters(cls, filters):
+        """Parse filters set from outside to standard format.
+
+        api can set switch filters with the flexible format, this
+        function will parse the flexible format filters.
+ + Supported format: + as string: + allow ports ae10,ae20 + allow port_prefix ae port_start 30 port_end 40 + deny ports all + as python object: + [{ + 'filter_type': 'allow', + 'ports': ['ae10', 'ae20'] + },{ + 'filter_type': 'allow', + 'port_prefix': 'ae', + 'port_suffix': '', + 'port_start': 30, + 'port_end': 40 + },{ + 'filter_type': 'deny', + 'ports': ['all'] + }] + """ if isinstance(filters, basestring): filters = filters.replace('\r\n', '\n').replace('\n', ';') filters = [ - switch_filter for switch_filter in filters.split(';') - if switch_filter + machine_filter for machine_filter in filters.split(';') + if machine_filter ] if not isinstance(filters, list): filters = [filters] - switch_filters = [] - for switch_filter in filters: - if not switch_filter: + machine_filters = [] + for machine_filter in filters: + if not machine_filter: continue - if isinstance(switch_filter, basestring): + if isinstance(machine_filter, basestring): filter_dict = {} filter_items = [ - item for item in switch_filter.split() if item + item for item in machine_filter.split() if item ] if filter_items[0] in ['allow', 'deny']: filter_dict['filter_type'] = filter_items[0] @@ -1893,77 +1682,80 @@ class Switch(BASE, HelperMixin, TimestampMixin): else: filter_dict[filter_items[0]] = '' filter_items = filter_items[1:] - switch_filter = filter_dict - if not isinstance(switch_filter, dict): + machine_filter = filter_dict + if not isinstance(machine_filter, dict): raise exception.InvalidParameter( - 'filter %s is not dict' % switch_filter + 'filter %s is not dict' % machine_filter ) - if 'filter_type' in switch_filter: - if switch_filter['filter_type'] not in ['allow', 'deny']: + if 'filter_type' in machine_filter: + if machine_filter['filter_type'] not in ['allow', 'deny']: raise exception.InvalidParameter( 'filter_type should be `allow` or `deny` in %s' % ( - switch_filter + machine_filter ) ) - if 'ports' in switch_filter: - if isinstance(switch_filter['ports'], basestring): - 
switch_filter['ports'] = [ + if 'ports' in machine_filter: + if isinstance(machine_filter['ports'], basestring): + machine_filter['ports'] = [ port_or_ports - for port_or_ports in switch_filter['ports'].split(',') + for port_or_ports in machine_filter['ports'].split(',') if port_or_ports ] - if not isinstance(switch_filter['ports'], list): + if not isinstance(machine_filter['ports'], list): raise exception.InvalidParameter( - '`ports` type is not list in filter %s' % switch_filter + '`ports` type is not list in filter %s' % ( + machine_filter + ) ) - for port_or_ports in switch_filter['ports']: + for port_or_ports in machine_filter['ports']: if not isinstance(port_or_ports, basestring): raise exception.InvalidParameter( '%s type is not basestring in `ports` %s' % ( - port_or_ports, switch_filter['ports'] + port_or_ports, machine_filter['ports'] ) ) for key in ['port_start', 'port_end']: - if key in switch_filter: - if isinstance(switch_filter[key], basestring): - if switch_filter[key].isdigit(): - switch_filter[key] = int(switch_filter[key]) - if not isinstance(switch_filter[key], int): + if key in machine_filter: + if isinstance(machine_filter[key], basestring): + if machine_filter[key].isdigit(): + machine_filter[key] = int(machine_filter[key]) + if not isinstance(machine_filter[key], (int, long)): raise exception.InvalidParameter( '`%s` type is not int in filer %s' % ( - key, switch_filter + key, machine_filter ) ) - switch_filters.append(switch_filter) - return switch_filters + machine_filters.append(machine_filter) + return machine_filters @classmethod def format_filters(cls, filters): + """format json formatted filters to string.""" filter_strs = [] - for switch_filter in filters: + for machine_filter in filters: filter_properties = [] filter_properties.append( - switch_filter.get('filter_type', 'allow') + machine_filter.get('filter_type', 'allow') ) - if 'ports' in switch_filter: + if 'ports' in machine_filter: filter_properties.append( - 'ports ' + 
','.join(switch_filter['ports']) + 'ports ' + ','.join(machine_filter['ports']) ) - if 'port_prefix' in switch_filter: + if 'port_prefix' in machine_filter: filter_properties.append( - 'port_prefix ' + switch_filter['port_prefix'] + 'port_prefix ' + machine_filter['port_prefix'] ) - if 'port_suffix' in switch_filter: + if 'port_suffix' in machine_filter: filter_properties.append( - 'port_suffix ' + switch_filter['port_suffix'] + 'port_suffix ' + machine_filter['port_suffix'] ) - if 'port_start' in switch_filter: + if 'port_start' in machine_filter: filter_properties.append( - 'port_start ' + str(switch_filter['port_start']) + 'port_start ' + str(machine_filter['port_start']) ) - if 'port_end' in switch_filter: + if 'port_end' in machine_filter: filter_properties.append( - 'port_end ' + str(switch_filter['port_end']) + 'port_end ' + str(machine_filter['port_end']) ) filter_strs.append(' '.join(filter_properties)) return ';'.join(filter_strs) @@ -1992,35 +1784,35 @@ class Switch(BASE, HelperMixin, TimestampMixin): self.credentials = util.merge_dict(credentials, value) @property - def filters(self): + def machine_filters(self): return self._filters - @filters.setter - def filters(self, value): + @machine_filters.setter + def machine_filters(self, value): if not value: return self._filters = self.parse_filters(value) @property - def put_filters(self): + def put_machine_filters(self): return self._filters - @put_filters.setter - def put_filters(self, value): + @put_machine_filters.setter + def put_machine_filters(self, value): if not value: return self._filters = self.parse_filters(value) @property - def patched_filters(self): + def patched_machine_filters(self): return self._filters - @patched_filters.setter - def patched_filters(self, value): + @patched_machine_filters.setter + def patched_machine_filters(self, value): if not value: return - filters = list(self.filters) - self.filters = self.parse_filters(value) + filters + filters = list(self.machine_filters) + 
self._filters = self.parse_filters(value) + filters def to_dict(self): dict_info = super(Switch, self).to_dict() @@ -2029,789 +1821,6 @@ class Switch(BASE, HelperMixin, TimestampMixin): return dict_info -class OSConfigMetadata(BASE, MetadataMixin): - """OS config metadata.""" - __tablename__ = "os_config_metadata" - - id = Column(Integer, primary_key=True) - os_id = Column( - Integer, - ForeignKey( - 'os.id', onupdate='CASCADE', ondelete='CASCADE' - ) - ) - parent_id = Column( - Integer, - ForeignKey( - 'os_config_metadata.id', onupdate='CASCADE', ondelete='CASCADE' - ) - ) - field_id = Column( - Integer, - ForeignKey( - 'os_config_field.id', onupdate='CASCADE', ondelete='CASCADE' - ) - ) - children = relationship( - 'OSConfigMetadata', - passive_deletes=True, passive_updates=True, - backref=backref('parent', remote_side=id) - ) - __table_args__ = ( - UniqueConstraint('path', 'os_id', name='constraint'), - ) - - def __init__(self, os_id, path, **kwargs): - self.os_id = os_id - self.path = path - super(OSConfigMetadata, self).__init__(**kwargs) - - def validate(self): - super(OSConfigMetadata, self).validate() - if not self.os: - raise exception.InvalidParameter( - 'os is not set in os metadata %s' % self.id - ) - - -class OSConfigField(BASE, FieldMixin): - """OS config fields.""" - __tablename__ = 'os_config_field' - - metadatas = relationship( - OSConfigMetadata, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('field')) - - def __init__(self, field, **kwargs): - self.field = field - super(OSConfigField, self).__init__(**kwargs) - - -class AdapterOS(BASE, HelperMixin): - """Adapter OS table.""" - __tablename__ = 'adapter_os' - - adapter_os_id = Column('id', Integer, primary_key=True) - os_id = Column( - Integer, - ForeignKey( - 'os.id', - onupdate='CASCADE', ondelete='CASCADE' - ) - ) - adapter_id = Column( - Integer, - ForeignKey( - 'adapter.id', - onupdate='CASCADE', ondelete='CASCADE' - ) - ) - - def __init__(self, 
os_id, adapter_id, **kwargs): - self.os_id = os_id - self.adapter_id = adapter_id - super(AdapterOS, self).__init__(**kwargs) - - def to_dict(self): - dict_info = self.os.to_dict() - dict_info.update(super(AdapterOS, self).to_dict()) - return dict_info - - -class OperatingSystem(BASE, HelperMixin): - """OS table.""" - __tablename__ = 'os' - - id = Column(Integer, primary_key=True) - parent_id = Column( - Integer, - ForeignKey('os.id', onupdate='CASCADE', ondelete='CASCADE'), - nullable=True - ) - name = Column(String(80), unique=True, nullable=False) - deployable = Column(Boolean, default=False) - - metadatas = relationship( - OSConfigMetadata, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('os') - ) - clusters = relationship( - Cluster, - backref=backref('os') - ) - hosts = relationship( - Host, - backref=backref('os') - ) - children = relationship( - 'OperatingSystem', - passive_deletes=True, passive_updates=True, - backref=backref('parent', remote_side=id) - ) - supported_adapters = relationship( - AdapterOS, - passive_deletes=True, passive_updates=True, - backref=backref('os') - ) - - def __init__(self, name): - self.name = name - super(OperatingSystem, self).__init__() - - def __str__(self): - return 'OperatingSystem[%s:%s]' % (self.id, self.name) - - @property - def root_metadatas(self): - return [ - metadata for metadata in self.metadatas - if metadata.parent_id is None - ] - - def metadata_dict(self): - dict_info = {} - if self.parent: - dict_info.update(self.parent.metadata_dict()) - for metadata in self.root_metadatas: - util.merge_dict(dict_info, metadata.to_dict()) - return dict_info - - @property - def os_supported_adapters(self): - supported_adapters = self.supported_adapters - if supported_adapters: - return supported_adapters - parent = self.parent - if parent: - return parent.os_supported_adapters - else: - return [] - - -class AdapterFlavorRole(BASE, HelperMixin): - """Adapter flavor roles.""" - - 
__tablename__ = 'adapter_flavor_role' - - flavor_id = Column( - Integer, - ForeignKey( - 'adapter_flavor.id', - onupdate='CASCADE', ondelete='CASCADE' - ), - primary_key=True - ) - role_id = Column( - Integer, - ForeignKey( - 'adapter_role.id', - onupdate='CASCADE', ondelete='CASCADE' - ), - primary_key=True - ) - - def __init__(self, flavor_id, role_id): - self.flavor_id = flavor_id - self.role_id = role_id - super(AdapterFlavorRole, self).__init__() - - def __str__(self): - return 'AdapterFlavorRole[%s:%s]' % (self.flavor_id, self.role_id) - - def validate(self): - super(AdapterFlavorRole, self).validate() - flavor_adapter_id = self.flavor.adapter_id - role_adapter_id = self.role.adapter_id - if flavor_adapter_id != role_adapter_id: - raise exception.InvalidParameter( - 'flavor adapter %s and role adapter %s does not match' % ( - flavor_adapter_id, role_adapter_id - ) - ) - - def to_dict(self): - dict_info = super(AdapterFlavorRole, self).to_dict() - dict_info.update( - self.role.to_dict() - ) - return dict_info - - -class FlavorConfigMetadata(BASE, MetadataMixin): - """flavor config metadata.""" - - __tablename__ = "flavor_config_metadata" - - id = Column(Integer, primary_key=True) - flavor_id = Column( - Integer, - ForeignKey( - 'adapter_flavor.id', - onupdate='CASCADE', ondelete='CASCADE' - ) - ) - parent_id = Column( - Integer, - ForeignKey( - 'flavor_config_metadata.id', - onupdate='CASCADE', ondelete='CASCADE' - ) - ) - field_id = Column( - Integer, - ForeignKey( - 'flavor_config_field.id', - onupdate='CASCADE', ondelete='CASCADE' - ) - ) - children = relationship( - 'FlavorConfigMetadata', - passive_deletes=True, passive_updates=True, - backref=backref('parent', remote_side=id) - ) - - __table_args__ = ( - UniqueConstraint('path', 'flavor_id', name='constraint'), - ) - - def __init__( - self, flavor_id, path, **kwargs - ): - self.flavor_id = flavor_id - self.path = path - super(FlavorConfigMetadata, self).__init__(**kwargs) - - def validate(self): - 
super(FlavorConfigMetadata, self).validate() - if not self.flavor: - raise exception.InvalidParameter( - 'flavor is not set in package metadata %s' % self.id - ) - - -class FlavorConfigField(BASE, FieldMixin): - """Flavor config metadata fields.""" - - __tablename__ = "flavor_config_field" - - metadatas = relationship( - FlavorConfigMetadata, - passive_deletes=True, passive_updates=True, - cascade="all, delete-orphan", - backref=backref('field') - ) - - def __init__(self, field, **kwargs): - self.field = field - super(FlavorConfigField, self).__init__(**kwargs) - - -class AdapterFlavor(BASE, HelperMixin): - """Adapter's flavors.""" - - __tablename__ = 'adapter_flavor' - - id = Column(Integer, primary_key=True) - adapter_id = Column( - Integer, - ForeignKey('adapter.id', onupdate='CASCADE', ondelete='CASCADE') - ) - name = Column(String(80), nullable=False) - display_name = Column(String(80)) - template = Column(String(80)) - _ordered_flavor_roles = Column( - 'ordered_flavor_roles', JSONEncoded, default=[] - ) - - flavor_roles = relationship( - AdapterFlavorRole, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('flavor') - ) - clusters = relationship( - Cluster, - backref=backref('flavor') - ) - metadatas = relationship( - FlavorConfigMetadata, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('flavor') - ) - - __table_args__ = ( - UniqueConstraint('name', 'adapter_id', name='constraint'), - ) - - def __str__(self): - return 'AdapterFlavor[%s:%s]' % (self.id, self.name) - - @property - def root_metadatas(self): - return [ - metadata for metadata in self.metadatas - if metadata.parent_id is None - ] - - def metadata_dict(self): - dict_info = {} - for metadata in self.root_metadatas: - logging.info('metadata from flavr config metadata: %s', metadata) - util.merge_dict(dict_info, metadata.to_dict()) - return dict_info - - @property - def ordered_flavor_roles(self): - 
flavor_roles = dict([ - (flavor_role.role.name, flavor_role) - for flavor_role in self.flavor_roles - ]) - ordered_flavor_roles = [] - for flavor_role in list(self._ordered_flavor_roles): - if flavor_role in flavor_roles: - ordered_flavor_roles.append(flavor_roles[flavor_role]) - return ordered_flavor_roles - - @ordered_flavor_roles.setter - def ordered_flavor_roles(self, value): - self._ordered_flavor_roles = list(value) - - @property - def patched_ordered_flavor_roles(self): - return self.ordered_flavor_roles - - @patched_ordered_flavor_roles.setter - def patched_ordered_flavor_roles(self, value): - ordered_flavor_roles = list(self._ordered_flavor_roles) - ordered_flavor_roles.extend(value) - self._ordered_flavor_roles = ordered_flavor_roles - - def __init__(self, name, adapter_id, **kwargs): - self.name = name - self.adapter_id = adapter_id - super(AdapterFlavor, self).__init__(**kwargs) - - def initialize(self): - if not self.display_name: - self.display_name = self.name - super(AdapterFlavor, self).initialize() - - def validate(self): - super(AdapterFlavor, self).validate() - if not self.template: - raise exception.InvalidParameter( - 'template is not set in adapter flavor %s' % self.id - ) - - def to_dict(self): - dict_info = super(AdapterFlavor, self).to_dict() - dict_info['roles'] = [ - flavor_role.to_dict() - for flavor_role in self.ordered_flavor_roles - ] - return dict_info - - -class AdapterRole(BASE, HelperMixin): - """Adapter's roles.""" - - __tablename__ = "adapter_role" - id = Column(Integer, primary_key=True) - name = Column(String(80), nullable=False) - display_name = Column(String(80)) - description = Column(Text) - optional = Column(Boolean, default=False) - adapter_id = Column( - Integer, - ForeignKey( - 'adapter.id', - onupdate='CASCADE', - ondelete='CASCADE' - ) - ) - - flavor_roles = relationship( - AdapterFlavorRole, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('role') - ) - - 
__table_args__ = ( - UniqueConstraint('name', 'adapter_id', name='constraint'), - ) - - def __init__(self, name, adapter_id, **kwargs): - self.name = name - self.adapter_id = adapter_id - super(AdapterRole, self).__init__(**kwargs) - - def __str__(self): - return 'AdapterRole[%s:%s]' % (self.id, self.name) - - def initialize(self): - if not self.description: - self.description = self.name - if not self.display_name: - self.display_name = self.name - super(AdapterRole, self).initialize() - - -class PackageConfigMetadata(BASE, MetadataMixin): - """package config metadata.""" - __tablename__ = "package_config_metadata" - - id = Column(Integer, primary_key=True) - adapter_id = Column( - Integer, - ForeignKey( - 'adapter.id', - onupdate='CASCADE', ondelete='CASCADE' - ) - ) - parent_id = Column( - Integer, - ForeignKey( - 'package_config_metadata.id', - onupdate='CASCADE', ondelete='CASCADE' - ) - ) - field_id = Column( - Integer, - ForeignKey( - 'package_config_field.id', - onupdate='CASCADE', ondelete='CASCADE' - ) - ) - children = relationship( - 'PackageConfigMetadata', - passive_deletes=True, passive_updates=True, - backref=backref('parent', remote_side=id) - ) - - __table_args__ = ( - UniqueConstraint('path', 'adapter_id', name='constraint'), - ) - - def __init__( - self, adapter_id, path, **kwargs - ): - self.adapter_id = adapter_id - self.path = path - super(PackageConfigMetadata, self).__init__(**kwargs) - - def validate(self): - super(PackageConfigMetadata, self).validate() - if not self.adapter: - raise exception.InvalidParameter( - 'adapter is not set in package metadata %s' % self.id - ) - - -class PackageConfigField(BASE, FieldMixin): - """Adapter cofig metadata fields.""" - __tablename__ = "package_config_field" - - metadatas = relationship( - PackageConfigMetadata, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('field')) - - def __init__(self, field, **kwargs): - self.field = field - 
super(PackageConfigField, self).__init__(**kwargs) - - -class Adapter(BASE, HelperMixin): - """Adapter table.""" - __tablename__ = 'adapter' - - id = Column(Integer, primary_key=True) - name = Column(String(80), unique=True, nullable=False) - display_name = Column(String(80)) - parent_id = Column( - Integer, - ForeignKey( - 'adapter.id', - onupdate='CASCADE', ondelete='CASCADE' - ), - nullable=True - ) - distributed_system_id = Column( - Integer, - ForeignKey( - 'distributed_system.id', - onupdate='CASCADE', ondelete='CASCADE' - ), - nullable=True - ) - os_installer_id = Column( - Integer, - ForeignKey( - 'os_installer.id', - onupdate='CASCADE', ondelete='CASCADE' - ), - nullable=True - ) - package_installer_id = Column( - Integer, - ForeignKey( - 'package_installer.id', - onupdate='CASCADE', ondelete='CASCADE' - ), - nullable=True - ) - deployable = Column( - Boolean, default=False - ) - - health_check_cmd = Column(String(80)) - - supported_oses = relationship( - AdapterOS, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('adapter') - ) - - roles = relationship( - AdapterRole, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('adapter') - ) - flavors = relationship( - AdapterFlavor, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('adapter') - ) - children = relationship( - 'Adapter', - passive_deletes=True, passive_updates=True, - backref=backref('parent', remote_side=id) - ) - metadatas = relationship( - PackageConfigMetadata, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('adapter') - ) - clusters = relationship( - Cluster, - backref=backref('adapter') - ) - - __table_args__ = ( - UniqueConstraint( - 'distributed_system_id', - 'os_installer_id', 'package_installer_id', name='constraint' - ), - ) - - def __init__( - self, name, **kwargs - ): - self.name = name - super(Adapter, 
self).__init__(**kwargs) - - def __str__(self): - return 'Adapter[%s:%s]' % (self.id, self.name) - - def initialize(self): - if not self.display_name: - self.display_name = self.name - super(Adapter, self).initialize() - - @property - def root_metadatas(self): - return [ - metadata for metadata in self.metadatas - if metadata.parent_id is None - ] - - def metadata_dict(self): - dict_info = {} - if self.parent: - dict_info.update(self.parent.metadata_dict()) - for metadata in self.root_metadatas: - util.merge_dict(dict_info, metadata.to_dict()) - return dict_info - - @property - def adapter_package_installer(self): - if self.package_installer: - return self.package_installer - elif self.parent: - return self.parent.adapter_package_installer - else: - return None - - @property - def adapter_os_installer(self): - if self.os_installer: - return self.os_installer - elif self.parent: - return self.parent.adapter_os_installer - else: - return None - - @property - def adapter_distributed_system(self): - distributed_system = self.distributed_system - if distributed_system: - return distributed_system - parent = self.parent - if parent: - return parent.adapter_distributed_system - else: - return None - - @property - def adapter_supported_oses(self): - supported_oses = self.supported_oses - if supported_oses: - return supported_oses - parent = self.parent - if parent: - return parent.adapter_supported_oses - else: - return [] - - @property - def adapter_roles(self): - roles = self.roles - if roles: - return roles - parent = self.parent - if parent: - return parent.adapter_roles - else: - return [] - - @property - def adapter_flavors(self): - flavors = self.flavors - if flavors: - return flavors - parent = self.parent - if parent: - return parent.adapter_flavors - else: - return [] - - def to_dict(self): - dict_info = super(Adapter, self).to_dict() - dict_info.update({ - 'supported_oses': [ - adapter_os.to_dict() - for adapter_os in self.adapter_supported_oses - ], - 
'flavors': [ - flavor.to_dict() for flavor in self.adapter_flavors - ] - }) - distributed_system = self.adapter_distributed_system - if distributed_system: - dict_info['distributed_system_id'] = distributed_system.id - dict_info['distributed_system_name'] = distributed_system.name - os_installer = self.adapter_os_installer - if os_installer: - dict_info['os_installer'] = os_installer.to_dict() - package_installer = self.adapter_package_installer - if package_installer: - dict_info['package_installer'] = package_installer.to_dict() - return dict_info - - -class DistributedSystem(BASE, HelperMixin): - """distributed system table.""" - __tablename__ = 'distributed_system' - - id = Column(Integer, primary_key=True) - parent_id = Column( - Integer, - ForeignKey( - 'distributed_system.id', - onupdate='CASCADE', ondelete='CASCADE' - ), - nullable=True - ) - name = Column(String(80), unique=True, nullable=False) - deployable = Column(Boolean, default=False) - - adapters = relationship( - Adapter, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('distributed_system') - ) - clusters = relationship( - Cluster, - backref=backref('distributed_system') - ) - children = relationship( - 'DistributedSystem', - passive_deletes=True, passive_updates=True, - backref=backref('parent', remote_side=id) - ) - - def __init__(self, name): - self.name = name - super(DistributedSystem, self).__init__() - - def __str__(self): - return 'DistributedSystem[%s:%s]' % (self.id, self.name) - - -class OSInstaller(BASE, InstallerMixin): - """OS installer table.""" - __tablename__ = 'os_installer' - id = Column(Integer, primary_key=True) - adpaters = relationship( - Adapter, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('os_installer') - ) - hosts = relationship( - Host, - backref=backref('os_installer') - ) - - def __init__(self, alias, **kwargs): - self.alias = alias - super(OSInstaller, 
self).__init__(**kwargs) - - def __str__(self): - return 'OSInstaller[%s:%s]' % (self.id, self.alias) - - -class PackageInstaller(BASE, InstallerMixin): - """package installer table.""" - __tablename__ = 'package_installer' - id = Column(Integer, primary_key=True) - adapters = relationship( - Adapter, - passive_deletes=True, passive_updates=True, - cascade='all, delete-orphan', - backref=backref('package_installer') - ) - - def __init__(self, alias, **kwargs): - self.alias = alias - super(PackageInstaller, self).__init__(**kwargs) - - def __str__(self): - return 'PackageInstaller[%s:%s]' % (self.id, self.alias) - - class Subnet(BASE, TimestampMixin, HelperMixin): """network table.""" __tablename__ = 'subnet' @@ -2841,6 +1850,7 @@ class Subnet(BASE, TimestampMixin, HelperMixin): return dict_info +# TODO(grace): move this global variable into HealthCheckReport. HEALTH_REPORT_STATES = ('verifying', 'success', 'finished', 'error') diff --git a/compass/deployment/installers/os_installers/cobbler/cobbler.py b/compass/deployment/installers/os_installers/cobbler/cobbler.py index 5de9a580..cfd83105 100644 --- a/compass/deployment/installers/os_installers/cobbler/cobbler.py +++ b/compass/deployment/installers/os_installers/cobbler/cobbler.py @@ -226,7 +226,7 @@ class CobblerInstaller(OSInstaller): os.path.join(self.tmpl_dir, os_version), self.SYS_TMPL_NAME ) if not os.path.exists(tmpl_path): - err_msg = "Template '%s' does not exists!" % self.SYS_TMPL_NAME + err_msg = "Template '%s' does not exists!" 
% tmpl_path logging.error(err_msg) raise Exception(err_msg) diff --git a/compass/deployment/utils/constants.py b/compass/deployment/utils/constants.py index 74257cb3..1fce77dc 100644 --- a/compass/deployment/utils/constants.py +++ b/compass/deployment/utils/constants.py @@ -30,7 +30,6 @@ USERNAME = 'username' # Adapter info related keywords -DIST_SYS_NAME = 'distributed_system_name' FLAVOR = 'flavor' FLAVORS = 'flavors' PLAYBOOK = 'playbook' diff --git a/compass/log_analyzor/adapter_matcher.py b/compass/log_analyzor/adapter_matcher.py index bd57a826..8630e01c 100644 --- a/compass/log_analyzor/adapter_matcher.py +++ b/compass/log_analyzor/adapter_matcher.py @@ -96,27 +96,27 @@ class PackageMatcher(object): """Progress matcher for package installer.""" def __init__( - self, package_installer_name, distributed_system_pattern, + self, package_installer_name, adapter_pattern, item_matcher, file_reader_factory ): self.name_ = re.compile(package_installer_name) - self.ds_regex_ = re.compile(distributed_system_pattern) + self.adapter_regex_ = re.compile(adapter_pattern) self.matcher_ = item_matcher self.file_reader_factory_ = file_reader_factory def __repr__(self): - return '%s[name:%s, ds_pattern:%s, matcher:%s]' % ( + return '%s[name:%s, adapter_pattern:%s, matcher:%s]' % ( self.__class__.__name__, self.name_.pattern, - self.ds_regex_.pattern, self.matcher_) + self.adapter_regex_.pattern, self.matcher_) - def match(self, package_installer_name, distributed_system_name): + def match(self, package_installer_name, adapter_name): """Check if the package matcher is acceptable.""" if package_installer_name is None: return False else: return all([ self.name_.match(package_installer_name), - self.ds_regex_.match(distributed_system_name) + self.adapter_regex_.match(adapter_name) ]) def update_progress(self, name, state, log_history_mapping): diff --git a/compass/log_analyzor/environment.py b/compass/log_analyzor/environment.py new file mode 100644 index 00000000..80ff7389 --- 
/dev/null +++ b/compass/log_analyzor/environment.py @@ -0,0 +1,29 @@ +# Copyright 2014 Huawei Technologies Co. Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""module to provide environment to load progress calculator configurations. + + .. moduleauthor:: Xiaodong Wang +""" +from compass.log_analyzor.adapter_matcher import AdapterItemMatcher +from compass.log_analyzor.file_matcher import FileMatcher +from compass.log_analyzor.file_matcher import FileReaderFactory +from compass.log_analyzor.line_matcher import IncrementalProgress +from compass.log_analyzor.line_matcher import LineMatcher +from compass.utils import setting_wrapper as setting +from compass.utils import util + + +ENV_GLOBALS = globals() +ENV_LOCALS = locals() diff --git a/compass/log_analyzor/line_matcher.py b/compass/log_analyzor/line_matcher.py index 7f5ce7a1..ada9ed61 100644 --- a/compass/log_analyzor/line_matcher.py +++ b/compass/log_analyzor/line_matcher.py @@ -158,7 +158,7 @@ class LineMatcher(object): self.progress_ = SameProgress() elif isinstance(progress, ProgressCalculator): self.progress_ = progress - elif util.is_instance(progress, [int, float]): + elif isinstance(progress, (int, long, float)): self.progress_ = RelativeProgress(progress) else: raise TypeError( diff --git a/compass/log_analyzor/progress_calculator.py b/compass/log_analyzor/progress_calculator.py index 685700bf..b0f35f21 100644 --- a/compass/log_analyzor/progress_calculator.py +++ 
b/compass/log_analyzor/progress_calculator.py @@ -20,6 +20,8 @@ import logging from compass.log_analyzor.adapter_matcher import OSMatcher from compass.log_analyzor.adapter_matcher import PackageMatcher +from compass.log_analyzor.environment import ENV_GLOBALS +from compass.log_analyzor.environment import ENV_LOCALS from compass.log_analyzor.file_matcher import FileReaderFactory from compass.utils import setting_wrapper as setting @@ -30,90 +32,102 @@ PACKAGE_ADAPTER_CONFIGURATIONS = None PROGRESS_CALCULATOR_CONFIGURATIONS = None -def _load_calculator_configurations(): +def _load_calculator_configurations(force=False): global PROGRESS_CALCULATOR_CONFIGURATIONS - if PROGRESS_CALCULATOR_CONFIGURATIONS is None: + if force or PROGRESS_CALCULATOR_CONFIGURATIONS is None: + env_locals = {} + env_locals.update(ENV_GLOBALS) + env_locals.update(ENV_LOCALS) PROGRESS_CALCULATOR_CONFIGURATIONS = util.load_configs( - setting.PROGRESS_CALCULATOR_DIR + setting.PROGRESS_CALCULATOR_DIR, + env_locals=env_locals ) - progress_calculator_configuration = ( - PROGRESS_CALCULATOR_CONFIGURATIONS[0] - ) - os_installer_configurations = None - package_installer_configurations = None - if progress_calculator_configuration is not None: - if 'OS_INSTALLER_CONFIGURATIONS' in ( + if not PROGRESS_CALCULATOR_CONFIGURATIONS: + logging.info('No configuration found for progress calculator.') + + global OS_ADAPTER_CONFIGURATIONS + if force or OS_ADAPTER_CONFIGURATIONS is None: + OS_ADAPTER_CONFIGURATIONS = [] + for progress_calculator_configuration in ( + PROGRESS_CALCULATOR_CONFIGURATIONS + ): + if 'OS_LOG_CONFIGURATIONS' in ( progress_calculator_configuration ): os_installer_configurations = ( - (progress_calculator_configuration[ - 'OS_INSTALLER_CONFIGURATIONS']) + progress_calculator_configuration['OS_LOG_CONFIGURATIONS'] ) - if 'PACKAGE_INSTALLER_CONFIGURATIONS' in ( + for os_installer_configuration in os_installer_configurations: + OS_ADAPTER_CONFIGURATIONS.append(OSMatcher( + os_installer_name=( + 
os_installer_configuration['os_installer_name'] + ), + os_pattern=os_installer_configuration['os_pattern'], + item_matcher=( + os_installer_configuration['item_matcher'] + ), + file_reader_factory=FileReaderFactory( + os_installer_configuration['logdir'] + ) + )) + if not OS_ADAPTER_CONFIGURATIONS: + logging.info( + 'no OS_LOG_CONFIGURATIONS section found ' + 'in progress calculator.' + ) + else: + logging.debug( + 'OS_ADAPTER_CONFIGURATIONS is\n%s', + OS_ADAPTER_CONFIGURATIONS + ) + + global PACKAGE_ADAPTER_CONFIGURATIONS + if force or PACKAGE_ADAPTER_CONFIGURATIONS is None: + PACKAGE_ADAPTER_CONFIGURATIONS = [] + for progress_calculator_configuration in ( + PROGRESS_CALCULATOR_CONFIGURATIONS + ): + if 'ADAPTER_LOG_CONFIGURATIONS' in ( progress_calculator_configuration ): package_installer_configurations = ( - (progress_calculator_configuration[ - 'PACKAGE_INSTALLER_CONFIGURATIONS']) + progress_calculator_configuration[ + 'ADAPTER_LOG_CONFIGURATIONS' + ] ) + for package_installer_configuration in ( + package_installer_configurations + ): + PACKAGE_ADAPTER_CONFIGURATIONS.append(PackageMatcher( + package_installer_name=( + package_installer_configuration[ + 'package_installer_name' + ] + ), + adapter_pattern=( + package_installer_configuration['adapter_pattern'] + ), + item_matcher=( + package_installer_configuration['item_matcher'] + ), + file_reader_factory=FileReaderFactory( + package_installer_configuration['logdir'] + ) + )) + if not PACKAGE_ADAPTER_CONFIGURATIONS: + logging.info( + 'no PACKAGE_LOG_CONFIGURATIONS section found ' + 'in progress calculator.' 
+ ) else: - logging.debug('No configuration found for progress calculator.') + logging.debug( + 'PACKAGE_ADAPTER_CONFIGURATIONS is\n%s', + PACKAGE_ADAPTER_CONFIGURATIONS + ) - global OS_ADAPTER_CONFIGURATIONS - if OS_ADAPTER_CONFIGURATIONS is None: - if os_installer_configurations is not None: - OS_ADAPTER_CONFIGURATIONS = [ - OSMatcher( - os_installer_name='cobbler', - os_pattern='CentOS-6.*', - item_matcher=( - (os_installer_configurations[ - 'cobbler']['CentOS6']) - ), - file_reader_factory=FileReaderFactory( - setting.INSTALLATION_LOGDIR['CobblerInstaller'] - ) - ), - OSMatcher( - os_installer_name='cobbler', - os_pattern='CentOS-7.*', - item_matcher=( - (os_installer_configurations[ - 'cobbler']['CentOS7']) - ), - file_reader_factory=FileReaderFactory( - setting.INSTALLATION_LOGDIR['CobblerInstaller'] - ) - ), - OSMatcher( - os_installer_name='cobbler', - os_pattern='Ubuntu.*', - item_matcher=( - (os_installer_configurations[ - 'cobbler']['Ubuntu']) - ), - file_reader_factory=FileReaderFactory( - setting.INSTALLATION_LOGDIR['CobblerInstaller'] - ) - ) - ] - global PACKAGE_ADAPTER_CONFIGURATIONS - if PACKAGE_ADAPTER_CONFIGURATIONS is None: - if package_installer_configurations is not None: - PACKAGE_ADAPTER_CONFIGURATIONS = [ - PackageMatcher( - package_installer_name='chef_installer', - distributed_system_pattern='openstack.*', - item_matcher=( - (package_installer_configurations[ - 'chef_installer']['openstack']) - ), - file_reader_factory=FileReaderFactory( - setting.INSTALLATION_LOGDIR['ChefInstaller'] - ) - ) - ] +def load_calculator_configurations(force_reload=False): + _load_calculator_configurations(force=force_reload) def _get_os_matcher(os_installer_name, os_name): @@ -131,22 +145,22 @@ def _get_os_matcher(os_installer_name, os_name): def _get_package_matcher( - package_installer_name, distributed_system_name + package_installer_name, adapter_name ): - """Get package adapter matcher by pacakge name and installer name.""" + """Get package adapter 
matcher by adapter name and installer name.""" _load_calculator_configurations() for configuration in PACKAGE_ADAPTER_CONFIGURATIONS: if configuration.match( package_installer_name, - distributed_system_name + adapter_name ): return configuration else: logging.debug('configuration %s does not match %s and %s', - configuration, distributed_system_name, + configuration, adapter_name, package_installer_name) - logging.error('No configuration found for package installer %s os %s', - package_installer_name, distributed_system_name) + logging.error('No configuration found for package installer %s adapter %s', + package_installer_name, adapter_name) return None @@ -174,11 +188,11 @@ def update_clusterhost_progress(clusterhost_mapping): ) in ( clusterhost_mapping.items() ): - distributed_system_name = clusterhost['distributed_system_name'] + adapter_name = clusterhost['adapter_name'] package_installer_name = clusterhost['package_installer']['name'] package_matcher = _get_package_matcher( package_installer_name, - distributed_system_name + adapter_name ) if not package_matcher: continue diff --git a/compass/tasks/tasks.py b/compass/tasks/tasks.py index 26f48dc5..e78ff029 100644 --- a/compass/tasks/tasks.py +++ b/compass/tasks/tasks.py @@ -30,6 +30,7 @@ from compass.actions import update_progress from compass.db.api import adapter_holder as adapter_api from compass.db.api import database from compass.db.api import metadata_holder as metadata_api +from compass.log_analyzor import progress_calculator from compass.tasks.client import celery from compass.utils import flags @@ -46,6 +47,8 @@ def global_celery_init(**_): database.init() adapter_api.load_adapters() metadata_api.load_metadatas() + adapter_api.load_flavors() + progress_calculator.load_calculator_configurations() @setup_logging.connect() diff --git a/compass/tests/actions/deploy/test_deploy.py b/compass/tests/actions/deploy/test_deploy.py index f680e70e..dc542180 100644 --- 
a/compass/tests/actions/deploy/test_deploy.py +++ b/compass/tests/actions/deploy/test_deploy.py @@ -24,12 +24,16 @@ import unittest2 os.environ['COMPASS_IGNORE_SETTING'] = 'true' -from compass.actions import deploy -from compass.actions import util from compass.utils import setting_wrapper as setting reload(setting) +from compass.actions import deploy +from compass.actions import util +from compass.utils import flags +from compass.utils import logsetting + + class TestDeployAction(unittest2.TestCase): """Test deploy moudle functions in actions.""" def setUp(self): @@ -169,3 +173,9 @@ class TestDeployAction(unittest2.TestCase): output = util.ActionHelper.get_hosts_info(1, [1], None) self.maxDiff = None self.assertDictEqual(expected_output, output) + + +if __name__ == '__main__': + flags.init() + logsetting.init() + unittest2.main() diff --git a/compass/tests/actions/update_progress/test_update_progress.py b/compass/tests/actions/update_progress/test_update_progress.py index e6808970..0b3ea650 100644 --- a/compass/tests/actions/update_progress/test_update_progress.py +++ b/compass/tests/actions/update_progress/test_update_progress.py @@ -27,8 +27,6 @@ os.environ['COMPASS_IGNORE_SETTING'] = 'true' from compass.utils import setting_wrapper as setting - - reload(setting) @@ -60,6 +58,7 @@ from compass.log_analyzor import progress_calculator from compass.utils import flags from compass.utils import logsetting + ADAPTER_NAME = 'openstack_icehouse' OS_NAME = 'CentOS-6.5-x86_64' SWITCH_IP = '172.29.8.40' @@ -72,8 +71,9 @@ class TestProgressCalculator(unittest2.TestCase): """Test end to end.""" def _prepare_database(self): - adapter.load_adapters() - metadata.load_metadatas() + adapter.load_adapters(force_reload=True) + metadata.load_metadatas(force_reload=True) + adapter.load_flavors(force_reload=True) self.user_object = ( user_api.get_user_object( @@ -87,25 +87,24 @@ class TestProgressCalculator(unittest2.TestCase): # get adapter information list_adapters = 
adapter.list_adapters(user=self.user_object) - for adptr in list_adapters: + for adpt in list_adapters: self.adapter_id = None - if adptr['name'] != ADAPTER_NAME: + if adpt['name'] != ADAPTER_NAME: continue - self.adapter_id = adptr['id'] + self.adapter_id = adpt['id'] self.os_id = None - for supported_os in adptr['supported_oses']: + for supported_os in adpt['supported_oses']: if supported_os['name'] == OS_NAME: self.os_id = supported_os['os_id'] break if not self.os_id: continue if ( - 'package_installer' in adptr.keys() and - adptr['flavors'] != [] and - adptr['distributed_system_name'] == 'openstack' + 'package_installer' in adpt.keys() and + adpt['flavors'] != [] ): self.flavor_id = None - for flavor in adptr['flavors']: + for flavor in adpt['flavors']: if flavor['name'] == 'allinone': self.flavor_id = flavor['id'] break @@ -401,7 +400,7 @@ class TestProgressCalculator(unittest2.TestCase): with open(target_log, 'w') as f: for single_line in raw_file: f.write(single_line + '\n') - f.close + f.close() def _mock_lock(self): @contextmanager @@ -419,10 +418,15 @@ class TestProgressCalculator(unittest2.TestCase): def setUp(self): super(TestProgressCalculator, self).setUp() + os.environ['COMPASS_IGNORE_SETTING'] = 'true' parent_path = os.path.abspath(os.path.join( os.path.dirname(__file__), "../../../.." 
)) - setting.CONFIG_DIR = os.path.join(parent_path, 'conf') + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( + parent_path, + 'conf' + ) + reload(setting) logsetting.init() self._mock_lock() database.init('sqlite://') @@ -439,7 +443,7 @@ class TestProgressCalculator(unittest2.TestCase): 'CobblerInstaller': setting.COBBLER_INSTALLATION_LOGDIR, 'ChefInstaller': setting.CHEF_INSTALLATION_LOGDIR } - reload(progress_calculator) + progress_calculator.load_calculator_configurations(force_reload=True) def tearDown(self): super(TestProgressCalculator, self).tearDown() diff --git a/compass/tests/api/data/adapter/ceph.conf b/compass/tests/api/data/adapter/ceph.conf index 6b6c8139..d63ecf13 100644 --- a/compass/tests/api/data/adapter/ceph.conf +++ b/compass/tests/api/data/adapter/ceph.conf @@ -1,3 +1,2 @@ NAME = 'ceph' PARENT = 'general' -DISTRIBUTED_SYSTEM = 'ceph' diff --git a/compass/tests/api/data/adapter/openstack.conf b/compass/tests/api/data/adapter/openstack.conf index 2f948fb0..44d0a9af 100644 --- a/compass/tests/api/data/adapter/openstack.conf +++ b/compass/tests/api/data/adapter/openstack.conf @@ -1,4 +1,3 @@ NAME = 'openstack' PARENT = 'general' -DISTRIBUTED_SYSTEM = 'openstack' SUPPORTED_OSES = ['CentOS6.5', 'Ubuntu12.04'] diff --git a/compass/tests/api/data/flavor/openstack_chef.conf b/compass/tests/api/data/flavor/openstack_chef.conf index 7137d150..a0e8a507 100644 --- a/compass/tests/api/data/flavor/openstack_chef.conf +++ b/compass/tests/api/data/flavor/openstack_chef.conf @@ -4,20 +4,4 @@ FLAVORS = [{ 'display_name': 'allinone', 'template': 'allinone.tmpl', 'roles': ['allinone-compute'] -}, { - 'flavor': 'multiroles', - 'display_name': 'multiroles', - 'template': 'multiroles.tmpl', - 'roles': [ - 'os-compute-worker', 'os-network', 'os-block-storage-worker', - 'os-image', 'os-compute-vncproxy', 'os-controller', - 'os-ops-messaging', 'os-ops-database', 'ha-proxy' - ] -},{ - 'flavor': 'single-contoller-multi-compute', - 'display_name': 'Single Controller, 
Multi-compute', - 'template': 'base.tmpl', - 'roles': [ - 'os-controller', 'os-compute-worker', 'os-network' - ] }] diff --git a/compass/tests/api/data/flavor_mapping/allinone.conf b/compass/tests/api/data/flavor_mapping/allinone.conf index f5244ca5..0ccd3fe8 100644 --- a/compass/tests/api/data/flavor_mapping/allinone.conf +++ b/compass/tests/api/data/flavor_mapping/allinone.conf @@ -1,4 +1,6 @@ -allinone = { +ADAPTER = 'openstack_icehouse' +FLAVOR = 'allinone' +CONFIG_MAPPING = { "mapped_name": "flavor_config", "mapped_children": [{ "security": { diff --git a/compass/tests/api/data/flavor_metadata/allinone.conf b/compass/tests/api/data/flavor_metadata/allinone.conf index 32bac253..7d248ecf 100644 --- a/compass/tests/api/data/flavor_metadata/allinone.conf +++ b/compass/tests/api/data/flavor_metadata/allinone.conf @@ -1,2 +1,3 @@ +ADAPTER = 'openstack_icehouse' FLAVOR = 'allinone' METADATA = {} diff --git a/compass/tests/api/data/os_mapping/os_mapping.conf b/compass/tests/api/data/os_mapping/os_mapping.conf index 15c06ed1..53e9e660 100644 --- a/compass/tests/api/data/os_mapping/os_mapping.conf +++ b/compass/tests/api/data/os_mapping/os_mapping.conf @@ -1,4 +1,5 @@ -OS_CONFIG_MAPPING = { +OS = 'general' +CONFIG_MAPPING = { "mapped_name": "os_global_config", "mapped_children": [{ "server_credentials":{ diff --git a/compass/tests/api/test_api.py b/compass/tests/api/test_api.py index 5fa6a36f..6ba09e67 100644 --- a/compass/tests/api/test_api.py +++ b/compass/tests/api/test_api.py @@ -57,15 +57,17 @@ class ApiTestCase(unittest2.TestCase): def setUp(self): super(ApiTestCase, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() - adapter_api.load_adapters() - metadata_api.load_metadatas() + adapter_api.load_adapters(force_reload=True) + 
metadata_api.load_metadatas(force_reload=True) + adapter_api.load_flavors(force_reload=True) from compass.api import api as compass_api application = compass_api.app @@ -168,6 +170,14 @@ class ApiTestCase(unittest2.TestCase): for flavor in adapter['flavors']: flavor_id = flavor['id'] break + if not adapter_name: + raise Exception('adapter name not found') + if not adapter_id: + raise Exception('adapter id not found') + if not os_id: + raise Exception('os id not found') + if not flavor_id: + raise Exception('flavor id not found') return (adapter_name, adapter_id, os_id, flavor_id) @@ -336,9 +346,18 @@ class TestClusterAPI(ApiTestCase): data['name'] = 'cluster_invalid' data['adapter_id'] = 9 data['os_id'] = 1 + data['flavor_id'] = flavor_id + return_value = self.post(url, data) + self.assertEqual(return_value.status_code, 404) + + # add a cluster with a non-existed flavor-id + data = {} + data['name'] = 'cluster_invalid' + data['adapter_id'] = adapter_id + data['os_id'] = 1 data['flavor_id'] = 1 return_value = self.post(url, data) - self.assertEqual(return_value.status_code, 400) + self.assertEqual(return_value.status_code, 404) def test_update_cluster(self): # update a cluster sucessfully @@ -403,8 +422,7 @@ class TestClusterAPI(ApiTestCase): # give a non-existed cluster_id url = '/clusters/99/hosts' return_value = self.get(url) - resp = json.loads(return_value.get_data()) - self.assertEqual(resp, []) + self.assertEqual(return_value.status_code, 404) def test_show_cluster_host(self): # show a cluster_host successfully @@ -951,8 +969,7 @@ class TestSwitchMachines(ApiTestCase): # give a non-existed switch_id url = '/switches/99/machines' return_value = self.get(url) - resp = json.loads(return_value.get_data()) - self.assertEqual(resp, []) + self.assertEqual(return_value.status_code, 404) def test_add_switch_machine(self): # add a switch machine successfully @@ -978,12 +995,12 @@ class TestSwitchMachines(ApiTestCase): self.assertEqual(return_value.status_code, 409) # 
add a invalid switch machine - url = '/switches/2/machines' + url = 's/witchedes' data = { 'mac': 'xxx' } return_value = self.post(url, data) - self.assertEqual(return_value.status_code, 400) + self.assertEqual(return_value.status_code, 404) def test_add_switch_machines(self): # batch switch machines @@ -1030,7 +1047,7 @@ class TestSwitchMachines(ApiTestCase): 'port': '200', 'mac': 'b1:b2:c3:d4:e5:f6' }] - expect_duplicate = {'mac': 'a1:b2:c3:d4:e5:f6', 'port': '101'} + expect_duplicate = [{'mac': 'a1:b2:c3:d4:e5:f6', 'port': '101'}] expect_failed = [ {'mac': 'a1:b2:f3:d4:e5:f6', 'port': '100'}, {'mac': 'a1:b2:c3:d4:e5:f6', 'port': '102'} @@ -1049,18 +1066,21 @@ class TestSwitchMachines(ApiTestCase): if k == 'fail_switches_machines': for item in v: res_fail.append(item) + self.assertEqual(len(res), len(expected)) for i, v in enumerate(res): - self.assertTrue( - all(item in res[i].items() for item in expected[i].items()) + self.assertDictContainsSubset( + expected[i], res[i] ) + self.assertEqual(len(res_fail), len(expect_failed)) for i, v in enumerate(res_fail): - self.assertTrue( - all(item in res_fail[i].items() for - item in expect_failed[i].items()) + self.assertDictContainsSubset( + expect_failed[i], res_fail[i] + ) + self.assertEqual(len(res_du), len(expect_duplicate)) + for i, v in enumerate(res_du): + self.assertDictContainsSubset( + expect_duplicate[i], res_du[i] ) - self.assertTrue( - all(item in res_du[0].items() for item in expect_duplicate.items()) - ) def test_show_switch_machine(self): # show a switch_machine successfully diff --git a/compass/tests/api/test_health_check_api.py b/compass/tests/api/test_health_check_api.py index e74419a4..2adfb5da 100644 --- a/compass/tests/api/test_health_check_api.py +++ b/compass/tests/api/test_health_check_api.py @@ -17,17 +17,23 @@ import os import simplejson as json +import unittest2 os.environ['COMPASS_IGNORE_SETTING'] = 'true' + + from compass.utils import setting_wrapper as setting reload(setting) +from 
test_api import ApiTestCase + + from compass.db.api import cluster as cluster_db from compass.db.api import health_check_report as health_check_db -from compass.db import models -from compass.tests.api.test_api import ApiTestCase +from compass.utils import flags +from compass.utils import logsetting report_sample = { @@ -152,9 +158,14 @@ class TestHealthCheckAPI(ApiTestCase): self.assertEqual(403, return_value.status_code) # Cluster has been deployed successfully. - user = models.User.query.filter_by(email='admin@huawei.com').first() cluster_db.update_cluster_state( - self.cluster_id, user=user, state='SUCCESSFUL' + self.cluster_id, state='SUCCESSFUL' ) return_value = self.test_client.post(url, data=request_data) self.assertEqual(202, return_value.status_code) + + +if __name__ == '__main__': + flags.init() + logsetting.init() + unittest2.main() diff --git a/compass/tests/db/api/base.py b/compass/tests/db/api/base.py index 8a63f697..18bc87ae 100644 --- a/compass/tests/db/api/base.py +++ b/compass/tests/db/api/base.py @@ -41,15 +41,17 @@ class BaseTest(unittest2.TestCase): def setUp(self): super(BaseTest, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() - adapter_api.load_adapters() - metadata_api.load_metadatas() + adapter_api.load_adapters(force_reload=True) + metadata_api.load_metadatas(force_reload=True) + adapter_api.load_flavors(force_reload=True) self.user_object = ( user_api.get_user_object( setting.COMPASS_ADMIN_EMAIL diff --git a/compass/tests/db/api/data/adapter/ceph.conf b/compass/tests/db/api/data/adapter/ceph.conf index 6b6c8139..d63ecf13 100644 --- a/compass/tests/db/api/data/adapter/ceph.conf +++ b/compass/tests/db/api/data/adapter/ceph.conf @@ -1,3 +1,2 @@ NAME = 'ceph' PARENT = 'general' -DISTRIBUTED_SYSTEM = 'ceph' 
diff --git a/compass/tests/db/api/data/adapter/openstack.conf b/compass/tests/db/api/data/adapter/openstack.conf index 2f948fb0..44d0a9af 100644 --- a/compass/tests/db/api/data/adapter/openstack.conf +++ b/compass/tests/db/api/data/adapter/openstack.conf @@ -1,4 +1,3 @@ NAME = 'openstack' PARENT = 'general' -DISTRIBUTED_SYSTEM = 'openstack' SUPPORTED_OSES = ['CentOS6.5', 'Ubuntu12.04'] diff --git a/compass/tests/db/api/data/flavor_mapping/ha-multinodes.conf b/compass/tests/db/api/data/flavor_mapping/ha-multinodes.conf index bb18319f..6b998d14 100644 --- a/compass/tests/db/api/data/flavor_mapping/ha-multinodes.conf +++ b/compass/tests/db/api/data/flavor_mapping/ha-multinodes.conf @@ -1,4 +1,6 @@ -HA_MULTINODES = { +ADAPTER = 'openstack-icehouse' +FLAVOR = 'HA-multinodes' +CONFIG_MAPPING = { "mapped_name": "flavor_config", "mapped_children": [{ "security": { diff --git a/compass/tests/db/api/data/flavor_metadata/HA-multinodes.conf b/compass/tests/db/api/data/flavor_metadata/HA-multinodes.conf index 5f9e28d5..4ec7b29f 100644 --- a/compass/tests/db/api/data/flavor_metadata/HA-multinodes.conf +++ b/compass/tests/db/api/data/flavor_metadata/HA-multinodes.conf @@ -1,3 +1,4 @@ +ADAPTER = 'openstack_icehouse' FLAVOR = 'HA-multinodes' METADATA = { 'ha_proxy': { diff --git a/compass/tests/db/api/data/flavor_metadata/allinone.conf b/compass/tests/db/api/data/flavor_metadata/allinone.conf index 32bac253..7d248ecf 100644 --- a/compass/tests/db/api/data/flavor_metadata/allinone.conf +++ b/compass/tests/db/api/data/flavor_metadata/allinone.conf @@ -1,2 +1,3 @@ +ADAPTER = 'openstack_icehouse' FLAVOR = 'allinone' METADATA = {} diff --git a/compass/tests/db/api/data/flavor_metadata/single-contoller-multi-compute.conf b/compass/tests/db/api/data/flavor_metadata/single-contoller-multi-compute.conf index 9822f712..69faf5d2 100644 --- a/compass/tests/db/api/data/flavor_metadata/single-contoller-multi-compute.conf +++ 
b/compass/tests/db/api/data/flavor_metadata/single-contoller-multi-compute.conf @@ -1,2 +1,3 @@ +ADAPTER = 'openstack_icehouse' FLAVOR = 'single-contoller-multi-compute' METADATA = {} diff --git a/compass/tests/db/api/data/os_mapping/os_mapping.conf b/compass/tests/db/api/data/os_mapping/os_mapping.conf index 0bc72708..196581c3 100644 --- a/compass/tests/db/api/data/os_mapping/os_mapping.conf +++ b/compass/tests/db/api/data/os_mapping/os_mapping.conf @@ -1,4 +1,5 @@ -OS_CONFIG_MAPPING = { +OS = 'general' +CONFIG_MAPPING = { "mapped_name": "os_global_config", "mapped_children": [{ "server_credentials":{ diff --git a/compass/tests/db/api/test_adapter_holder.py b/compass/tests/db/api/test_adapter_holder.py index c9b7e722..0ecd711a 100644 --- a/compass/tests/db/api/test_adapter_holder.py +++ b/compass/tests/db/api/test_adapter_holder.py @@ -43,13 +43,100 @@ from compass.utils import util class AdapterTestCase(unittest2.TestCase): """Adapter base test case.""" + def _mock_load_configs(self, config_dir): + if config_dir == setting.OS_INSTALLER_DIR: + return [{ + 'NAME': 'cobbler', + 'INSTANCE_NAME': 'cobbler', + 'SETTINGS': { + 'cobbler_url': 'http://127.0.0.1/cobbler_api', + 'credentials': { + 'username': 'cobbler', + 'password': 'cobbler' + } + } + }] + elif config_dir == setting.PACKAGE_INSTALLER_DIR: + return [{ + 'NAME': 'chef_installer', + 'INSTANCE_NAME': 'chef_installer', + 'SETTINGS': { + 'chef_url': 'https://127.0.0.1', + 'key_dir': '', + 'client_name': '', + 'databags': [ + 'user_passwords', 'db_passwords', + 'service_passwords', 'secrets' + ] + } + }] + elif config_dir == setting.ADAPTER_DIR: + return [{ + 'NAME': 'openstack_icehouse', + 'DISLAY_NAME': 'Test OpenStack Icehouse', + 'PACKAGE_INSTALLER': 'chef_installer', + 'OS_INSTALLER': 'cobbler', + 'SUPPORTED_OS_PATTERNS': ['(?i)centos.*', '(?i)ubuntu.*'], + 'DEPLOYABLE': True + }, { + 'NAME': 'ceph(chef)', + 'DISPLAY_NAME': 'ceph(ceph)', + 'PACKAGE_INSTALLER': 'chef_installer', + 'OS_INSTALLER': 'cobbler', 
+ 'SUPPORTED_OS_PATTERNS': ['(?i)centos.*', '(?i)ubuntu.*'], + 'DEPLOYABLE': True + }, { + 'NAME': 'os_only', + 'OS_INSTALLER': 'cobbler', + 'SUPPORTED_OS_PATTERNS': ['(?i)centos.*', '(?i)ubuntu.*'], + 'DEPLOYABLE': True + }] + elif config_dir == setting.ADAPTER_ROLE_DIR: + return [{ + 'ADAPTER_NAME': 'openstack_icehouse', + 'ROLES': [{ + 'role': 'allinone-compute', + 'display_name': 'all in one compute', + 'description': 'all in one compute', + 'optional': True + }] + }] + elif config_dir == setting.ADAPTER_FLAVOR_DIR: + return [{ + 'ADAPTER_NAME': 'openstack_icehouse', + 'FLAVORS': [{ + 'flavor': 'allinone', + 'display_name': 'allinone', + 'template': 'allinone.tmpl', + 'roles': ['allinone-compute'] + }, { + 'flavor': 'multiroles', + 'display_name': 'multiroles', + 'template': 'multiroles.tmpl', + 'roles': ['allinone-compute'] + }, { + 'flavor': 'HA-multinodes', + 'display_name': 'Multi-node Cluster with HA', + 'template': 'ha_multinodes.tmpl', + 'roles': ['allinone-compute'] + }, { + 'flavor': 'single-contoller-multi-compute', + 'display_name': 'Single Controller, Multi-compute', + 'template': 'base.tmpl', + 'roles': ['allinone-compute'] + }] + }] + else: + return [] + def setUp(self): super(AdapterTestCase, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() self.user_object = ( @@ -58,27 +145,26 @@ class AdapterTestCase(unittest2.TestCase): ) ) - mock_config = mock.Mock() + mock_config = mock.Mock(side_effect=self._mock_load_configs) self.backup_adapter_configs = util.load_configs util.load_configs = mock_config - configs = [{ - 'NAME': 'openstack_test', - 'DISLAY_NAME': 'Test OpenStack Icehouse', - 'PACKAGE_INSTALLER': 'chef_installer', - 'OS_INSTALLER': 'cobbler', - 'SUPPORTED_OS_PATTERNS': ['(?i)centos.*', '(?i)ubuntu.*'], - 
'DEPLOYABLE': True - }] - util.load_configs.return_value = configs - with database.session() as session: - adapter_api.add_adapters_internal(session) - adapter.load_adapters() + adapter.load_adapters(force_reload=True) + adapter.load_flavors(force_reload=True) self.adapter_object = adapter.list_adapters(user=self.user_object) + self.adapter_obj = None + self.adapter_id = None + self.flavor_id = None for adapter_obj in self.adapter_object: if adapter_obj['name'] == 'openstack_icehouse': + self.adapter_obj = adapter_obj self.adapter_id = adapter_obj['id'] break + for flavor in self.adapter_obj['flavors']: + if flavor['name'] == 'HA-multinodes': + self.flavor_id = flavor['id'] + break + def tearDown(self): super(AdapterTestCase, self).tearDown() util.load_configs = self.backup_adapter_configs @@ -106,7 +192,6 @@ class TestListAdapters(AdapterTestCase): 'openstack_icehouse', 'os_only', 'ceph(chef)', - 'openstack_test' ] self.assertIsNotNone(adapters) for expect in expects: @@ -143,6 +228,55 @@ class TestGetAdapter(AdapterTestCase): ) +class TestListFlavors(AdapterTestCase): + def setUp(self): + super(TestListFlavors, self).setUp() + + def tesrDown(self): + super(TestListFlavors, self).tearDown() + + def test_list_flavors(self): + """Test list flavors.""" + flavors = adapter.list_flavors( + user=self.user_object + ) + flavor_name = [] + for flavor in flavors: + flavor_name.append(flavor['name']) + expected = [ + 'allinone', + 'multiroles', + 'HA-multinodes', + 'single-contoller-multi-compute' + ] + for expect in expected: + self.assertIn(expect, flavor_name) + + +class TestGetFlavors(AdapterTestCase): + def setUp(self): + super(TestGetFlavors, self).setUp() + + def tearDown(self): + super(TestGetFlavors, self).tearDown() + + def test_get_flavor(self): + """Test get a flavor.""" + flavor = adapter.get_flavor( + self.flavor_id, + user=self.user_object + ) + expected = { + 'display_name': 'Multi-node Cluster with HA', + 'id': 'openstack_icehouse:HA-multinodes', + 
'template': 'ha_multinodes.tmpl', + 'name': 'HA-multinodes' + } + self.assertTrue( + all(item in flavor.items() for item in expected.items()) + ) + + if __name__ == '__main__': flags.init() logsetting.init() diff --git a/compass/tests/db/api/test_cluster.py b/compass/tests/db/api/test_cluster.py index a76adeb5..c5cb309a 100644 --- a/compass/tests/db/api/test_cluster.py +++ b/compass/tests/db/api/test_cluster.py @@ -51,15 +51,17 @@ class ClusterTestCase(unittest2.TestCase): def setUp(self): super(ClusterTestCase, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() - adapter.load_adapters() - metadata.load_metadatas() + adapter.load_adapters(force_reload=True) + metadata.load_metadatas(force_reload=True) + adapter.load_flavors(force_reload=True) self.user_object = ( user_api.get_user_object( @@ -1771,7 +1773,7 @@ class TestGetClusterHostSelfState(ClusterTestCase): def test_get_cluster_host_self_state(self): cluster_host_self_state = cluster.get_cluster_host_self_state( self.cluster_id, - self.host_id, + self.host_id[0], user=self.user_object, ) self.assertEqual(cluster_host_self_state['state'], 'UNINITIALIZED') @@ -1823,13 +1825,13 @@ class TestUpdateClusterHostState(ClusterTestCase): def test_update_cluster_host_state(self): cluster.update_cluster_host_state( self.cluster_id, - self.host_id, + self.host_id[0], user=self.user_object, state='INSTALLING' ) update_state = cluster.get_cluster_host_state( self.cluster_id, - self.host_id, + self.host_id[0], user=self.user_object, ) self.assertEqual(update_state['state'], 'INSTALLING') diff --git a/compass/tests/db/api/test_host.py b/compass/tests/db/api/test_host.py index b784aebd..6aafc05f 100644 --- a/compass/tests/db/api/test_host.py +++ b/compass/tests/db/api/test_host.py @@ -51,15 +51,17 
@@ class HostTestCase(unittest2.TestCase): def setUp(self): super(HostTestCase, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() - adapter.load_adapters() - metadata.load_metadatas() + adapter.load_adapters(force_reload=True) + metadata.load_metadatas(force_reload=True) + adapter.load_flavors(force_reload=True) self.user_object = ( user_api.get_user_object( diff --git a/compass/tests/db/api/test_metadata_holder.py b/compass/tests/db/api/test_metadata_holder.py index bec8a581..99618326 100644 --- a/compass/tests/db/api/test_metadata_holder.py +++ b/compass/tests/db/api/test_metadata_holder.py @@ -45,15 +45,17 @@ class MetadataTestCase(unittest2.TestCase): def setUp(self): super(MetadataTestCase, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() - adapter.load_adapters() - metadata.load_metadatas() + adapter.load_adapters(force_reload=True) + metadata.load_metadatas(force_reload=True) + adapter.load_flavors(force_reload=True) # Get a os_id and adapter_id self.user_object = ( @@ -249,6 +251,7 @@ class TestGetFlavorMetadata(MetadataTestCase): config_dir, *args, **kwargs ) config = { + 'ADAPTER': 'openstack_icehouse', 'FLAVOR': 'HA-multinodes', 'METADATA': { 'test_ha_proxy': { @@ -279,7 +282,7 @@ class TestGetFlavorMetadata(MetadataTestCase): ) self.assertIsNotNone(flavor_metadata) self.assertTrue( - 'test_ha_proxy' in flavor_metadata['flavor_config'].keys() + 'test_ha_proxy' in flavor_metadata['package_config'].keys() ) @@ -310,55 +313,6 @@ class TestGetPackageOsMetadata(MetadataTestCase): ) -class 
TestListFlavors(MetadataTestCase): - def setUp(self): - super(TestListFlavors, self).setUp() - - def tesrDown(self): - super(TestListFlavors, self).tearDown() - - def test_list_flavors(self): - """Test list flavors.""" - flavors = metadata.list_flavors( - user=self.user_object - ) - flavor_name = [] - for flavor in flavors: - flavor_name.append(flavor['name']) - expected = [ - 'allinone', - 'multiroles', - 'HA-multinodes', - 'single-contoller-multi-compute' - ] - for expect in expected: - self.assertIn(expect, flavor_name) - - -class TestGetFlavors(MetadataTestCase): - def setUp(self): - super(TestGetFlavors, self).setUp() - - def tearDown(self): - super(TestGetFlavors, self).tearDown() - - def test_get_flavor(self): - """Test get a flavor.""" - flavor = metadata.get_flavor( - self.flavor_id, - user=self.user_object - ) - expected = { - 'display_name': 'Multi-node Cluster with HA', - 'id': 3, - 'template': 'ha_multinodes.tmpl', - 'name': 'HA-multinodes' - } - self.assertTrue( - all(item in flavor.items() for item in expected.items()) - ) - - if __name__ == '__main__': flags.init() logsetting.init() diff --git a/compass/tests/db/api/test_switch.py b/compass/tests/db/api/test_switch.py index b61666c7..f974634e 100644 --- a/compass/tests/db/api/test_switch.py +++ b/compass/tests/db/api/test_switch.py @@ -234,7 +234,7 @@ class TestPatchSwitch(BaseTest): switch.patch_switch( 1, user=self.user_object, - patched_credentials={ + credentials={ 'version': '2c', 'community': 'public' } @@ -316,7 +316,7 @@ class TestUpdateSwitchFilters(BaseTest): switch.update_switch_filters( 1, user=self.user_object, - filters=[ + machine_filters=[ { 'filter_type': 'allow' } @@ -352,7 +352,7 @@ class TestPatchSwitchFilter(BaseTest): switch.patch_switch_filter( 2, user=self.user_object, - patched_filters=[ + machine_filters=[ { 'filter_type': 'allow' } @@ -811,7 +811,7 @@ class TestPatchSwitchMachine(BaseTest): def tearDown(self): super(TestPatchSwitchMachine, self).tearDown() - def 
test_pathc_switch_machine(self): + def test_patch_switch_machine(self): switch.add_switch_machine( 1, mac='28:6e:d4:46:c4:25', @@ -822,7 +822,7 @@ class TestPatchSwitchMachine(BaseTest): 1, 1, user=self.user_object, - patched_tag={ + tag={ 'patched_tag': 'test_patched_tag' } ) @@ -858,7 +858,7 @@ class TestPatchSwitchmachine(BaseTest): switch.patch_switchmachine( 1, user=self.user_object, - patched_location={ + location={ 'patched_location': 'test_location' } ) diff --git a/compass/tests/db/api/test_user.py b/compass/tests/db/api/test_user.py index d13475be..3901b3fc 100644 --- a/compass/tests/db/api/test_user.py +++ b/compass/tests/db/api/test_user.py @@ -38,11 +38,12 @@ class TestGetUserObject(unittest2.TestCase): def setUp(self): super(TestGetUserObject, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() @@ -322,8 +323,8 @@ class TestAddDelUserPermission(BaseTest): def test_add_permission_position(self): user_api.add_permission( self.user_object.id, - True, 2, + True, user=self.user_object, ) permissions = user_api.get_permissions( diff --git a/compass/tests/db/api/test_user_log.py b/compass/tests/db/api/test_user_log.py index ed62626f..2d237393 100644 --- a/compass/tests/db/api/test_user_log.py +++ b/compass/tests/db/api/test_user_log.py @@ -65,11 +65,11 @@ class TestListUserActions(BaseTest): self.user_object.id, action='/testaction' ) - user_action = user_log.list_user_actions( - 2, - user=self.user_object + self.assertRaises( + exception.RecordNotExists, + user_log.list_user_actions, + 2, user=self.user_object ) - self.assertEqual([], user_action) class TestListActions(BaseTest): @@ -92,7 +92,6 @@ class TestListActions(BaseTest): 'action': '/testaction', 'user_id': 1 } - print action self.assertTrue( all(item in 
action[0].items() for item in expected.items())) diff --git a/compass/tests/db/api/test_utils.py b/compass/tests/db/api/test_utils.py index 64cfcfad..6c115d8e 100644 --- a/compass/tests/db/api/test_utils.py +++ b/compass/tests/db/api/test_utils.py @@ -38,11 +38,12 @@ class TestModelQuery(unittest2.TestCase): def setUp(self): super(TestModelQuery, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') def tearDown(self): @@ -70,11 +71,12 @@ class TestModelFilter(unittest2.TestCase): def setUp(self): super(TestModelFilter, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() @@ -275,11 +277,12 @@ class TestModelFilter(unittest2.TestCase): class TestGetDbObject(unittest2.TestCase): def setUp(self): super(TestGetDbObject, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() @@ -322,11 +325,12 @@ class TestGetDbObject(unittest2.TestCase): class TestAddDbObject(unittest2.TestCase): def setUp(self): super(TestAddDbObject, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() @@ -408,26 +412,40 @@ class TestAddDbObject(unittest2.TestCase): def test_add_with_multiple_args(self): 
with database.session() as session: + db_permission = utils.add_db_object( + session, + models.Permission, + False, + 'test', + alias='test' + ) + db_user = utils.add_db_object( + session, + models.User, + False, + 'test@huawei.com', + password='test' + ) db_objs = utils.add_db_object( session, - models.AdapterRole, + models.UserPermission, True, - 'test1', - 1, - name='test1', - alias='test1' + db_user.id, + db_permission.id ) - self.assertEqual('test1', db_objs.alias) + self.assertEqual(db_user.id, db_objs.user_id) + self.assertEqual(db_permission.id, db_objs.permission_id) class TestListDbObjects(unittest2.TestCase): def setUp(self): super(TestListDbObjects, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() @@ -484,11 +502,12 @@ class TestListDbObjects(unittest2.TestCase): class TestDelDbObjects(unittest2.TestCase): def setUp(self): super(TestDelDbObjects, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() @@ -527,11 +546,12 @@ class TestDelDbObjects(unittest2.TestCase): class TestUpdateDbObject(unittest2.TestCase): def setUp(self): super(TestUpdateDbObject, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() @@ -575,11 +595,12 @@ class TestUpdateDbObject(unittest2.TestCase): class TestDelDbObject(unittest2.TestCase): def setUp(self): super(TestDelDbObject, 
self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() @@ -610,11 +631,12 @@ class TestDelDbObject(unittest2.TestCase): class TestCheckIp(unittest2.TestCase): def setUp(self): super(TestCheckIp, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() @@ -639,11 +661,12 @@ class TestCheckIp(unittest2.TestCase): class TestCheckMac(unittest2.TestCase): def setUp(self): super(TestCheckMac, self).setUp() - reload(setting) - setting.CONFIG_DIR = os.path.join( + os.environ['COMPASS_IGNORE_SETTING'] = 'true' + os.environ['COMPASS_CONFIG_DIR'] = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'data' ) + reload(setting) database.init('sqlite://') database.create_db() diff --git a/compass/tests/deployment/installers/os_installers/cobbler/test_cobbler.py b/compass/tests/deployment/installers/os_installers/cobbler/test_cobbler.py index bca11b4a..f68ceabe 100644 --- a/compass/tests/deployment/installers/os_installers/cobbler/test_cobbler.py +++ b/compass/tests/deployment/installers/os_installers/cobbler/test_cobbler.py @@ -28,12 +28,16 @@ import unittest2 os.environ['COMPASS_IGNORE_SETTING'] = 'true' +from compass.utils import setting_wrapper as compass_setting +reload(compass_setting) + + from compass.deployment.installers.config_manager import BaseConfigManager from compass.deployment.installers.os_installers.cobbler.cobbler \ import CobblerInstaller from compass.tests.deployment.test_data import config_data -from compass.utils import setting_wrapper as compass_setting -reload(compass_setting) +from compass.utils 
import flags +from compass.utils import logsetting class TestCobblerInstaller(unittest2.TestCase): @@ -291,3 +295,9 @@ class TestCobblerInstaller(unittest2.TestCase): } output = self.test_cobbler._check_and_set_system_impi(3, "test_sys_id") self.assertTrue(output) + + +if __name__ == '__main__': + flags.init() + logsetting.init() + unittest2.main() diff --git a/compass/tests/deployment/installers/pk_installers/chef_installer/test_chef.py b/compass/tests/deployment/installers/pk_installers/chef_installer/test_chef.py index 84e1b062..6b349eb7 100644 --- a/compass/tests/deployment/installers/pk_installers/chef_installer/test_chef.py +++ b/compass/tests/deployment/installers/pk_installers/chef_installer/test_chef.py @@ -27,14 +27,17 @@ import unittest2 os.environ['COMPASS_IGNORE_SETTING'] = 'true' -from compass.deployment.installers.config_manager import BaseConfigManager -from compass.tests.deployment.test_data import config_data + from compass.utils import setting_wrapper as compass_setting reload(compass_setting) +from compass.deployment.installers.config_manager import BaseConfigManager from compass.deployment.installers.pk_installers.chef_installer.chef_installer\ import ChefInstaller +from compass.tests.deployment.test_data import config_data +from compass.utils import flags +from compass.utils import logsetting class TestChefInstaller(unittest2.TestCase): @@ -816,3 +819,9 @@ class TestChefInstaller(unittest2.TestCase): output = self.test_chef.generate_installer_config() self.maxDiff = None self.assertDictEqual(entry["excepted_output"], output) + + +if __name__ == '__main__': + flags.init() + logsetting.init() + unittest2.main() diff --git a/compass/tests/deployment/installers/test_config_manager.py b/compass/tests/deployment/installers/test_config_manager.py index 176f60f8..1be264a0 100644 --- a/compass/tests/deployment/installers/test_config_manager.py +++ b/compass/tests/deployment/installers/test_config_manager.py @@ -19,11 +19,16 @@ import unittest2 
os.environ['COMPASS_IGNORE_SETTING'] = 'true' + +from compass.utils import setting_wrapper as compass_setting +reload(compass_setting) + + from compass.deployment.installers.config_manager import BaseConfigManager from compass.deployment.utils import constants as const from compass.tests.deployment.test_data import config_data -from compass.utils import setting_wrapper as compass_setting -reload(compass_setting) +from compass.utils import flags +from compass.utils import logsetting class TestConfigManager(unittest2.TestCase): @@ -225,3 +230,9 @@ class TestConfigManager(unittest2.TestCase): self.maxDiff = None output = self.test_config_manager.get_host_roles_mapping(3) self.assertEqual(expected_output, output) + + +if __name__ == '__main__': + flags.init() + logsetting.init() + unittest2.main() diff --git a/compass/tests/deployment/installers/test_installer.py b/compass/tests/deployment/installers/test_installer.py index 3e325c63..ac0027b3 100644 --- a/compass/tests/deployment/installers/test_installer.py +++ b/compass/tests/deployment/installers/test_installer.py @@ -19,12 +19,17 @@ import unittest2 os.environ['COMPASS_IGNORE_SETTING'] = 'true' -from compass.deployment.installers.installer import BaseInstaller -from compass.tests.deployment.test_data import config_data + from compass.utils import setting_wrapper as compass_setting reload(compass_setting) +from compass.deployment.installers.installer import BaseInstaller +from compass.tests.deployment.test_data import config_data +from compass.utils import flags +from compass.utils import logsetting + + class TestBaseInstaller(unittest2.TestCase): """Test base installer.""" def setUp(self): @@ -48,3 +53,9 @@ class TestBaseInstaller(unittest2.TestCase): self.maxDiff = None self.assertDictEqual(expected_output, output) + + +if __name__ == '__main__': + flags.init() + logsetting.init() + unittest2.main() diff --git a/compass/tests/deployment/test_data/config_data.py b/compass/tests/deployment/test_data/config_data.py 
index 84c0cfa7..44757f46 100644 --- a/compass/tests/deployment/test_data/config_data.py +++ b/compass/tests/deployment/test_data/config_data.py @@ -32,7 +32,6 @@ test_client = compass_setting.TEST_CLIENT_NAME adapter_test_config = { "name": "openstack_icehouse", - "distributed_system_name": "openstack_icehouse", "flavors": [ { "falvor_name": "test_flavor", diff --git a/compass/tests/deployment/test_deploy_manager.py b/compass/tests/deployment/test_deploy_manager.py index eda00539..1b17d485 100644 --- a/compass/tests/deployment/test_deploy_manager.py +++ b/compass/tests/deployment/test_deploy_manager.py @@ -34,6 +34,8 @@ reload(setting) from compass.deployment.deploy_manager import DeployManager from compass.tests.deployment.test_data import config_data +from compass.utils import flags +from compass.utils import logsetting class TestDeployManager(unittest2.TestCase): @@ -54,3 +56,9 @@ class TestDeployManager(unittest2.TestCase): test_manager = DeployManager(adapter_info, cluster_info, hosts_info) self.assertIsNotNone(test_manager) + + +if __name__ == '__main__': + flags.init() + logsetting.init() + unittest2.main() diff --git a/compass/tests/log_analyzor/test_adapter_matcher.py b/compass/tests/log_analyzor/test_adapter_matcher.py index cd9a198e..6bc4fede 100755 --- a/compass/tests/log_analyzor/test_adapter_matcher.py +++ b/compass/tests/log_analyzor/test_adapter_matcher.py @@ -251,7 +251,7 @@ class TestPackageMatcher(unittest2.TestCase): ) self.package_matcher = adapter_matcher.PackageMatcher( package_installer_name='chef', - distributed_system_pattern=r'openstack', + adapter_pattern=r'openstack', item_matcher=self.item_matcher, file_reader_factory=self.file_reader_factory ) @@ -262,7 +262,7 @@ class TestPackageMatcher(unittest2.TestCase): def test_match_none(self): test_match_none = { 'package_installer_name': None, - 'distributed_system_name': 'openstack' + 'adapter_name': 'openstack' } matcher = self.package_matcher.match(**test_match_none) 
self.assertFalse(matcher) @@ -270,7 +270,7 @@ class TestPackageMatcher(unittest2.TestCase): def test_match(self): test_match = { 'package_installer_name': 'chef', - 'distributed_system_name': 'openstack' + 'adapter_name': 'openstack' } matcher = self.package_matcher.match(**test_match) self.assertTrue(matcher) @@ -278,7 +278,7 @@ class TestPackageMatcher(unittest2.TestCase): def test_installer_unmatch(self): test_unmatch = { 'package_installer_name': 'dummy', - 'distributed_system_name': 'openstack' + 'adapter_name': 'openstack' } matcher = self.package_matcher.match(**test_unmatch) self.assertFalse(matcher) @@ -286,7 +286,7 @@ class TestPackageMatcher(unittest2.TestCase): def test_name_unmatch(self): test_unmatch = { 'package_installer_name': 'chef', - 'distributed_system_name': 'dummy' + 'adapter_name': 'dummy' } matcher = self.package_matcher.match(**test_unmatch) self.assertFalse(matcher) @@ -294,7 +294,7 @@ class TestPackageMatcher(unittest2.TestCase): def test_both_unmatch(self): test_unmatch = { 'package_installer_name': 'dummy', - 'distributed_system_name': 'dummy' + 'adapter_name': 'dummy' } matcher = self.package_matcher.match(**test_unmatch) self.assertFalse(matcher) diff --git a/compass/utils/logsetting.py b/compass/utils/logsetting.py index c8d7615f..836ebcb8 100644 --- a/compass/utils/logsetting.py +++ b/compass/utils/logsetting.py @@ -42,6 +42,7 @@ flags.add('log_format', flags.add('log_backup_count', type='int', help='log backup count', default=setting.DEFAULT_LOGBACKUPCOUNT) + # mapping str setting in flag --loglevel to logging level. LOGLEVEL_MAPPING = { 'finest': logging.DEBUG - 2, # more detailed log. 
@@ -53,13 +54,20 @@ LOGLEVEL_MAPPING = { 'critical': logging.CRITICAL, } + logging.addLevelName(LOGLEVEL_MAPPING['fine'], 'fine') logging.addLevelName(LOGLEVEL_MAPPING['finest'], 'finest') + # disable logging when logsetting.init not called logging.getLogger().setLevel(logging.CRITICAL) +def getLevelByName(level_name): + """Get log level by level name.""" + return LOGLEVEL_MAPPING[level_name] + + def init(): """Init loggsetting. It should be called after flags.init.""" loglevel = flags.OPTIONS.loglevel.lower() diff --git a/compass/utils/setting_wrapper.py b/compass/utils/setting_wrapper.py index f6a2ed55..507710ef 100644 --- a/compass/utils/setting_wrapper.py +++ b/compass/utils/setting_wrapper.py @@ -24,7 +24,7 @@ import os.path # default setting -CONFIG_DIR = '/etc/compass' +CONFIG_DIR = os.environ.get('COMPASS_CONFIG_DIR', '/etc/compass') SQLALCHEMY_DATABASE_URI = 'sqlite://' SQLALCHEMY_DATABASE_POOL_TYPE = 'static' COBBLER_INSTALLATION_LOGDIR = '/var/log/cobbler/anamon' @@ -77,68 +77,29 @@ TEST_CLIENT_NAME = "graceyu" PROGRESS_UPDATE_PID_FILE = '/var/run/progress_update.pid' -OS_INSTALLER_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'os_installer') -) -PACKAGE_INSTALLER_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'package_installer') -) -OS_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'os') -) -DISTRIBUTED_SYSTEM_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'distributed_system') -) -ADAPTER_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'adapter') -) -OS_METADATA_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'os_metadata') -) -PACKAGE_METADATA_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'package_metadata') -) -FLAVOR_METADATA_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'flavor_metadata') -) -OS_FIELD_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'os_field') -) -PACKAGE_FIELD_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'package_field') -) -FLAVOR_FIELD_DIR = 
lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'flavor_field') -) -ADAPTER_ROLE_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'role') -) -ADAPTER_FLAVOR_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'flavor') -) -VALIDATOR_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'validator') -) -CALLBACK_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'callback') -) -TMPL_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'templates') -) -MACHINE_LIST_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'machine_list') -) -PROGRESS_CALCULATOR_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'progress_calculator') -) -OS_MAPPING_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'os_mapping') -) -FLAVOR_MAPPING_DIR = lazypy.delay( - lambda: os.path.join(CONFIG_DIR, 'flavor_mapping') -) PROXY_URL_PREFIX = 'http://10.145.81.205:5000' +OS_INSTALLER_DIR = '' +PACKAGE_INSTALLER_DIR = '' +OS_DIR = '' +ADAPTER_DIR = '' +OS_METADATA_DIR = '' +PACKAGE_METADATA_DIR = '' +FLAVOR_METADATA_DIR = '' +OS_FIELD_DIR = '' +PACKAGE_FIELD_DIR = '' +FLAVOR_FIELD_DIR = '' +ADAPTER_ROLE_DIR = '' +ADAPTER_FLAVOR_DIR = '' +VALIDATOR_DIR = '' +CALLBACK_DIR = '' +TMPL_DIR = '' +MACHINE_LIST_DIR = '' +PROGRESS_CALCULATOR_DIR = '' +OS_MAPPING_DIR = '' +FLAVOR_MAPPING_DIR = '' + + if ( 'COMPASS_IGNORE_SETTING' in os.environ and os.environ['COMPASS_IGNORE_SETTING'] @@ -156,3 +117,60 @@ else: except Exception as error: logging.exception(error) raise error + +if not OS_INSTALLER_DIR: + OS_INSTALLER_DIR = os.path.join(CONFIG_DIR, 'os_installer') + +if not PACKAGE_INSTALLER_DIR: + PACKAGE_INSTALLER_DIR = os.path.join(CONFIG_DIR, 'package_installer') + +if not OS_DIR: + OS_DIR = os.path.join(CONFIG_DIR, 'os') + +if not ADAPTER_DIR: + ADAPTER_DIR = os.path.join(CONFIG_DIR, 'adapter') + +if not OS_METADATA_DIR: + OS_METADATA_DIR = os.path.join(CONFIG_DIR, 'os_metadata') + +if not PACKAGE_METADATA_DIR: + PACKAGE_METADATA_DIR = os.path.join(CONFIG_DIR, 
'package_metadata') + +if not FLAVOR_METADATA_DIR: + FLAVOR_METADATA_DIR = os.path.join(CONFIG_DIR, 'flavor_metadata') + +if not OS_FIELD_DIR: + OS_FIELD_DIR = os.path.join(CONFIG_DIR, 'os_field') + +if not PACKAGE_FIELD_DIR: + PACKAGE_FIELD_DIR = os.path.join(CONFIG_DIR, 'package_field') + +if not FLAVOR_FIELD_DIR: + FLAVOR_FIELD_DIR = os.path.join(CONFIG_DIR, 'flavor_field') + +if not ADAPTER_ROLE_DIR: + ADAPTER_ROLE_DIR = os.path.join(CONFIG_DIR, 'role') + +if not ADAPTER_FLAVOR_DIR: + ADAPTER_FLAVOR_DIR = os.path.join(CONFIG_DIR, 'flavor') + +if not VALIDATOR_DIR: + VALIDATOR_DIR = os.path.join(CONFIG_DIR, 'validator') + +if not CALLBACK_DIR: + CALLBACK_DIR = os.path.join(CONFIG_DIR, 'callback') + +if not TMPL_DIR: + TMPL_DIR = os.path.join(CONFIG_DIR, 'templates') + +if not MACHINE_LIST_DIR: + MACHINE_LIST_DIR = os.path.join(CONFIG_DIR, 'machine_list') + +if not PROGRESS_CALCULATOR_DIR: + PROGRESS_CALCULATOR_DIR = os.path.join(CONFIG_DIR, 'progress_calculator') + +if not OS_MAPPING_DIR: + OS_MAPPING_DIR = os.path.join(CONFIG_DIR, 'os_mapping') + +if not FLAVOR_MAPPING_DIR: + FLAVOR_MAPPING_DIR = os.path.join(CONFIG_DIR, 'flavor_mapping') diff --git a/compass/utils/util.py b/compass/utils/util.py index 1dd1f041..bacfac15 100644 --- a/compass/utils/util.py +++ b/compass/utils/util.py @@ -24,10 +24,32 @@ import os import os.path import re import sys +import warnings + + +def deprecated(func): + """This is a decorator which can be used to mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + def new_func(*args, **kwargs): + warnings.warn( + "Call to deprecated function %s." 
% func.__name__, + category=DeprecationWarning + ) + return func(*args, **kwargs) + + new_func.__name__ = func.__name__ + new_func.__doc__ = func.__doc__ + new_func.__dict__.update(func.__dict__) + return new_func def parse_datetime(date_time, exception_class=Exception): - """Parse datetime str to get datetime object.""" + """Parse datetime str to get datetime object. + + The date time format is %Y-%m-%d %H:%M:%S + """ try: return datetime.datetime.strptime( date_time, '%Y-%m-%d %H:%M:%S' @@ -40,7 +62,10 @@ def parse_datetime(date_time, exception_class=Exception): def parse_datetime_range(date_time_range, exception_class=Exception): - """parse datetime range str to pair of datetime objects.""" + """parse datetime range str to pair of datetime objects. + + The date time range format is %Y-%m-%d %H:%M:%S,%Y-%m-%d %H:%M:%S + """ try: start, end = date_time_range.split(',') except Exception as error: @@ -60,7 +85,11 @@ def parse_datetime_range(date_time_range, exception_class=Exception): def parse_request_arg_dict(arg, exception_class=Exception): - """parse string to dict.""" + """parse string to dict. + + The str is formatted like a=b;c=d and parsed to + {'a': 'b', 'c': 'd'} + """ arg_dict = {} arg_pairs = arg.split(';') for arg_pair in arg_pairs: @@ -105,6 +134,16 @@ def merge_dict(lhs, rhs, override=True): return lhs +def recursive_merge_dict(name, all_dicts, parents): + """Recursively merge parent dict into base dict.""" + parent_name = parents.get(name, None) + base_dict = all_dicts.get(name, {}) + if not parent_name: + return base_dict + merged = recursive_merge_dict(parent_name, all_dicts, parents) + return merge_dict(base_dict, merged, override=False) + + def encrypt(value, crypt_method=None): """Get encrypted value.""" if not crypt_method: @@ -129,6 +168,12 @@ def encrypt(value, crypt_method=None): def parse_time_interval(time_interval_str): + """parse string of time interval to time interval. 
+ + supported time interval unit: ['d', 'w', 'h', 'm', 's'] + Examples: + time_interval_str: '3d 2h' time interval to 3 days and 2 hours. + """ if not time_interval_str: return 0 @@ -171,10 +216,11 @@ def load_configs( config_dir, config_name_suffix='.conf', env_globals={}, env_locals={} ): + """Load configurations from config dir.""" configs = [] config_dir = str(config_dir) if not os.path.exists(config_dir): - logging.debug('path %s does not exist', config_dir) + logging.error('path %s does not exist', config_dir) return configs for component in os.listdir(config_dir): if not component.endswith(config_name_suffix): @@ -194,22 +240,6 @@ def load_configs( return configs -def is_instance(instance, expected_types): - """Check instance type is in one of expected types. - - :param instance: instance to check the type. - :param expected_types: types to check if instance type is in them. - :type expected_types: list of type - - :returns: True if instance type is in expect_types. - """ - for expected_type in expected_types: - if isinstance(instance, expected_type): - return True - - return False - - def pretty_print(*contents): """pretty print contents.""" if len(contents) == 0: diff --git a/conf/adapter/ceph.conf b/conf/adapter/ceph.conf index 6b6c8139..d63ecf13 100644 --- a/conf/adapter/ceph.conf +++ b/conf/adapter/ceph.conf @@ -1,3 +1,2 @@ NAME = 'ceph' PARENT = 'general' -DISTRIBUTED_SYSTEM = 'ceph' diff --git a/conf/adapter/chef_ceph.conf b/conf/adapter/chef_ceph.conf index c3f1435d..330de321 100644 --- a/conf/adapter/chef_ceph.conf +++ b/conf/adapter/chef_ceph.conf @@ -1,5 +1,5 @@ NAME = 'ceph_firefly' -DSPLAY_NAME = 'Ceph Firefly' +DISPLAY_NAME = 'Ceph Firefly' PARENT = 'ceph' PACKAGE_INSTALLER = 'chef_installer' OS_INSTALLER = 'cobbler' diff --git a/conf/adapter/chef_ceph_openstack.conf b/conf/adapter/chef_ceph_openstack.conf index 08264efc..7bccfaaa 100644 --- a/conf/adapter/chef_ceph_openstack.conf +++ b/conf/adapter/chef_ceph_openstack.conf @@ -1,7 +1,6 @@ NAME 
= 'ceph_openstack_icehouse' DISPLAY_NAME = 'Ceph + OpenStack Icehouse' PARENT = 'openstack' -DISTRIBUTED_SYSTEM = 'openstack_ceph' PACKAGE_INSTALLER = 'chef_installer' OS_INSTALLER = 'cobbler' SUPPORTED_OS_PATTERNS = ['(?i)centos-6\.5.*', '(?i)ubuntu-12\.04.*'] diff --git a/conf/adapter/openstack.conf b/conf/adapter/openstack.conf index 038c6d4c..66bdd8bb 100644 --- a/conf/adapter/openstack.conf +++ b/conf/adapter/openstack.conf @@ -1,3 +1,2 @@ NAME = 'openstack' PARENT = 'general' -DISTRIBUTED_SYSTEM = 'openstack' diff --git a/conf/flavor_mapping/allinone.conf b/conf/flavor_mapping/allinone.conf index d35386b7..4752a805 100644 --- a/conf/flavor_mapping/allinone.conf +++ b/conf/flavor_mapping/allinone.conf @@ -1,4 +1,6 @@ -allinone = { +ADAPTER = 'openstack-icehouse' +FLAVOR = 'allinone' +CONFIG_MAPPING = { "mapped_name": "flavor_config", "mapped_children": [{ "security": { diff --git a/conf/flavor_mapping/ceph_firefly.conf b/conf/flavor_mapping/ceph_firefly.conf index ee7a6348..17e7c465 100644 --- a/conf/flavor_mapping/ceph_firefly.conf +++ b/conf/flavor_mapping/ceph_firefly.conf @@ -1,4 +1,6 @@ -ceph_firefly = { +ADAPTER = 'ceph_firefly' +FLAVOR = 'ceph_firefly' +CONFIG_MAPPING = { "mapped_name": "flavor_config", "mapped_children": [{ "ceph_config": { diff --git a/conf/flavor_mapping/ceph_openstack_multinodes.conf b/conf/flavor_mapping/ceph_openstack_multinodes.conf index 57294c49..2a9b0fe1 100644 --- a/conf/flavor_mapping/ceph_openstack_multinodes.conf +++ b/conf/flavor_mapping/ceph_openstack_multinodes.conf @@ -1,4 +1,6 @@ -ceph_openstack_multinodes = { +ADAPTER = 'ceph_openstack_icehouse' +FLAVOR = 'ceph-openstack-multinodes' +CONFIG_MAPPING = { "mapped_name": "flavor_config", "mapped_children": [{ "security": { diff --git a/conf/flavor_mapping/ceph_openstack_single_controller.conf b/conf/flavor_mapping/ceph_openstack_single_controller.conf index ef0293b9..959224b0 100644 --- a/conf/flavor_mapping/ceph_openstack_single_controller.conf +++ 
b/conf/flavor_mapping/ceph_openstack_single_controller.conf @@ -1,4 +1,6 @@ -ceph_openstack_single_controller = { +ADAPTER = 'ceph_openstack_icehouse' +FLAVOR = 'ceph-openstack-single-controller' +CONFIG_MAPPING = { "mapped_name": "flavor_config", "mapped_children": [{ "security": { diff --git a/conf/flavor_mapping/ha-multinodes.conf b/conf/flavor_mapping/ha-multinodes.conf index 4472abc2..34d76be1 100644 --- a/conf/flavor_mapping/ha-multinodes.conf +++ b/conf/flavor_mapping/ha-multinodes.conf @@ -1,4 +1,6 @@ -HA_multinodes = { +ADAPTER = 'openstack-icehouse' +FLAVOR = 'HA-multinodes' +CONFIG_MAPPING = { "mapped_name": "flavor_config", "mapped_children": [{ "security": { diff --git a/conf/flavor_mapping/multinodes.conf b/conf/flavor_mapping/multinodes.conf index 35836398..bed52f06 100644 --- a/conf/flavor_mapping/multinodes.conf +++ b/conf/flavor_mapping/multinodes.conf @@ -1,4 +1,6 @@ -multinodes = { +ADAPTER = 'openstack-icehouse' +FLAVOR = 'multinodes' +CONFIG_MAPPING = { "mapped_name": "flavor_config", "mapped_children": [{ "security": { diff --git a/conf/flavor_mapping/single-contoller-multi-compute.conf b/conf/flavor_mapping/single-contoller-multi-compute.conf index b9d5cbe9..c7bbff73 100644 --- a/conf/flavor_mapping/single-contoller-multi-compute.conf +++ b/conf/flavor_mapping/single-contoller-multi-compute.conf @@ -1,4 +1,6 @@ -single_contoller_multi_compute = { +ADAPTER = 'openstack-icehouse' +FLAVOR = 'single-contoller-multi-compute' +CONFIG_MAPPING = { "mapped_name": "flavor_config", "mapped_children": [{ "security": { diff --git a/conf/flavor_metadata/HA-multinodes.conf b/conf/flavor_metadata/HA-multinodes.conf index cd440e47..c859c93f 100644 --- a/conf/flavor_metadata/HA-multinodes.conf +++ b/conf/flavor_metadata/HA-multinodes.conf @@ -1,3 +1,4 @@ +ADAPTER = 'openstack_icehouse' FLAVOR = 'HA-multinodes' METADATA = { 'ha_proxy': { diff --git a/conf/flavor_metadata/allinone.conf b/conf/flavor_metadata/allinone.conf index 32bac253..7d248ecf 100644 --- 
a/conf/flavor_metadata/allinone.conf +++ b/conf/flavor_metadata/allinone.conf @@ -1,2 +1,3 @@ +ADAPTER = 'openstack_icehouse' FLAVOR = 'allinone' METADATA = {} diff --git a/conf/flavor_metadata/ceph_firefly.conf b/conf/flavor_metadata/ceph_firefly.conf index b264d77c..8755376b 100644 --- a/conf/flavor_metadata/ceph_firefly.conf +++ b/conf/flavor_metadata/ceph_firefly.conf @@ -1,2 +1,3 @@ +ADAPTER = 'ceph_firefly' FLAVOR = 'ceph_firefly' METADATA = {} diff --git a/conf/flavor_metadata/ceph_openstack_multinodes.conf b/conf/flavor_metadata/ceph_openstack_multinodes.conf index 034d3f87..3c4a4010 100644 --- a/conf/flavor_metadata/ceph_openstack_multinodes.conf +++ b/conf/flavor_metadata/ceph_openstack_multinodes.conf @@ -1,2 +1,3 @@ +ADAPTER = 'ceph_openstack_icehouse' FLAVOR = 'ceph_openstack_multinodes' METADATA = {} diff --git a/conf/flavor_metadata/ceph_openstack_single_controller.conf b/conf/flavor_metadata/ceph_openstack_single_controller.conf index a7b6cff5..cbd2664f 100644 --- a/conf/flavor_metadata/ceph_openstack_single_controller.conf +++ b/conf/flavor_metadata/ceph_openstack_single_controller.conf @@ -1,2 +1,3 @@ +ADAPTER = 'ceph_openstack_icehouse' FLAVOR = 'ceph_openstack_single_controller' METADATA = {} diff --git a/conf/flavor_metadata/multinodes.conf b/conf/flavor_metadata/multinodes.conf index 9def97d3..7fb835f8 100644 --- a/conf/flavor_metadata/multinodes.conf +++ b/conf/flavor_metadata/multinodes.conf @@ -1,2 +1,3 @@ +ADAPTER = 'openstack_icehouse' FLAVOR = 'multinodes' METADATA = {} diff --git a/conf/flavor_metadata/single-contoller-multi-compute.conf b/conf/flavor_metadata/single-contoller-multi-compute.conf index 9822f712..69faf5d2 100644 --- a/conf/flavor_metadata/single-contoller-multi-compute.conf +++ b/conf/flavor_metadata/single-contoller-multi-compute.conf @@ -1,2 +1,3 @@ +ADAPTER = 'openstack_icehouse' FLAVOR = 'single-contoller-multi-compute' METADATA = {} diff --git a/conf/os_mapping/os_mapping.conf b/conf/os_mapping/os_mapping.conf index 
d3cc47cf..0be15e48 100644 --- a/conf/os_mapping/os_mapping.conf +++ b/conf/os_mapping/os_mapping.conf @@ -1,4 +1,5 @@ -OS_CONFIG_MAPPING = { +OS = 'general' +CONFIG_MAPPING = { "mapped_name": "os_global_config", "mapped_children": [{ "server_credentials":{ diff --git a/conf/progress_calculator/progress_calculator.conf b/conf/progress_calculator/progress_calculator.conf index 67ddf7de..f4f991e4 100644 --- a/conf/progress_calculator/progress_calculator.conf +++ b/conf/progress_calculator/progress_calculator.conf @@ -1,10 +1,3 @@ -from compass.log_analyzor.adapter_matcher import AdapterItemMatcher -from compass.log_analyzor.file_matcher import FileMatcher -from compass.log_analyzor.file_matcher import FileReaderFactory -from compass.log_analyzor.line_matcher import IncrementalProgress -from compass.log_analyzor.line_matcher import LineMatcher - - OS_INSTALLER_CONFIGURATIONS = { 'cobbler': { 'Ubuntu': AdapterItemMatcher( @@ -503,10 +496,26 @@ OS_INSTALLER_CONFIGURATIONS = { } } +OS_LOG_CONFIGURATIONS = [{ + 'os_installer_name': 'cobbler', + 'os_pattern': 'CentOS-6.*', + 'item_matcher': OS_INSTALLER_CONFIGURATIONS['cobbler']['CentOS6'], + 'logdir': setting.INSTALLATION_LOGDIR['CobblerInstaller'] +}, { + 'os_installer_name': 'cobbler', + 'os_pattern': 'CentOS-7.*', + 'item_matcher': OS_INSTALLER_CONFIGURATIONS['cobbler']['CentOS7'], + 'logdir': setting.INSTALLATION_LOGDIR['CobblerInstaller'] +}, { + 'os_installer_name': 'cobbler', + 'os_pattern': 'Ubuntu.*', + 'item_matcher': OS_INSTALLER_CONFIGURATIONS['cobbler']['Ubuntu'], + 'logdir': setting.INSTALLATION_LOGDIR['CobblerInstaller'] +}] PACKAGE_INSTALLER_CONFIGURATIONS = { 'chef_installer': { - 'openstack': AdapterItemMatcher( + 'default': AdapterItemMatcher( file_matchers=[ FileMatcher( filename='chef-client.log', @@ -538,3 +547,12 @@ PACKAGE_INSTALLER_CONFIGURATIONS = { ), } } + +ADAPTER_LOG_CONFIGURATIONS = [{ + 'package_installer_name': 'chef_installer', + 'adapter_pattern': '.*', + 'item_matcher': 
PACKAGE_INSTALLER_CONFIGURATIONS['chef_installer']['default'], + 'logdir': setting.INSTALLATION_LOGDIR['ChefInstaller'] +}] + + diff --git a/conf/role/openstack_chef.conf b/conf/role/openstack_chef.conf index 0c3dc837..c6da8689 100644 --- a/conf/role/openstack_chef.conf +++ b/conf/role/openstack_chef.conf @@ -59,11 +59,9 @@ ROLES = [{ }, { 'role': 'os-ha', 'display_name': 'ha proxy node', - 'description': 'ha proxy node', - 'optional': True + 'description': 'ha proxy node' }, { 'role': 'allinone-compute', 'display_name': 'all in one compute', - 'description': 'all in one compute', - 'optional': True + 'description': 'all in one compute' }] diff --git a/install/ansible.sh b/install/ansible.sh index f5dd13c6..a7b0ca02 100755 --- a/install/ansible.sh +++ b/install/ansible.sh @@ -5,7 +5,7 @@ echo "Installing Ansible" DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) source $DIR/install.conf if [ -f $DIR/env.conf ]; then - source env.conf + source $DIR/env.conf else echo "failed to load environment" exit 1 diff --git a/install/chef.sh b/install/chef.sh index e6ed0e2e..500ef81f 100755 --- a/install/chef.sh +++ b/install/chef.sh @@ -27,7 +27,7 @@ fi echo "reconfigure chef server" # configure chef-server sudo chef-server-ctl cleanse -mkdir -p /etc/chef-server +sudo mkdir -p /etc/chef-server sudo cp -rn /etc/chef-server/chef-server.rb /root/backup/chef/ sudo rm -f /etc/chef-server/chef-server.rb sudo cp -rf $COMPASSDIR/misc/chef-server/chef-server.rb /etc/chef-server/chef-server.rb @@ -60,10 +60,10 @@ if [ ! 
-f /etc/chef-server/chef-validator.pem ]; then fi sudo knife configure -y -i --defaults -r ~/chef-repo -s https://$IPADDR:443 -u $USER --admin-client-name admin --admin-client-key /etc/chef-server/admin.pem --validation-client-name chef-validator --validation-key /etc/chef-server/chef-validator.pem <=0.9.0 simplejson requests redis - diff --git a/test-requirements.txt b/test-requirements.txt index f35aa9a6..f5cf038d 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,9 +1,9 @@ discover -mock +mock==1.0 unittest2>=0.8.0 testtools>=0.9.32 testrepository>=0.0.17 mimeparse coverage>=3.6 hacking -pycrypto<=2.0.1 +pycrypto