From bd9062c1c0d1a6de5cf4a26273f881afbf751abd Mon Sep 17 00:00:00 2001
From: Charles Short
Date: Tue, 6 Dec 2022 07:54:53 -0500
Subject: [PATCH] debian: Remove debian-integration package

Remove the debian-integration package and drop it from the ISO.

Test Plan:
PASS Build the ISO

Story: 2009968
Task: 46991

Depends-On: https://review.opendev.org/c/starlingx/tools/+/866600

Signed-off-by: Charles Short
Change-Id: I4f141f82c69fcfad90965662b9c1a0f9758e1155
---
 debian_iso_image.inc                          |    3 -
 debian_pkg_dirs                               |    1 -
 .../debian/deb_folder/changelog               |    5 -
 .../debian/deb_folder/control                 |   13 -
 .../debian/deb_folder/copyright               |   27 -
 .../debian/deb_folder/debian-integration.dirs |    1 -
 .../deb_folder/debian-integration.install     |    7 -
 .../debian/deb_folder/rules                   |    5 -
 .../debian/deb_folder/source/format           |    1 -
 .../debian-integration/debian/meta_data.yaml  |    7 -
 .../source-debian/README.md                   |   19 -
 .../after_bootstrap_workarounds.sh            |    1 -
 .../before_bootstrap_workarounds.sh           |    1 -
 .../source-debian/helm.py.patched             | 1118 -----------------
 .../source-debian/interfaces.vm.example       |   39 -
 .../source-debian/worker_reserved.conf        |   55 -
 16 files changed, 1303 deletions(-)
 delete mode 100644 tools/debian-integration/debian/deb_folder/changelog
 delete mode 100644 tools/debian-integration/debian/deb_folder/control
 delete mode 100644 tools/debian-integration/debian/deb_folder/copyright
 delete mode 100644 tools/debian-integration/debian/deb_folder/debian-integration.dirs
 delete mode 100644 tools/debian-integration/debian/deb_folder/debian-integration.install
 delete mode 100644 tools/debian-integration/debian/deb_folder/rules
 delete mode 100644 tools/debian-integration/debian/deb_folder/source/format
 delete mode 100644 tools/debian-integration/debian/meta_data.yaml
 delete mode 100644 tools/debian-integration/source-debian/README.md
 delete mode 100755 tools/debian-integration/source-debian/after_bootstrap_workarounds.sh
 delete mode 100755 tools/debian-integration/source-debian/before_bootstrap_workarounds.sh
 delete mode 100644 tools/debian-integration/source-debian/helm.py.patched
 delete mode 100644 tools/debian-integration/source-debian/interfaces.vm.example
 delete mode 100644 tools/debian-integration/source-debian/worker_reserved.conf

diff --git a/debian_iso_image.inc b/debian_iso_image.inc
index f6fca07a..568cbb6f 100644
--- a/debian_iso_image.inc
+++ b/debian_iso_image.inc
@@ -10,9 +10,6 @@ ceph-manager
 #collector
 collector
 
-#debian-integration
-debian-integration
-
 #hostdata-collectors
 engtools
 
diff --git a/debian_pkg_dirs b/debian_pkg_dirs
index 6e9e109d..cf8528ff 100644
--- a/debian_pkg_dirs
+++ b/debian_pkg_dirs
@@ -1,6 +1,5 @@
 ceph/ceph-manager
 ceph/python-cephclient
-tools/debian-integration
 tools/collector
 tools/engtools/hostdata-collectors
 tools/opae/opae-sdk
diff --git a/tools/debian-integration/debian/deb_folder/changelog b/tools/debian-integration/debian/deb_folder/changelog
deleted file mode 100644
index 9a0c6eb7..00000000
--- a/tools/debian-integration/debian/deb_folder/changelog
+++ /dev/null
@@ -1,5 +0,0 @@
-debian-integration (1.0-1) unstable; urgency=medium
-
-  * Initial release.
-
- -- Al Bailey Mon, 21 Mar 2022 14:54:39 +0000
diff --git a/tools/debian-integration/debian/deb_folder/control b/tools/debian-integration/debian/deb_folder/control
deleted file mode 100644
index c067fcad..00000000
--- a/tools/debian-integration/debian/deb_folder/control
+++ /dev/null
@@ -1,13 +0,0 @@
-Source: debian-integration
-Section: admin
-Priority: optional
-Maintainer: StarlingX Developers
-Build-Depends: debhelper-compat (= 13)
-Standards-Version: 4.4.1
-Homepage: https://www.starlingx.io
-
-Package: debian-integration
-Architecture: all
-Depends: ${misc:Depends}
-Description: StarlingX Debian integration helper files.
- Installs temporary StarlingX's Debian integration files
diff --git a/tools/debian-integration/debian/deb_folder/copyright b/tools/debian-integration/debian/deb_folder/copyright
deleted file mode 100644
index 51ffc3fe..00000000
--- a/tools/debian-integration/debian/deb_folder/copyright
+++ /dev/null
@@ -1,27 +0,0 @@
-Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: debian-integration
-Source: https://opendev.org/starlingx/config-files/
-
-Files: *
-Copyright: (c) 2013-2022 Wind River Systems, Inc
-License: Apache-2
-
-Files: debian/*
-Copyright: 2022 Wind River Systems, Inc
-License: Apache-2
-
-License: Apache-2
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- .
- https://www.apache.org/licenses/LICENSE-2.0
- .
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- .
- On Debian-based systems the full text of the Apache version 2.0 license
- can be found in `/usr/share/common-licenses/Apache-2.0'.
diff --git a/tools/debian-integration/debian/deb_folder/debian-integration.dirs b/tools/debian-integration/debian/deb_folder/debian-integration.dirs
deleted file mode 100644
index 900a2041..00000000
--- a/tools/debian-integration/debian/deb_folder/debian-integration.dirs
+++ /dev/null
@@ -1 +0,0 @@
-usr/share/debian-integration
diff --git a/tools/debian-integration/debian/deb_folder/debian-integration.install b/tools/debian-integration/debian/deb_folder/debian-integration.install
deleted file mode 100644
index 867385d4..00000000
--- a/tools/debian-integration/debian/deb_folder/debian-integration.install
+++ /dev/null
@@ -1,7 +0,0 @@
-
-after_bootstrap_workarounds.sh usr/share/debian-integration
-before_bootstrap_workarounds.sh usr/share/debian-integration
-helm.py.patched usr/share/debian-integration
-interfaces.vm.example usr/share/debian-integration
-README.md usr/share/debian-integration
-worker_reserved.conf usr/share/debian-integration
diff --git a/tools/debian-integration/debian/deb_folder/rules b/tools/debian-integration/debian/deb_folder/rules
deleted file mode 100644
index ed58acc7..00000000
--- a/tools/debian-integration/debian/deb_folder/rules
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/make -f
-#export DH_VERBOSE = 1
-
-%:
-	dh $@
diff --git a/tools/debian-integration/debian/deb_folder/source/format b/tools/debian-integration/debian/deb_folder/source/format
deleted file mode 100644
index 163aaf8d..00000000
--- a/tools/debian-integration/debian/deb_folder/source/format
+++ /dev/null
@@ -1 +0,0 @@
-3.0 (quilt)
diff --git a/tools/debian-integration/debian/meta_data.yaml b/tools/debian-integration/debian/meta_data.yaml
deleted file mode 100644
index d3ac8ed5..00000000
--- a/tools/debian-integration/debian/meta_data.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-debname: debian-integration
-debver: 1.0-1
-src_path: source-debian
-revision:
-  dist: $STX_DIST
-  PKG_GITREVCOUNT: true
diff --git a/tools/debian-integration/source-debian/README.md b/tools/debian-integration/source-debian/README.md
deleted file mode 100644
index db3d2baf..00000000
--- a/tools/debian-integration/source-debian/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-VM steps:
-- boot iso
-- copy workarounds to /home/sysadmin
-- generate /home/sysadmin/localhost.yml (with dns_servers entry)
-- run before_bootstrap_workarounds.sh
-- run bootstrap
-- generate /home/sysadmin/interfaces file
-- run after_bootstrap_workarounds.sh
-- unlock
-
-HW steps:
-- boot iso
-- copy workarounds to /home/sysadmin
-- generate /home/sysadmin/localhost.yml (with dns_servers entry)
-- run before_bootstrap_workarounds.sh
-- run bootstrap
-- generate /home/sysadmin/interfaces file
-- run after_bootstrap_workarounds.sh
-- unlock
diff --git a/tools/debian-integration/source-debian/after_bootstrap_workarounds.sh b/tools/debian-integration/source-debian/after_bootstrap_workarounds.sh
deleted file mode 100755
index b0367ff2..00000000
--- a/tools/debian-integration/source-debian/after_bootstrap_workarounds.sh
+++ /dev/null
@@ -1 +0,0 @@
-# don't remove the file yet, keep this hot
diff --git a/tools/debian-integration/source-debian/before_bootstrap_workarounds.sh b/tools/debian-integration/source-debian/before_bootstrap_workarounds.sh
deleted file mode 100755
index b0367ff2..00000000
--- a/tools/debian-integration/source-debian/before_bootstrap_workarounds.sh
+++ /dev/null
@@ -1 +0,0 @@
-# don't remove the file yet, keep this hot
diff --git a/tools/debian-integration/source-debian/helm.py.patched b/tools/debian-integration/source-debian/helm.py.patched
deleted
file mode 100644 index d16c6897..00000000 --- a/tools/debian-integration/source-debian/helm.py.patched +++ /dev/null @@ -1,1118 +0,0 @@ -# -# Copyright (c) 2018-2021 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -""" System Inventory Helm Overrides Operator.""" - -from __future__ import absolute_import - -import eventlet -import os -import re -import tempfile -import yaml - -from six import iteritems -from stevedore import extension - -from oslo_log import log as logging -from sysinv.common import constants -from sysinv.common import exception -from sysinv.common import utils -from sysinv.helm import common -from sysinv.helm import utils as helm_utils - - -LOG = logging.getLogger(__name__) - -# Disable yaml feature 'alias' for clean and readable output -yaml.Dumper.ignore_aliases = lambda *data: True - -# Number of characters to strip off from helm plugin name defined in setup.cfg, -# in order to allow controlling the order of the helm plugins, without changing -# the names of the plugins. -# The convention here is for the helm plugins to be named ###_PLUGINNAME. -HELM_PLUGIN_PREFIX_LENGTH = 4 - -# Number of optional characters appended to Armada manifest operator name, -# to allow overriding with a newer version of the Armada manifest operator. -# The convention here is for the Armada operator plugins to allow an -# optional suffix, as in PLUGINNAME_###. -ARMADA_PLUGIN_SUFFIX_LENGTH = 4 - -# Number of optional characters appended to AppLifecycle operator name, -# to allow overriding with a newer version of the AppLifecycle operator. -# The convention here is for the AppLifecycle operator plugins to allow an -# optional suffix, as in PLUGINNAME_###. -LIFECYCLE_PLUGIN_SUFFIX_LENGTH = 4 - - -def helm_context(func): - """Decorate to initialize the local threading context""" - - def _wrapper(self, *args, **kwargs): - thread_context = eventlet.greenthread.getcurrent() - setattr(thread_context, '_helm_context', dict()) - return func(self, *args, **kwargs) - return _wrapper - - -def suppress_stevedore_errors(manager, entrypoint, exception): - """ - stevedore.ExtensionManager will try to import the entry point defined in the module. - For helm_applications, both stx_openstack and platform_integ_apps are virtual modules. - So ExtensionManager will throw the "Could not load ..." error message, which is expected. - Just suppress this error message to avoid cause confusion. - """ - pass - - -LOCK_NAME = 'HelmOperator' - - -class HelmOperator(object): - """Class to encapsulate helm override operations for System Inventory""" - - # Define the stevedore namespaces that will need to be managed for plugins - STEVEDORE_APPS = 'systemconfig.helm_applications' - STEVEDORE_ARMADA = 'systemconfig.armada.manifest_ops' - STEVEDORE_LIFECYCLE = 'systemconfig.app_lifecycle' - - def __init__(self, dbapi=None): - self.dbapi = dbapi - - # Find all plugins for apps, charts per app, and armada manifest - # operators - self.discover_plugins() - - @utils.synchronized(LOCK_NAME) - def discover_plugins(self): - """ Scan for all available plugins """ - - LOG.debug("HelmOperator: Loading available helm, armada and lifecycle plugins.") - - # Initialize the plugins - self.helm_system_applications = {} - self.chart_operators = {} - self.armada_manifest_operators = {} - self.app_lifecycle_operators = {} - - # Need to purge the stevedore plugin cache so that when we discover the - # plugins, new plugin resources are found. If the cache exists, then no - # new plugins are discoverable. 
- self.purge_cache() - - # dict containing sequence of helm charts per app - self.helm_system_applications = self._load_helm_applications() - - # dict containing Armada manifest operators per app - self.armada_manifest_operators = self._load_armada_manifest_operators() - - # dict containing app lifecycle operators per app - self.app_lifecycle_operators = self._load_app_lifecycle_operators() - - @utils.synchronized(LOCK_NAME) - def purge_cache_by_location(self, install_location): - """Purge the stevedore entry point cache.""" - for lifecycle_ep in extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_LIFECYCLE]: - try: - lifecycle_distribution = utils.get_distribution_from_entry_point(lifecycle_ep) - (project_name, project_location) = \ - utils.get_project_name_and_location_from_distribution(lifecycle_distribution) - - if project_location == install_location: - extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_LIFECYCLE].remove(lifecycle_ep) - break - except exception.SysinvException: - LOG.info("Didn't find distribution for {}. Deleting from cache".format(lifecycle_ep)) - try: - extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_LIFECYCLE].remove(lifecycle_ep) - except Exception as e: - LOG.info("Workaround {}".format(e)) - break - else: - LOG.info("Couldn't find endpoint distribution located at %s for " - "%s" % (install_location, lifecycle_distribution)) - - for armada_ep in extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_ARMADA]: - try: - armada_distribution = utils.get_distribution_from_entry_point(armada_ep) - (project_name, project_location) = \ - utils.get_project_name_and_location_from_distribution(armada_distribution) - - if project_location == install_location: - extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_ARMADA].remove(armada_ep) - break - except exception.SysinvException: - LOG.info("Didn't find distribution for {}. 
Deleting from cache".format(lifecycle_ep)) - try: - extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_ARMADA].remove(lifecycle_ep) - except Exception as e: - LOG.info("Workaround {}".format(e)) - break - else: - LOG.info("Couldn't find endpoint distribution located at %s for " - "%s" % (install_location, armada_distribution)) - - for app_ep in extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_APPS]: - try: - app_distribution = utils.get_distribution_from_entry_point(app_ep) - (app_project_name, app_project_location) = \ - utils.get_project_name_and_location_from_distribution(app_distribution) - - if app_project_location == install_location: - namespace = utils.get_module_name_from_entry_point(app_ep) - - purged_list = [] - for helm_ep in extension.ExtensionManager.ENTRY_POINT_CACHE[namespace]: - helm_distribution = utils.get_distribution_from_entry_point(helm_ep) - (helm_project_name, helm_project_location) = \ - utils.get_project_name_and_location_from_distribution(helm_distribution) - - if helm_project_location != install_location: - purged_list.append(helm_ep) - - if purged_list: - extension.ExtensionManager.ENTRY_POINT_CACHE[namespace] = purged_list - else: - del extension.ExtensionManager.ENTRY_POINT_CACHE[namespace] - extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_APPS].remove(app_ep) - LOG.info("Removed stevedore namespace: %s" % namespace) - except Exception as e: - LOG.error('Purge error {}'.format(e)) - continue - - def purge_cache(self): - """Purge the stevedore entry point cache.""" - if self.STEVEDORE_APPS in extension.ExtensionManager.ENTRY_POINT_CACHE: - for entry_point in extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_APPS]: - namespace = utils.get_module_name_from_entry_point(entry_point) - try: - del extension.ExtensionManager.ENTRY_POINT_CACHE[namespace] - LOG.debug("Deleted entry points for %s." % namespace) - except KeyError: - LOG.info("No entry points for %s found." % namespace) - - try: - del extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_APPS] - LOG.debug("Deleted entry points for %s." % self.STEVEDORE_APPS) - except KeyError: - LOG.info("No entry points for %s found." % self.STEVEDORE_APPS) - - else: - LOG.info("No entry points for %s found." % self.STEVEDORE_APPS) - - try: - del extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_ARMADA] - LOG.debug("Deleted entry points for %s." % self.STEVEDORE_ARMADA) - except KeyError: - LOG.info("No entry points for %s found." % self.STEVEDORE_ARMADA) - - try: - del extension.ExtensionManager.ENTRY_POINT_CACHE[self.STEVEDORE_LIFECYCLE] - LOG.debug("Deleted entry points for %s." % self.STEVEDORE_LIFECYCLE) - except KeyError: - LOG.info("No entry points for %s found." 
% self.STEVEDORE_LIFECYCLE) - - def _load_app_lifecycle_operators(self): - """Build a dictionary of AppLifecycle operators""" - - operators_dict = {} - - app_lifecycle_operators = extension.ExtensionManager( - namespace=self.STEVEDORE_LIFECYCLE, - invoke_on_load=True, invoke_args=()) - - sorted_app_lifecycle_operators = sorted( - app_lifecycle_operators.extensions, key=lambda x: x.name) - - for operator in sorted_app_lifecycle_operators: - if (operator.name[-(LIFECYCLE_PLUGIN_SUFFIX_LENGTH - 1):].isdigit() and - operator.name[-LIFECYCLE_PLUGIN_SUFFIX_LENGTH:-3] == '_'): - operator_name = operator.name[0:-LIFECYCLE_PLUGIN_SUFFIX_LENGTH] - else: - operator_name = operator.name - operators_dict[operator_name] = operator.obj - - return operators_dict - - def get_app_lifecycle_operator(self, app_name): - """Return an AppLifecycle operator based on app name""" - - plugin_name = utils.find_app_plugin_name(app_name) - if plugin_name in self.app_lifecycle_operators: - operator = self.app_lifecycle_operators[plugin_name] - else: - operator = self.app_lifecycle_operators['generic'] - - return operator - - def _load_armada_manifest_operators(self): - """Build a dictionary of armada manifest operators""" - - operators_dict = {} - dist_info_dict = {} - - armada_manifest_operators = extension.ExtensionManager( - namespace=self.STEVEDORE_ARMADA, - invoke_on_load=True, invoke_args=()) - - sorted_armada_manifest_operators = sorted( - armada_manifest_operators.extensions, key=lambda x: x.name) - - for op in sorted_armada_manifest_operators: - if (op.name[-(ARMADA_PLUGIN_SUFFIX_LENGTH - 1):].isdigit() and - op.name[-ARMADA_PLUGIN_SUFFIX_LENGTH:-3] == '_'): - op_name = op.name[0:-ARMADA_PLUGIN_SUFFIX_LENGTH] - else: - op_name = op.name - operators_dict[op_name] = op.obj - - distribution = utils.get_distribution_from_entry_point(op.entry_point) - (project_name, project_location) = \ - utils.get_project_name_and_location_from_distribution(distribution) - - # Extract distribution information for logging - dist_info_dict[op_name] = { - 'name': project_name, - 'location': project_location, - } - - # Provide some log feedback on plugins being used - for (app_name, info) in iteritems(dist_info_dict): - LOG.debug("Plugins for %-20s: loaded from %-20s - %s." 
% (app_name, - info['name'], info['location'])) - - return operators_dict - - def get_armada_manifest_operator(self, app_name): - """Return a manifest operator based on app name""" - - plugin_name = utils.find_app_plugin_name(app_name) - if plugin_name in self.armada_manifest_operators: - manifest_op = self.armada_manifest_operators[plugin_name] - else: - manifest_op = self.armada_manifest_operators['generic'] - return manifest_op - - def _load_helm_applications(self): - """Build a dictionary of supported helm applications""" - - helm_application_dict = {} - helm_applications = extension.ExtensionManager( - namespace=self.STEVEDORE_APPS, - on_load_failure_callback=suppress_stevedore_errors - ) - for entry_point in helm_applications.list_entry_points(): - helm_application_dict[entry_point.name] = \ - utils.get_module_name_from_entry_point(entry_point) - - supported_helm_applications = {} - for name, namespace in helm_application_dict.items(): - supported_helm_applications[name] = [] - helm_plugins = extension.ExtensionManager( - namespace=namespace, invoke_on_load=True, invoke_args=(self,)) - sorted_helm_plugins = sorted(helm_plugins.extensions, key=lambda x: x.name) - for plugin in sorted_helm_plugins: - distribution = utils.get_distribution_from_entry_point(plugin.entry_point) - (project_name, project_location) = \ - utils.get_project_name_and_location_from_distribution(distribution) - - LOG.debug("%s: helm plugin %s loaded from %s - %s." % (name, - plugin.name, - project_name, - project_location)) - - plugin_name = plugin.name[HELM_PLUGIN_PREFIX_LENGTH:] - self.chart_operators.update({plugin_name: plugin.obj}) - # Remove duplicates, keeping last occurrence only - if plugin_name in supported_helm_applications[name]: - supported_helm_applications[name].remove(plugin_name) - supported_helm_applications[name].append(plugin_name) - - return supported_helm_applications - - def get_active_helm_applications(self): - """ Get the active system applications and charts """ - return self.helm_system_applications - - @property - def context(self): - thread_context = eventlet.greenthread.getcurrent() - return getattr(thread_context, '_helm_context') - - def get_helm_chart_namespaces_by_app(self, chart_name, app_name): - """Get supported chart namespaces for a given application. - - This method retrieves the namespace supported by a given chart. - - :param chart_name: name of the chart - :param app_name: name of the application - :returns: list of supported namespaces that associated overrides may be - provided. - """ - - namespaces = [] - if chart_name in self.chart_operators: - app_plugin_name = utils.find_app_plugin_name(app_name) - - namespaces = self.chart_operators[chart_name].get_namespaces_by_app( - app_plugin_name) - return namespaces - - def get_helm_chart_namespaces(self, chart_name): - """Get supported chart namespaces. - - This method retrieves the namespace supported by a given chart. - - :param chart_name: name of the chart - :returns: list of supported namespaces that associated overrides may be - provided. - """ - - namespaces = [] - if chart_name in self.chart_operators: - namespaces = self.chart_operators[chart_name].get_namespaces() - return namespaces - - @helm_context - def get_helm_chart_overrides(self, chart_name, cnamespace=None): - """ RPCApi: Gets the *chart* overrides for a supported chart. """ - return self._get_helm_chart_overrides(chart_name, cnamespace) - - def _get_helm_chart_overrides(self, chart_name, cnamespace=None): - """Get the overrides for a supported chart. 
- - This method retrieves overrides for a supported chart. Overrides for - all supported namespaces will be returned unless a specific namespace - is requested. - - :param chart_name: name of a supported chart - :param cnamespace: (optional) namespace - :returns: dict of overrides. - - Example Without a cnamespace parameter: - { - 'kube-system': { - 'deployment': { - 'mode': 'cluster', - 'type': 'DaemonSet' - }, - }, - 'openstack': { - 'pod': { - 'replicas': { - 'server': 1 - } - } - } - } - - Example with a cnamespace parameter: cnamespace='kube-system' - { - 'deployment': { - 'mode': 'cluster', - 'type': 'DaemonSet' - } - } - """ - overrides = {} - if chart_name in self.chart_operators: - try: - overrides.update( - self.chart_operators[chart_name].get_overrides( - cnamespace)) - except exception.InvalidHelmNamespace: - raise - return overrides - - def get_helm_application_namespaces(self, app_name): - """Get supported application namespaces. - - This method retrieves a dict of charts and their supported namespaces - for an application. - - :param app_name: name of the bundle of charts required to support an - application - :returns: dict of charts and supported namespaces that associated - overrides may be provided. - """ - - app, plugin_name = self._find_kube_app_and_app_plugin_name(app_name) - - app_namespaces = {} - if plugin_name in self.helm_system_applications: - for chart_name in self.helm_system_applications[plugin_name]: - try: - app_namespaces.update( - {chart_name: - self.get_helm_chart_namespaces_by_app( - chart_name, app_name)}) - except exception.InvalidHelmNamespace as e: - LOG.info(e) - else: - # Generic apps - db_namespaces = self.dbapi.helm_override_get_all(app.id) - for chart in db_namespaces: - app_namespaces.setdefault( - chart.name, []).append(chart.namespace) - - return app_namespaces - - @helm_context - def get_helm_application_overrides(self, app_name, cnamespace=None): - """RPCApi: Gets the application overrides for a supported set of charts.""" - return self._get_helm_application_overrides(app_name, cnamespace) - - def _get_helm_application_overrides(self, app_name, cnamespace=None): - """Get the overrides for a supported set of charts. - - This method retrieves overrides for a set of supported charts that - comprise an application. Overrides for all charts and all supported - namespaces will be returned unless a specific namespace is requested. - - If a specific namespace is requested, then only charts that support - that specified namespace will be returned. - - :param app_name: name of a supported application (set of charts) - :param cnamespace: (optional) namespace - :returns: dict of overrides. - - Example: - { - 'ingress': { - 'kube-system': { - 'deployment': { - 'mode': 'cluster', - 'type': 'DaemonSet' - }, - }, - 'openstack': { - 'pod': { - 'replicas': { - 'server': 1 - } - } - } - }, - 'glance': { - 'openstack': { - 'pod': { - 'replicas': { - 'server': 1 - } - } - } - } - } - """ - overrides = {} - plugin_name = utils.find_app_plugin_name(app_name) - - if plugin_name in self.helm_system_applications: - for chart_name in self.helm_system_applications[plugin_name]: - try: - overrides.update({chart_name: - self._get_helm_chart_overrides( - chart_name, - cnamespace)}) - except exception.InvalidHelmNamespace as e: - LOG.info(e) - return overrides - - def _get_helm_chart_location(self, chart_name, repo_name, chart_tarfile): - """Get the chart location. - - This method returns the download location for a given chart. 
- - :param chart_name: name of the chart - :param repo_name: name of the repo that chart uploaded to - :param chart_tarfile: name of the chart tarfile - :returns: a URL as location - """ - if repo_name is None: - repo_name = common.HELM_REPO_FOR_APPS - if chart_tarfile is None: - # TODO: Clean up the assumption - chart_tarfile = chart_name + '-0.1.0' - # Set the location based on ip address since - # http://controller does not resolve in armada container. - sys_controller_network = self.dbapi.network_get_by_type(constants.NETWORK_TYPE_CLUSTER_HOST) - sys_controller_network_addr_pool = self.dbapi.address_pool_get(sys_controller_network.pool_uuid) - sc_float_ip = sys_controller_network_addr_pool.floating_address - if utils.is_valid_ipv6(sc_float_ip): - sc_float_ip = '[' + sc_float_ip + ']' - return 'http://{}:{}/helm_charts/{}/{}.tgz'.format( - sc_float_ip, - utils.get_http_port(self.dbapi), repo_name, chart_tarfile) - - def _add_armada_override_header(self, chart_name, chart_metadata_name, repo_name, - chart_tarfile, namespace, overrides): - if chart_metadata_name is None: - chart_metadata_name = namespace + '-' + chart_name - - new_overrides = { - 'schema': 'armada/Chart/v1', - 'metadata': { - 'schema': 'metadata/Document/v1', - 'name': chart_metadata_name - }, - 'data': { - 'values': overrides - } - } - location = self._get_helm_chart_location(chart_name, repo_name, chart_tarfile) - if location: - new_overrides['data'].update({ - 'source': { - 'location': location - } - }) - return new_overrides - - def _get_chart_info_from_armada_chart(self, chart_name, chart_namespace, - chart_info_list): - """ Extract the metadata name of the armada chart, repo and the name of - the chart tarfile from the armada manifest chart. - - :param chart_name: name of the chart from the (application list) - :param chart_namespace: namespace of the chart - :param chart_info_list: a list of chart objects containing information - extracted from the armada manifest - :returns: the metadata name of the chart, the supported StarlingX repository, - the name of the chart tarfile or None,None,None if not present - """ - - # Could be called without any armada_manifest info. Returning 'None' - # will enable helm defaults to point to common.HELM_REPO_FOR_APPS - metadata_name = None - repo = None - chart_tarfile = None - if chart_info_list is None: - return metadata_name, repo, chart_tarfile - - location = None - for c in chart_info_list: - if (c.name == chart_name and - c.namespace == chart_namespace): - location = c.location - metadata_name = c.metadata_name - break - - if location: - match = re.search('/helm_charts/(.*)/(.*).tgz', location) - if match: - repo = match.group(1) - chart_tarfile = match.group(2) - LOG.debug("Chart %s can be found in repo: %s" % (chart_name, repo)) - return metadata_name, repo, chart_tarfile - - def merge_overrides(self, file_overrides=None, set_overrides=None): - """ Merge helm overrides together. - - :param values: A dict of different types of user override values, - 'files' (which generally specify many overrides) and - 'set' (which generally specify one override). - """ - - if file_overrides is None: - file_overrides = [] - if set_overrides is None: - set_overrides = [] - # At this point we have potentially two separate types of overrides - # specified by system or user, values from files and values passed in - # via --set . We need to ensure that we call helm using the same - # mechanisms to ensure the same behaviour. 
- args = [] - - # Process the newly-passed-in override values - tmpfiles = [] - - for value_file in file_overrides: - # For values passed in from files, write them back out to - # temporary files. - tmpfile = tempfile.NamedTemporaryFile(delete=False) - tmpfile.write(value_file) - tmpfile.close() - tmpfiles.append(tmpfile.name) - args.extend(['--values', tmpfile.name]) - - for value_set in set_overrides: - keypair = list(value_set.split("=")) - - # request user to input with "--set key=value" or - # "--set key=", for the second case, the value is assume "" - # skip setting like "--set =value", "--set xxxx" - if len(keypair) == 2 and keypair[0]: - if keypair[1] and keypair[1].isdigit(): - args.extend(['--set-string', value_set]) - else: - args.extend(['--set', value_set]) - - try: - # Apply changes by calling out to helm to do values merge - # using a dummy chart. - output = helm_utils.install_helm_chart_with_dry_run(args) - # Extract the info we want. - values = output.split('USER-SUPPLIED VALUES:\n')[1].split( - '\nCOMPUTED VALUES:')[0] - except Exception as e: - LOG.error("Failed to merge overrides %s" % e) - raise - - for tmpfile in tmpfiles: - os.remove(tmpfile) - - return values - - @helm_context - def generate_helm_chart_overrides(self, path, chart_name, cnamespace=None): - """Generate system helm chart overrides - - This method will generate system helm chart override an write them to a - yaml file for use with the helm command. If the namespace is provided - only the overrides file for that specified namespace will be written. - - :param chart_name: name of a supported chart - :param cnamespace: (optional) namespace - """ - - if chart_name in self.chart_operators: - namespaces = self.chart_operators[chart_name].get_namespaces() - if cnamespace and cnamespace not in namespaces: - LOG.exception("The %s chart does not support namespace: %s" % - (chart_name, cnamespace)) - return - - try: - overrides = self._get_helm_chart_overrides( - chart_name, - cnamespace) - self._write_chart_overrides(path, - chart_name, - cnamespace, - overrides) - except Exception as e: - LOG.exception("failed to create chart overrides for %s: %s" % - (chart_name, e)) - elif chart_name: - LOG.exception("%s chart is not supported" % chart_name) - else: - LOG.exception("chart name is required") - - @helm_context - @utils.synchronized(LOCK_NAME) - def generate_helm_application_overrides(self, path, app_name, - mode=None, - cnamespace=None, - armada_format=False, - armada_chart_info=None, - combined=False, - is_fluxcd_app=False): - """Create the system overrides files for a supported application - - This method will generate system helm chart overrides yaml files for a - set of supported charts that comprise an application. If the namespace - is provided only the overrides files for that specified namespace will - be written. 
- - :param app_name: name of the bundle of charts required to support an - application - :param mode: mode to control how to apply application manifest - :param cnamespace: (optional) namespace - :param armada_format: (optional) whether to emit in armada format - instead of helm format (with extra header) - :param armada_chart_info: (optional) supporting chart information - extracted from the armada manifest which is used to influence - overrides - :param combined: (optional) whether to apply user overrides on top of - system overrides - :param is_fluxcd_app: whether the app is fluxcd or not - """ - - app, plugin_name = self._find_kube_app_and_app_plugin_name(app_name) - - if is_fluxcd_app: - armada_format = False - - else: - # Get a manifest operator to provide a single point of - # manipulation for the chart, chart group and manifest schemas - manifest_op = self.get_armada_manifest_operator(app.name) - - # Load the manifest into the operator - armada_manifest = utils.generate_synced_armada_manifest_fqpn( - app.name, app.app_version, app.manifest_file) - manifest_op.load(armada_manifest) - - - if plugin_name in self.helm_system_applications: - app_overrides = self._get_helm_application_overrides(plugin_name, - cnamespace) - for (chart_name, overrides) in iteritems(app_overrides): - if combined: - # The overrides at this point are the system overrides. For - # charts with multiple namespaces, the overrides would - # contain multiple keys, one for each namespace. - # - # Retrieve the user overrides of each namespace from the - # database and merge this list of user overrides, if they - # exist, with the system overrides. Both system and user - # override contents are then merged based on the namespace, - # prepended with required header and written to - # corresponding files (-.yaml). - file_overrides = [] - for chart_namespace in overrides.keys(): - try: - db_chart = self.dbapi.helm_override_get( - app.id, chart_name, chart_namespace) - db_user_overrides = db_chart.user_overrides - if db_user_overrides: - file_overrides.append(yaml.dump( - {chart_namespace: yaml.load(db_user_overrides)})) - except exception.HelmOverrideNotFound: - pass - - if file_overrides: - # Use dump() instead of safe_dump() as the latter is - # not agreeable with password regex in some overrides - system_overrides = yaml.dump(overrides) - file_overrides.insert(0, system_overrides) - combined_overrides = self.merge_overrides( - file_overrides=file_overrides) - overrides = yaml.load(combined_overrides) - - # If armada formatting is wanted, we need to change the - # structure of the yaml file somewhat - if armada_format: - for key in overrides: - metadata_name, repo_name, chart_tarfile = \ - self._get_chart_info_from_armada_chart(chart_name, key, - armada_chart_info) - new_overrides = self._add_armada_override_header( - chart_name, metadata_name, repo_name, chart_tarfile, - key, overrides[key]) - overrides[key] = new_overrides - self._write_chart_overrides(path, chart_name, cnamespace, overrides) - - if not is_fluxcd_app: - # Update manifest docs based on the plugin directives. If the - # application does not provide a manifest operator, the - # GenericArmadaManifestOperator is used and chart specific - # operations can be skipped. 
- if manifest_op.APP: - if chart_name in self.chart_operators: - self.chart_operators[chart_name].execute_manifest_updates( - manifest_op) - - if not is_fluxcd_app: - # Update the manifest based on platform conditions - manifest_op.platform_mode_manifest_updates(self.dbapi, mode) - - else: - # Generic applications - for chart in armada_chart_info: - try: - db_chart = self.dbapi.helm_override_get( - app.id, chart.name, chart.namespace) - except exception.HelmOverrideNotFound: - # This routine is to create helm overrides entries - # in database during application-upload so that user - # can list the supported helm chart overrides of the - # application via helm-override-list - try: - values = { - 'name': chart.name, - 'namespace': chart.namespace, - 'app_id': app.id, - } - db_chart = self.dbapi.helm_override_create(values=values) - except Exception as e: - LOG.exception(e) - return - - user_overrides = {chart.namespace: {}} - db_user_overrides = db_chart.user_overrides - if db_user_overrides: - user_overrides = yaml.load(yaml.dump( - {chart.namespace: yaml.load(db_user_overrides)})) - - if armada_format: - metadata_name, repo_name, chart_tarfile =\ - self._get_chart_info_from_armada_chart(chart.name, chart.namespace, - armada_chart_info) - new_overrides = self._add_armada_override_header( - chart.name, metadata_name, repo_name, chart_tarfile, - chart.namespace, user_overrides[chart.namespace]) - user_overrides[chart.namespace] = new_overrides - - self._write_chart_overrides(path, chart.name, - cnamespace, user_overrides) - - if not is_fluxcd_app: - # Write the manifest doc overrides, a summmary file for easy --value - # generation on the apply, and a unified manifest for deletion. - manifest_op.save_overrides() - manifest_op.save_summary(path=path) - manifest_op.save_delete_manifest() - - def _find_kube_app_and_app_plugin_name(self, app_name): - return utils.find_kube_app(self.dbapi, app_name), \ - utils.find_app_plugin_name(app_name) - - def remove_helm_chart_overrides(self, path, chart_name, cnamespace=None): - """Remove the overrides files for a chart""" - - if chart_name in self.chart_operators: - namespaces = self.chart_operators[chart_name].get_namespaces() - - filenames = [] - if cnamespace and cnamespace in namespaces: - filenames.append("%s-%s.yaml" % (cnamespace, chart_name)) - else: - for n in namespaces: - filenames.append("%s-%s.yaml" % (n, chart_name)) - - for f in filenames: - try: - self._remove_overrides(path, f) - except Exception as e: - LOG.exception("failed to remove %s overrides: %s: %s" % ( - chart_name, f, e)) - else: - LOG.exception("chart %s not supported for system overrides" % - chart_name) - - def _write_chart_overrides(self, path, chart_name, cnamespace, overrides): - """Write a one or more overrides files for a chart. """ - - def _write_file(filename, values): - try: - self._write_overrides(path, filename, values) - except Exception as e: - LOG.exception("failed to write %s overrides: %s: %s" % ( - chart_name, filename, e)) - - if cnamespace: - _write_file("%s-%s.yaml" % (cnamespace, chart_name), overrides) - else: - for ns in overrides.keys(): - _write_file("%s-%s.yaml" % (ns, chart_name), overrides[ns]) - - def _write_overrides(self, path, filename, overrides): - """Write a single overrides file. 
""" - - if not os.path.isdir(path): - os.makedirs(path) - - filepath = os.path.join(path, filename) - try: - fd, tmppath = tempfile.mkstemp(dir=path, prefix=filename, - text=True) - - with open(tmppath, 'w') as f: - yaml.dump(overrides, f, default_flow_style=False) - os.close(fd) - os.rename(tmppath, filepath) - # Change the permission to be readable to non-root users(ie.Armada) - os.chmod(filepath, 0o644) - except Exception: - LOG.exception("failed to write overrides file: %s" % filepath) - raise - - def _remove_overrides(self, path, filename): - """Remove a single overrides file. """ - - filepath = os.path.join(path, filename) - try: - if os.path.exists(filepath): - os.unlink(filepath) - except Exception: - LOG.exception("failed to delete overrides file: %s" % filepath) - raise - - @helm_context - def version_check(self, app_name, app_version): - """Validate application version""" - if app_name in self.helm_system_applications: - for chart_name in self.helm_system_applications[app_name]: - if not self.chart_operators[chart_name].version_check(app_version): - LOG.info("Unsupported version reported by %s: %s %s" % ( - chart_name, app_name, app_version)) - return False - - # Return True by default - return True - - -class HelmOperatorData(HelmOperator): - """Class to allow retrieval of helm managed data""" - - # TODO (rchurch): decouple. Plugin chart names. This class needs to be - # delivered as a plugin. - HELM_CHART_KEYSTONE = 'keystone' - HELM_CHART_NOVA = 'nova' - HELM_CHART_CINDER = 'cinder' - HELM_CHART_GLANCE = 'glance' - HELM_CHART_NEUTRON = 'neutron' - HELM_CHART_HEAT = 'heat' - HELM_CHART_CEILOMETER = 'ceilometer' - HELM_CHART_DCDBSYNC = 'dcdbsync' - - @helm_context - def get_keystone_auth_data(self): - keystone_operator = self.chart_operators[self.HELM_CHART_KEYSTONE] - - # use stx_admin account to communicate with openstack app - username = common.USER_STX_ADMIN - try: - password = keystone_operator.get_stx_admin_password() - except Exception: - # old version app doesn't support stx_admin account yet. 
- # fallback to admin account - username = keystone_operator.get_admin_user_name() - password = keystone_operator.get_admin_password() - - auth_data = { - 'admin_user_name': - username, - 'admin_project_name': - keystone_operator.get_admin_project_name(), - 'auth_host': - 'keystone.openstack.svc.cluster.local', - 'auth_port': 80, - 'admin_user_domain': - keystone_operator.get_admin_user_domain(), - 'admin_project_domain': - keystone_operator.get_admin_project_domain(), - 'admin_password': - password, - } - return auth_data - - @helm_context - def get_keystone_endpoint_data(self): - keystone_operator = self.chart_operators[self.HELM_CHART_KEYSTONE] - endpoint_data = { - 'endpoint_override': - 'http://keystone.openstack.svc.cluster.local:80', - 'region_name': - keystone_operator.get_region_name(), - } - return endpoint_data - - @helm_context - def get_keystone_oslo_db_data(self): - keystone_operator = self.chart_operators[self.HELM_CHART_KEYSTONE] - endpoints_overrides = keystone_operator.\ - _get_endpoints_oslo_db_overrides(self.HELM_CHART_KEYSTONE, - ['keystone']) - - password = endpoints_overrides['keystone']['password'] - connection = "mysql+pymysql://keystone:%s@" \ - "mariadb.openstack.svc.cluster.local:3306/keystone"\ - % (password) - - endpoint_data = { - 'connection': - connection, - } - return endpoint_data - - @helm_context - def get_nova_endpoint_data(self): - nova_operator = self.chart_operators[self.HELM_CHART_NOVA] - endpoint_data = { - 'endpoint_override': - 'http://nova-api-internal.openstack.svc.cluster.local:80', - 'region_name': - nova_operator.get_region_name(), - } - return endpoint_data - - @helm_context - def get_nova_oslo_messaging_data(self): - nova_operator = self.chart_operators[self.HELM_CHART_NOVA] - endpoints_overrides = nova_operator._get_endpoints_overrides() - auth_data = { - 'host': - 'rabbitmq.openstack.svc.cluster.local', - 'port': - 5672, - 'virt_host': - 'nova', - 'username': - endpoints_overrides['oslo_messaging']['auth']['nova'] - ['username'], - 'password': - endpoints_overrides['oslo_messaging']['auth']['nova'] - ['password'], - } - return auth_data - - @helm_context - def get_cinder_endpoint_data(self): - cinder_operator = self.chart_operators[self.HELM_CHART_CINDER] - endpoint_data = { - 'region_name': - cinder_operator.get_region_name(), - 'service_name': - cinder_operator.get_service_name_v2(), - 'service_type': - cinder_operator.get_service_type_v2(), - } - return endpoint_data - - @helm_context - def get_glance_endpoint_data(self): - glance_operator = self.chart_operators[self.HELM_CHART_GLANCE] - endpoint_data = { - 'region_name': - glance_operator.get_region_name(), - 'service_name': - glance_operator.get_service_name(), - 'service_type': - glance_operator.get_service_type(), - } - return endpoint_data - - @helm_context - def get_neutron_endpoint_data(self): - neutron_operator = self.chart_operators[self.HELM_CHART_NEUTRON] - endpoint_data = { - 'region_name': - neutron_operator.get_region_name(), - } - return endpoint_data - - @helm_context - def get_heat_endpoint_data(self): - heat_operator = self.chart_operators[self.HELM_CHART_HEAT] - endpoint_data = { - 'region_name': - heat_operator.get_region_name(), - } - return endpoint_data - - @helm_context - def get_ceilometer_endpoint_data(self): - ceilometer_operator = \ - self.chart_operators[self.HELM_CHART_CEILOMETER] - endpoint_data = { - 'region_name': - ceilometer_operator.get_region_name(), - } - return endpoint_data - - @helm_context - def get_dcdbsync_endpoint_data(self): - 
dcdbsync_operator = self.chart_operators[self.HELM_CHART_DCDBSYNC] - endpoints_overrides = dcdbsync_operator._get_endpoints_overrides() - endpoint_data = { - 'keystone_password': - endpoints_overrides['identity']['auth']['dcdbsync'] - ['password'], - } - return endpoint_data diff --git a/tools/debian-integration/source-debian/interfaces.vm.example b/tools/debian-integration/source-debian/interfaces.vm.example deleted file mode 100644 index e3c05f2c..00000000 --- a/tools/debian-integration/source-debian/interfaces.vm.example +++ /dev/null @@ -1,39 +0,0 @@ -# HEADER: This file is being managed by puppet. Changes to -# HEADER: interfaces that are not being managed by puppet will persist; -# HEADER: however changes to interfaces that are being managed by puppet will -# HEADER: be overwritten. In addition, file order is NOT guaranteed. -# HEADER: Last generated at: 2022-02-15 12:23:56 +0000 by Andre Kantek - - -auto enp0s3 lo lo:1 lo:5 - -iface enp0s3 inet static -address 10.41.4.3 -netmask 255.255.255.0 - gateway 10.41.4.1 - mtu 1500 - pre-up sleep 20; /usr/sbin/sysctl -w net.ipv6.conf.enp0s3.autoconf=0 net.ipv6.conf.enp0s3.accept_ra=0 net.ipv6.conf.enp0s3.accept_redirects=0 - -iface lo inet static -address 192.168.204.2 -netmask 255.255.255.0 - mtu 1500 - post-up /usr/local/bin/tc_setup.sh lo mgmt 10000 > /dev/null - pre-up sleep 20; /usr/sbin/sysctl -w net.ipv6.conf.lo.autoconf=0 net.ipv6.conf.lo.accept_ra=0 net.ipv6.conf.lo.accept_redirects=0 - scope host - -iface lo:1 inet static -address 192.168.204.2 -netmask 255.255.255.0 - mtu 1500 - pre-up sleep 20; /usr/sbin/sysctl -w net.ipv6.conf.lo.autoconf=0 net.ipv6.conf.lo.accept_ra=0 net.ipv6.conf.lo.accept_redirects=0 - scope host - -iface lo:5 inet static -address 192.168.206.2 -netmask 255.255.255.0 - mtu 1500 - pre-up sleep 20; /usr/sbin/sysctl -w net.ipv6.conf.lo.autoconf=0 net.ipv6.conf.lo.accept_ra=0 net.ipv6.conf.lo.accept_redirects=0 - scope host - - diff --git a/tools/debian-integration/source-debian/worker_reserved.conf b/tools/debian-integration/source-debian/worker_reserved.conf deleted file mode 100644 index f28237d0..00000000 --- a/tools/debian-integration/source-debian/worker_reserved.conf +++ /dev/null @@ -1,55 +0,0 @@ -################################################################################ -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -################################################################################ -# WORKER Node configuration parameters for reserved memory and physical cores -# used by Base software and VSWITCH. These are resources that libvirt cannot use. -# - -################################################################################ -# -# List of logical CPU instances available in the system. This value is used -# for auditing purposes so that the current configuration can be checked for -# validity against the actual number of logical CPU instances in the system. -# -################################################################################ -WORKER_CPU_LIST="0-7" - -################################################################################ -# -# List of Base software resources reserved per numa node. Each array element -# consists of a 3-tuple formatted as: ::. -# -# Example: To reserve 1500MB and 1 core on NUMA node0, and 1500MB and 1 core -# on NUMA node1, the variable must be specified as follows. 
-# WORKER_BASE_MEMORY=("node0:1500MB:1" "node1:1500MB:1") -# -################################################################################ -WORKER_BASE_RESERVED=("node0:14500MB:1" "node1:2000MB:0" "node2:2000MB:0" "node3:2000MB:0") - -################################################################################ -# -# List of HugeTLB memory descriptors to configure. Each array element -# consists of a 3-tuple descriptor formatted as: ::. -# The NUMA node specified must exist and the HugeTLB pagesize must be a valid -# value such as 2048kB or 1048576kB. -# -# For example, to request 256 x 2MB HugeTLB pages on NUMA node0 and node1 the -# variable must be specified as follows. -# WORKER_VSWITCH_MEMORY=("node0:2048kB:256" "node1:2048kB:256") -# -################################################################################ -WORKER_VSWITCH_MEMORY=("node0:1048576kB:1" "node1:1048576kB:1" "node2:1048576kB:1" "node3:1048576kB:1") - -################################################################################ -# -# List of VSWITCH physical cores reserved for VSWITCH applications. -# -# Example: To reserve 2 cores on NUMA node0, and 2 cores on NUMA node1, the -# variable must be specified as follows. -# WORKER_VSWITCH_CORES=("node0:2" "node1:2") -# -################################################################################ -WORKER_VSWITCH_CORES=("node0:2" "node1:0" "node2:0" "node3:0")