Clean imports in code

This patch set modifies lines that import objects instead of modules. Per the OpenStack hacking guidelines, a file should import modules, not objects: http://docs.openstack.org/developer/hacking/#imports

Closes-Bug: #1620161
Change-Id: I7ec9022a6b1cec36c678a2cec2a1856e70a51c68
parent 5476354e65
commit 33bd24252f
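As a minimal illustration of the pattern this patch applies throughout (this snippet is not part of the commit; the `generator` name and usage are hypothetical):

# Object import: binds Random directly into this namespace, hides where
# the name comes from, and is harder to stub out in tests.
#   from random import Random
#   generator = Random()

# Module import, per the hacking guideline: import the module and
# reference the object through it.
import random

generator = random.Random()  # hypothetical usage, not from the patch
print(generator.choice(['alpha', 'beta', 'gamma']))

Referencing names through their module keeps a single patch point (e.g. mocking `random.Random` in tests) and makes the origin of every name obvious at the call site, which is the rationale behind every hunk below.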
@@ -13,7 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from random import Random
+import random


 class NameGenerator(object):
@@ -23,7 +23,7 @@ class NameGenerator(object):
                'phi', 'chi', 'psi', 'omega']

     def __init__(self):
-        self.random = Random()
+        self.random = random.Random()

     def generate(self):
         '''Generate a random name compose of a Greek leter and
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from enum import Enum
+import enum


-class Extensions(Enum):
+class Extensions(enum.Enum):
     __order__ = ('AUTHORITY_KEY_IDENTIFIER SUBJECT_KEY_IDENTIFIER '
                  'AUTHORITY_INFORMATION_ACCESS BASIC_CONSTRAINTS '
                  'CRL_DISTRIBUTION_POINTS CERTIFICATE_POLICIES '
@@ -38,7 +38,7 @@ class Extensions(Enum):
     ISSUER_ALTERNATIVE_NAME = "issuerAltName"


-class KeyUsages(Enum):
+class KeyUsages(enum.Enum):
     __order__ = ('DIGITAL_SIGNATURE CONTENT_COMMITMENT KEY_ENCIPHERMENT '
                  'DATA_ENCIPHERMENT KEY_AGREEMENT KEY_CERT_SIGN '
                  'CRL_SIGN ENCIPHER_ONLY DECIPHER_ONLY')
@@ -21,7 +21,6 @@ from cryptography.hazmat.primitives.asymmetric import rsa
 from cryptography.hazmat.primitives import hashes
 from cryptography.hazmat.primitives import serialization
 from cryptography import x509
-from cryptography.x509 import Extension
 from oslo_config import cfg
 from oslo_log import log as logging

@@ -70,13 +69,13 @@ def _build_client_extentions():
     # Digital Signature and Key Encipherment are enabled
     key_usage = x509.KeyUsage(True, False, True, False, False, False, False,
                               False, False)
-    key_usage = Extension(key_usage.oid, True, key_usage)
+    key_usage = x509.Extension(key_usage.oid, True, key_usage)
     extended_key_usage = x509.ExtendedKeyUsage([x509.OID_CLIENT_AUTH])
-    extended_key_usage = Extension(extended_key_usage.oid, False,
-                                   extended_key_usage)
+    extended_key_usage = x509.Extension(extended_key_usage.oid, False,
+                                        extended_key_usage)
     basic_constraints = x509.BasicConstraints(ca=False, path_length=None)
-    basic_constraints = Extension(basic_constraints.oid, True,
-                                  basic_constraints)
+    basic_constraints = x509.Extension(basic_constraints.oid, True,
+                                       basic_constraints)

     return [key_usage, extended_key_usage, basic_constraints]

@@ -85,10 +84,10 @@ def _build_ca_extentions():
     # Certificate Sign is enabled
     key_usage = x509.KeyUsage(False, False, False, False, False, True, False,
                               False, False)
-    key_usage = Extension(key_usage.oid, True, key_usage)
+    key_usage = x509.Extension(key_usage.oid, True, key_usage)
     basic_constraints = x509.BasicConstraints(ca=True, path_length=0)
-    basic_constraints = Extension(basic_constraints.oid, True,
-                                  basic_constraints)
+    basic_constraints = x509.Extension(basic_constraints.oid, True,
+                                       basic_constraints)

     return [basic_constraints, key_usage]

@@ -15,7 +15,7 @@
 from cryptography import x509
 from oslo_config import cfg

-from magnum.common.exception import CertificateValidationError
+from magnum.common import exception
 from magnum.common.x509 import extensions

 _CA_KEY_USAGES = [
@@ -55,7 +55,7 @@ def filter_allowed_extensions(extensions, allowed_extensions=None):
             yield ext
         else:
             if ext.critical:
-                raise CertificateValidationError(extension=ext)
+                raise exception.CertificateValidationError(extension=ext)


 def _merge_key_usage(key_usage, allowed_key_usage):
@@ -74,7 +74,8 @@ def _merge_key_usage(key_usage, allowed_key_usage):
         if value:
             if k not in allowed_key_usage:
                 if critical:
-                    raise CertificateValidationError(extension=key_usage)
+                    raise exception.CertificateValidationError(
+                        extension=key_usage)
                 else:
                     value = False
         usages.append(value)
@@ -95,7 +96,8 @@ def _remove_ca_key_usage(allowed_key_usage):
 def _disallow_ca_in_basic_constraints(basic_constraints):
     if basic_constraints.value.ca:
         if basic_constraints.critical:
-            raise CertificateValidationError(extension=basic_constraints)
+            raise exception.CertificateValidationError(
+                extension=basic_constraints)

     bc = x509.BasicConstraints(False, None)
     return x509.Extension(bc.oid, False, bc)
@@ -30,12 +30,12 @@ from magnum.conductor.handlers.common import cert_manager
 from magnum.conductor.handlers.common import trust_manager
 from magnum.conductor import scale_manager
 from magnum.conductor import utils as conductor_utils
-from magnum.drivers.common.template_def import TemplateDefinition as TDef
+from magnum.drivers.common import template_def
 from magnum.i18n import _
 from magnum.i18n import _LE
 from magnum.i18n import _LI
 from magnum import objects
-from magnum.objects.fields import BayStatus as bay_status
+from magnum.objects import fields


 cluster_heat_opts = [
@@ -73,9 +73,10 @@ def _extract_template_definition(context, bay, scale_manager=None):
     cluster_distro = cluster_template.cluster_distro
     cluster_coe = cluster_template.coe
     cluster_server_type = cluster_template.server_type
-    definition = TDef.get_template_definition(cluster_server_type,
-                                              cluster_distro,
-                                              cluster_coe)
+    definition = template_def.TemplateDefinition.get_template_definition(
+        cluster_server_type,
+        cluster_distro,
+        cluster_coe)
     return definition.extract_definition(context, cluster_template, bay,
                                          scale_manager=scale_manager)

@@ -173,7 +174,7 @@ class Handler(object):
             raise

         bay.stack_id = created_stack['stack']['id']
-        bay.status = bay_status.CREATE_IN_PROGRESS
+        bay.status = fields.BayStatus.CREATE_IN_PROGRESS
         bay.create()

         self._poll_and_check(osc, bay)
@@ -186,14 +187,14 @@ class Handler(object):
         osc = clients.OpenStackClients(context)
         stack = osc.heat().stacks.get(bay.stack_id)
         allow_update_status = (
-            bay_status.CREATE_COMPLETE,
-            bay_status.UPDATE_COMPLETE,
-            bay_status.RESUME_COMPLETE,
-            bay_status.RESTORE_COMPLETE,
-            bay_status.ROLLBACK_COMPLETE,
-            bay_status.SNAPSHOT_COMPLETE,
-            bay_status.CHECK_COMPLETE,
-            bay_status.ADOPT_COMPLETE
+            fields.BayStatus.CREATE_COMPLETE,
+            fields.BayStatus.UPDATE_COMPLETE,
+            fields.BayStatus.RESUME_COMPLETE,
+            fields.BayStatus.RESTORE_COMPLETE,
+            fields.BayStatus.ROLLBACK_COMPLETE,
+            fields.BayStatus.SNAPSHOT_COMPLETE,
+            fields.BayStatus.CHECK_COMPLETE,
+            fields.BayStatus.ADOPT_COMPLETE
         )
         if stack.stack_status not in allow_update_status:
             conductor_utils.notify_about_bay_operation(
@@ -255,7 +256,7 @@ class Handler(object):
                 context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
             raise

-        bay.status = bay_status.DELETE_IN_PROGRESS
+        bay.status = fields.BayStatus.DELETE_IN_PROGRESS
         bay.save()

         self._poll_and_check(osc, bay)
@@ -277,9 +278,11 @@ class HeatPoller(object):
         self.attempts = 0
         self.cluster_template = conductor_utils.retrieve_cluster_template(
             self.context, bay)
-        self.template_def = TDef.get_template_definition(
-            self.cluster_template.server_type,
-            self.cluster_template.cluster_distro, self.cluster_template.coe)
+        self.template_def = \
+            template_def.TemplateDefinition.get_template_definition(
+                self.cluster_template.server_type,
+                self.cluster_template.cluster_distro,
+                self.cluster_template.coe)

     def poll_and_check(self):
         # TODO(yuanying): temporary implementation to update api_address,
@@ -287,26 +290,26 @@ class HeatPoller(object):
         stack = self.openstack_client.heat().stacks.get(self.bay.stack_id)
         self.attempts += 1
         status_to_event = {
-            bay_status.DELETE_COMPLETE: taxonomy.ACTION_DELETE,
-            bay_status.CREATE_COMPLETE: taxonomy.ACTION_CREATE,
-            bay_status.UPDATE_COMPLETE: taxonomy.ACTION_UPDATE,
-            bay_status.ROLLBACK_COMPLETE: taxonomy.ACTION_UPDATE,
-            bay_status.CREATE_FAILED: taxonomy.ACTION_CREATE,
-            bay_status.DELETE_FAILED: taxonomy.ACTION_DELETE,
-            bay_status.UPDATE_FAILED: taxonomy.ACTION_UPDATE,
-            bay_status.ROLLBACK_FAILED: taxonomy.ACTION_UPDATE
+            fields.BayStatus.DELETE_COMPLETE: taxonomy.ACTION_DELETE,
+            fields.BayStatus.CREATE_COMPLETE: taxonomy.ACTION_CREATE,
+            fields.BayStatus.UPDATE_COMPLETE: taxonomy.ACTION_UPDATE,
+            fields.BayStatus.ROLLBACK_COMPLETE: taxonomy.ACTION_UPDATE,
+            fields.BayStatus.CREATE_FAILED: taxonomy.ACTION_CREATE,
+            fields.BayStatus.DELETE_FAILED: taxonomy.ACTION_DELETE,
+            fields.BayStatus.UPDATE_FAILED: taxonomy.ACTION_UPDATE,
+            fields.BayStatus.ROLLBACK_FAILED: taxonomy.ACTION_UPDATE
         }
         # poll_and_check is detached and polling long time to check status,
         # so another user/client can call delete bay/stack.
-        if stack.stack_status == bay_status.DELETE_COMPLETE:
+        if stack.stack_status == fields.BayStatus.DELETE_COMPLETE:
             self._delete_complete()
             conductor_utils.notify_about_bay_operation(
                 self.context, status_to_event[stack.stack_status],
                 taxonomy.OUTCOME_SUCCESS)
             raise loopingcall.LoopingCallDone()

-        if stack.stack_status in (bay_status.CREATE_COMPLETE,
-                                  bay_status.UPDATE_COMPLETE):
+        if stack.stack_status in (fields.BayStatus.CREATE_COMPLETE,
+                                  fields.BayStatus.UPDATE_COMPLETE):
             self._sync_bay_and_template_status(stack)
             conductor_utils.notify_about_bay_operation(
                 self.context, status_to_event[stack.stack_status],
@@ -315,11 +318,11 @@ class HeatPoller(object):
         elif stack.stack_status != self.bay.status:
             self._sync_bay_status(stack)

-        if stack.stack_status in (bay_status.CREATE_FAILED,
-                                  bay_status.DELETE_FAILED,
-                                  bay_status.UPDATE_FAILED,
-                                  bay_status.ROLLBACK_COMPLETE,
-                                  bay_status.ROLLBACK_FAILED):
+        if stack.stack_status in (fields.BayStatus.CREATE_FAILED,
+                                  fields.BayStatus.DELETE_FAILED,
+                                  fields.BayStatus.UPDATE_FAILED,
+                                  fields.BayStatus.ROLLBACK_COMPLETE,
+                                  fields.BayStatus.ROLLBACK_FAILED):
             self._sync_bay_and_template_status(stack)
             self._bay_failed(stack)
             conductor_utils.notify_about_bay_operation(
@@ -329,7 +332,7 @@ class HeatPoller(object):
             # only check max attempts when the stack is being created when
             # the timeout hasn't been set. If the timeout has been set then
             # the loop will end when the stack completes or the timeout occurs
-            if stack.stack_status == bay_status.CREATE_IN_PROGRESS:
+            if stack.stack_status == fields.BayStatus.CREATE_IN_PROGRESS:
                 if (stack.timeout_mins is None and
                         self.attempts > cfg.CONF.cluster_heat.max_attempts):
                     LOG.error(_LE('Bay check exit after %(attempts)s attempts,'
@@ -375,7 +378,7 @@ class HeatPoller(object):
         if stack_param:
             self.bay.coe_version = stack.parameters[stack_param]

-        tdef = TDef.get_template_definition(
+        tdef = template_def.TemplateDefinition.get_template_definition(
             self.cluster_template.server_type,
             self.cluster_template.cluster_distro, self.cluster_template.coe)

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from tempfile import NamedTemporaryFile
+import tempfile

 from k8sclient.client import api_client
 from k8sclient.client.apis import apiv_api
@@ -33,7 +33,7 @@ class K8sAPI(apiv_api.ApivApi):
         :returns: temp file
         """
         try:
-            tmp = NamedTemporaryFile(delete=True)
+            tmp = tempfile.NamedTemporaryFile(delete=True)
             tmp.write(content)
             tmp.flush()
         except Exception as err:
@@ -14,10 +14,10 @@ import ast

 from magnum.common import utils
 from magnum.conductor import k8s_api as k8s
-from magnum.conductor.monitors import MonitorBase
+from magnum.conductor import monitors


-class K8sMonitor(MonitorBase):
+class K8sMonitor(monitors.MonitorBase):

     def __init__(self, context, bay):
         super(K8sMonitor, self).__init__(context, bay)
@@ -13,10 +13,10 @@
 from oslo_serialization import jsonutils

 from magnum.common import urlfetch
-from magnum.conductor.monitors import MonitorBase
+from magnum.conductor import monitors


-class MesosMonitor(MonitorBase):
+class MesosMonitor(monitors.MonitorBase):

     def __init__(self, context, bay):
         super(MesosMonitor, self).__init__(context, bay)
@@ -20,7 +20,7 @@ from oslo_log import log
 from oslo_utils import importutils
 import six

-from magnum.objects.fields import BayType as bay_type
+from magnum.objects import fields


 LOG = log.getLogger(__name__)
@@ -34,9 +34,9 @@ CONF.import_opt('default_timeout',
                 group='docker')

 COE_CLASS_PATH = {
-    bay_type.SWARM: 'magnum.conductor.swarm_monitor.SwarmMonitor',
-    bay_type.KUBERNETES: 'magnum.conductor.k8s_monitor.K8sMonitor',
-    bay_type.MESOS: 'magnum.conductor.mesos_monitor.MesosMonitor'
+    fields.BayType.SWARM: 'magnum.conductor.swarm_monitor.SwarmMonitor',
+    fields.BayType.KUBERNETES: 'magnum.conductor.k8s_monitor.K8sMonitor',
+    fields.BayType.MESOS: 'magnum.conductor.mesos_monitor.MesosMonitor'
 }

@@ -16,13 +16,13 @@
 from oslo_log import log

 from magnum.common import docker_utils
-from magnum.conductor.monitors import MonitorBase
+from magnum.conductor import monitors
 from magnum.i18n import _LW

 LOG = log.getLogger(__name__)


-class SwarmMonitor(MonitorBase):
+class SwarmMonitor(monitors.MonitorBase):

     def __init__(self, context, bay):
         super(SwarmMonitor, self).__init__(context, bay)
@@ -30,7 +30,7 @@ from magnum.i18n import _
 from magnum.i18n import _LI
 from magnum.i18n import _LW
 from magnum import objects
-from magnum.objects.fields import BayStatus as bay_status
+from magnum.objects import fields


 CONF = cfg.CONF
@@ -73,10 +73,10 @@ class MagnumPeriodicTasks(periodic_task.PeriodicTasks):
         try:
             LOG.debug('Starting to sync up bay status')
             osc = clients.OpenStackClients(ctx)
-            status = [bay_status.CREATE_IN_PROGRESS,
-                      bay_status.UPDATE_IN_PROGRESS,
-                      bay_status.DELETE_IN_PROGRESS,
-                      bay_status.ROLLBACK_IN_PROGRESS]
+            status = [fields.BayStatus.CREATE_IN_PROGRESS,
+                      fields.BayStatus.UPDATE_IN_PROGRESS,
+                      fields.BayStatus.DELETE_IN_PROGRESS,
+                      fields.BayStatus.ROLLBACK_IN_PROGRESS]
             filters = {'status': status}
             bays = objects.Bay.list(ctx, filters=filters)
             if not bays:
@@ -155,12 +155,12 @@ class MagnumPeriodicTasks(periodic_task.PeriodicTasks):
                          'status': bay.status})

     def _sync_missing_heat_stack(self, bay):
-        if bay.status == bay_status.DELETE_IN_PROGRESS:
+        if bay.status == fields.BayStatus.DELETE_IN_PROGRESS:
             self._sync_deleted_stack(bay)
-        elif bay.status == bay_status.CREATE_IN_PROGRESS:
-            self._sync_missing_stack(bay, bay_status.CREATE_FAILED)
-        elif bay.status == bay_status.UPDATE_IN_PROGRESS:
-            self._sync_missing_stack(bay, bay_status.UPDATE_FAILED)
+        elif bay.status == fields.BayStatus.CREATE_IN_PROGRESS:
+            self._sync_missing_stack(bay, fields.BayStatus.CREATE_FAILED)
+        elif bay.status == fields.BayStatus.UPDATE_IN_PROGRESS:
+            self._sync_missing_stack(bay, fields.BayStatus.UPDATE_FAILED)

     def _sync_deleted_stack(self, bay):
         try:
@@ -189,8 +189,8 @@ class MagnumPeriodicTasks(periodic_task.PeriodicTasks):
     def _send_bay_metrics(self, ctx):
         LOG.debug('Starting to send bay metrics')
         for bay in objects.Bay.list(ctx):
-            if bay.status not in [bay_status.CREATE_COMPLETE,
-                                  bay_status.UPDATE_COMPLETE]:
+            if bay.status not in [fields.BayStatus.CREATE_COMPLETE,
+                                  fields.BayStatus.UPDATE_COMPLETE]:
                 continue

             monitor = monitors.create_monitor(ctx, bay)