Clean imports in code
This patch set modifies lines that import objects instead of modules. Per the OpenStack hacking guidelines, modules should be imported, not individual objects: http://docs.openstack.org/developer/hacking/#imports

Closes-Bug: #1620161
Change-Id: I7ec9022a6b1cec36c678a2cec2a1856e70a51c68
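To illustrate the convention this change applies throughout (a minimal sketch; the Random example mirrors the name_generator hunk below):

    # Discouraged: importing the object hides where Random comes from
    # at each call site and complicates mocking in tests.
    from random import Random
    rng = Random()

    # Preferred: import the module and keep every reference qualified.
    import random
    rng = random.Random()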
commit 33bd24252f
parent 5476354e65
@@ -13,7 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from random import Random
+import random
 
 
 class NameGenerator(object):
@@ -23,7 +23,7 @@ class NameGenerator(object):
                'phi', 'chi', 'psi', 'omega']
 
     def __init__(self):
-        self.random = Random()
+        self.random = random.Random()
 
     def generate(self):
        '''Generate a random name compose of a Greek leter and
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from enum import Enum
+import enum
 
 
-class Extensions(Enum):
+class Extensions(enum.Enum):
     __order__ = ('AUTHORITY_KEY_IDENTIFIER SUBJECT_KEY_IDENTIFIER '
                  'AUTHORITY_INFORMATION_ACCESS BASIC_CONSTRAINTS '
                  'CRL_DISTRIBUTION_POINTS CERTIFICATE_POLICIES '
@@ -38,7 +38,7 @@ class Extensions(Enum):
     ISSUER_ALTERNATIVE_NAME = "issuerAltName"
 
 
-class KeyUsages(Enum):
+class KeyUsages(enum.Enum):
     __order__ = ('DIGITAL_SIGNATURE CONTENT_COMMITMENT KEY_ENCIPHERMENT '
                  'DATA_ENCIPHERMENT KEY_AGREEMENT KEY_CERT_SIGN '
                  'CRL_SIGN ENCIPHER_ONLY DECIPHER_ONLY')
@@ -21,7 +21,6 @@ from cryptography.hazmat.primitives.asymmetric import rsa
 from cryptography.hazmat.primitives import hashes
 from cryptography.hazmat.primitives import serialization
 from cryptography import x509
-from cryptography.x509 import Extension
 from oslo_config import cfg
 from oslo_log import log as logging
 
@@ -70,12 +69,12 @@ def _build_client_extentions():
     # Digital Signature and Key Encipherment are enabled
     key_usage = x509.KeyUsage(True, False, True, False, False, False, False,
                               False, False)
-    key_usage = Extension(key_usage.oid, True, key_usage)
+    key_usage = x509.Extension(key_usage.oid, True, key_usage)
     extended_key_usage = x509.ExtendedKeyUsage([x509.OID_CLIENT_AUTH])
-    extended_key_usage = Extension(extended_key_usage.oid, False,
+    extended_key_usage = x509.Extension(extended_key_usage.oid, False,
                                         extended_key_usage)
     basic_constraints = x509.BasicConstraints(ca=False, path_length=None)
-    basic_constraints = Extension(basic_constraints.oid, True,
+    basic_constraints = x509.Extension(basic_constraints.oid, True,
                                        basic_constraints)
 
     return [key_usage, extended_key_usage, basic_constraints]
@@ -85,9 +84,9 @@ def _build_ca_extentions():
     # Certificate Sign is enabled
     key_usage = x509.KeyUsage(False, False, False, False, False, True, False,
                               False, False)
-    key_usage = Extension(key_usage.oid, True, key_usage)
+    key_usage = x509.Extension(key_usage.oid, True, key_usage)
    basic_constraints = x509.BasicConstraints(ca=True, path_length=0)
-    basic_constraints = Extension(basic_constraints.oid, True,
+    basic_constraints = x509.Extension(basic_constraints.oid, True,
                                        basic_constraints)
 
     return [basic_constraints, key_usage]
@@ -15,7 +15,7 @@
 from cryptography import x509
 from oslo_config import cfg
 
-from magnum.common.exception import CertificateValidationError
+from magnum.common import exception
 from magnum.common.x509 import extensions
 
 _CA_KEY_USAGES = [
@@ -55,7 +55,7 @@ def filter_allowed_extensions(extensions, allowed_extensions=None):
             yield ext
         else:
             if ext.critical:
-                raise CertificateValidationError(extension=ext)
+                raise exception.CertificateValidationError(extension=ext)
 
 
 def _merge_key_usage(key_usage, allowed_key_usage):
@@ -74,7 +74,8 @@ def _merge_key_usage(key_usage, allowed_key_usage):
         if value:
             if k not in allowed_key_usage:
                 if critical:
-                    raise CertificateValidationError(extension=key_usage)
+                    raise exception.CertificateValidationError(
+                        extension=key_usage)
                 else:
                     value = False
         usages.append(value)
@@ -95,7 +96,8 @@ def _remove_ca_key_usage(allowed_key_usage):
 def _disallow_ca_in_basic_constraints(basic_constraints):
     if basic_constraints.value.ca:
         if basic_constraints.critical:
-            raise CertificateValidationError(extension=basic_constraints)
+            raise exception.CertificateValidationError(
+                extension=basic_constraints)
 
         bc = x509.BasicConstraints(False, None)
         return x509.Extension(bc.oid, False, bc)
@@ -30,12 +30,12 @@ from magnum.conductor.handlers.common import cert_manager
 from magnum.conductor.handlers.common import trust_manager
 from magnum.conductor import scale_manager
 from magnum.conductor import utils as conductor_utils
-from magnum.drivers.common.template_def import TemplateDefinition as TDef
+from magnum.drivers.common import template_def
 from magnum.i18n import _
 from magnum.i18n import _LE
 from magnum.i18n import _LI
 from magnum import objects
-from magnum.objects.fields import BayStatus as bay_status
+from magnum.objects import fields
 
 
 cluster_heat_opts = [
@@ -73,7 +73,8 @@ def _extract_template_definition(context, bay, scale_manager=None):
     cluster_distro = cluster_template.cluster_distro
     cluster_coe = cluster_template.coe
     cluster_server_type = cluster_template.server_type
-    definition = TDef.get_template_definition(cluster_server_type,
+    definition = template_def.TemplateDefinition.get_template_definition(
+        cluster_server_type,
         cluster_distro,
         cluster_coe)
     return definition.extract_definition(context, cluster_template, bay,
@@ -173,7 +174,7 @@ class Handler(object):
             raise
 
         bay.stack_id = created_stack['stack']['id']
-        bay.status = bay_status.CREATE_IN_PROGRESS
+        bay.status = fields.BayStatus.CREATE_IN_PROGRESS
         bay.create()
 
         self._poll_and_check(osc, bay)
@@ -186,14 +187,14 @@ class Handler(object):
         osc = clients.OpenStackClients(context)
         stack = osc.heat().stacks.get(bay.stack_id)
         allow_update_status = (
-            bay_status.CREATE_COMPLETE,
-            bay_status.UPDATE_COMPLETE,
-            bay_status.RESUME_COMPLETE,
-            bay_status.RESTORE_COMPLETE,
-            bay_status.ROLLBACK_COMPLETE,
-            bay_status.SNAPSHOT_COMPLETE,
-            bay_status.CHECK_COMPLETE,
-            bay_status.ADOPT_COMPLETE
+            fields.BayStatus.CREATE_COMPLETE,
+            fields.BayStatus.UPDATE_COMPLETE,
+            fields.BayStatus.RESUME_COMPLETE,
+            fields.BayStatus.RESTORE_COMPLETE,
+            fields.BayStatus.ROLLBACK_COMPLETE,
+            fields.BayStatus.SNAPSHOT_COMPLETE,
+            fields.BayStatus.CHECK_COMPLETE,
+            fields.BayStatus.ADOPT_COMPLETE
         )
         if stack.stack_status not in allow_update_status:
             conductor_utils.notify_about_bay_operation(
@@ -255,7 +256,7 @@ class Handler(object):
                 context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
             raise
 
-        bay.status = bay_status.DELETE_IN_PROGRESS
+        bay.status = fields.BayStatus.DELETE_IN_PROGRESS
         bay.save()
 
         self._poll_and_check(osc, bay)
@@ -277,9 +278,11 @@ class HeatPoller(object):
         self.attempts = 0
         self.cluster_template = conductor_utils.retrieve_cluster_template(
             self.context, bay)
-        self.template_def = TDef.get_template_definition(
+        self.template_def = \
+            template_def.TemplateDefinition.get_template_definition(
             self.cluster_template.server_type,
-            self.cluster_template.cluster_distro, self.cluster_template.coe)
+            self.cluster_template.cluster_distro,
+            self.cluster_template.coe)
 
     def poll_and_check(self):
         # TODO(yuanying): temporary implementation to update api_address,
@@ -287,26 +290,26 @@ class HeatPoller(object):
         stack = self.openstack_client.heat().stacks.get(self.bay.stack_id)
         self.attempts += 1
         status_to_event = {
-            bay_status.DELETE_COMPLETE: taxonomy.ACTION_DELETE,
-            bay_status.CREATE_COMPLETE: taxonomy.ACTION_CREATE,
-            bay_status.UPDATE_COMPLETE: taxonomy.ACTION_UPDATE,
-            bay_status.ROLLBACK_COMPLETE: taxonomy.ACTION_UPDATE,
-            bay_status.CREATE_FAILED: taxonomy.ACTION_CREATE,
-            bay_status.DELETE_FAILED: taxonomy.ACTION_DELETE,
-            bay_status.UPDATE_FAILED: taxonomy.ACTION_UPDATE,
-            bay_status.ROLLBACK_FAILED: taxonomy.ACTION_UPDATE
+            fields.BayStatus.DELETE_COMPLETE: taxonomy.ACTION_DELETE,
+            fields.BayStatus.CREATE_COMPLETE: taxonomy.ACTION_CREATE,
+            fields.BayStatus.UPDATE_COMPLETE: taxonomy.ACTION_UPDATE,
+            fields.BayStatus.ROLLBACK_COMPLETE: taxonomy.ACTION_UPDATE,
+            fields.BayStatus.CREATE_FAILED: taxonomy.ACTION_CREATE,
+            fields.BayStatus.DELETE_FAILED: taxonomy.ACTION_DELETE,
+            fields.BayStatus.UPDATE_FAILED: taxonomy.ACTION_UPDATE,
+            fields.BayStatus.ROLLBACK_FAILED: taxonomy.ACTION_UPDATE
         }
         # poll_and_check is detached and polling long time to check status,
         # so another user/client can call delete bay/stack.
-        if stack.stack_status == bay_status.DELETE_COMPLETE:
+        if stack.stack_status == fields.BayStatus.DELETE_COMPLETE:
             self._delete_complete()
             conductor_utils.notify_about_bay_operation(
                 self.context, status_to_event[stack.stack_status],
                 taxonomy.OUTCOME_SUCCESS)
             raise loopingcall.LoopingCallDone()
 
-        if stack.stack_status in (bay_status.CREATE_COMPLETE,
-                                  bay_status.UPDATE_COMPLETE):
+        if stack.stack_status in (fields.BayStatus.CREATE_COMPLETE,
+                                  fields.BayStatus.UPDATE_COMPLETE):
             self._sync_bay_and_template_status(stack)
             conductor_utils.notify_about_bay_operation(
                 self.context, status_to_event[stack.stack_status],
@@ -315,11 +318,11 @@ class HeatPoller(object):
         elif stack.stack_status != self.bay.status:
             self._sync_bay_status(stack)
 
-        if stack.stack_status in (bay_status.CREATE_FAILED,
-                                  bay_status.DELETE_FAILED,
-                                  bay_status.UPDATE_FAILED,
-                                  bay_status.ROLLBACK_COMPLETE,
-                                  bay_status.ROLLBACK_FAILED):
+        if stack.stack_status in (fields.BayStatus.CREATE_FAILED,
+                                  fields.BayStatus.DELETE_FAILED,
+                                  fields.BayStatus.UPDATE_FAILED,
+                                  fields.BayStatus.ROLLBACK_COMPLETE,
+                                  fields.BayStatus.ROLLBACK_FAILED):
             self._sync_bay_and_template_status(stack)
             self._bay_failed(stack)
             conductor_utils.notify_about_bay_operation(
|
||||
# only check max attempts when the stack is being created when
|
||||
# the timeout hasn't been set. If the timeout has been set then
|
||||
# the loop will end when the stack completes or the timeout occurs
|
||||
if stack.stack_status == bay_status.CREATE_IN_PROGRESS:
|
||||
if stack.stack_status == fields.BayStatus.CREATE_IN_PROGRESS:
|
||||
if (stack.timeout_mins is None and
|
||||
self.attempts > cfg.CONF.cluster_heat.max_attempts):
|
||||
LOG.error(_LE('Bay check exit after %(attempts)s attempts,'
|
||||
@@ -375,7 +378,7 @@ class HeatPoller(object):
         if stack_param:
             self.bay.coe_version = stack.parameters[stack_param]
 
-        tdef = TDef.get_template_definition(
+        tdef = template_def.TemplateDefinition.get_template_definition(
             self.cluster_template.server_type,
             self.cluster_template.cluster_distro, self.cluster_template.coe)
 
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from tempfile import NamedTemporaryFile
+import tempfile
 
 from k8sclient.client import api_client
 from k8sclient.client.apis import apiv_api
@@ -33,7 +33,7 @@ class K8sAPI(apiv_api.ApivApi):
         :returns: temp file
         """
         try:
-            tmp = NamedTemporaryFile(delete=True)
+            tmp = tempfile.NamedTemporaryFile(delete=True)
             tmp.write(content)
             tmp.flush()
         except Exception as err:
@@ -14,10 +14,10 @@ import ast
 
 from magnum.common import utils
 from magnum.conductor import k8s_api as k8s
-from magnum.conductor.monitors import MonitorBase
+from magnum.conductor import monitors
 
 
-class K8sMonitor(MonitorBase):
+class K8sMonitor(monitors.MonitorBase):
 
     def __init__(self, context, bay):
         super(K8sMonitor, self).__init__(context, bay)
@@ -13,10 +13,10 @@
 from oslo_serialization import jsonutils
 
 from magnum.common import urlfetch
-from magnum.conductor.monitors import MonitorBase
+from magnum.conductor import monitors
 
 
-class MesosMonitor(MonitorBase):
+class MesosMonitor(monitors.MonitorBase):
 
     def __init__(self, context, bay):
         super(MesosMonitor, self).__init__(context, bay)
@@ -20,7 +20,7 @@ from oslo_log import log
 from oslo_utils import importutils
 import six
 
-from magnum.objects.fields import BayType as bay_type
+from magnum.objects import fields
 
 
 LOG = log.getLogger(__name__)
@@ -34,9 +34,9 @@ CONF.import_opt('default_timeout',
                 group='docker')
 
 COE_CLASS_PATH = {
-    bay_type.SWARM: 'magnum.conductor.swarm_monitor.SwarmMonitor',
-    bay_type.KUBERNETES: 'magnum.conductor.k8s_monitor.K8sMonitor',
-    bay_type.MESOS: 'magnum.conductor.mesos_monitor.MesosMonitor'
+    fields.BayType.SWARM: 'magnum.conductor.swarm_monitor.SwarmMonitor',
+    fields.BayType.KUBERNETES: 'magnum.conductor.k8s_monitor.K8sMonitor',
+    fields.BayType.MESOS: 'magnum.conductor.mesos_monitor.MesosMonitor'
 }
 
 
@@ -16,13 +16,13 @@
 from oslo_log import log
 
 from magnum.common import docker_utils
-from magnum.conductor.monitors import MonitorBase
+from magnum.conductor import monitors
 from magnum.i18n import _LW
 
 LOG = log.getLogger(__name__)
 
 
-class SwarmMonitor(MonitorBase):
+class SwarmMonitor(monitors.MonitorBase):
 
     def __init__(self, context, bay):
         super(SwarmMonitor, self).__init__(context, bay)
@@ -30,7 +30,7 @@ from magnum.i18n import _
 from magnum.i18n import _LI
 from magnum.i18n import _LW
 from magnum import objects
-from magnum.objects.fields import BayStatus as bay_status
+from magnum.objects import fields
 
 
 CONF = cfg.CONF
@@ -73,10 +73,10 @@ class MagnumPeriodicTasks(periodic_task.PeriodicTasks):
         try:
             LOG.debug('Starting to sync up bay status')
             osc = clients.OpenStackClients(ctx)
-            status = [bay_status.CREATE_IN_PROGRESS,
-                      bay_status.UPDATE_IN_PROGRESS,
-                      bay_status.DELETE_IN_PROGRESS,
-                      bay_status.ROLLBACK_IN_PROGRESS]
+            status = [fields.BayStatus.CREATE_IN_PROGRESS,
+                      fields.BayStatus.UPDATE_IN_PROGRESS,
+                      fields.BayStatus.DELETE_IN_PROGRESS,
+                      fields.BayStatus.ROLLBACK_IN_PROGRESS]
             filters = {'status': status}
             bays = objects.Bay.list(ctx, filters=filters)
             if not bays:
@@ -155,12 +155,12 @@ class MagnumPeriodicTasks(periodic_task.PeriodicTasks):
                          'status': bay.status})
 
     def _sync_missing_heat_stack(self, bay):
-        if bay.status == bay_status.DELETE_IN_PROGRESS:
+        if bay.status == fields.BayStatus.DELETE_IN_PROGRESS:
             self._sync_deleted_stack(bay)
-        elif bay.status == bay_status.CREATE_IN_PROGRESS:
-            self._sync_missing_stack(bay, bay_status.CREATE_FAILED)
-        elif bay.status == bay_status.UPDATE_IN_PROGRESS:
-            self._sync_missing_stack(bay, bay_status.UPDATE_FAILED)
+        elif bay.status == fields.BayStatus.CREATE_IN_PROGRESS:
+            self._sync_missing_stack(bay, fields.BayStatus.CREATE_FAILED)
+        elif bay.status == fields.BayStatus.UPDATE_IN_PROGRESS:
+            self._sync_missing_stack(bay, fields.BayStatus.UPDATE_FAILED)
 
     def _sync_deleted_stack(self, bay):
         try:
@@ -189,8 +189,8 @@ class MagnumPeriodicTasks(periodic_task.PeriodicTasks):
     def _send_bay_metrics(self, ctx):
         LOG.debug('Starting to send bay metrics')
         for bay in objects.Bay.list(ctx):
-            if bay.status not in [bay_status.CREATE_COMPLETE,
-                                  bay_status.UPDATE_COMPLETE]:
+            if bay.status not in [fields.BayStatus.CREATE_COMPLETE,
+                                  fields.BayStatus.UPDATE_COMPLETE]:
                 continue
 
             monitor = monitors.create_monitor(ctx, bay)