Modified tox.ini to clone repos using zuul-cloner

Replaced "," with "%" in log statements in the following files:
- cinder/volume/drivers/aws/ebs.py
- cinder/volume/drivers/gce/gceutils.py
- glance/glance_store/_drivers/aws.py
- neutron/neutron/common/gceutils.py
- neutron/neutron/plugins/ml2/drivers/gce/mech_gce.py
- neutron/neutron/plugins/ml2/managers.py
- neutron/neutron/services/l3_router/gce_router_plugin.py
- nova/virt/ec2/ec2driver.py
- nova/virt/gce/driver.py
- nova/virt/gce/gceutils.py

Added import fix in nova/tests/unit/virt/ec2/test_ec2.py
Modified run_tests.sh to get total number of passed and failed tests

Closes-Bug: #1704147
Change-Id: I4b144b8095cf9ff4807c4b9f4ca21577a43de27f
This commit is contained in:
parent f2c95ee757
commit 84b9c632f5

Changed paths:
cinder/volume/drivers
clone_repos.sh
glance/glance_store/_drivers
neutron/neutron
nova
omni-requirements.txt
run_tests.sh
test-requirements.txt
tox.ini

cinder/volume/drivers/aws/ebs.py
@@ -134,7 +134,7 @@ class EBSDriver(BaseVD):
         try:
             ebs_vol = self._find(volume['id'], self._conn.get_all_volumes)
         except NotFound:
-            LOG.error('Volume %s was not found', volume['id'])
+            LOG.error('Volume %s was not found' % volume['id'])
             return
         self._conn.delete_volume(ebs_vol.id)

@@ -208,7 +208,7 @@ class EBSDriver(BaseVD):
         try:
             ebs_ss = self._find(snapshot['id'], self._conn.get_all_snapshots)
         except NotFound:
-            LOG.error('Snapshot %s was not found', snapshot['id'])
+            LOG.error('Snapshot %s was not found' % snapshot['id'])
             return
         self._conn.delete_snapshot(ebs_ss.id)

@@ -216,7 +216,7 @@ class EBSDriver(BaseVD):
         try:
             ebs_ss = self._find(snapshot['id'], self._conn.get_all_snapshots)
         except NotFound:
-            LOG.error('Snapshot %s was not found', snapshot['id'])
+            LOG.error('Snapshot %s was not found' % snapshot['id'])
             raise
         ebs_vol = ebs_ss.create_volume(self._zone)

cinder/volume/drivers/gce/gceutils.py
@@ -82,7 +82,7 @@ def wait_for_operation(compute, project, operation, interval=1, timeout=60):
     def watch_operation(name, request):
         result = request.execute()
         if result['status'] == 'DONE':
-            LOG.info("Operation %s status is %s", (name, result['status']))
+            LOG.info("Operation %s status is %s" % (name, result['status']))
             if 'error' in result:
                 raise GceOperationError(result['error'])
             raise loopingcall.LoopingCallDone()
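Note that in both copies of wait_for_operation the old call passed a single tuple to two %s placeholders, which the logging module cannot expand on its own; the % rewrite formats the tuple before the call, so it also fixes that latent bug. A small sketch (names and values are hypothetical):

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)

    name, status = 'operation-1', 'DONE'  # hypothetical values

    # Broken: a single tuple argument for two %s placeholders; logging's
    # formatter hits "not enough arguments for format string" and reports
    # it through its error handler instead of emitting the message.
    LOG.info("Operation %s status is %s", (name, status))

    # Either of these works:
    LOG.info("Operation %s status is %s", name, status)       # lazy form
    LOG.info("Operation %s status is %s" % (name, status))    # this patch's form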
clone_repos.sh
@@ -1,31 +0,0 @@
-#!/bin/bash
-# Copyright (c) 2017 Platform9 Systems Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either expressed or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-WORKSPACE=$(pwd)
-DIRECTORY="$WORKSPACE/openstack"
-
-mkdir $DIRECTORY
-
-clone_repos() {
-    project=$1
-    git clone -b stable/newton --depth 1 https://github.com/openstack/$project.git $DIRECTORY/$project
-}
-
-echo "============Cloning repos============"
-clone_repos cinder &
-clone_repos nova &
-clone_repos glance_store &
-clone_repos neutron &
-wait
glance/glance_store/_drivers/aws.py
@@ -161,7 +161,7 @@ class Store(glance_store.driver.Store):
         if image.root_device_type == 'ebs':
             for bdm in image.block_device_mappings:
                 if 'Ebs' in bdm and 'VolumeSize' in bdm['Ebs']:
-                    LOG.debug('ebs info: %s', bdm['Ebs'])
+                    LOG.debug('ebs info: %s' % bdm['Ebs'])
                     size += bdm['Ebs']['VolumeSize']
             # convert size in gb to bytes
             size *= 1073741824
neutron/neutron/common/gceutils.py
@@ -118,7 +118,7 @@ def wait_for_operation(compute, project, operation, interval=1, timeout=60):
     def watch_operation(name, request):
         result = request.execute()
         if result['status'] == 'DONE':
-            LOG.info("Operation %s status is %s", (name, result['status']))
+            LOG.info("Operation %s status is %s" % (name, result['status']))
             if 'error' in result:
                 raise GceOperationError(result['error'])
             raise loopingcall.LoopingCallDone()
@@ -329,7 +329,7 @@ def assign_floatingip(compute, project, zone, fixedip, floatingip):
         raise GceResourceNotFound(name='Instance with fixed IP',
                                   identifier=fixedip)

-    LOG.info('Assigning floating ip %s to instance %s',
+    LOG.info('Assigning floating ip %s to instance %s' %
              (floatingip, instance_name))

     operation = compute.instances().addAccessConfig(
@@ -356,7 +356,7 @@ def release_floatingip(compute, project, zone, floatingip):

         items = urllib.parse.urlparse(user).path.strip('/').split('/')
         if len(items) < 4 or items[-2] != 'instances':
-            LOG.warning('Unknown referrer %s to GCE static IP %s',
+            LOG.warning('Unknown referrer %s to GCE static IP %s' %
                         (user, floatingip))
             continue

@@ -365,7 +365,7 @@ def release_floatingip(compute, project, zone, floatingip):
         for interface in instance_info['networkInterfaces']:
             for accessconfig in interface.get('accessConfigs', []):
                 if accessconfig.get('natIP') == floatingip:
-                    LOG.info('Releasing %s from instance %s',
+                    LOG.info('Releasing %s from instance %s' %
                              (floatingip, instance))
                     operation = compute.instances().deleteAccessConfig(
                         project=project, zone=zone, instance=instance,
neutron/neutron/plugins/ml2/drivers/gce/mech_gce.py
@@ -52,7 +52,7 @@ class GceMechanismDriver(api.MechanismDriver):

     def initialize(self):
         self.gce_svc = gceutils.get_gce_service(self.gce_svc_key)
-        LOG.info("GCE Mechanism driver init with %s project, %s region",
+        LOG.info("GCE Mechanism driver init with %s project, %s region" %
                  (self.gce_project, self.gce_region))
         self._subscribe_events()

@@ -88,7 +88,7 @@ class GceMechanismDriver(api.MechanismDriver):
         name = self._gce_network_name(context)
         operation = gceutils.create_network(compute, project, name)
         gceutils.wait_for_operation(compute, project, operation)
-        LOG.info('Created network on GCE %s', name)
+        LOG.info('Created network on GCE %s' % name)

     def update_network_precommit(self, context):
         pass
@@ -104,7 +104,7 @@ class GceMechanismDriver(api.MechanismDriver):
         name = self._gce_network_name(context)
         operation = gceutils.delete_network(compute, project, name)
         gceutils.wait_for_operation(compute, project, operation)
-        LOG.info('Deleted network on GCE %s', name)
+        LOG.info('Deleted network on GCE %s' % name)

     def create_subnet_precommit(self, context):
         pass
@@ -120,7 +120,7 @@ class GceMechanismDriver(api.MechanismDriver):
         operation = gceutils.create_subnet(compute, project, region, name,
                                            cidr, network_link)
         gceutils.wait_for_operation(compute, project, operation)
-        LOG.info("Created subnet %s in region %s on GCE", (name, region))
+        LOG.info("Created subnet %s in region %s on GCE" % (name, region))

     def update_subnet_precommit(self, context):
         pass
@@ -138,7 +138,7 @@ class GceMechanismDriver(api.MechanismDriver):
         name = self._gce_subnet_name(context)
         operation = gceutils.delete_subnet(compute, project, region, name)
         gceutils.wait_for_operation(compute, project, operation)
-        LOG.info("Deleted subnet %s in region %s on GCE",(name, region))
+        LOG.info("Deleted subnet %s in region %s on GCE" % (name, region))

     def _gce_secgrp_id(self, openstack_id):
         return "secgrp-" + openstack_id
@@ -204,7 +204,7 @@ class GceMechanismDriver(api.MechanismDriver):
             LOG.exception(
                 "An error occured while creating security group: %s" % e)
             return
-        LOG.info("Create GCE firewall rule %s", gce_rule)
+        LOG.info("Create GCE firewall rule %s" % gce_rule)
         operation = gceutils.create_firewall_rule(compute, project, gce_rule)
         gceutils.wait_for_operation(compute, project, operation)

@@ -226,14 +226,14 @@ class GceMechanismDriver(api.MechanismDriver):
         network_link = gce_firewall_info['network']
         try:
             gce_rule = self._convert_secgrp_rule_to_gce(rule, network_link)
-            LOG.info("Update GCE firewall rule %s", name)
+            LOG.info("Update GCE firewall rule %s" % name)
             operation = gceutils.update_firewall_rule(compute, project, name,
                                                       gce_rule)
             gceutils.wait_for_operation(compute, project, operation)
         except Exception as e:
             LOG.exception("An error occurred while updating security "
-                          "group: %s", e)
-            LOG.error("Deleting existing GCE firewall rule %s", name)
+                          "group: %s" % e)
+            LOG.error("Deleting existing GCE firewall rule %s" % name)
             operation = gceutils.delete_firewall_rule(compute, project, name)
             gceutils.wait_for_operation(compute, project, operation)

@@ -242,7 +242,7 @@ class GceMechanismDriver(api.MechanismDriver):
         compute, project = self.gce_svc, self.gce_project
         try:
             LOG.warn("Delete existing GCE firewall rule %s,"
-                     "as firewall rule update not GCE compatible.", name)
+                     "as firewall rule update not GCE compatible." % name)
             operation = gceutils.delete_firewall_rule(compute, project, name)
             gceutils.wait_for_operation(compute, project, operation)
         except gceutils.HttpError:
neutron/neutron/plugins/ml2/managers.py
@@ -1,886 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-from oslo_utils import excutils
-import stevedore
-
-from neutron.api.v2 import attributes
-from neutron.common import exceptions as exc
-from neutron.extensions import external_net
-from neutron.extensions import multiprovidernet as mpnet
-from neutron.extensions import portbindings
-from neutron.extensions import providernet as provider
-from neutron.extensions import vlantransparent
-from neutron.plugins.ml2.common import exceptions as ml2_exc
-from neutron.plugins.ml2 import db
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2 import models
-from neutron.services.qos import qos_consts
-from neutron.common import exceptions
-
-LOG = log.getLogger(__name__)
-
-MAX_BINDING_LEVELS = 10
-
-
-class TypeManager(stevedore.named.NamedExtensionManager):
-    """Manage network segment types using drivers."""
-
-    def __init__(self):
-        # Mapping from type name to DriverManager
-        self.drivers = {}
-
-        LOG.info("Configured type driver names: %s",
-                 cfg.CONF.ml2.type_drivers)
-        super(TypeManager, self).__init__('neutron.ml2.type_drivers',
-                                          cfg.CONF.ml2.type_drivers,
-                                          invoke_on_load=True)
-        LOG.info("Loaded type driver names: %s", self.names())
-        self._register_types()
-        self._check_tenant_network_types(cfg.CONF.ml2.tenant_network_types)
-        self._check_external_network_type(cfg.CONF.ml2.external_network_type)
-
-    def _register_types(self):
-        for ext in self:
-            network_type = ext.obj.get_type()
-            if network_type in self.drivers:
-                LOG.error("Type driver '%(new_driver)s' ignored because"
-                          " type driver '%(old_driver)s' is already"
-                          " registered for type '%(type)s'",
-                          {'new_driver': ext.name,
-                           'old_driver': self.drivers[network_type].name,
-                           'type': network_type})
-            else:
-                self.drivers[network_type] = ext
-        LOG.info("Registered types: %s", self.drivers.keys())
-
-    def _check_tenant_network_types(self, types):
-        self.tenant_network_types = []
-        for network_type in types:
-            if network_type in self.drivers:
-                self.tenant_network_types.append(network_type)
-            else:
-                LOG.error("No type driver for tenant network_type: %s. "
-                          "Service terminated!", network_type)
-                raise SystemExit(1)
-        LOG.info("Tenant network_types: %s", self.tenant_network_types)
-
-    def _check_external_network_type(self, ext_network_type):
-        if ext_network_type and ext_network_type not in self.drivers:
-            LOG.error("No type driver for external network_type: %s. "
-                      "Service terminated!", ext_network_type)
-            raise SystemExit(1)
-
-    def _process_provider_segment(self, segment):
-        (network_type, physical_network,
-         segmentation_id) = (self._get_attribute(segment, attr)
-                             for attr in provider.ATTRIBUTES)
-
-        if attributes.is_attr_set(network_type):
-            segment = {api.NETWORK_TYPE: network_type,
-                       api.PHYSICAL_NETWORK: physical_network,
-                       api.SEGMENTATION_ID: segmentation_id}
-            self.validate_provider_segment(segment)
-            return segment
-
-        msg = _("network_type required")
-        raise exc.InvalidInput(error_message=msg)
-
-    def _process_provider_create(self, network):
-        if any(attributes.is_attr_set(network.get(attr))
-               for attr in provider.ATTRIBUTES):
-            # Verify that multiprovider and provider attributes are not set
-            # at the same time.
-            if attributes.is_attr_set(network.get(mpnet.SEGMENTS)):
-                raise mpnet.SegmentsSetInConjunctionWithProviders()
-            segment = self._get_provider_segment(network)
-            return [self._process_provider_segment(segment)]
-        elif attributes.is_attr_set(network.get(mpnet.SEGMENTS)):
-            segments = [self._process_provider_segment(s)
-                        for s in network[mpnet.SEGMENTS]]
-            mpnet.check_duplicate_segments(segments, self.is_partial_segment)
-            return segments
-
-    def _match_segment(self, segment, filters):
-        return all(not filters.get(attr) or segment.get(attr) in filters[attr]
-                   for attr in provider.ATTRIBUTES)
-
-    def _get_provider_segment(self, network):
-        # TODO(manishg): Placeholder method
-        # Code intended for operating on a provider segment should use
-        # this method to extract the segment, even though currently the
-        # segment attributes are part of the network dictionary. In the
-        # future, network and segment information will be decoupled and
-        # here we will do the job of extracting the segment information.
-        return network
-
-    def network_matches_filters(self, network, filters):
-        if not filters:
-            return True
-        if any(attributes.is_attr_set(network.get(attr))
-               for attr in provider.ATTRIBUTES):
-            segments = [self._get_provider_segment(network)]
-        elif attributes.is_attr_set(network.get(mpnet.SEGMENTS)):
-            segments = self._get_attribute(network, mpnet.SEGMENTS)
-        else:
-            return True
-        return any(self._match_segment(s, filters) for s in segments)
-
-    def _get_attribute(self, attrs, key):
-        value = attrs.get(key)
-        if value is attributes.ATTR_NOT_SPECIFIED:
-            value = None
-        return value
-
-    def extend_network_dict_provider(self, context, network):
-        # this method is left for backward compat even though it would be
-        # easy to change the callers in tree to use the bulk function
-        return self.extend_networks_dict_provider(context, [network])
-
-    def extend_networks_dict_provider(self, context, networks):
-        ids = [network['id'] for network in networks]
-        net_segments = db.get_networks_segments(context.session, ids)
-        for network in networks:
-            segments = net_segments[network['id']]
-            self._extend_network_dict_provider(network, segments)
-
-    def _extend_network_dict_provider(self, network, segments):
-        if not segments:
-            LOG.debug("Network %s has no segments", network['id'])
-            for attr in provider.ATTRIBUTES:
-                network[attr] = None
-        elif len(segments) > 1:
-            network[mpnet.SEGMENTS] = [
-                {provider.NETWORK_TYPE: segment[api.NETWORK_TYPE],
-                 provider.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK],
-                 provider.SEGMENTATION_ID: segment[api.SEGMENTATION_ID]}
-                for segment in segments]
-        else:
-            segment = segments[0]
-            network[provider.NETWORK_TYPE] = segment[api.NETWORK_TYPE]
-            network[provider.PHYSICAL_NETWORK] = segment[api.PHYSICAL_NETWORK]
-            network[provider.SEGMENTATION_ID] = segment[api.SEGMENTATION_ID]
-
-    def initialize(self):
-        for network_type, driver in self.drivers.items():
-            LOG.info("Initializing driver for type '%s'", network_type)
-            driver.obj.initialize()
-
-    def _add_network_segment(self, session, network_id, segment, mtu,
-                             segment_index=0):
-        db.add_network_segment(session, network_id, segment, segment_index)
-        if segment.get(api.MTU, 0) > 0:
-            mtu.append(segment[api.MTU])
-
-    def create_network_segments(self, context, network, tenant_id):
-        """Call type drivers to create network segments."""
-        segments = self._process_provider_create(network)
-        session = context.session
-        mtu = []
-        with session.begin(subtransactions=True):
-            network_id = network['id']
-            if segments:
-                for segment_index, segment in enumerate(segments):
-                    segment = self.reserve_provider_segment(
-                        session, segment)
-                    self._add_network_segment(session, network_id, segment,
-                                              mtu, segment_index)
-            elif (cfg.CONF.ml2.external_network_type and
-                  self._get_attribute(network, external_net.EXTERNAL)):
-                segment = self._allocate_ext_net_segment(session)
-                self._add_network_segment(session, network_id, segment, mtu)
-            else:
-                segment = self._allocate_tenant_net_segment(session)
-                self._add_network_segment(session, network_id, segment, mtu)
-            network[api.MTU] = min(mtu) if mtu else 0
-
-    def is_partial_segment(self, segment):
-        network_type = segment[api.NETWORK_TYPE]
-        driver = self.drivers.get(network_type)
-        if driver:
-            return driver.obj.is_partial_segment(segment)
-        else:
-            msg = _("network_type value '%s' not supported") % network_type
-            raise exc.InvalidInput(error_message=msg)
-
-    def validate_provider_segment(self, segment):
-        network_type = segment[api.NETWORK_TYPE]
-        driver = self.drivers.get(network_type)
-        if driver:
-            driver.obj.validate_provider_segment(segment)
-        else:
-            msg = _("network_type value '%s' not supported") % network_type
-            raise exc.InvalidInput(error_message=msg)
-
-    def reserve_provider_segment(self, session, segment):
-        network_type = segment.get(api.NETWORK_TYPE)
-        driver = self.drivers.get(network_type)
-        return driver.obj.reserve_provider_segment(session, segment)
-
-    def _allocate_segment(self, session, network_type):
-        driver = self.drivers.get(network_type)
-        return driver.obj.allocate_tenant_segment(session)
-
-    def _allocate_tenant_net_segment(self, session):
-        for network_type in self.tenant_network_types:
-            segment = self._allocate_segment(session, network_type)
-            if segment:
-                return segment
-        raise exc.NoNetworkAvailable()
-
-    def _allocate_ext_net_segment(self, session):
-        network_type = cfg.CONF.ml2.external_network_type
-        segment = self._allocate_segment(session, network_type)
-        if segment:
-            return segment
-        raise exc.NoNetworkAvailable()
-
-    def release_network_segments(self, session, network_id):
-        segments = db.get_network_segments(session, network_id,
-                                           filter_dynamic=None)
-
-        for segment in segments:
-            network_type = segment.get(api.NETWORK_TYPE)
-            driver = self.drivers.get(network_type)
-            if driver:
-                driver.obj.release_segment(session, segment)
-            else:
-                LOG.error("Failed to release segment '%s' because "
-                          "network type is not supported.", segment)
-
-    def allocate_dynamic_segment(self, session, network_id, segment):
-        """Allocate a dynamic segment using a partial or full segment dict."""
-        dynamic_segment = db.get_dynamic_segment(
-            session, network_id, segment.get(api.PHYSICAL_NETWORK),
-            segment.get(api.SEGMENTATION_ID))
-
-        if dynamic_segment:
-            return dynamic_segment
-
-        driver = self.drivers.get(segment.get(api.NETWORK_TYPE))
-        dynamic_segment = driver.obj.reserve_provider_segment(session, segment)
-        db.add_network_segment(session, network_id, dynamic_segment,
-                               is_dynamic=True)
-        return dynamic_segment
-
-    def release_dynamic_segment(self, session, segment_id):
-        """Delete a dynamic segment."""
-        segment = db.get_segment_by_id(session, segment_id)
-        if segment:
-            driver = self.drivers.get(segment.get(api.NETWORK_TYPE))
-            if driver:
-                driver.obj.release_segment(session, segment)
-                db.delete_network_segment(session, segment_id)
-            else:
-                LOG.error("Failed to release segment '%s' because "
-                          "network type is not supported.", segment)
-        else:
-            LOG.debug("No segment found with id %(segment_id)s", segment_id)
-
-
-class MechanismManager(stevedore.named.NamedExtensionManager):
-    """Manage networking mechanisms using drivers."""
-
-    def __init__(self):
-        # Registered mechanism drivers, keyed by name.
-        self.mech_drivers = {}
-        # Ordered list of mechanism drivers, defining
-        # the order in which the drivers are called.
-        self.ordered_mech_drivers = []
-
-        LOG.info("Configured mechanism driver names: %s",
-                 cfg.CONF.ml2.mechanism_drivers)
-        super(MechanismManager, self).__init__('neutron.ml2.mechanism_drivers',
-                                               cfg.CONF.ml2.mechanism_drivers,
-                                               invoke_on_load=True,
-                                               name_order=True)
-        LOG.info("Loaded mechanism driver names: %s", self.names())
-        self._register_mechanisms()
-
-    def _register_mechanisms(self):
-        """Register all mechanism drivers.
-
-        This method should only be called once in the MechanismManager
-        constructor.
-        """
-        for ext in self:
-            self.mech_drivers[ext.name] = ext
-            self.ordered_mech_drivers.append(ext)
-        LOG.info("Registered mechanism drivers: %s",
-                 [driver.name for driver in self.ordered_mech_drivers])
-
-    @property
-    def supported_qos_rule_types(self):
-        if not self.ordered_mech_drivers:
-            return []
-
-        rule_types = set(qos_consts.VALID_RULE_TYPES)
-        binding_driver_found = False
-
-        # Recalculate on every call to allow drivers determine supported rule
-        # types dynamically
-        for driver in self.ordered_mech_drivers:
-            driver_obj = driver.obj
-            if driver_obj._supports_port_binding:
-                binding_driver_found = True
-                if hasattr(driver_obj, 'supported_qos_rule_types'):
-                    new_rule_types = \
-                        rule_types & set(driver_obj.supported_qos_rule_types)
-                    dropped_rule_types = new_rule_types - rule_types
-                    if dropped_rule_types:
-                        LOG.info("%(rule_types)s rule types disabled for ml2 "
-                                 "because %(driver)s does not support them",
-                                 {'rule_types': ', '.join(dropped_rule_types),
-                                  'driver': driver.name})
-                    rule_types = new_rule_types
-                else:
-                    # at least one of drivers does not support QoS, meaning
-                    # there are no rule types supported by all of them
-                    LOG.warn("%s does not support QoS; "
-                             "no rule types available",
-                             driver.name)
-                    return []
-
-        if binding_driver_found:
-            rule_types = list(rule_types)
-        else:
-            rule_types = []
-        LOG.debug("Supported QoS rule types "
-                  "(common subset for all mech drivers): %s", rule_types)
-        return rule_types
-
-    def initialize(self):
-        for driver in self.ordered_mech_drivers:
-            LOG.info("Initializing mechanism driver '%s'", driver.name)
-            driver.obj.initialize()
-
-    def _check_vlan_transparency(self, context):
-        """Helper method for checking vlan transparecncy support.
-
-        :param context: context parameter to pass to each method call
-        :raises: neutron.extensions.vlantransparent.
-            VlanTransparencyDriverError if any mechanism driver doesn't
-            support vlan transparency.
-        """
-        if context.current['vlan_transparent'] is None:
-            return
-
-        if context.current['vlan_transparent']:
-            for driver in self.ordered_mech_drivers:
-                if not driver.obj.check_vlan_transparency(context):
-                    raise vlantransparent.VlanTransparencyDriverError()
-
-    def _call_on_drivers(self, method_name, context,
-                         continue_on_failure=False):
-        """Helper method for calling a method across all mechanism drivers.
-
-        :param method_name: name of the method to call
-        :param context: context parameter to pass to each method call
-        :param continue_on_failure: whether or not to continue to call
-            all mechanism drivers once one has raised an exception
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver call fails.
-        """
-        error = False
-        for driver in self.ordered_mech_drivers:
-            try:
-                getattr(driver.obj, method_name)(context)
-            except exceptions.AwsException as aws_exception:
-                LOG.exception("Mechanism driver '%(name)s' failed in"
-                              " %(method)s",
-                              {'name': driver.name, 'method': method_name})
-                if not continue_on_failure:
-                    raise aws_exception
-            except Exception:
-                LOG.exception("Mechanism driver '%(name)s' failed in "
-                              "%(method)s",
-                              {'name': driver.name, 'method': method_name})
-                error = True
-                if not continue_on_failure:
-                    break
-        if error:
-            raise ml2_exc.MechanismDriverError(
-                method=method_name
-            )
-
-    def create_network_precommit(self, context):
-        """Notify all mechanism drivers during network creation.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver create_network_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propogated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._check_vlan_transparency(context)
-        self._call_on_drivers("create_network_precommit", context)
-
-    def create_network_postcommit(self, context):
-        """Notify all mechanism drivers after network creation.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver create_network_postcommit call fails.
-
-        Called after the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propagated
-        to the caller, where the network will be deleted, triggering
-        any required cleanup. There is no guarantee that all mechanism
-        drivers are called in this case.
-        """
-        self._call_on_drivers("create_network_postcommit", context)
-
-    def update_network_precommit(self, context):
-        """Notify all mechanism drivers during network update.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver update_network_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propogated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("update_network_precommit", context)
-
-    def update_network_postcommit(self, context):
-        """Notify all mechanism drivers after network update.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver update_network_postcommit call fails.
-
-        Called after the database transaction. If any mechanism driver
-        raises an error, then the error is logged but we continue to
-        call every other mechanism driver. A MechanismDriverError is
-        then reraised at the end to notify the caller of a failure.
-        """
-        self._call_on_drivers("update_network_postcommit", context,
-                              continue_on_failure=True)
-
-    def delete_network_precommit(self, context):
-        """Notify all mechanism drivers during network deletion.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver delete_network_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propogated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("delete_network_precommit", context)
-
-    def delete_network_postcommit(self, context):
-        """Notify all mechanism drivers after network deletion.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver delete_network_postcommit call fails.
-
-        Called after the database transaction. If any mechanism driver
-        raises an error, then the error is logged but we continue to
-        call every other mechanism driver. A MechanismDriverError is
-        then reraised at the end to notify the caller of a failure. In
-        general we expect the caller to ignore the error, as the
-        network resource has already been deleted from the database
-        and it doesn't make sense to undo the action by recreating the
-        network.
-        """
-        self._call_on_drivers("delete_network_postcommit", context,
-                              continue_on_failure=True)
-
-    def create_subnet_precommit(self, context):
-        """Notify all mechanism drivers during subnet creation.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver create_subnet_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propogated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("create_subnet_precommit", context)
-
-    def create_subnet_postcommit(self, context):
-        """Notify all mechanism drivers after subnet creation.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver create_subnet_postcommit call fails.
-
-        Called after the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propagated
-        to the caller, where the subnet will be deleted, triggering
-        any required cleanup. There is no guarantee that all mechanism
-        drivers are called in this case.
-        """
-        self._call_on_drivers("create_subnet_postcommit", context)
-
-    def update_subnet_precommit(self, context):
-        """Notify all mechanism drivers during subnet update.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver update_subnet_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propogated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("update_subnet_precommit", context)
-
-    def update_subnet_postcommit(self, context):
-        """Notify all mechanism drivers after subnet update.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver update_subnet_postcommit call fails.
-
-        Called after the database transaction. If any mechanism driver
-        raises an error, then the error is logged but we continue to
-        call every other mechanism driver. A MechanismDriverError is
-        then reraised at the end to notify the caller of a failure.
-        """
-        self._call_on_drivers("update_subnet_postcommit", context,
-                              continue_on_failure=True)
-
-    def delete_subnet_precommit(self, context):
-        """Notify all mechanism drivers during subnet deletion.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver delete_subnet_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propogated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("delete_subnet_precommit", context)
-
-    def delete_subnet_postcommit(self, context):
-        """Notify all mechanism drivers after subnet deletion.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver delete_subnet_postcommit call fails.
-
-        Called after the database transaction. If any mechanism driver
-        raises an error, then the error is logged but we continue to
-        call every other mechanism driver. A MechanismDriverError is
-        then reraised at the end to notify the caller of a failure. In
-        general we expect the caller to ignore the error, as the
-        subnet resource has already been deleted from the database
-        and it doesn't make sense to undo the action by recreating the
-        subnet.
-        """
-        self._call_on_drivers("delete_subnet_postcommit", context,
-                              continue_on_failure=True)
-
-    def create_port_precommit(self, context):
-        """Notify all mechanism drivers during port creation.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver create_port_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propogated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("create_port_precommit", context)
-
-    def create_port_postcommit(self, context):
-        """Notify all mechanism drivers of port creation.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver create_port_postcommit call fails.
-
-        Called after the database transaction. Errors raised by
-        mechanism drivers are left to propagate to the caller, where
-        the port will be deleted, triggering any required
-        cleanup. There is no guarantee that all mechanism drivers are
-        called in this case.
-        """
-        self._call_on_drivers("create_port_postcommit", context)
-
-    def update_port_precommit(self, context):
-        """Notify all mechanism drivers during port update.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver update_port_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propogated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("update_port_precommit", context)
-
-    def update_port_postcommit(self, context):
-        """Notify all mechanism drivers after port update.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver update_port_postcommit call fails.
-
-        Called after the database transaction. If any mechanism driver
-        raises an error, then the error is logged but we continue to
-        call every other mechanism driver. A MechanismDriverError is
-        then reraised at the end to notify the caller of a failure.
-        """
-        self._call_on_drivers("update_port_postcommit", context,
-                              continue_on_failure=True)
-
-    def delete_port_precommit(self, context):
-        """Notify all mechanism drivers during port deletion.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver delete_port_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propogated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("delete_port_precommit", context)
-
-    def delete_port_postcommit(self, context):
-        """Notify all mechanism drivers after port deletion.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-            if any mechanism driver delete_port_postcommit call fails.
-
-        Called after the database transaction. If any mechanism driver
-        raises an error, then the error is logged but we continue to
-        call every other mechanism driver. A MechanismDriverError is
-        then reraised at the end to notify the caller of a failure. In
-        general we expect the caller to ignore the error, as the
-        port resource has already been deleted from the database
-        and it doesn't make sense to undo the action by recreating the
-        port.
-        """
-        self._call_on_drivers("delete_port_postcommit", context,
-                              continue_on_failure=True)
-
-    def bind_port(self, context):
-        """Attempt to bind a port using registered mechanism drivers.
-
-        :param context: PortContext instance describing the port
-
-        Called outside any transaction to attempt to establish a port
-        binding.
-        """
-        binding = context._binding
-        LOG.debug("Attempting to bind port %(port)s on host %(host)s "
-                  "for vnic_type %(vnic_type)s with profile %(profile)s",
-                  {'port': context.current['id'],
-                   'host': context.host,
-                   'vnic_type': binding.vnic_type,
-                   'profile': binding.profile})
-        context._clear_binding_levels()
-        if not self._bind_port_level(context, 0,
-                                     context.network.network_segments):
-            binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED
-            LOG.error("Failed to bind port %(port)s on host %(host)s",
-                      {'port': context.current['id'],
-                       'host': context.host})
-
-    def _bind_port_level(self, context, level, segments_to_bind):
-        binding = context._binding
-        port_id = context.current['id']
-        LOG.debug("Attempting to bind port %(port)s on host %(host)s "
-                  "at level %(level)s using segments %(segments)s",
-                  {'port': port_id,
-                   'host': context.host,
-                   'level': level,
-                   'segments': segments_to_bind})
-
-        if level == MAX_BINDING_LEVELS:
-            LOG.error("Exceeded maximum binding levels attempting to bind "
-                      "port %(port)s on host %(host)s",
-                      {'port': context.current['id'],
-                       'host': context.host})
-            return False
-
-        for driver in self.ordered_mech_drivers:
-            if not self._check_driver_to_bind(driver, segments_to_bind,
-                                              context._binding_levels):
-                continue
-            try:
-                context._prepare_to_bind(segments_to_bind)
-                driver.obj.bind_port(context)
-                segment = context._new_bound_segment
-                if segment:
-                    context._push_binding_level(
-                        models.PortBindingLevel(port_id=port_id,
-                                                host=context.host,
-                                                level=level,
-                                                driver=driver.name,
-                                                segment_id=segment))
-                    next_segments = context._next_segments_to_bind
-                    if next_segments:
-                        # Continue binding another level.
-                        if self._bind_port_level(context, level + 1,
-                                                 next_segments):
-                            return True
-                        else:
-                            context._pop_binding_level()
-                    else:
-                        # Binding complete.
-                        LOG.debug("Bound port: %(port)s, "
-                                  "host: %(host)s, "
-                                  "vif_type: %(vif_type)s, "
-                                  "vif_details: %(vif_details)s, "
-                                  "binding_levels: %(binding_levels)s",
-                                  {'port': port_id,
-                                   'host': context.host,
-                                   'vif_type': binding.vif_type,
-                                   'vif_details': binding.vif_details,
-                                   'binding_levels': context.binding_levels})
-                        return True
-            except Exception:
-                LOG.exception("Mechanism driver %s failed in bind_port",
-                              driver.name)
-        LOG.error("Failed to bind port %(port)s on host %(host)s",
-                  {'port': context.current['id'],
-                   'host': binding.host})
-
-    def _check_driver_to_bind(self, driver, segments_to_bind, binding_levels):
-        # To prevent a possible binding loop, don't try to bind with
-        # this driver if the same driver has already bound at a higher
-        # level to one of the segments we are currently trying to
-        # bind. Note that is is OK for the same driver to bind at
-        # multiple levels using different segments.
-        for level in binding_levels:
-            if (level.driver == driver and
-                level.segment_id in segments_to_bind):
-                return False
-        return True
-
-    def get_workers(self):
-        workers = []
-        for driver in self.ordered_mech_drivers:
-            workers += driver.obj.get_workers()
-        return workers
-
-
-class ExtensionManager(stevedore.named.NamedExtensionManager):
-    """Manage extension drivers using drivers."""
-
-    def __init__(self):
-        # Ordered list of extension drivers, defining
-        # the order in which the drivers are called.
-        self.ordered_ext_drivers = []
-
-        LOG.info("Configured extension driver names: %s",
-                 cfg.CONF.ml2.extension_drivers)
-        super(ExtensionManager, self).__init__('neutron.ml2.extension_drivers',
-                                               cfg.CONF.ml2.extension_drivers,
-                                               invoke_on_load=True,
-                                               name_order=True)
-        LOG.info("Loaded extension driver names: %s", self.names())
-        self._register_drivers()
-
-    def _register_drivers(self):
-        """Register all extension drivers.
-
-        This method should only be called once in the ExtensionManager
-        constructor.
-        """
-        for ext in self:
-            self.ordered_ext_drivers.append(ext)
-        LOG.info("Registered extension drivers: %s",
-                 [driver.name for driver in self.ordered_ext_drivers])
-
-    def initialize(self):
-        # Initialize each driver in the list.
-        for driver in self.ordered_ext_drivers:
-            LOG.info("Initializing extension driver '%s'", driver.name)
-            driver.obj.initialize()
-
-    def extension_aliases(self):
-        exts = []
-        for driver in self.ordered_ext_drivers:
-            alias = driver.obj.extension_alias
-            if alias:
-                exts.append(alias)
-                LOG.info("Got %(alias)s extension from driver '%(drv)s'",
-                         {'alias': alias, 'drv': driver.name})
-        return exts
-
-    def _call_on_ext_drivers(self, method_name, plugin_context, data, result):
-        """Helper method for calling a method across all extension drivers."""
-        for driver in self.ordered_ext_drivers:
-            try:
-                getattr(driver.obj, method_name)(plugin_context, data, result)
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    LOG.info("Extension driver '%(name)s' failed in "
-                             "%(method)s",
-                             {'name': driver.name, 'method': method_name})
-
-    def process_create_network(self, plugin_context, data, result):
-        """Notify all extension drivers during network creation."""
-        self._call_on_ext_drivers("process_create_network", plugin_context,
-                                  data, result)
-
-    def process_update_network(self, plugin_context, data, result):
-        """Notify all extension drivers during network update."""
-        self._call_on_ext_drivers("process_update_network", plugin_context,
-                                  data, result)
-
-    def process_create_subnet(self, plugin_context, data, result):
-        """Notify all extension drivers during subnet creation."""
-        self._call_on_ext_drivers("process_create_subnet", plugin_context,
-                                  data, result)
-
-    def process_update_subnet(self, plugin_context, data, result):
-        """Notify all extension drivers during subnet update."""
-        self._call_on_ext_drivers("process_update_subnet", plugin_context,
-                                  data, result)
-
-    def process_create_port(self, plugin_context, data, result):
-        """Notify all extension drivers during port creation."""
-        self._call_on_ext_drivers("process_create_port", plugin_context,
-                                  data, result)
-
-    def process_update_port(self, plugin_context, data, result):
-        """Notify all extension drivers during port update."""
-        self._call_on_ext_drivers("process_update_port", plugin_context,
-                                  data, result)
-
-    def _call_on_dict_driver(self, method_name, session, base_model, result):
-        for driver in self.ordered_ext_drivers:
-            try:
-                getattr(driver.obj, method_name)(session, base_model, result)
-            except Exception:
-                LOG.error("Extension driver '%(name)s' failed in "
-                          "%(method)s",
-                          {'name': driver.name, 'method': method_name})
-                raise ml2_exc.ExtensionDriverError(driver=driver.name)
-
-    def extend_network_dict(self, session, base_model, result):
-        """Notify all extension drivers to extend network dictionary."""
-        self._call_on_dict_driver("extend_network_dict", session, base_model,
-                                  result)
-
-    def extend_subnet_dict(self, session, base_model, result):
-        """Notify all extension drivers to extend subnet dictionary."""
-        self._call_on_dict_driver("extend_subnet_dict", session, base_model,
-                                  result)
-
-    def extend_port_dict(self, session, base_model, result):
-        """Notify all extension drivers to extend port dictionary."""
-        self._call_on_dict_driver("extend_port_dict", session, base_model,
-                                  result)
neutron/neutron/services/l3_router/gce_router_plugin.py
@@ -61,7 +61,7 @@ class GceRouterPlugin(
         self.gce_project = gceconf.project_id
         self.gce_svc_key = gceconf.service_key_path
         self.gce_svc = gceutils.get_gce_service(self.gce_svc_key)
-        LOG.info("GCE Router plugin init with %s project, %s region",
+        LOG.info("GCE Router plugin init with %s project, %s region" %
                  (self.gce_project, self.gce_region))

     def get_plugin_type(self):
@@ -76,7 +76,7 @@ class GceRouterPlugin(
     def _cleanup_floatingip(self, compute, project, region, floatingip):
         gceutils.release_floatingip(compute, project, region, floatingip)
         gceutils.delete_floatingip(compute, project, region, floatingip)
-        LOG.info('Released GCE static IP %s', floatingip)
+        LOG.info('Released GCE static IP %s' % floatingip)

     def create_floatingip(self, context, floatingip):
         compute, project, region = self.gce_svc, self.gce_project, self.gce_region
@@ -85,7 +85,7 @@ class GceRouterPlugin(
         try:
             public_ip_allocated = gceutils.allocate_floatingip(
                 compute, project, region)
-            LOG.info("Created GCE static IP %s", public_ip_allocated)
+            LOG.info("Created GCE static IP %s" % public_ip_allocated)

             floatingip_dict = floatingip['floatingip']

@ -125,7 +125,7 @@ class GceRouterPlugin(
|
|||||||
fixed_ip_address = fixed_ip['ip_address']
|
fixed_ip_address = fixed_ip['ip_address']
|
||||||
|
|
||||||
if fixed_ip_address:
|
if fixed_ip_address:
|
||||||
LOG.info('Found fixed ip %s for port %s',
|
LOG.info('Found fixed ip %s for port %s' %
|
||||||
(fixed_ip_address, port_id))
|
(fixed_ip_address, port_id))
|
||||||
gceutils.assign_floatingip(compute, project, zone,
|
gceutils.assign_floatingip(compute, project, zone,
|
||||||
fixed_ip_address, floating_ip_address)
|
fixed_ip_address, floating_ip_address)
|
||||||
@ -157,25 +157,25 @@ class GceRouterPlugin(
|
|||||||
return super(GceRouterPlugin, self).delete_floatingip(context, id)
|
return super(GceRouterPlugin, self).delete_floatingip(context, id)
|
||||||
|
|
||||||
def create_router(self, context, router):
|
def create_router(self, context, router):
|
||||||
LOG.info("Creating router %s", router['router']['name'])
|
LOG.info("Creating router %s" % router['router']['name'])
|
||||||
return super(GceRouterPlugin, self).create_router(context, router)
|
return super(GceRouterPlugin, self).create_router(context, router)
|
||||||
|
|
||||||
def delete_router(self, context, id):
|
def delete_router(self, context, id):
|
||||||
LOG.info("Deleting router %s", id)
|
LOG.info("Deleting router %s" % id)
|
||||||
return super(GceRouterPlugin, self).delete_router(context, id)
|
return super(GceRouterPlugin, self).delete_router(context, id)
|
||||||
|
|
||||||
def update_router(self, context, id, router):
|
def update_router(self, context, id, router):
|
||||||
LOG.info("Updating router %s", id)
|
LOG.info("Updating router %s" % id)
|
||||||
return super(GceRouterPlugin, self).update_router(context, id, router)
|
return super(GceRouterPlugin, self).update_router(context, id, router)
|
||||||
|
|
||||||
def add_router_interface(self, context, router_id, interface_info):
|
def add_router_interface(self, context, router_id, interface_info):
|
||||||
LOG.info("Adding interface %s to router %s",
|
LOG.info("Adding interface %s to router %s" %
|
||||||
(interface_info, router_id))
|
(interface_info, router_id))
|
||||||
return super(GceRouterPlugin, self).add_router_interface(
|
return super(GceRouterPlugin, self).add_router_interface(
|
||||||
context, router_id, interface_info)
|
context, router_id, interface_info)
|
||||||
|
|
||||||
def remove_router_interface(self, context, router_id, interface_info):
|
def remove_router_interface(self, context, router_id, interface_info):
|
||||||
LOG.info("Deleting interface %s from router %s",
|
LOG.info("Deleting interface %s from router %s" %
|
||||||
(interface_info, router_id))
|
(interface_info, router_id))
|
||||||
return super(GceRouterPlugin, self).remove_router_interface(
|
return super(GceRouterPlugin, self).remove_router_interface(
|
||||||
context, router_id, interface_info)
|
context, router_id, interface_info)
|
||||||
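The pattern above recurs throughout this commit, so a brief illustration may help. Deferred comma-style interpolation expects one argument per placeholder; the old code passed a single tuple to fill two placeholders, which produces a "not enough arguments for format string" error when the log record is rendered. A minimal sketch using the stdlib logging module as a stand-in for oslo.log (the values are illustrative):

import logging

logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)

name, region = 'demo-project', 'us-central1'

# Deferred interpolation wants one argument per %s:
LOG.info("init with %s project, %s region", name, region)      # works

# The old code handed one tuple to two placeholders; logging then
# evaluates "..." % ((name, region),) and reports a formatting error:
LOG.info("init with %s project, %s region", (name, region))    # broken

# The commit pre-formats with %, trading lazy interpolation for correctness:
LOG.info("init with %s project, %s region" % (name, region))   # works

An alternative fix that keeps lazy interpolation would be to unpack the tuple into separate arguments; this commit opts for eager %-formatting instead.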
nova/tests/unit/virt/ec2/test_ec2.py

@@ -23,7 +23,7 @@ from nova import test
 from nova.compute import power_state
 from nova.compute import vm_states
 from nova.compute import task_states
-from nova.image.glance import GlanceImageService
+from nova.image import glance
 from nova.tests.unit import fake_instance
 from nova.tests.unit import matchers
 from nova.virt.ec2 import EC2Driver
@@ -34,6 +34,11 @@ import boto
 import contextlib
 import mock

+if hasattr(glance, "GlanceImageService"):
+    from nova.image.glance import GlanceImageService
+else:
+    from nova.image.glance import GlanceImageServiceV2 as GlanceImageService
+

 class EC2DriverTestCase(test.NoDBTestCase):

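A minimal sketch of the same version-compat pattern, assuming only that newer nova releases drop GlanceImageService in favour of GlanceImageServiceV2; binding one alias lets the rest of the test module stay unchanged:

from nova.image import glance

if hasattr(glance, "GlanceImageService"):
    ImageService = glance.GlanceImageService      # older nova releases
else:
    ImageService = glance.GlanceImageServiceV2    # newer nova, v1 class removed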
@@ -282,7 +287,7 @@ class EC2DriverTestCase(test.NoDBTestCase):
                          None)
         mock_secgrp.return_value = []
         fake_run_instance_op = self.fake_ec2_conn.run_instances(
             'ami-1234abc')
         boto.ec2.EC2Connection.run_instances = mock.Mock()
         boto.ec2.EC2Connection.run_instances.return_value = \
             fake_run_instance_op
@@ -290,11 +295,11 @@ class EC2DriverTestCase(test.NoDBTestCase):
         fake_instances = self.fake_ec2_conn.get_only_instances()
         self.assertEqual(1, len(fake_instances))
         boto.ec2.EC2Connection.run_instances.assert_called_once_with(
             instance_type='t2.small', key_name=None,
             image_id='ami-1234abc', user_data=userdata,
             subnet_id=self.subnet_id,
             private_ip_address='192.168.10.5',
             security_group_ids=[])
         self.reset()

     @mock_ec2
@@ -407,7 +412,7 @@ class EC2DriverTestCase(test.NoDBTestCase):
         fake_inst = self.fake_ec2_conn.get_only_instances()[0]
         self.conn.reboot(self.context, self.instance, None, 'SOFT', None, None)
         boto.ec2.EC2Connection.reboot_instances.assert_called_once_with(
             instance_ids=[fake_inst.id], dry_run=False)
         self.reset()

     @mock_ec2
@@ -419,9 +424,9 @@ class EC2DriverTestCase(test.NoDBTestCase):
         EC2Driver._wait_for_state = mock.Mock()
         self.conn.reboot(self.context, self.instance, None, 'HARD', None, None)
         boto.ec2.EC2Connection.stop_instances.assert_called_once_with(
             instance_ids=[fake_inst.id], force=False, dry_run=False)
         boto.ec2.EC2Connection.start_instances.assert_called_once_with(
             instance_ids=[fake_inst.id], dry_run=False)
         wait_state_calls = EC2Driver._wait_for_state.call_args_list
         self.assertEqual(2, len(wait_state_calls))
         self.assertEqual('stopped', wait_state_calls[0][0][2])
@@ -527,7 +532,7 @@ class EC2DriverTestCase(test.NoDBTestCase):
         self.conn.destroy(self.context, self.instance, None, None)
         fake_stop.assert_not_called()
         fake_terminate.assert_called_once_with(
             instance_ids=[fake_instances[0].id])
         self.reset()

     @mock_ec2
nova/virt/ec2/ec2driver.py

@@ -261,7 +261,7 @@ class EC2Driver(driver.ComputeDriver):
         image_api = glance.get_default_image_service()
         image_meta = image_api._client.call(context, 2, 'get',
                                             image_lacking_meta['id'])
-        LOG.info("Calling _get_image_ami_id_from_meta Meta: %s", image_meta)
+        LOG.info("Calling _get_image_ami_id_from_meta Meta: %s" % image_meta)
         try:
             return image_meta['aws_image_id']
         except Exception as e:
nova/virt/gce/driver.py

@@ -118,7 +118,7 @@ class GCEDriver(driver.ComputeDriver):
         self.gce_svc = gceutils.get_gce_service(self.gce_svc_key)
         self.gce_flavor_info = gceutils.get_machines_info(
             self.gce_svc, self.gce_project, self.gce_zone)
-        LOG.info("GCE driver init with %s project, %s region",
+        LOG.info("GCE driver init with %s project, %s region" %
                  (self.gce_project, self.gce_zone))
         if '_GCE_NODES' not in globals():
             set_nodes([CONF.host])
@@ -231,7 +231,7 @@ class GCEDriver(driver.ComputeDriver):
         # GCE expects instance name in format "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
         # So we need to construct it for GCE from uuid
         gce_instance_name = 'inst-' + instance.uuid
-        LOG.info("Creating instance %s as %s on GCE.",
+        LOG.info("Creating instance %s as %s on GCE." %
                  (instance.display_name, gce_instance_name))
         # Image Info
         image_link = instance.system_metadata['image_gce_link']
@@ -291,7 +291,7 @@ class GCEDriver(driver.ComputeDriver):

         try:
             gce_id = self._get_gce_id_from_instance(instance)
-            LOG.info("Taking snapshot of instance %s", instance.uuid)
+            LOG.info("Taking snapshot of instance %s" % instance.uuid)
             try:
                 boot_disk = gceutils.get_instance_boot_disk(
                     compute, project, zone, gce_id)
@@ -299,25 +299,25 @@ class GCEDriver(driver.ComputeDriver):
                 reason = "Unable to find boot disk from instance metadata %s" % instance.uuid
                 raise exception.InvalidMetadata(reason=reason)
             disk_name = boot_disk['name']
-            LOG.debug("1. Found boot disk %s for instance %s",
+            LOG.debug("1. Found boot disk %s for instance %s" %
                       (disk_name, instance.uuid))

             operation = gceutils.stop_instance(compute, project, zone, gce_id)
             gceutils.wait_for_operation(compute, project, operation)
             instance_stopped = True
-            LOG.debug("2. Temporarily stopped instance %s", instance.uuid)
+            LOG.debug("2. Temporarily stopped instance %s" % instance.uuid)

             snapshot_name = 'nsnap-' + disk_name + time.strftime("%s")
             operation = gceutils.snapshot_disk(
                 compute, project, zone, boot_disk['name'], snapshot_name)
             gceutils.wait_for_operation(compute, project, operation)
             temp_disk_snapshot = True
-            LOG.debug("3. Created boot disk snapshot %s", snapshot_name)
+            LOG.debug("3. Created boot disk snapshot %s" % snapshot_name)

             operation = gceutils.start_instance(compute, project, zone, gce_id)
             gceutils.wait_for_operation(compute, project, operation)
             instance_stopped = False
-            LOG.debug("4. Restart instance after disk snapshot %s",
+            LOG.debug("4. Restart instance after disk snapshot %s" %
                       instance.uuid)

             snapshot_disk_name = 'vol-' + snapshot_name
@@ -327,7 +327,7 @@ class GCEDriver(driver.ComputeDriver):
             snapshot_disk_info = gceutils.get_disk(compute, project, zone,
                                                    snapshot_disk_name)
             temp_disk_from_snapshot = True
-            LOG.debug("5. Created disk %s from snapshot %s",
+            LOG.debug("5. Created disk %s from snapshot %s" %
                       (snapshot_disk_name, snapshot_name))

             update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
@@ -339,9 +339,9 @@ class GCEDriver(driver.ComputeDriver):
             gceutils.wait_for_operation(compute, project, operation,
                                         timeout=120)
             image_created = True
-            LOG.debug("6. Created image %s from disk %s",
+            LOG.debug("6. Created image %s from disk %s" %
                       (name, snapshot_disk_name))
-            LOG.info("Created GCE image %s from instance %s",
+            LOG.info("Created GCE image %s from instance %s" %
                      (name, instance.uuid))

             update_task_state(task_state=task_states.IMAGE_UPLOADING,
@@ -364,7 +364,7 @@ class GCEDriver(driver.ComputeDriver):
                 },
             }
             image_api.update(context, image_id, image_metadata)
-            LOG.debug("7. Added image to glance %s", name)
+            LOG.debug("7. Added image to glance %s" % name)

             disk_operation = gceutils.delete_disk(compute, project, zone,
                                                   snapshot_disk_name)
@@ -372,12 +372,12 @@ class GCEDriver(driver.ComputeDriver):
                                                         snapshot_name)
             gceutils.wait_for_operation(compute, project, disk_operation)
             temp_disk_from_snapshot = False
-            LOG.debug("8. Delete temporary disk %s", snapshot_disk_name)
+            LOG.debug("8. Delete temporary disk %s" % snapshot_disk_name)

             gceutils.wait_for_operation(compute, project, snap_operation)
             temp_disk_snapshot = False
-            LOG.debug("9. Delete temporary disk snapshot %s", snapshot_name)
-            LOG.info("Completed snapshot for instance %s", instance.uuid)
+            LOG.debug("9. Delete temporary disk snapshot %s" % snapshot_name)
+            LOG.info("Completed snapshot for instance %s" % instance.uuid)

         except Exception as e:
             LOG.exception("An error occured during image creation: %s" % e)
@@ -385,11 +385,11 @@ class GCEDriver(driver.ComputeDriver):
                 operation = gceutils.start_instance(compute, project, zone,
                                                     gce_id)
                 gceutils.wait_for_operation(compute, project, operation)
-                LOG.debug("Restart instance after disk snapshot %s",
+                LOG.debug("Restart instance after disk snapshot %s" %
                           instance.uuid)
             if image_created:
                 LOG.info("Rollback snapshot for instance %s, deleting image "
-                         "%s from GCE", (instance.uuid, name))
+                         "%s from GCE" % (instance.uuid, name))
                 operation = gceutils.delete_image(compute, project, name)
                 gceutils.wait_for_operation(compute, project, operation)
             if temp_disk_from_snapshot:
@@ -397,13 +397,13 @@ class GCEDriver(driver.ComputeDriver):
                                                       snapshot_disk_name)
                 gceutils.wait_for_operation(compute, project, disk_operation)
                 LOG.debug("Rollback snapshot for instace %s, delete temporary"
-                          " disk %s", (instance.uuid, snapshot_disk_name))
+                          " disk %s" % (instance.uuid, snapshot_disk_name))
             if temp_disk_snapshot:
                 snap_operation = gceutils.delete_snapshot(
                     compute, project, snapshot_name)
                 gceutils.wait_for_operation(compute, project, snap_operation)
                 LOG.debug("Rollback snapshot for instance %s, delete temporary"
-                          " disk snapshot %s", (instance.uuid, snapshot_name))
+                          " disk snapshot %s" % (instance.uuid, snapshot_name))
             raise e

     def reboot(self, context, instance, network_info, reboot_type,
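Note on the snapshot path above: the numbered debug messages 1-9 trace the stop, snapshot, restart, temporary-disk, image, cleanup sequence, and the booleans set along the way (instance_stopped, temp_disk_snapshot, temp_disk_from_snapshot, image_created) tell the except block exactly which of those steps still needs to be rolled back when image creation fails partway through.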
@@ -435,22 +435,22 @@ class GCEDriver(driver.ComputeDriver):
                      block_device_info=None):
         compute, project, zone = self.gce_svc, self.gce_project, self.gce_zone
         gce_id = self._get_gce_id_from_instance(instance)
-        LOG.info('Stopping instance %s', instance.uuid)
+        LOG.info('Stopping instance %s' % instance.uuid)
         operation = gceutils.stop_instance(compute, project, zone, gce_id)
         gceutils.wait_for_operation(compute, project, operation)
-        LOG.info('Starting instance %s', instance.uuid)
+        LOG.info('Starting instance %s' % instance.uuid)
         operation = gceutils.start_instance(compute, project, zone, gce_id)
         gceutils.wait_for_operation(compute, project, operation)
-        LOG.info('Soft Reboot Complete for instance %s', instance.uuid)
+        LOG.info('Soft Reboot Complete for instance %s' % instance.uuid)

     def _hard_reboot(self, context, instance, network_info,
                      block_device_info=None):
         compute, project, zone = self.gce_svc, self.gce_project, self.gce_zone
         gce_id = self._get_gce_id_from_instance(instance)
-        LOG.info('Resetting instance %s', instance.uuid)
+        LOG.info('Resetting instance %s' % instance.uuid)
         operation = gceutils.reset_instance(compute, project, zone, gce_id)
         gceutils.wait_for_operation(compute, project, operation)
-        LOG.info('Hard Reboot Complete %s', instance.uuid)
+        LOG.info('Hard Reboot Complete %s' % instance.uuid)

     @staticmethod
     def get_host_ip_addr():
@@ -502,23 +502,23 @@ class GCEDriver(driver.ComputeDriver):
         """
         compute, project, zone = self.gce_svc, self.gce_project, self.gce_zone
         gce_id = self._get_gce_id_from_instance(instance)
-        LOG.info('Stopping instance %s', instance.uuid)
+        LOG.info('Stopping instance %s' % instance.uuid)
         operation = gceutils.stop_instance(compute, project, zone, gce_id)
         gceutils.wait_for_operation(compute, project, operation)
-        LOG.info('Power off complete %s', instance.uuid)
+        LOG.info('Power off complete %s' % instance.uuid)

     def power_on(self, context, instance, network_info, block_device_info):
         """Power on the specified instance."""
         compute, project, zone = self.gce_svc, self.gce_project, self.gce_zone
         gce_id = self._get_gce_id_from_instance(instance)
-        LOG.info('Starting instance %s', instance.uuid)
+        LOG.info('Starting instance %s' % instance.uuid)
         operation = gceutils.start_instance(compute, project, zone, gce_id)
         gceutils.wait_for_operation(compute, project, operation)
-        LOG.info("Power on Complete %s", instance.uuid)
+        LOG.info("Power on Complete %s" % instance.uuid)

     def soft_delete(self, instance):
         """Deleting the specified instance"""
-        LOG.info("Soft delete instance %s", instance.uuid)
+        LOG.info("Soft delete instance %s" % instance.uuid)
         self.destroy(instance)

     def restore(self, instance):
@@ -531,7 +531,7 @@ class GCEDriver(driver.ComputeDriver):
         instance.
         :param instance: nova.objects.instance.Instance
         """
-        LOG.info("Pause instance %s", instance.uuid)
+        LOG.info("Pause instance %s" % instance.uuid)
         self.power_off(instance)

     def unpause(self, instance):
@@ -541,7 +541,7 @@ class GCEDriver(driver.ComputeDriver):
         instance. and powering on such an instance in this method.
         :param instance: nova.objects.instance.Instance
         """
-        LOG.info("Unpause instance %s", instance.uuid)
+        LOG.info("Unpause instance %s" % instance.uuid)
         self.power_on(context=None, instance=instance, network_info=None,
                       block_device_info=None)

@@ -552,7 +552,7 @@ class GCEDriver(driver.ComputeDriver):
         instance.
         :param instance: nova.objects.instance.Instance
         """
-        LOG.info("Suspending instance %s", instance.uuid)
+        LOG.info("Suspending instance %s" % instance.uuid)
         self.power_off(instance)

     def resume(self, context, instance, network_info, block_device_info=None):
@@ -562,7 +562,7 @@ class GCEDriver(driver.ComputeDriver):
         instance.
         :param instance: nova.objects.instance.Instance
         """
-        LOG.info("Resuming instance %s", instance.uuid)
+        LOG.info("Resuming instance %s" % instance.uuid)
         self.power_on(context, instance, network_info, block_device_info)

     def destroy(self, context, instance, network_info, block_device_info=None,
@@ -583,11 +583,11 @@ class GCEDriver(driver.ComputeDriver):
         :param migrate_data: implementation specific params
         """
         compute, project, zone = self.gce_svc, self.gce_project, self.gce_zone
-        LOG.info('Deleting instance %s', instance.uuid)
+        LOG.info('Deleting instance %s' % instance.uuid)
         try:
             gce_id = self._get_gce_id_from_instance(instance)
         except exception.InstanceNotFound:
-            LOG.error("Unable to find GCE mapping for instance %s",
+            LOG.error("Unable to find GCE mapping for instance %s" %
                       instance.uuid)
             return
         try:
@@ -596,11 +596,11 @@ class GCEDriver(driver.ComputeDriver):
         except HttpError:
             # Sometimes instance may not exist in GCE, in that case we just
             # allow deleting VM from openstack
-            LOG.error("Instance %s not found in GCE, removing from openstack.",
+            LOG.error("Instance %s not found in GCE, removing from openstack." %
                       instance.uuid)
             return
         gceutils.wait_for_operation(compute, project, operation)
-        LOG.info("Destroy Complete %s", instance.uuid)
+        LOG.info("Destroy Complete %s" % instance.uuid)

     def attach_volume(self, context, connection_info, instance, mountpoint,
                       disk_bus=None, device_type=None, encryption=None):
@@ -614,7 +614,7 @@ class GCEDriver(driver.ComputeDriver):
         operation = gceutils.attach_disk(compute, project, zone, gce_id,
                                          disk_name, disk_link)
         gceutils.wait_for_operation(compute, project, operation)
-        LOG.info("Volume %s attached to instace %s",
+        LOG.info("Volume %s attached to instace %s" %
                  (disk_name, instance.uuid))

     def detach_volume(self, connection_info, instance, mountpoint,
@@ -628,7 +628,7 @@ class GCEDriver(driver.ComputeDriver):
         operation = gceutils.detach_disk(compute, project, zone, gce_id,
                                          disk_name)
         gceutils.wait_for_operation(compute, project, operation)
-        LOG.info("Volume %s detached from instace %s",
+        LOG.info("Volume %s detached from instace %s" %
                  (disk_name, instance.uuid))

     def swap_volume(self, old_connection_info, new_connection_info, instance,
nova/virt/gce/gceutils.py

@@ -164,7 +164,7 @@ def set_instance_metadata(compute, project, zone, instance, items,
         metadata['items'].extend(items)
     else:
         metadata['items'] = items
-    LOG.info("Adding metadata %s" % (metadata, ))
+    LOG.info("Adding metadata %s" % (metadata))
     # TODO: Add del operation if required
     return compute.instances().setMetadata(project=project, zone=zone,
                                            instance=instance,
@@ -181,7 +181,7 @@ def create_instance(compute, project, zone, name, image_link, machine_link,
     :param image_link: url, GCE Image link for instance launch
     :param machine_link: url, GCE Machine link for instance launch
     """
-    LOG.info("Launching instance %s with image %s, machine %s and network %s",
+    LOG.info("Launching instance %s with image %s, machine %s and network %s" %
             (name, image_link, machine_link, network_interfaces))

     config = {
@@ -269,7 +269,7 @@ def wait_for_operation(compute, project, operation, interval=1, timeout=60):
     def watch_operation(name, request):
         result = request.execute()
         if result['status'] == 'DONE':
-            LOG.info("Operation %s status is %s", (name, result['status']))
+            LOG.info("Operation %s status is %s" % (name, result['status']))
             if 'error' in result:
                 raise GceOperationError(result['error'])
             raise loopingcall.LoopingCallDone()
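wait_for_operation drives watch_operation with oslo.service's looping-call helper: the callback is re-run on a fixed interval until it raises LoopingCallDone. A minimal, self-contained sketch of that pattern; only the loopingcall API is taken from oslo.service, the poll body is illustrative:

import time

from oslo_service import loopingcall

start = time.time()

def _poll():
    # Illustrative stand-in for watch_operation: finish after ~3 seconds.
    if time.time() - start > 3:
        raise loopingcall.LoopingCallDone()

# FixedIntervalLoopingCall re-runs _poll every `interval` seconds until it
# raises LoopingCallDone; start() returns an event that wait() blocks on.
timer = loopingcall.FixedIntervalLoopingCall(_poll)
timer.start(interval=1).wait()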
@@ -416,7 +416,7 @@ def get_instance_boot_disk(compute, project, zone, instance):
     # Eg. projects/<project>/zones/<zone>/disks/<disk_name>
     items = urllib.parse.urlparse(disk_url).path.strip('/').split('/')
     if len(items) < 4 or items[-2] != 'disks':
-        LOG.error('Invalid disk URL %s', (disk_url))
+        LOG.error('Invalid disk URL %s' % (disk_url))
     disk_name, zone = items[-1], items[-3]
     disk_info = get_disk(compute, project, zone, disk_name)
     return disk_info
omni-requirements.txt

@@ -2,4 +2,4 @@ google-api-python-client>=1.4.2 # Apache-2.0
 moto
 boto>=2.32.1 # MIT
 ipaddr
 google_compute_engine
run_tests.sh

@@ -62,8 +62,10 @@ run_tests() {

 check_results() {
     project=$1
-    fail=$(awk '/Failed: /{print}' $DIRECTORY/$project.log | awk -F ': ' '{print $2}')
-    pass=$(awk '/Passed: /{print}' $DIRECTORY/$project.log | awk -F ': ' '{print $2}')
+    fail_string=$(awk '/Failed: /' $DIRECTORY/$project.log | awk -F ': ' '{print $2}')
+    pass_string=$(awk '/Passed: /' $DIRECTORY/$project.log | awk -F ': ' '{print $2}')
+    fail=`echo $fail_string | awk -F ' ' '{print $1}'`
+    pass=`echo $pass_string | awk -F ' ' '{print $1}'`
     if [[ $fail -gt 0 ]]; then
         results+=( ["$project"]="FAILED" )
     elif [[ $pass -gt 0 ]]; then
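The two-step parse above exists because the text after ": " in a test-runner summary line can carry trailing detail, so a second pass is needed to isolate the bare count. A sketch under that assumption; the sample line is illustrative, real input comes from $DIRECTORY/$project.log:

#!/bin/bash
# Illustrative summary line as it might appear in a project's test log.
line='Failed: 2 (expected failures=1)'

# Step 1: take everything after ": "  -> "2 (expected failures=1)"
fail_string=$(echo "$line" | awk '/Failed: /' | awk -F ': ' '{print $2}')

# Step 2: keep only the leading count -> "2", safe for [[ $fail -gt 0 ]]
fail=$(echo "$fail_string" | awk -F ' ' '{print $1}')
echo "$fail"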
test-requirements.txt

@@ -1 +1,2 @@
 hacking>=0.12.0,<0.13 # Apache-2.0
+zuul
tox.ini

@@ -8,7 +8,9 @@ usedevelop = True
 deps = -r{toxinidir}/test-requirements.txt
 whitelist_externals = bash
 commands =
-    bash clone_repos.sh
+    zuul-cloner --cache-dir /opt/git git://git.openstack.org \
+        openstack/nova openstack/cinder \
+        openstack/neutron openstack/glance_store
     bash run_tests.sh -wj

 [testenv:pep8]
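With this change the tox environment fetches nova, cinder, neutron and glance_store through zuul-cloner rather than the ad-hoc clone_repos.sh; zuul-cloner can reuse repositories cached under /opt/git and fall back to git://git.openstack.org, and it ships with the zuul package, which is presumably why zuul is added to test-requirements.txt above.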