Ties quantum, melange, and the nova network model together.

get_instance_nw_info() now returns the network model and keeps the network
info cache up to date. A virt-driver shim and translation layer are in place
so that virt drivers can still consume the legacy network_info format.

Change-Id: I070ea7d8564af6c644059d1c209542d250d19ddb
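For orientation, here is a minimal, hypothetical sketch of the round trip this change sets up: the network layer returns a NetworkInfo model, a flattened copy is written to the instance info cache, and readers rebuild the model with hydrate(). The class below is a toy stand-in for nova.network.model.NetworkInfo, not the real implementation:

    import json


    # Toy stand-in for nova.network.model.NetworkInfo: a list of VIF dicts
    # that can flatten itself for the info cache and rebuild from that form.
    class NetworkInfo(list):
        def as_cache(self):
            # serialized form stored via instance_info_cache_update()
            return json.dumps(list(self))

        @classmethod
        def hydrate(cls, cached):
            # accept the serialized blob or an already-parsed list
            if isinstance(cached, str):
                cached = json.loads(cached)
            return cls(cached)


    nw_info = NetworkInfo([{'id': 'vif-uuid', 'address': 'aa:bb:cc:dd:ee:ff'}])
    cached = nw_info.as_cache()                    # written to the cache table
    assert NetworkInfo.hydrate(cached) == nw_info  # read back by consumers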
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
@@ -65,17 +65,8 @@ def image_ec2_id(image_id, image_type='ami'):
         return "ami-00000000"


-def get_ip_info_for_instance_from_cache(instance):
-    if (not instance.get('info_cache') or
-            not instance['info_cache'].get('network_info')):
-        # NOTE(jkoelker) Raising ValueError so that we trigger the
-        # fallback lookup
-        raise ValueError
-
-    cached_info = instance['info_cache']['network_info']
-    nw_info = network_model.NetworkInfo.hydrate(cached_info)
+def get_ip_info_for_instance_from_nw_info(nw_info):
     ip_info = dict(fixed_ips=[], fixed_ip6s=[], floating_ips=[])

     for vif in nw_info:
         vif_fixed_ips = vif.fixed_ips()

@@ -92,27 +83,17 @@ def get_ip_info_for_instance_from_cache(instance):
     return ip_info


-def get_ip_for_instance_from_nwinfo(context, instance):
-    # NOTE(jkoelker) When the network_api starts returning the model, this
-    # can be refactored out into the above function
-    network_api = network.API()
-
-    def _get_floaters(ip):
-        return network_api.get_floating_ips_by_fixed_address(context, ip)
-
-    ip_info = dict(fixed_ips=[], fixed_ip6s=[], floating_ips=[])
-    nw_info = network_api.get_instance_nw_info(context, instance)
-
-    for _net, info in nw_info:
-        for ip in info['ips']:
-            ip_info['fixed_ips'].append(ip['ip'])
-            floaters = _get_floaters(ip['ip'])
-            if floaters:
-                ip_info['floating_ips'].extend(floaters)
-        if 'ip6s' in info:
-            for ip in info['ip6s']:
-                ip_info['fixed_ip6s'].append(ip['ip'])
-    return ip_info
+def get_ip_info_for_instance_from_cache(instance):
+    if (not instance.get('info_cache') or
+            not instance['info_cache'].get('network_info')):
+        # NOTE(jkoelker) Raising ValueError so that we trigger the
+        # fallback lookup
+        raise ValueError
+
+    cached_info = instance['info_cache']['network_info']
+    nw_info = network_model.NetworkInfo.hydrate(cached_info)
+    return get_ip_info_for_instance_from_nw_info(nw_info)


 def get_ip_info_for_instance(context, instance):

@@ -125,7 +106,10 @@ def get_ip_info_for_instance(context, instance):
         # sqlalchemy FK (KeyError, AttributeError)
         # fail fall back to calling out to the
         # network api
-        return get_ip_for_instance_from_nwinfo(context, instance)
+        network_api = network.API()
+        nw_info = network_api.get_instance_nw_info(context, instance)
+        return get_ip_info_for_instance_from_nw_info(nw_info)


 def get_availability_zone_by_host(services, host):

@@ -287,15 +287,7 @@ def dict_to_query_str(params):
     return param_str.rstrip('&')


-def get_networks_for_instance_from_cache(instance):
-    if (not instance.get('info_cache') or
-            not instance['info_cache'].get('network_info')):
-        # NOTE(jkoelker) Raising ValueError so that we trigger the
-        # fallback lookup
-        raise ValueError
-
-    cached_info = instance['info_cache']['network_info']
-    nw_info = network_model.NetworkInfo.hydrate(cached_info)
+def get_networks_for_instance_from_nw_info(nw_info):
     networks = {}

     for vif in nw_info:

@@ -310,38 +302,16 @@ def get_networks_for_instance_from_cache(instance):
     return networks


-def get_networks_for_instance_from_nwinfo(context, instance):
-    # NOTE(jkoelker) When the network_api starts returning the model, this
-    # can be refactored out into the above function
-    network_api = network.API()
-
-    def _get_floats(ip):
-        return network_api.get_floating_ips_by_fixed_address(context, ip)
-
-    def _emit_addr(ip, version):
-        return {'address': ip, 'version': version}
-
-    nw_info = network_api.get_instance_nw_info(context, instance)
-    networks = {}
-    for _net, info in nw_info:
-        net = {'ips': [], 'floating_ips': []}
-        for ip in info['ips']:
-            net['ips'].append(_emit_addr(ip['ip'], 4))
-            floaters = _get_floats(ip['ip'])
-            if floaters:
-                net['floating_ips'].extend([_emit_addr(float, 4)
-                                            for float in floaters])
-        if 'ip6s' in info:
-            for ip in info['ip6s']:
-                net['ips'].append(_emit_addr(ip['ip'], 6))
-
-        label = info['label']
-        if label not in networks:
-            networks[label] = {'ips': [], 'floating_ips': []}
-
-        networks[label]['ips'].extend(net['ips'])
-        networks[label]['floating_ips'].extend(net['floating_ips'])
-    return networks
+def get_networks_for_instance_from_cache(instance):
+    if (not instance.get('info_cache') or
+            not instance['info_cache'].get('network_info')):
+        # NOTE(jkoelker) Raising ValueError so that we trigger the
+        # fallback lookup
+        raise ValueError
+
+    cached_info = instance['info_cache']['network_info']
+    nw_info = network_model.NetworkInfo.hydrate(cached_info)
+    return get_networks_for_instance_from_nw_info(nw_info)


 def get_networks_for_instance(context, instance):

@@ -363,7 +333,10 @@ def get_networks_for_instance(context, instance):
         # sqlalchemy FK (KeyError, AttributeError)
         # fail fall back to calling out to the
         # network api
-        return get_networks_for_instance_from_nwinfo(context, instance)
+        network_api = network.API()
+        nw_info = network_api.get_instance_nw_info(context, instance)
+        return get_networks_for_instance_from_nw_info(nw_info)


 def raise_http_conflict_for_instance_invalid_state(exc, action):
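The hunks above split each helper into a cache reader and a pure model consumer. Composed, the public lookup reads roughly as below; the try/except skeleton is implied by the except branch shown in the diff rather than quoted verbatim:

    def get_ip_info_for_instance(context, instance):
        """Return a dictionary of IP information for an instance."""
        try:
            # fast path: hydrate the model from the instance info cache;
            # an empty cache raises ValueError to force the fallback
            return get_ip_info_for_instance_from_cache(instance)
        except (ValueError, KeyError, AttributeError):
            # slow path: ask the network service for a fresh model
            network_api = network.API()
            nw_info = network_api.get_instance_nw_info(context, instance)
            return get_ip_info_for_instance_from_nw_info(nw_info)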
diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py
@@ -93,7 +93,6 @@ class Controller(wsgi.Controller):
         context = req.environ["nova.context"]
         instance = self._get_instance(context, server_id)
         networks = common.get_networks_for_instance(context, instance)
-
         if id not in networks:
             msg = _("Instance is not a member of specified network")
             raise exc.HTTPNotFound(explanation=msg)
diff --git a/nova/compute/api.py b/nova/compute/api.py
@@ -1687,25 +1687,30 @@ class API(base.Base):
         # in its info, if this changes, the next few lines will need to
         # accommodate the info containing floating as well as fixed ip
         # addresses
-        fixed_ip_addrs = []
-        for info in self.network_api.get_instance_nw_info(context.elevated(),
-                                                          instance):
-            ips = info[1]['ips']
-            fixed_ip_addrs.extend([ip_dict['ip'] for ip_dict in ips])
+        fail_bag = _('instance |%s| has no fixed ips. '
+                     'unable to associate floating ip') % instance_uuid

-        # TODO(tr3buchet): this will associate the floating IP with the first
-        # fixed_ip (lowest id) an instance has. This should be changed to
-        # support specifying a particular fixed_ip if multiple exist.
-        if not fixed_ip_addrs:
-            msg = _("instance |%s| has no fixed_ips. "
-                    "unable to associate floating ip") % instance_uuid
-            raise exception.ApiError(msg)
-        if len(fixed_ip_addrs) > 1:
-            LOG.warning(_("multiple fixed_ips exist, using the first: %s"),
-                        fixed_ip_addrs[0])
-        self.network_api.associate_floating_ip(context,
-                                               floating_address=address,
-                                               fixed_address=fixed_ip_addrs[0])
+        nw_info = self.network_api.get_instance_nw_info(context.elevated(),
+                                                        instance)
+
+        if nw_info:
+            ips = [ip for ip in nw_info[0].fixed_ips()]
+
+            # TODO(tr3buchet): this will associate the floating IP with the
+            # first fixed_ip (lowest id) an instance has. This should be
+            # changed to support specifying a particular fixed_ip if
+            # multiple exist.
+            if not ips:
+                raise exception.ApiError(fail_bag)
+            if len(ips) > 1:
+                LOG.warning(_('multiple fixed ips exist, using the first: %s'),
+                            ips[0]['address'])
+            self.network_api.associate_floating_ip(context,
+                                                   floating_address=address,
+                                                   fixed_address=ips[0]['address'])
+            return
+        raise exception.ApiError(fail_bag)

     @wrap_check_policy
     def get_instance_metadata(self, context, instance):
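Note the shape change in the hunk above: the old code collected raw address strings, while nw_info[0].fixed_ips() yields model dicts keyed by 'address'. A toy illustration (FakeVIF is hypothetical; the real model's fixed_ips() is assumed to flatten the fixed IPs of the VIF's subnets):

    class FakeVIF(dict):
        def fixed_ips(self):
            # flatten fixed IPs across the VIF's subnets
            return [ip for subnet in self['network']['subnets']
                    for ip in subnet['ips']]


    vif = FakeVIF(network={'subnets': [{'ips': [{'address': '10.0.0.2'},
                                                {'address': '10.0.0.3'}]}]})
    ips = vif.fixed_ips()
    # associate_floating_ip() above now reads the first entry's 'address'
    assert ips[0]['address'] == '10.0.0.2'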
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
@@ -49,7 +49,7 @@ from nova.common import cfg
 from nova.compute import instance_types
 from nova.compute import power_state
 from nova.compute import task_states
-from nova.compute.utils import notify_usage_exists
+from nova.compute import utils as compute_utils
 from nova.compute import vm_states
 from nova import exception
 from nova import flags
@@ -57,6 +57,7 @@ import nova.image
 from nova import log as logging
 from nova import manager
 from nova import network
+from nova.network import model as network_model
 from nova.notifier import api as notifier
 from nova import rpc
 from nova import utils
@@ -227,7 +228,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         try:
             net_info = self._get_instance_nw_info(context, instance)
             self.driver.ensure_filtering_rules_for_instance(instance,
-                                                            net_info)
+                    self._legacy_nw_info(net_info))
         except NotImplementedError:
             LOG.warning(_('Hypervisor driver does not '
                           'support firewall rules'))
@@ -282,10 +283,18 @@ class ComputeManager(manager.SchedulerDependentManager):
     def _get_instance_nw_info(self, context, instance):
         """Get a list of dictionaries of network data of an instance.
         Returns an empty list if stub_network flag is set."""
-        network_info = []
-        if not FLAGS.stub_network:
-            network_info = self.network_api.get_instance_nw_info(context,
-                                                                 instance)
+        if FLAGS.stub_network:
+            return network_model.NetworkInfo()
+
+        # get the network info from network
+        network_info = self.network_api.get_instance_nw_info(context,
+                                                             instance)
+        return network_info
+
+    def _legacy_nw_info(self, network_info):
+        """Converts the model nw_info object to legacy style"""
+        if self.driver.legacy_nwinfo():
+            network_info = compute_utils.legacy_network_info(network_info)
         return network_info

     def _setup_block_device_mapping(self, context, instance):
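_legacy_nw_info() is the entire virt shim: a driver that still consumes the legacy tuples reports legacy_nwinfo() as True and the manager converts at each call site, while a model-aware driver returns False and receives the NetworkInfo object untouched. A minimal sketch of the two driver flavors (class names are invented):

    class LegacyStyleDriver(object):
        """Still expects the old (network_dict, info_dict) tuple list."""
        def legacy_nwinfo(self):
            # manager will apply compute_utils.legacy_network_info() first
            return True


    class ModelAwareDriver(object):
        """Consumes network_model.NetworkInfo directly."""
        def legacy_nwinfo(self):
            return False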
@@ -489,12 +498,13 @@ class ComputeManager(manager.SchedulerDependentManager):
         if FLAGS.stub_network:
             msg = _("Skipping network allocation for instance %s")
             LOG.debug(msg % instance['uuid'])
-            return []
+            return network_model.NetworkInfo()
         self._instance_update(context, instance['uuid'],
                               vm_state=vm_states.BUILDING,
                               task_state=task_states.NETWORKING)
         is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
         try:
+            # allocate and get network info
             network_info = self.network_api.allocate_for_instance(
                                 context, instance, vpn=is_vpn,
                                 requested_networks=requested_networks)
@@ -502,7 +512,9 @@ class ComputeManager(manager.SchedulerDependentManager):
             msg = _("Instance %s failed network setup")
             LOG.exception(msg % instance['uuid'])
             raise
+
         LOG.debug(_("instance network_info: |%s|"), network_info)
+
         return network_info

     def _prep_block_device(self, context, instance):
@@ -527,7 +539,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         instance['admin_pass'] = admin_pass
         try:
             self.driver.spawn(context, instance, image_meta,
-                              network_info, block_device_info)
+                              self._legacy_nw_info(network_info),
+                              block_device_info)
         except Exception:
             msg = _("Instance %s failed to spawn")
             LOG.exception(msg % instance['uuid'])
@@ -606,9 +618,10 @@ class ComputeManager(manager.SchedulerDependentManager):
                  {'action_str': action_str, 'instance_uuid': instance_uuid},
                  context=context)

+        # get network info before tearing down
         network_info = self._get_instance_nw_info(context, instance)
-        if not FLAGS.stub_network:
-            self.network_api.deallocate_for_instance(context, instance)
+        # tear down allocated network structure
+        self._deallocate_network(context, instance)

         if instance['power_state'] == power_state.SHUTOFF:
             self.db.instance_destroy(context, instance_id)
@@ -618,7 +631,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         bdms = self._get_instance_volume_bdms(context, instance_id)
         block_device_info = self._get_instance_volume_block_device_info(
             context, instance_id)
-        self.driver.destroy(instance, network_info, block_device_info)
+        self.driver.destroy(instance, self._legacy_nw_info(network_info),
+                            block_device_info)
         for bdm in bdms:
             try:
                 # NOTE(vish): actual driver detach done in driver.destroy, so
@@ -663,7 +677,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         """Terminate an instance on this host."""
         elevated = context.elevated()
         instance = self.db.instance_get_by_uuid(elevated, instance_uuid)
-        notify_usage_exists(instance, current_period=True)
+        compute_utils.notify_usage_exists(instance, current_period=True)
         self._delete_instance(context, instance)

     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -732,7 +746,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                               task_state=None)

         network_info = self._get_instance_nw_info(context, instance)
-        self.driver.destroy(instance, network_info)
+        self.driver.destroy(instance, self._legacy_nw_info(network_info))

         self._instance_update(context,
                               instance_uuid,
@@ -755,7 +769,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         image_meta = _get_image_meta(context, instance['image_ref'])

         self.driver.spawn(context, instance, image_meta,
-                          network_info, device_info)
+                          self._legacy_nw_info(network_info), device_info)

         current_power_state = self._get_power_state(context, instance)
         self._instance_update(context,
@@ -794,7 +808,8 @@ class ComputeManager(manager.SchedulerDependentManager):
                 context=context)

         network_info = self._get_instance_nw_info(context, instance)
-        self.driver.reboot(instance, network_info, reboot_type)
+        self.driver.reboot(instance, self._legacy_nw_info(network_info),
+                           reboot_type)

         current_power_state = self._get_power_state(context, instance)
         self._instance_update(context,
@@ -1026,7 +1041,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         image_meta = _get_image_meta(context, instance_ref['image_ref'])

         with self.error_out_instance_on_exception(context, instance_uuid):
-            self.driver.rescue(context, instance_ref, network_info, image_meta)
+            self.driver.rescue(context, instance_ref,
+                               self._legacy_nw_info(network_info), image_meta)

         current_power_state = self._get_power_state(context, instance_ref)
         self._instance_update(context,
@@ -1047,7 +1063,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         network_info = self._get_instance_nw_info(context, instance_ref)

         with self.error_out_instance_on_exception(context, instance_uuid):
-            self.driver.unrescue(instance_ref, network_info)
+            self.driver.unrescue(instance_ref,
+                                 self._legacy_nw_info(network_info))

         current_power_state = self._get_power_state(context, instance_ref)
         self._instance_update(context,
@@ -1069,8 +1086,8 @@ class ComputeManager(manager.SchedulerDependentManager):
                 "resize.confirm.start")

         network_info = self._get_instance_nw_info(context, instance_ref)
-        self.driver.confirm_migration(
-                migration_ref, instance_ref, network_info)
+        self.driver.confirm_migration(migration_ref, instance_ref,
+                                      self._legacy_nw_info(network_info))

         self._notify_about_instance_usage(instance_ref, "resize.confirm.end",
                                           network_info=network_info)
@@ -1090,7 +1107,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                 migration_ref.instance_uuid)

         network_info = self._get_instance_nw_info(context, instance_ref)
-        self.driver.destroy(instance_ref, network_info)
+        self.driver.destroy(instance_ref, self._legacy_nw_info(network_info))
         topic = self.db.queue_get_for(context, FLAGS.compute_topic,
                                       migration_ref['source_compute'])
         rpc.cast(context, topic,
@@ -1267,8 +1284,9 @@ class ComputeManager(manager.SchedulerDependentManager):

         try:
             self.driver.finish_migration(context, migration_ref, instance_ref,
-                                         disk_info, network_info, image_meta,
-                                         resize_instance)
+                                         disk_info,
+                                         self._legacy_nw_info(network_info),
+                                         image_meta, resize_instance)
         except Exception, error:
             with utils.save_and_reraise_exception():
                 msg = _('%s. Setting instance vm_state to ERROR')
@@ -1477,7 +1495,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         network_info = self._get_instance_nw_info(context, instance)
         LOG.debug(_("network_info to inject: |%s|"), network_info)

-        self.driver.inject_network_info(instance, network_info)
+        self.driver.inject_network_info(instance,
+                                        self._legacy_nw_info(network_info))
         return network_info

     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -1753,14 +1772,16 @@ class ComputeManager(manager.SchedulerDependentManager):
         # concurrent request occurs to iptables, then it complains.
         network_info = self._get_instance_nw_info(context, instance_ref)

-        fixed_ips = [nw_info[1]['ips'] for nw_info in network_info]
+        # TODO(tr3buchet): figure out how on the earth this is necessary
+        fixed_ips = network_info.fixed_ips()
         if not fixed_ips:
             raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)

         max_retry = FLAGS.live_migration_retry_count
         for cnt in range(max_retry):
             try:
-                self.driver.plug_vifs(instance_ref, network_info)
+                self.driver.plug_vifs(instance_ref,
+                                      self._legacy_nw_info(network_info))
                 break
             except exception.ProcessExecutionError:
                 if cnt == max_retry - 1:
@@ -1778,7 +1799,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         # In addition, this method is creating filtering rule
         # onto destination host.
         self.driver.ensure_filtering_rules_for_instance(instance_ref,
-                                                        network_info)
+                self._legacy_nw_info(network_info))

         # Preparation for block migration
         if block_migration:
@@ -1868,7 +1889,8 @@ class ComputeManager(manager.SchedulerDependentManager):

         network_info = self._get_instance_nw_info(ctxt, instance_ref)
         # Releasing security group ingress rule.
-        self.driver.unfilter_instance(instance_ref, network_info)
+        self.driver.unfilter_instance(instance_ref,
+                                      self._legacy_nw_info(network_info))

         # Database updating.
         # NOTE(jkoelker) This needs to be converted to network api calls
@@ -1918,13 +1940,15 @@ class ComputeManager(manager.SchedulerDependentManager):
             # No instance booting at source host, but instance dir
             # must be deleted for preparing next block migration
             if block_migration:
-                self.driver.destroy(instance_ref, network_info)
+                self.driver.destroy(instance_ref,
+                                    self._legacy_nw_info(network_info))
         else:
             # self.driver.destroy() usually performs vif unplugging
             # but we must do it explicitly here when block_migration
             # is false, as the network devices at the source must be
             # torn down
-            self.driver.unplug_vifs(instance_ref, network_info)
+            self.driver.unplug_vifs(instance_ref,
+                                    self._legacy_nw_info(network_info))

         LOG.info(_('Migrating %(instance_uuid)s to %(dest)s finished'
                    ' successfully.') % locals())
@@ -1945,10 +1969,9 @@ class ComputeManager(manager.SchedulerDependentManager):
         LOG.info(_('Post operation of migration started for %s .')
                  % instance_ref['uuid'])
         network_info = self._get_instance_nw_info(context, instance_ref)
-        self.driver.post_live_migration_at_destination(context,
-                                                       instance_ref,
-                                                       network_info,
-                                                       block_migration)
+        self.driver.post_live_migration_at_destination(context, instance_ref,
+                self._legacy_nw_info(network_info),
+                block_migration)

     def rollback_live_migration(self, context, instance_ref,
                                 dest, block_migration):
@@ -2000,7 +2023,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         # from remote volumes if necessary
         block_device_info = \
             self._get_instance_volume_block_device_info(context, instance_id)
-        self.driver.destroy(instance_ref, network_info,
+        self.driver.destroy(instance_ref, self._legacy_nw_info(network_info),
                             block_device_info)

     @manager.periodic_task
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
@@ -16,8 +16,11 @@

 """Compute-related Utilities and helpers."""

+import netaddr
+
 from nova import context
 from nova import db
+from nova import exception
 from nova import flags
 from nova.notifier import api as notifier_api
 from nova import utils
@@ -53,3 +56,118 @@ def notify_usage_exists(instance_ref, current_period=False):
                         'compute.instance.exists',
                         notifier_api.INFO,
                         usage_info)
+
+
+def legacy_network_info(network_model):
+    """
+    Return the legacy network_info representation of the network_model
+    """
+    def get_ip(ip):
+        if not ip:
+            return None
+        return ip['address']
+
+    def fixed_ip_dict(ip, subnet):
+        if ip['version'] == 4:
+            netmask = str(subnet.as_netaddr().netmask)
+        else:
+            netmask = subnet.as_netaddr()._prefixlen
+
+        return {'ip': ip['address'],
+                'enabled': '1',
+                'netmask': netmask,
+                'gateway': get_ip(subnet['gateway'])}
+
+    def get_meta(model, key, default=None):
+        if 'meta' in model and key in model['meta']:
+            return model['meta'][key]
+        return default
+
+    def convert_routes(routes):
+        routes_list = []
+        for route in routes:
+            r = {'route': str(netaddr.IPNetwork(route['cidr']).network),
+                 'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
+                 'gateway': get_ip(route['gateway'])}
+            routes_list.append(r)
+        return routes_list
+
+    network_info = []
+    for vif in network_model:
+        if not vif['network'] or not vif['network']['subnets']:
+            continue
+        network = vif['network']
+
+        # NOTE(jkoelker) The legacy format only supports one subnet per
+        #                network, so we only use the 1st one of each type
+        # NOTE(tr3buchet): o.O
+        v4_subnets = []
+        v6_subnets = []
+        for subnet in vif['network']['subnets']:
+            if subnet['version'] == 4:
+                v4_subnets.append(subnet)
+            else:
+                v6_subnets.append(subnet)
+
+        subnet_v4 = None
+        subnet_v6 = None
+
+        if v4_subnets:
+            subnet_v4 = v4_subnets[0]
+
+        if v6_subnets:
+            subnet_v6 = v6_subnets[0]
+
+        if not subnet_v4:
+            raise exception.NovaException(
+                message=_('v4 subnets are required for legacy nw_info'))
+
+        routes = convert_routes(subnet_v4['routes'])
+
+        should_create_bridge = get_meta(network, 'should_create_bridge',
+                                        False)
+        should_create_vlan = get_meta(network, 'should_create_vlan', False)
+        gateway = get_ip(subnet_v4['gateway'])
+        dhcp_server = get_meta(subnet_v4, 'dhcp_server', gateway)
+        network_dict = dict(bridge=network['bridge'],
+                            id=network['id'],
+                            cidr=subnet_v4['cidr'],
+                            cidr_v6=subnet_v6['cidr'] if subnet_v6 else None,
+                            vlan=get_meta(network, 'vlan'),
+                            injected=get_meta(network, 'injected', False),
+                            multi_host=get_meta(network, 'multi_host',
+                                                False),
+                            bridge_interface=get_meta(network,
+                                                      'bridge_interface'))
+        # NOTE(tr3buchet): the 'ips' bit here is tricky, we support a single
+        #                  subnet but we want all the IPs to be there
+        #                  so we use the v4_subnets[0] and its IPs are first
+        #                  so that eth0 will be from subnet_v4, the rest of
+        #                  the IPs will be aliased eth0:1 etc and the
+        #                  gateways from their subnets will not be used
+        info_dict = dict(label=network['label'],
+                         broadcast=str(subnet_v4.as_netaddr().broadcast),
+                         mac=vif['address'],
+                         vif_uuid=vif['id'],
+                         rxtx_cap=get_meta(network, 'rxtx_cap', 0),
+                         dns=[get_ip(ip) for ip in subnet_v4['dns']],
+                         ips=[fixed_ip_dict(ip, subnet)
+                              for subnet in v4_subnets
+                              for ip in subnet['ips']],
+                         should_create_bridge=should_create_bridge,
+                         should_create_vlan=should_create_vlan,
+                         dhcp_server=dhcp_server)
+        if routes:
+            info_dict['routes'] = routes
+
+        if gateway:
+            info_dict['gateway'] = gateway
+
+        if v6_subnets:
+            if subnet_v6['gateway']:
+                info_dict['gateway_v6'] = get_ip(subnet_v6['gateway'])
+            info_dict['ip6s'] = [fixed_ip_dict(ip, subnet_v6)
+                                 for ip in subnet_v6['ips']]
+
+        network_info.append((network_dict, info_dict))
+    return network_info
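For reference, the legacy structure this function emits is a list of (network_dict, info_dict) pairs, one per VIF. An illustrative single-VIF result, with every value made up:

    legacy_nw_info = [
        ({'bridge': 'br100',              # network_dict
          'id': 1,
          'cidr': '10.0.0.0/24',
          'cidr_v6': None,
          'vlan': None,
          'injected': False,
          'multi_host': False,
          'bridge_interface': 'eth0'},
         {'label': 'private',             # info_dict
          'broadcast': '10.0.0.255',
          'mac': 'aa:bb:cc:dd:ee:ff',
          'vif_uuid': 'vif-uuid',
          'rxtx_cap': 0,
          'dns': ['8.8.4.4'],
          'ips': [{'ip': '10.0.0.2',
                   'enabled': '1',
                   'netmask': '255.255.255.0',
                   'gateway': '10.0.0.1'}],
          'should_create_bridge': False,
          'should_create_vlan': False,
          'dhcp_server': '10.0.0.1'})]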
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
@@ -1822,11 +1822,16 @@ def instance_info_cache_update(context, instance_uuid, values,
     info_cache = instance_info_cache_get(context, instance_uuid,
                                          session=session)

-    values['updated_at'] = literal_column('updated_at')
-
     if info_cache:
         info_cache.update(values)
         info_cache.save(session=session)
+    else:
+        # NOTE(tr3buchet): just in case someone blows away an instance's
+        #                  cache entry
+        values['instance_id'] = instance_uuid
+        info_cache = \
+            instance_info_cache_create(context, values)

     return info_cache

diff --git a/nova/network/api.py b/nova/network/api.py
@@ -21,6 +21,7 @@ from nova.db import base
 from nova import exception
 from nova import flags
 from nova import log as logging
+from nova.network import model as network_model
 from nova import rpc
 from nova.rpc import common as rpc_common

@@ -150,9 +151,11 @@ class API(base.Base):
         args['host'] = instance['host']
         args['instance_type_id'] = instance['instance_type_id']

-        return rpc.call(context, FLAGS.network_topic,
-                        {'method': 'allocate_for_instance',
-                         'args': args})
+        nw_info = rpc.call(context, FLAGS.network_topic,
+                           {'method': 'allocate_for_instance',
+                            'args': args})
+
+        return network_model.NetworkInfo.hydrate(nw_info)

     def deallocate_for_instance(self, context, instance, **kwargs):
         """Deallocates all network structures related to instance."""
@@ -193,9 +196,10 @@ class API(base.Base):
                 'instance_type_id': instance['instance_type_id'],
                 'host': instance['host']}
         try:
-            return rpc.call(context, FLAGS.network_topic,
-                            {'method': 'get_instance_nw_info',
-                             'args': args})
+            nw_info = rpc.call(context, FLAGS.network_topic,
+                               {'method': 'get_instance_nw_info',
+                                'args': args})
+            return network_model.NetworkInfo.hydrate(nw_info)
         # FIXME(comstud) rpc calls raise RemoteError if the remote raises
         # an exception. In the case here, because of a race condition,
         # it's possible the remote will raise a InstanceNotFound when
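Both call sites follow the same pattern: the RPC boundary keeps carrying plain serializable data, and NetworkInfo.hydrate() re-wraps it only on the caller's side. A stub sketch of that boundary (fake_call and the trivial hydrate stand in for rpc.call and the model):

    def fetch_nw_info(fake_call, hydrate):
        # the wire format stays a plain list of dicts...
        raw = fake_call({'method': 'get_instance_nw_info'})
        # ...and only the caller re-wraps it into model objects
        return hydrate(raw)


    raw_vifs = [{'id': 'vif-uuid', 'address': 'aa:bb:cc:dd:ee:ff'}]
    assert fetch_nw_info(lambda msg: raw_vifs, list) == raw_vifs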
diff --git a/nova/network/manager.py b/nova/network/manager.py
@@ -673,6 +673,9 @@ class NetworkManager(manager.SchedulerDependentManager):
     # If True, this manager requires VIF to create VLAN tag.
     SHOULD_CREATE_VLAN = False

+    # if True, this manager leverages DHCP
+    DHCP = False
+
     timeout_fixed_ips = True

     def __init__(self, network_driver=None, *args, **kwargs):
@@ -686,14 +689,23 @@ class NetworkManager(manager.SchedulerDependentManager):
         self.floating_dns_manager = temp
         self.network_api = network_api.API()
         self.compute_api = compute_api.API()
+
+        # NOTE(tr3buchet): unless a manager subclassing NetworkManager has
+        #                  already imported ipam, import nova ipam here
+        if not hasattr(self, 'ipam'):
+            self._import_ipam_lib('nova.network.quantum.nova_ipam_lib')
+
         super(NetworkManager, self).__init__(service_name='network',
                                              *args, **kwargs)

+    def _import_ipam_lib(self, ipam_lib):
+        self.ipam = utils.import_object(ipam_lib).get_ipam_lib(self)
+
     @utils.synchronized('get_dhcp')
     def _get_dhcp_ip(self, context, network_ref, host=None):
         """Get the proper dhcp address to listen on."""
         # NOTE(vish): this is for compatibility
-        if not network_ref['multi_host']:
+        if not network_ref.get('multi_host'):
             return network_ref['gateway']

         if not host:
@@ -893,109 +905,41 @@ class NetworkManager(manager.SchedulerDependentManager):
         where network = dict containing pertinent data from a network db object
         and info = dict containing pertinent networking data
         """
-        # TODO(tr3buchet) should handle floating IPs as well?
-        try:
-            fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
-        except exception.FixedIpNotFoundForInstance:
-            LOG.warn(_('No fixed IPs for instance %s'), instance_id)
-            fixed_ips = []
-
         vifs = self.db.virtual_interface_get_by_instance(context, instance_id)
         instance_type = instance_types.get_instance_type(instance_type_id)
-        network_info = []
-        # a vif has an address, instance_id, and network_id
-        # it is also joined to the instance and network given by those IDs
+        networks = {}
         for vif in vifs:
-            network = self._get_network_by_id(context, vif['network_id'])
-            if network is None:
-                continue
-
-            # determine which of the instance's IPs belong to this network
-            network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if
-                           fixed_ip['network_id'] == network['id']]
-
-            # TODO(tr3buchet) eventually "enabled" should be determined
-            def ip_dict(ip):
-                return {
-                    'ip': ip,
-                    'netmask': network['netmask'],
-                    'enabled': '1'}
-
-            def ip6_dict():
-                return {
-                    'ip': ipv6.to_global(network['cidr_v6'],
-                                         vif['address'],
-                                         network['project_id']),
-                    'netmask': network['netmask_v6'],
-                    'enabled': '1'}
-
-            def rxtx_cap(instance_type, network):
-                try:
-                    rxtx_factor = instance_type['rxtx_factor']
-                    rxtx_base = network['rxtx_base']
-                    return rxtx_factor * rxtx_base
-                except (KeyError, TypeError):
-                    return 0
-
-            network_dict = {
-                'bridge': network['bridge'],
-                'id': network['id'],
-                'cidr': network['cidr'],
-                'cidr_v6': network['cidr_v6'],
-                'injected': network['injected'],
-                'vlan': network['vlan'],
-                'bridge_interface': network['bridge_interface'],
-                'multi_host': network['multi_host']}
-            if network['multi_host']:
-                dhcp_server = self._get_dhcp_ip(context, network, host)
-            else:
-                dhcp_server = self._get_dhcp_ip(context,
-                                                network,
-                                                network['host'])
-            info = {
-                'net_uuid': network['uuid'],
-                'label': network['label'],
-                'gateway': network['gateway'],
-                'dhcp_server': dhcp_server,
-                'broadcast': network['broadcast'],
-                'mac': vif['address'],
-                'vif_uuid': vif['uuid'],
-                'rxtx_cap': rxtx_cap(instance_type, network),
-                'dns': [],
-                'ips': [ip_dict(ip) for ip in network_IPs],
-                'should_create_bridge': self.SHOULD_CREATE_BRIDGE,
-                'should_create_vlan': self.SHOULD_CREATE_VLAN}
-
-            if network['cidr_v6']:
-                info['ip6s'] = [ip6_dict()]
-            # TODO(tr3buchet): handle ip6 routes here as well
-            if network['gateway_v6']:
-                info['gateway_v6'] = network['gateway_v6']
-            if network['dns1']:
-                info['dns'].append(network['dns1'])
-            if network['dns2']:
-                info['dns'].append(network['dns2'])
-
-            network_info.append((network_dict, info))
+            if vif.get('network_id') is not None:
+                network = self._get_network_by_id(context, vif['network_id'])
+                networks[vif['uuid']] = network

         # update instance network cache and return network_info
-        nw_info = self.build_network_info_model(context, vifs, fixed_ips,
-                                                instance_type)
+        nw_info = self.build_network_info_model(context, vifs, networks,
+                                                instance_type, host)
         self.db.instance_info_cache_update(context, instance_uuid,
                                            {'network_info': nw_info.as_cache()})
+        return nw_info

-        # TODO(tr3buchet): return model
-        return network_info
-
-    def build_network_info_model(self, context, vifs, fixed_ips,
-                                 instance_type):
-        """Returns a NetworkInfo object containing all network information
+    def build_network_info_model(self, context, vifs, networks,
+                                 instance_type, instance_host):
+        """Builds a NetworkInfo object containing all network information
         for an instance"""
         nw_info = network_model.NetworkInfo()
         for vif in vifs:
-            network = self._get_network_by_id(context, vif['network_id'])
-            subnets = self._get_subnets_from_network(network)
+            vif_dict = {'id': vif['uuid'],
+                        'address': vif['address']}
+
+            # handle case where vif doesn't have a network
+            if not networks.get(vif['uuid']):
+                vif = network_model.VIF(**vif_dict)
+                nw_info.append(vif)
+                continue
+
+            # get network dict for vif from args and build the subnets
+            network = networks[vif['uuid']]
+            subnets = self._get_subnets_from_network(context, network, vif,
+                                                     instance_host)

             # if rxtx_cap data are not set everywhere, set to none
             try:
@@ -1003,36 +947,37 @@ class NetworkManager(manager.SchedulerDependentManager):
             except (TypeError, KeyError):
                 rxtx_cap = None

-            # determine which of the instance's fixed IPs are on this network
-            network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if
-                           fixed_ip['network_id'] == network['id']]
+            # get fixed_ips
+            v4_IPs = self.ipam.get_v4_ips_by_interface(context,
+                                                       network['uuid'],
+                                                       vif['uuid'],
+                                                       network['project_id'])
+            v6_IPs = self.ipam.get_v6_ips_by_interface(context,
+                                                       network['uuid'],
+                                                       vif['uuid'],
+                                                       network['project_id'])

             # create model FixedIPs from these fixed_ips
             network_IPs = [network_model.FixedIP(address=ip_address)
-                           for ip_address in network_IPs]
+                           for ip_address in v4_IPs + v6_IPs]

             # get floating_ips for each fixed_ip
             # add them to the fixed ip
             for fixed_ip in network_IPs:
-                fipgbfa = self.db.floating_ip_get_by_fixed_address
-                floating_ips = fipgbfa(context, fixed_ip['address'])
+                if fixed_ip['version'] == 6:
+                    continue
+                gfipbfa = self.ipam.get_floating_ips_by_fixed_address
+                floating_ips = gfipbfa(context, fixed_ip['address'])
                 floating_ips = [network_model.IP(address=ip['address'],
                                                  type='floating')
                                 for ip in floating_ips]
                 for ip in floating_ips:
                     fixed_ip.add_floating_ip(ip)

-            # at this point nova networks can only have 2 subnets,
-            # one for v4 and one for v6, all ips will belong to the v4 subnet
-            # and the v6 subnet contains a single calculated v6 address
+            # add ips to subnets they belong to
             for subnet in subnets:
-                if subnet['version'] == 4:
-                    # since subnet currently has no IPs, easily add them all
-                    subnet['ips'] = network_IPs
-                else:
-                    v6_addr = ipv6.to_global(subnet['cidr'], vif['address'],
-                                             context.project_id)
-                    subnet.add_ip(network_model.FixedIP(address=v6_addr))
+                subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
+                                 if fixed_ip.is_in_subnet(subnet)]

             # convert network into a Network model object
             network = network_model.Network(**self._get_network_dict(network))
@@ -1040,85 +985,76 @@ class NetworkManager(manager.SchedulerDependentManager):
             # since network currently has no subnets, easily add them all
             network['subnets'] = subnets

-            # create the vif model and add to network_info
-            vif_dict = {'id': vif['uuid'],
-                        'address': vif['address'],
-                        'network': network}
+            # add network and rxtx cap to vif_dict
+            vif_dict['network'] = network
             if rxtx_cap:
                 vif_dict['rxtx_cap'] = rxtx_cap

+            # create the vif model and add to network_info
             vif = network_model.VIF(**vif_dict)
             nw_info.append(vif)

         return nw_info

     def _get_network_dict(self, network):
-        """Returns the dict representing necessary fields from network"""
+        """Returns the dict representing necessary and meta network fields"""
+        # get generic network fields
         network_dict = {'id': network['uuid'],
                         'bridge': network['bridge'],
-                        'label': network['label']}
+                        'label': network['label'],
+                        'tenant_id': network['project_id']}

-        if network['injected']:
+        # get extra information
+        if network.get('injected'):
             network_dict['injected'] = network['injected']
-        if network['vlan']:
-            network_dict['vlan'] = network['vlan']
-        if network['bridge_interface']:
-            network_dict['bridge_interface'] = network['bridge_interface']
-        if network['multi_host']:
-            network_dict['multi_host'] = network['multi_host']

         return network_dict

-    def _get_subnets_from_network(self, network):
+    def _get_subnets_from_network(self, context, network,
+                                  vif, instance_host=None):
         """Returns the 1 or 2 possible subnets for a nova network"""
+        # get subnets
+        ipam_subnets = self.ipam.get_subnets_by_net_id(context,
+                network['project_id'], network['uuid'], vif['uuid'])
+
         subnets = []
+        for subnet in ipam_subnets:
+            subnet_dict = {'cidr': subnet['cidr'],
+                           'gateway': network_model.IP(
+                                          address=subnet['gateway'],
+                                          type='gateway')}
+            # deal with dhcp
+            if self.DHCP:
+                if network.get('multi_host'):
+                    dhcp_server = self._get_dhcp_ip(context, network,
+                                                    instance_host)
+                else:
+                    dhcp_server = self._get_dhcp_ip(context, subnet)
+                subnet_dict['dhcp_server'] = dhcp_server

-        # get dns information from network
-        dns = []
-        if network['dns1']:
-            dns.append(network_model.IP(address=network['dns1'], type='dns'))
-        if network['dns2']:
-            dns.append(network_model.IP(address=network['dns2'], type='dns'))
+            subnet_object = network_model.Subnet(**subnet_dict)

-        # if network contains v4 subnet
-        if network['cidr']:
-            subnet = network_model.Subnet(cidr=network['cidr'],
-                                          gateway=network_model.IP(
-                                              address=network['gateway'],
-                                              type='gateway'))
-            # if either dns address is v4, add it to subnet
-            for ip in dns:
-                if ip['version'] == 4:
-                    subnet.add_dns(ip)
+            # add dns info
+            for k in ['dns1', 'dns2']:
+                if subnet.get(k):
+                    subnet_object.add_dns(
+                        network_model.IP(address=subnet[k], type='dns'))

-            # TODO(tr3buchet): add routes to subnet once it makes sense
-            # create default route from gateway
-            #route = network_model.Route(cidr=network['cidr'],
-            #                            gateway=network['gateway'])
-            #subnet.add_route(route)
-
-            # store subnet for return
-            subnets.append(subnet)
-
-        # if network contains a v6 subnet
-        if network['cidr_v6']:
-            subnet = network_model.Subnet(cidr=network['cidr_v6'],
-                                          gateway=network_model.IP(
-                                              address=network['gateway_v6'],
-                                              type='gateway'))
-            # if either dns address is v6, add it to subnet
-            for entry in dns:
-                if entry['version'] == 6:
-                    subnet.add_dns(entry)
-
-            # TODO(tr3buchet): add routes to subnet once it makes sense
-            # create default route from gateway
-            #route = network_model.Route(cidr=network['cidr_v6'],
-            #                            gateway=network['gateway_v6'])
-            #subnet.add_route(route)
-
-            # store subnet for return
-            subnets.append(subnet)
+            # get the routes for this subnet
+            # NOTE(tr3buchet): default route comes from subnet gateway
+            if subnet.get('id'):
+                routes = self.ipam.get_routes_by_ip_block(context,
+                        subnet['id'], network['project_id'])
+                for route in routes:
+                    cidr = netaddr.IPNetwork('%s/%s' % (route['destination'],
+                                                        route['netmask'])).cidr
+                    subnet_object.add_route(
+                        network_model.Route(cidr=str(cidr),
+                                            gateway=network_model.IP(
+                                                address=route['gateway'],
+                                                type='gateway')))
+
+            # store subnet for return
+            subnets.append(subnet_object)

         return subnets
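Putting the pieces of build_network_info_model() together, the objects nest as sketched below. This assumes nova.network.model is importable; the constructor keywords are the ones used in the hunks above, while the literal values are invented:

    from nova.network import model as network_model

    subnet = network_model.Subnet(
        cidr='10.0.0.0/24',
        gateway=network_model.IP(address='10.0.0.1', type='gateway'))
    subnet.add_dns(network_model.IP(address='8.8.4.4', type='dns'))
    subnet['ips'] = [network_model.FixedIP(address='10.0.0.2')]

    network = network_model.Network(id='net-uuid', bridge='br100',
                                    label='private',
                                    tenant_id='project-uuid')
    network['subnets'] = [subnet]

    vif = network_model.VIF(id='vif-uuid', address='aa:bb:cc:dd:ee:ff',
                            network=network)

    nw_info = network_model.NetworkInfo()
    nw_info.append(vif)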
@@ -1295,6 +1231,7 @@ class NetworkManager(manager.SchedulerDependentManager):
                         bridge_interface, dns1=None, dns2=None, **kwargs):
         """Create networks based on parameters."""
         # NOTE(jkoelker): these are dummy values to make sure iter works
+        # TODO(tr3buchet): disallow carving up networks
         fixed_net_v4 = netaddr.IPNetwork('0/32')
         fixed_net_v6 = netaddr.IPNetwork('::0/128')
         subnets_v4 = []
@@ -1302,17 +1239,24 @@ class NetworkManager(manager.SchedulerDependentManager):

         subnet_bits = int(math.ceil(math.log(network_size, 2)))

-        if cidr_v6:
-            fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
-            prefixlen_v6 = 128 - subnet_bits
-            subnets_v6 = fixed_net_v6.subnet(prefixlen_v6, count=num_networks)
-
-        if cidr:
-            fixed_net_v4 = netaddr.IPNetwork(cidr)
-            prefixlen_v4 = 32 - subnet_bits
-            subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
-                                                  count=num_networks))
+        if kwargs.get('ipam'):
+            if cidr_v6:
+                subnets_v6 = [netaddr.IPNetwork(cidr_v6)]
+            if cidr:
+                subnets_v4 = [netaddr.IPNetwork(cidr)]
+        else:
+            if cidr_v6:
+                fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
+                prefixlen_v6 = 128 - subnet_bits
+                subnets_v6 = fixed_net_v6.subnet(prefixlen_v6,
+                                                 count=num_networks)
+            if cidr:
+                fixed_net_v4 = netaddr.IPNetwork(cidr)
+                prefixlen_v4 = 32 - subnet_bits
+                subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
+                                                      count=num_networks))

         # NOTE(jkoelker): This replaces the _validate_cidrs call and
         #                 prevents looping multiple times
         try:
@@ -1608,6 +1552,7 @@ class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
     """

     SHOULD_CREATE_BRIDGE = True
+    DHCP = True

     def init_host(self):
         """Do any initialization that needs to be run if this is a
@@ -1641,6 +1586,22 @@ class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
         return NetworkManager._get_network_by_id(self, context.elevated(),
                                                  network_id)

+    def _get_network_dict(self, network):
+        """Returns the dict representing necessary and meta network fields"""
+
+        # get generic network fields
+        network_dict = super(FlatDHCPManager, self)._get_network_dict(network)
+
+        # get flat dhcp specific fields
+        if self.SHOULD_CREATE_BRIDGE:
+            network_dict['should_create_bridge'] = self.SHOULD_CREATE_BRIDGE
+        if network.get('bridge_interface'):
+            network_dict['bridge_interface'] = network['bridge_interface']
+        if network.get('multi_host'):
+            network_dict['multi_host'] = network['multi_host']
+
+        return network_dict
+

 class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
     """Vlan network with dhcp.
@@ -1659,6 +1620,7 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):

     SHOULD_CREATE_BRIDGE = True
     SHOULD_CREATE_VLAN = True
+    DHCP = True

     def init_host(self):
         """Do any initialization that needs to be run if this is a
@@ -1772,6 +1734,23 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
         return self.db.network_get_all_by_uuids(context, network_uuids,
                                                 context.project_id)

+    def _get_network_dict(self, network):
+        """Returns the dict representing necessary and meta network fields"""
+
+        # get generic network fields
+        network_dict = super(VlanManager, self)._get_network_dict(network)
+
+        # get vlan specific network fields
+        if self.SHOULD_CREATE_BRIDGE:
+            network_dict['should_create_bridge'] = self.SHOULD_CREATE_BRIDGE
+        if self.SHOULD_CREATE_VLAN:
+            network_dict['should_create_vlan'] = self.SHOULD_CREATE_VLAN
+        for k in ['vlan', 'bridge_interface', 'multi_host']:
+            if network.get(k):
+                network_dict[k] = network[k]
+
+        return network_dict
+
     @property
     def _bottom_reserved_ips(self):
         """Number of reserved ips at the bottom of the range."""
|
|||||||
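The two `_get_network_dict()` overrides added above both delegate to `super()` and then layer driver-specific keys on top of the generic fields. A hedged, self-contained sketch of that layering pattern (toy classes standing in for the nova managers):

    # Illustrative only; Base/FlatDHCP are stand-ins, not nova's classes.
    class Base(object):
        def _get_network_dict(self, network):
            # generic fields every manager shares
            return {'bridge': network.get('bridge')}

    class FlatDHCP(Base):
        SHOULD_CREATE_BRIDGE = True

        def _get_network_dict(self, network):
            d = super(FlatDHCP, self)._get_network_dict(network)
            # driver-specific fields added on top
            if self.SHOULD_CREATE_BRIDGE:
                d['should_create_bridge'] = True
            return d

    print(FlatDHCP()._get_network_dict({'bridge': 'br100'}))
    # -> {'bridge': 'br100', 'should_create_bridge': True}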
@@ -54,6 +54,13 @@ class IP(Model):
     def __eq__(self, other):
         return self['address'] == other['address']
 
+    def is_in_subnet(self, subnet):
+        if self['address'] and subnet['cidr']:
+            return netaddr.IPAddress(self['address']) in \
+                   netaddr.IPNetwork(subnet['cidr'])
+        else:
+            return False
+
     @classmethod
     def hydrate(cls, ip):
         if ip:
@@ -136,6 +143,10 @@ class Subnet(Model):
         if ip not in self['ips']:
             self['ips'].append(ip)
 
+    def as_netaddr(self):
+        """Convenience function to get cidr as a netaddr object"""
+        return netaddr.IPNetwork(self['cidr'])
+
     @classmethod
     def hydrate(cls, subnet):
         subnet = Subnet(**subnet)
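The membership test behind the new `is_in_subnet()` reduces to netaddr containment. A standalone illustration, with plain dicts standing in for the model classes:

    import netaddr

    ip = {'address': '192.168.0.100'}     # stands in for an IP model
    subnet = {'cidr': '192.168.0.0/24'}   # stands in for a Subnet model

    # IPAddress-in-IPNetwork is netaddr's containment check.
    print(netaddr.IPAddress(ip['address'])
          in netaddr.IPNetwork(subnet['cidr']))  # -> True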
@@ -70,6 +70,8 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
        Support for these capabilities is targeted for future releases.
     """
 
+    DHCP = FLAGS.quantum_use_dhcp
+
     def __init__(self, q_conn=None, ipam_lib=None, *args, **kwargs):
         """Initialize two key libraries, the connection to a
        Quantum service, and the library for implementing IPAM.
@@ -83,7 +85,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
 
         if not ipam_lib:
             ipam_lib = FLAGS.quantum_ipam_lib
-        self.ipam = utils.import_object(ipam_lib).get_ipam_lib(self)
+        self._import_ipam_lib(ipam_lib)
 
         super(QuantumManager, self).__init__(*args, **kwargs)
 
@@ -206,6 +208,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
 
         ipam_tenant_id = kwargs.get("project_id", None)
         priority = kwargs.get("priority", 0)
+        # NOTE(tr3buchet): this call creates a nova network in the nova db
         self.ipam.create_subnet(context, label, ipam_tenant_id, quantum_net_id,
                                 priority, cidr, gateway, gateway_v6,
                                 cidr_v6, dns1, dns2)
@@ -283,7 +286,6 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
         host = kwargs.pop('host')
         project_id = kwargs.pop('project_id')
         LOG.debug(_("network allocations for instance %s"), project_id)
-
         requested_networks = kwargs.get('requested_networks')
 
         if requested_networks:
@@ -294,7 +296,8 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
                                                         project_id)
 
         # Create a port via quantum and attach the vif
-        for (quantum_net_id, project_id) in net_proj_pairs:
+        for (quantum_net_id, net_tenant_id) in net_proj_pairs:
+            net_tenant_id = net_tenant_id or FLAGS.quantum_default_tenant_id
             # FIXME(danwent): We'd like to have the manager be
             # completely decoupled from the nova networks table.
             # However, other parts of nova sometimes go behind our
@@ -313,7 +316,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
             if network_ref is None:
                 network_ref = {}
             network_ref = {"uuid": quantum_net_id,
-                           "project_id": project_id,
+                           "project_id": net_tenant_id,
                            # NOTE(bgh): We need to document this somewhere but since
                            # we don't know the priority of any networks we get from
                            # quantum we just give them a priority of 0. If its
@@ -328,6 +331,8 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
                            "id": 'NULL',
                            "label": "quantum-net-%s" % quantum_net_id}
 
+            # TODO(tr3buchet): broken. Virtual interfaces require an integer
+            #                  network ID and it is not nullable
             vif_rec = self.add_virtual_interface(context,
                                                  instance_id,
                                                  network_ref['id'])
@@ -337,16 +342,15 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
             instance_type = instance_types.get_instance_type(instance_type_id)
             rxtx_factor = instance_type['rxtx_factor']
             nova_id = self._get_nova_id(instance)
-            q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
             # Tell the ipam library to allocate an IP
             ip = self.ipam.allocate_fixed_ip(context, project_id,
-                                             quantum_net_id, vif_rec)
+                                        quantum_net_id, net_tenant_id, vif_rec)
             pairs = []
             # Set up port security if enabled
             if FLAGS.quantum_use_port_security:
                 pairs = [{'mac_address': vif_rec['address'],
                           'ip_address': ip}]
-            self.q_conn.create_and_attach_port(q_tenant_id, quantum_net_id,
+            self.q_conn.create_and_attach_port(net_tenant_id, quantum_net_id,
                                                vif_rec['uuid'],
                                                vm_id=instance['uuid'],
                                                rxtx_factor=rxtx_factor,
@@ -355,7 +359,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
             # Set up/start the dhcp server for this network if necessary
             if FLAGS.quantum_use_dhcp:
                 self.enable_dhcp(context, quantum_net_id, network_ref,
-                                 vif_rec, project_id)
+                                 vif_rec, net_tenant_id)
         return self.get_instance_nw_info(context, instance_id,
                                          instance['uuid'],
                                          instance_type_id, host)
@@ -370,11 +374,12 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
         ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
                 quantum_net_id, vif_rec['uuid'], project_id)
         # Figure out what subnets correspond to this network
-        v4_subnet, v6_subnet = self.ipam.get_subnets_by_net_id(context,
+        subnets = self.ipam.get_subnets_by_net_id(context,
                 ipam_tenant_id, quantum_net_id, vif_rec['uuid'])
 
         # Set up (or find) the dhcp server for each of the subnets
         # returned above (both v4 and v6).
-        for subnet in [v4_subnet, v6_subnet]:
+        for subnet in subnets:
             if subnet is None or subnet['cidr'] is None:
                 continue
             # Fill in some of the network fields that we would have
@@ -382,8 +387,10 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
             # passed to the linux_net functions).
             network_ref['cidr'] = subnet['cidr']
             n = IPNetwork(subnet['cidr'])
+            # NOTE(tr3buchet): should probably not always assume first+1
             network_ref['dhcp_server'] = IPAddress(n.first + 1)
             # TODO(bgh): Melange should probably track dhcp_start
+            # TODO(tr3buchet): melange should store dhcp_server as well
             if not 'dhcp_start' in network_ref or \
                         network_ref['dhcp_start'] is None:
                 network_ref['dhcp_start'] = IPAddress(n.first + 2)
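The dhcp_server/dhcp_start derivation above relies on netaddr's integer view of a network: `.first` is the integer value of the network address, so first + 1 is the first host address and first + 2 the one after it. A quick sketch of the arithmetic (the first+1 assumption is exactly the one the new NOTE flags):

    from netaddr import IPAddress, IPNetwork

    n = IPNetwork('10.0.0.0/24')
    print(IPAddress(n.first + 1))  # -> 10.0.0.1, assumed dhcp_server
    print(IPAddress(n.first + 2))  # -> 10.0.0.2, assumed dhcp_start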
@@ -457,81 +464,35 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
            Ideally this 'interface' will be more formally defined
            in the future.
         """
-        network_info = []
-        instance = db.instance_get(context, instance_id)
-        project_id = instance.project_id
-
         admin_context = context.elevated()
-        vifs = db.virtual_interface_get_by_instance(admin_context,
-                                                    instance_id)
+        project_id = context.project_id
+        vifs = db.virtual_interface_get_by_instance(context, instance_id)
+        instance_type = instance_types.get_instance_type(instance_type_id)
+
+        net_tenant_dict = dict((net_id, tenant_id)
+                               for (net_id, tenant_id)
+                               in self.ipam.get_project_and_global_net_ids(
+                                                         context, project_id))
+        networks = {}
         for vif in vifs:
-            net = db.network_get(admin_context, vif['network_id'])
-            net_id = net['uuid']
+            if vif.get('network_id') is not None:
+                network = db.network_get(admin_context, vif['network_id'])
+                net_tenant_id = net_tenant_dict[network['uuid']]
+                network = {'id': network['id'],
+                           'uuid': network['uuid'],
+                           'bridge': 'ovs_flag',
+                           'label': self.q_conn.get_network_name(net_tenant_id,
+                                                         network['uuid']),
+                           'project_id': net_tenant_id}
+                networks[vif['uuid']] = network
 
-            if not net_id:
-                # TODO(bgh): We need to figure out a way to tell if we
-                # should actually be raising this exception or not.
-                # In the case that a VM spawn failed it may not have
-                # attached the vif and raising the exception here
-                # prevents deletion of the VM. In that case we should
-                # probably just log, continue, and move on.
-                raise Exception(_("No network for for virtual interface %s") %
-                                vif['uuid'])
-
-            ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
-                net_id, vif['uuid'], project_id)
-            v4_subnet, v6_subnet = \
-                    self.ipam.get_subnets_by_net_id(context,
-                            ipam_tenant_id, net_id, vif['uuid'])
-
-            v4_ips = self.ipam.get_v4_ips_by_interface(context,
-                    net_id, vif['uuid'],
-                    project_id=ipam_tenant_id)
-            v6_ips = self.ipam.get_v6_ips_by_interface(context,
-                    net_id, vif['uuid'],
-                    project_id=ipam_tenant_id)
-
-            def ip_dict(ip, subnet):
-                return {
-                    "ip": ip,
-                    "netmask": subnet["netmask"],
-                    "enabled": "1"}
-
-            network_dict = {
-                'cidr': v4_subnet['cidr'],
-                'injected': True,
-                'bridge': net['bridge'],
-                'multi_host': False}
-
-            q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
-            info = {
-                'net_uuid': net_id,
-                'label': self.q_conn.get_network_name(q_tenant_id, net_id),
-                'gateway': v4_subnet['gateway'],
-                'dhcp_server': v4_subnet['gateway'],
-                'broadcast': v4_subnet['broadcast'],
-                'mac': vif['address'],
-                'vif_uuid': vif['uuid'],
-                'dns': [],
-                'ips': [ip_dict(ip, v4_subnet) for ip in v4_ips]}
-
-            if v6_subnet:
-                if v6_subnet['cidr']:
-                    network_dict['cidr_v6'] = v6_subnet['cidr']
-                    info['ip6s'] = [ip_dict(ip, v6_subnet) for ip in v6_ips]
-
-                if v6_subnet['gateway']:
-                    info['gateway_v6'] = v6_subnet['gateway']
-
-            dns_dict = {}
-            for s in [v4_subnet, v6_subnet]:
-                for k in ['dns1', 'dns2']:
-                    if s and s[k]:
-                        dns_dict[s[k]] = None
-            info['dns'] = [d for d in dns_dict.keys()]
-
-            network_info.append((network_dict, info))
-        return network_info
+        # update instance network cache and return network_info
+        nw_info = self.build_network_info_model(context, vifs, networks,
+                                                instance_type, host)
+        db.instance_info_cache_update(context, instance_uuid,
+                                      {'network_info': nw_info.as_cache()})
+        return nw_info
 
     def deallocate_for_instance(self, context, **kwargs):
         """Called when a VM is terminated. Loop through each virtual
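The rewritten get_instance_nw_info() now builds the network model, persists it through the instance info cache, and returns it, so later reads can hydrate from the cache instead of re-querying the network service. A hedged sketch of that serialize/hydrate round-trip; the class and its JSON encoding here are illustrative stand-ins, not nova's actual model:

    import json

    class FakeNetworkInfo(list):
        def as_cache(self):
            # the real model serializes its VIFs; json stands in here
            return json.dumps([dict(vif) for vif in self])

        @classmethod
        def hydrate(cls, cached):
            return cls(json.loads(cached))

    nw_info = FakeNetworkInfo([{'id': 'vif-1',
                                'address': 'aa:bb:cc:dd:ee:ff'}])
    cached = nw_info.as_cache()           # what goes into the info cache
    assert FakeNetworkInfo.hydrate(cached) == nw_info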
@@ -552,31 +513,48 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
             network_ref = db.network_get(admin_context, vif_ref['network_id'])
             net_id = network_ref['uuid']
 
-            port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
-                                                         net_id, interface_id)
-            if not port_id:
-                q_tenant_id = FLAGS.quantum_default_tenant_id
-                port_id = self.q_conn.get_port_by_attachment(
-                    q_tenant_id, net_id, interface_id)
+            # port deallocate block
+            try:
+                port_id = None
+                port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
+                                                        net_id, interface_id)
+                if not port_id:
+                    q_tenant_id = FLAGS.quantum_default_tenant_id
+                    port_id = self.q_conn.get_port_by_attachment(
+                            q_tenant_id, net_id, interface_id)
 
-            if not port_id:
-                LOG.error("Unable to find port with attachment: %s" %
-                          (interface_id))
-            else:
-                self.q_conn.detach_and_delete_port(q_tenant_id,
-                                                   net_id, port_id)
+                if not port_id:
+                    LOG.error("Unable to find port with attachment: %s" %
+                              (interface_id))
+                else:
+                    self.q_conn.detach_and_delete_port(q_tenant_id,
+                                                       net_id, port_id)
+            except:
+                # except anything so the rest of deallocate can succeed
+                msg = _('port deallocation failed for instance: '
+                        '|%(instance_id)s|, port_id: |%(port_id)s|')
+                LOG.critical(msg % locals())
 
-            ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
-                net_id, vif_ref['uuid'], project_id)
-
-            self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
-                                            net_id, vif_ref)
-
-            # If DHCP is enabled on this network then we need to update the
-            # leases and restart the server.
-            if FLAGS.quantum_use_dhcp:
-                self.update_dhcp(context, ipam_tenant_id, network_ref, vif_ref,
-                                 project_id)
+            # ipam deallocation block
+            try:
+                ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
+                        net_id, vif_ref['uuid'], project_id)
+
+                self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
+                                                net_id, vif_ref)
+
+                # If DHCP is enabled on this network then we need to update the
+                # leases and restart the server.
+                if FLAGS.quantum_use_dhcp:
+                    self.update_dhcp(context, ipam_tenant_id, network_ref,
+                                     vif_ref, project_id)
+            except:
+                # except anything so the rest of deallocate can succeed
+                vif_uuid = vif_ref['uuid']
+                msg = _('ipam deallocation failed for instance: '
+                        '|%(instance_id)s|, vif_uuid: |%(vif_uuid)s|')
+                LOG.critical(msg % locals())
+
         try:
             db.virtual_interface_delete_by_instance(admin_context,
                                                     instance_id)
@@ -586,12 +564,13 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
 
     # TODO(bgh): At some point we should consider merging enable_dhcp() and
     # update_dhcp()
+    # TODO(tr3buchet): agree, i'm curious why they differ even now..
     def update_dhcp(self, context, ipam_tenant_id, network_ref, vif_ref,
                     project_id):
         # Figure out what subnet corresponds to this network/vif
-        v4_subnet, v6_subnet = self.ipam.get_subnets_by_net_id(context,
+        subnets = self.ipam.get_subnets_by_net_id(context,
                 ipam_tenant_id, network_ref['uuid'], vif_ref['uuid'])
-        for subnet in [v4_subnet, v6_subnet]:
+        for subnet in subnets:
             if subnet is None:
                 continue
             # Fill in some of the network fields that we would have
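Both new deallocate blocks share one idiom: a message templated with %(name)s placeholders, filled from locals(), inside a catch-all except so one failing step cannot abort the rest of teardown. A runnable distillation of that pattern:

    import logging

    logging.basicConfig()
    LOG = logging.getLogger(__name__)

    def deallocate_port(instance_id, port_id):
        try:
            raise RuntimeError('backend unavailable')  # stand-in failure
        except Exception:
            # locals() is a dict, so %-formatting resolves the names above
            msg = ('port deallocation failed for instance: '
                   '|%(instance_id)s|, port_id: |%(port_id)s|')
            LOG.critical(msg % locals())

    deallocate_port('inst-1', 'port-9')  # logs and returns; never raises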
@@ -22,6 +22,7 @@ import json
 
 from nova.common import cfg
 from nova import flags
+from nova import log as logging
 
 
 melange_opts = [
@@ -35,6 +36,7 @@ melange_opts = [
 
 FLAGS = flags.FLAGS
 FLAGS.add_options(melange_opts)
+LOG = logging.getLogger(__name__)
 
 json_content_type = {'Content-type': "application/json"}
 
@@ -89,9 +91,14 @@ class MelangeConnection(object):
             raise Exception(_("Unable to connect to "
                               "server. Got error: %s" % e))
 
-    def allocate_ip(self, network_id, vif_id,
+    def allocate_ip(self, network_id, network_tenant_id, vif_id,
                     project_id=None, mac_address=None):
-        tenant_scope = "/tenants/%s" % project_id if project_id else ""
+        LOG.info(_("allocate IP on network |%(network_id)s| "
+                   "belonging to |%(network_tenant_id)s| "
+                   "to this vif |%(vif_id)s| with mac |%(mac_address)s| "
+                   "belonging to |%(project_id)s| ") % locals())
+        tenant_scope = "/tenants/%s" % network_tenant_id if network_tenant_id \
+                                                         else ""
         request_body = (json.dumps(dict(network=dict(mac_address=mac_address,
                                                      tenant_id=project_id)))
                         if mac_address else None)
@@ -128,6 +135,15 @@ class MelangeConnection(object):
         response = self.get(url, headers=json_content_type)
         return json.loads(response)
 
+    def get_routes(self, block_id, project_id=None):
+        tenant_scope = "/tenants/%s" % project_id if project_id else ""
+
+        url = "ipam%(tenant_scope)s/ip_blocks/%(block_id)s/ip_routes" % \
+              locals()
+
+        response = self.get(url, headers=json_content_type)
+        return json.loads(response)['ip_routes']
+
     def get_allocated_ips(self, network_id, vif_id, project_id=None):
         tenant_scope = "/tenants/%s" % project_id if project_id else ""
 
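get_routes() builds its Melange URL the same way the other connection methods do: a %-template keyed by local variable names, with the tenant scope collapsing to an empty string for the default tenant. A sketch with made-up IDs:

    block_id = 'block-123'   # hypothetical Melange IP block id
    project_id = None        # default tenant -> no scope segment

    tenant_scope = "/tenants/%s" % project_id if project_id else ""
    url = "ipam%(tenant_scope)s/ip_blocks/%(block_id)s/ip_routes" % locals()
    print(url)  # -> ipam/ip_blocks/block-123/ip_routes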
@@ -79,12 +79,12 @@ class QuantumMelangeIPAMLib(object):
         admin_context = context.elevated()
         network = db.network_create_safe(admin_context, net)
 
-    def allocate_fixed_ip(self, context, project_id, quantum_net_id, vif_ref):
+    def allocate_fixed_ip(self, context, project_id, quantum_net_id,
+                          network_tenant_id, vif_ref):
         """Pass call to allocate fixed IP on to Melange"""
-        tenant_id = project_id or FLAGS.quantum_default_tenant_id
-        ip = self.m_conn.allocate_ip(quantum_net_id,
-                                     vif_ref['uuid'], project_id=tenant_id,
-                                     mac_address=vif_ref['address'])
+        ip = self.m_conn.allocate_ip(quantum_net_id, network_tenant_id,
+                                     vif_ref['uuid'], project_id,
+                                     vif_ref['address'])
         return ip[0]['address']
 
     def get_network_id_by_cidr(self, context, cidr, project_id):
@@ -180,14 +180,13 @@ class QuantumMelangeIPAMLib(object):
         """Returns information about the IPv4 and IPv6 subnets
        associated with a Quantum Network UUID.
         """
-        subnet_v4 = None
-        subnet_v6 = None
+        subnets = []
         ips = self.m_conn.get_allocated_ips(net_id, vif_id, tenant_id)
 
         for ip_address in ips:
             block = ip_address['ip_block']
-            print block
-            subnet = {'network_id': block['id'],
+            subnet = {'network_id': block['network_id'],
+                      'id': block['id'],
                       'cidr': block['cidr'],
                       'gateway': block['gateway'],
                       'broadcast': block['broadcast'],
@@ -195,10 +194,15 @@ class QuantumMelangeIPAMLib(object):
                       'dns1': block['dns1'],
                       'dns2': block['dns2']}
             if ip_address['version'] == 4:
-                subnet_v4 = subnet
+                subnet['version'] = 4
             else:
-                subnet_v6 = subnet
-        return (subnet_v4, subnet_v6)
+                subnet['version'] = 6
+            subnets.append(subnet)
+        return subnets
+
+    def get_routes_by_ip_block(self, context, block_id, project_id):
+        """Returns the list of routes for the IP block"""
+        return self.m_conn.get_routes(block_id, project_id)
+
     def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
         """Returns a list of IPv4 address strings associated with
@@ -255,3 +259,7 @@ class QuantumMelangeIPAMLib(object):
         """
         tenant_id = project_id or FLAGS.quantum_default_tenant_id
         return self.m_conn.create_vif(vif_id, instance_id, tenant_id)
+
+    def get_floating_ips_by_fixed_address(self, context, fixed_address):
+        """This call is not supported in quantum yet"""
+        return []
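The change from a fixed (v4_subnet, v6_subnet) tuple to a list of version-tagged dicts lets callers iterate uniformly over however many subnets exist, as update_dhcp() and enable_dhcp() now do. A small illustration of the new shape, with sample values:

    # Illustrative return value of get_subnets_by_net_id() after this change.
    subnets = [{'cidr': '10.0.0.0/24', 'version': 4},
               {'cidr': 'fd00::/64', 'version': 6}]

    for subnet in subnets:
        if subnet is None or subnet['cidr'] is None:
            continue  # same guard the managers use
        print(subnet['version'], subnet['cidr'])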
@@ -62,7 +62,14 @@ class QuantumNovaIPAMLib(object):
         networks = manager.FlatManager.create_networks(self.net_manager,
                     admin_context, label, cidr,
                     False, 1, subnet_size, cidr_v6, gateway,
-                    gateway_v6, quantum_net_id, None, dns1, dns2)
+                    gateway_v6, quantum_net_id, None, dns1, dns2,
+                    ipam=True)
+        #TODO(tr3buchet): refactor passing in the ipam key so that
+        #                 it's no longer required. The reason it exists now is
+        #                 because nova insists on carving up IP blocks. What
+        #                 ends up happening is we create a v4 and an
+        #                 identically sized v6 block. The reason the quantum
+        #                 tests passed previously is nothing prevented an
+        #                 incorrect v6 address from being assigned to the
+        #                 wrong subnet
 
         if len(networks) != 1:
             raise Exception(_("Error creating network entry"))
@@ -122,7 +129,8 @@ class QuantumNovaIPAMLib(object):
             id_priority_map[net_id] = n['priority']
         return sorted(net_list, key=lambda x: id_priority_map[x[0]])
 
-    def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec):
+    def allocate_fixed_ip(self, context, tenant_id, quantum_net_id,
+                          network_tenant_id, vif_rec):
         """Allocates a single fixed IPv4 address for a virtual interface."""
         admin_context = context.elevated()
         network = db.network_get_by_uuid(admin_context, quantum_net_id)
@@ -147,31 +155,41 @@ class QuantumNovaIPAMLib(object):
        associated with a Quantum Network UUID.
         """
         n = db.network_get_by_uuid(context.elevated(), net_id)
-        subnet_data_v4 = {
+        subnet_v4 = {
             'network_id': n['uuid'],
             'cidr': n['cidr'],
             'gateway': n['gateway'],
             'broadcast': n['broadcast'],
             'netmask': n['netmask'],
+            'version': 4,
             'dns1': n['dns1'],
             'dns2': n['dns2']}
-        subnet_data_v6 = {
+        #TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4.
+        #                 this is probably bad as there is no way to add v6
+        #                 dns to nova
+        subnet_v6 = {
             'network_id': n['uuid'],
             'cidr': n['cidr_v6'],
             'gateway': n['gateway_v6'],
             'broadcast': None,
-            'netmask': None,
+            'netmask': n['netmask_v6'],
+            'version': 6,
             'dns1': None,
             'dns2': None}
-        return (subnet_data_v4, subnet_data_v6)
+        return [subnet_v4, subnet_v6]
+
+    def get_routes_by_ip_block(self, context, block_id, project_id):
+        """Returns the list of routes for the IP block"""
+        return []
 
     def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
         """Returns a list of IPv4 address strings associated with
         the specified virtual interface, based on the fixed_ips table.
         """
+        # TODO(tr3buchet): link fixed_ips to vif by uuid so only 1 db call
         vif_rec = db.virtual_interface_get_by_uuid(context, vif_id)
         fixed_ips = db.fixed_ips_by_virtual_interface(context,
                                                       vif_rec['id'])
         return [fixed_ip['address'] for fixed_ip in fixed_ips]
 
     def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id):
@@ -228,3 +246,6 @@ class QuantumNovaIPAMLib(object):
                                           ip['virtual_interface_id'])
             allocated_ips.append((ip['address'], vif['uuid']))
         return allocated_ips
+
+    def get_floating_ips_by_fixed_address(self, context, fixed_address):
+        return db.floating_ip_get_by_fixed_address(context, fixed_address)
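The ipam=True path added to create_networks() (see the NetworkManager hunk earlier) skips nova's block-carving when an external IPAM owns the range: the given CIDR is taken as a single subnet instead of being sliced into num_networks equal pieces. A sketch of the difference, assuming a /16 carved into 256-address networks:

    import netaddr

    cidr, num_networks = '192.168.0.0/16', 4
    subnet_bits = 8  # math.ceil(log2(network_size)) for network_size=256

    with_ipam = [netaddr.IPNetwork(cidr)]  # one untouched block
    without_ipam = list(netaddr.IPNetwork(cidr).subnet(32 - subnet_bits,
                                                       count=num_networks))
    print(with_ipam)     # [IPNetwork('192.168.0.0/16')]
    print(without_ipam)  # four /24 slices starting at 192.168.0.0/24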
@@ -91,6 +91,10 @@ class CloudTestCase(test.TestCase):
         self.flags(connection_type='fake',
                    stub_network=True)
 
+        def dumb(*args, **kwargs):
+            pass
+
+        self.stubs.Set(utils, 'usage_from_instance', dumb)
         # set up our cloud
         self.cloud = cloud.CloudController()
 
@@ -198,20 +202,15 @@ class CloudTestCase(test.TestCase):
                                   {'host': self.network.host})
         project_id = self.context.project_id
         type_id = inst['instance_type_id']
-        ips = self.network.allocate_for_instance(self.context,
+        nw_info = self.network.allocate_for_instance(self.context,
                                                  instance_id=inst['id'],
                                                  instance_uuid='',
                                                  host=inst['host'],
                                                  vpn=None,
                                                  instance_type_id=type_id,
                                                  project_id=project_id)
-        # TODO(jkoelker) Make this mas bueno
-        self.assertTrue(ips)
-        self.assertTrue('ips' in ips[0][1])
-        self.assertTrue(ips[0][1]['ips'])
-        self.assertTrue('ip' in ips[0][1]['ips'][0])
 
-        fixed = ips[0][1]['ips'][0]['ip']
+        fixed_ips = nw_info.fixed_ips()
 
         ec2_id = ec2utils.id_to_ec2_id(inst['id'])
         self.cloud.associate_address(self.context,
@@ -221,7 +220,7 @@ class CloudTestCase(test.TestCase):
                                      public_ip=address)
         self.cloud.release_address(self.context,
                                    public_ip=address)
-        self.network.deallocate_fixed_ip(self.context, fixed)
+        self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'])
         db.instance_destroy(self.context, inst['id'])
         db.floating_ip_destroy(self.context, address)
 
@@ -1229,6 +1228,11 @@ class CloudTestCase(test.TestCase):
 
         self.stubs.UnsetAll()
         self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+        def dumb(*args, **kwargs):
+            pass
+
+        self.stubs.Set(utils, 'usage_from_instance', dumb)
         # NOTE(comstud): Make 'cast' behave like a 'call' which will
         # ensure that operations complete
         self.stubs.Set(rpc, 'cast', rpc.call)
@@ -24,6 +24,7 @@ from nova import network
 from nova import compute
 from nova import rpc
 from nova import test
+from nova.tests import fake_network
 from nova.tests.api.openstack import fakes
 from nova import utils
 
@@ -58,7 +59,7 @@ def network_api_get_floating_ips_by_project(self, context):
 
 
 def compute_api_get(self, context, instance_id):
-    return dict(uuid=FAKE_UUID)
+    return dict(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob')
 
 
 def network_api_allocate(self, context):
@@ -81,23 +82,6 @@ def network_api_disassociate(self, context, floating_address):
     pass
 
 
-def network_get_instance_nw_info(self, context, instance):
-    info = {
-        'label': 'fake',
-        'gateway': 'fake',
-        'dhcp_server': 'fake',
-        'broadcast': 'fake',
-        'mac': 'fake',
-        'vif_uuid': 'fake',
-        'rxtx_cap': 'fake',
-        'dns': [],
-        'ips': [{'ip': '10.0.0.1'}],
-        'should_create_bridge': False,
-        'should_create_vlan': False}
-
-    return [['ignore', info]]
-
-
 def fake_instance_get(context, instance_id):
     return {
         "id": 1,
@@ -137,8 +121,12 @@ class FloatingIpTest(test.TestCase):
                        network_api_release)
         self.stubs.Set(network.api.API, "disassociate_floating_ip",
                        network_api_disassociate)
-        self.stubs.Set(network.api.API, "get_instance_nw_info",
-                       network_get_instance_nw_info)
+        fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+                                               spectacular=True)
+
+        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+                                                          spectacular=True)
         self.stubs.Set(db, 'instance_get',
                        fake_instance_get)
 
@@ -39,6 +39,7 @@ import nova.image.fake
 import nova.rpc
 import nova.scheduler.api
 from nova import test
+from nova.tests import fake_network
 from nova.tests.api.openstack import fakes
 from nova import utils
 
@@ -65,12 +66,13 @@ def fake_gen_uuid():
 
 
 def return_server_by_id(context, id):
-    return fakes.stub_instance(id)
+    return fakes.stub_instance(id, project_id='fake_project')
 
 
 def return_server_by_uuid(context, uuid):
     id = 1
-    return fakes.stub_instance(id, uuid=uuid)
+    return fakes.stub_instance(id, uuid=uuid,
+                               project_id='fake_project')
 
 
 def return_server_with_attributes(**kwargs):
@@ -131,7 +133,8 @@ def return_servers_from_child_zones(*args, **kwargs):
         for server_id in xrange(5):
             server = Server()
             server._info = fakes.stub_instance(
-                    server_id, reservation_id="child")
+                    server_id, reservation_id="child",
+                    project_id='fake_project')
             servers_list.append(server)
 
         zones.append(("Zone%d" % zone, servers_list))
@@ -165,11 +168,9 @@ class ServersControllerTest(test.TestCase):
         self.maxDiff = None
         super(ServersControllerTest, self).setUp()
         self.flags(verbose=True, use_ipv6=False)
-        fakes.stub_out_networking(self.stubs)
         fakes.stub_out_rate_limiting(self.stubs)
         fakes.stub_out_key_pair_funcs(self.stubs)
         fakes.stub_out_image_service(self.stubs)
-        fakes.stub_out_nw_api(self.stubs)
         self.stubs.Set(nova.db, 'instance_get_all_by_filters',
                        return_servers)
         self.stubs.Set(nova.db, 'instance_get', return_server_by_id)
@@ -186,13 +187,8 @@ class ServersControllerTest(test.TestCase):
         self.controller = servers.Controller()
         self.ips_controller = ips.Controller()
 
-        def nw_info(*args, **kwargs):
-            return []
-
-        floaters = nw_info
-        fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
-        fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
-                                                                floaters)
+        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+                                                          spectacular=True)
 
     def test_get_server_by_uuid(self):
         """
@@ -229,11 +225,12 @@ class ServersControllerTest(test.TestCase):
         uuid = FAKE_UUID
         req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % uuid)
         res_dict = self.controller.show(req, uuid)
+
         expected_server = {
             "server": {
                 "id": uuid,
                 "user_id": "fake",
-                "tenant_id": "fake",
+                "tenant_id": "fake_project",
                 "updated": "2010-11-11T11:00:00Z",
                 "created": "2010-10-10T12:00:00Z",
                 "progress": 0,
@@ -262,6 +259,10 @@ class ServersControllerTest(test.TestCase):
                 ],
             },
             "addresses": {
+                'test0': [
+                    {'version': 4, 'addr': '192.168.0.100'},
+                    {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                ]
             },
             "metadata": {
                 "seq": "1",
@@ -326,6 +327,10 @@ class ServersControllerTest(test.TestCase):
                 ],
             },
             "addresses": {
+                'test0': [
+                    {'version': 4, 'addr': '192.168.0.100'},
+                    {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                ]
             },
             "metadata": {
                 "seq": "1",
@@ -393,6 +398,10 @@ class ServersControllerTest(test.TestCase):
                 ],
             },
             "addresses": {
+                'test0': [
+                    {'version': 4, 'addr': '192.168.0.100'},
+                    {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                ]
             },
             "metadata": {
                 "seq": "1",
@@ -443,67 +452,13 @@ class ServersControllerTest(test.TestCase):
         self.assertEqual(res_dict['server']['id'], FAKE_UUID)
         self.assertEqual(res_dict['server']['name'], 'server1')
 
-    def test_get_server_by_id_with_addresses(self):
+    def test_get_server_addresses_from_nw_info(self):
         self.flags(use_ipv6=True)
-        privates = ['192.168.0.3', '192.168.0.4']
-        publics = ['172.19.0.1', '172.19.0.2']
-        public6s = ['b33f::fdee:ddff:fecc:bbaa']
-
-        def nw_info(*args, **kwargs):
-            return [(None, {'label': 'public',
-                            'ips': [dict(ip=ip) for ip in publics],
-                            'ip6s': [dict(ip=ip) for ip in public6s]}),
-                    (None, {'label': 'private',
-                            'ips': [dict(ip=ip) for ip in privates]})]
-
-        def floaters(*args, **kwargs):
-            return []
-
-        new_return_server = return_server_with_attributes()
-        fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
-        fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
-                                                                floaters)
-        self.stubs.Set(nova.db, 'instance_get', new_return_server)
-
-        req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
-        res_dict = self.controller.show(req, FAKE_UUID)
-
-        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
-        self.assertEqual(res_dict['server']['name'], 'server1')
-        addresses = res_dict['server']['addresses']
-        expected = {
-            'private': [
-                {'addr': '192.168.0.3', 'version': 4},
-                {'addr': '192.168.0.4', 'version': 4},
-            ],
-            'public': [
-                {'addr': '172.19.0.1', 'version': 4},
-                {'addr': '172.19.0.2', 'version': 4},
-                {'addr': 'b33f::fdee:ddff:fecc:bbaa', 'version': 6},
-            ],
-        }
-        self.assertDictMatch(addresses, expected)
-
-    def test_get_server_addresses_from_nwinfo(self):
-        self.flags(use_ipv6=True)
-
-        privates = ['192.168.0.3', '192.168.0.4']
-        publics = ['172.19.0.1', '1.2.3.4', '172.19.0.2']
-
-        public6s = ['b33f::fdee:ddff:fecc:bbaa']
-
-        def nw_info(*args, **kwargs):
-            return [(None, {'label': 'public',
-                            'ips': [dict(ip=ip) for ip in publics],
-                            'ip6s': [dict(ip=ip) for ip in public6s]}),
-                    (None, {'label': 'private',
-                            'ips': [dict(ip=ip) for ip in privates]})]
-
-        def floaters(*args, **kwargs):
-            return []
-
         new_return_server = return_server_with_attributes_by_uuid()
-        fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
+        fake_network.fake_get_instance_nw_info(self.stubs, num_networks=2,
+                                               spectacular=True)
+        floaters = []
         fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
                                                                 floaters)
         self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
@@ -513,16 +468,10 @@ class ServersControllerTest(test.TestCase):
 
         expected = {
             'addresses': {
-                'private': [
-                    {'version': 4, 'addr': '192.168.0.3'},
-                    {'version': 4, 'addr': '192.168.0.4'},
-                ],
-                'public': [
-                    {'version': 4, 'addr': '172.19.0.1'},
-                    {'version': 4, 'addr': '1.2.3.4'},
-                    {'version': 4, 'addr': '172.19.0.2'},
-                    {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
-                ],
+                'test0': [
+                    {'version': 4, 'addr': '192.168.0.100'},
+                    {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                ]
             },
         }
         self.assertDictMatch(res_dict, expected)
@@ -580,39 +529,21 @@ class ServersControllerTest(test.TestCase):
         self.assertDictMatch(res_dict, expected)
 
     def test_get_server_addresses_with_floating_from_nwinfo(self):
-        ips = dict(privates=['192.168.0.3', '192.168.0.4'],
-                   publics=['172.19.0.1', '1.2.3.4', '172.19.0.2'])
-
-        def nw_info(*args, **kwargs):
-            return [(None, {'label': 'private',
-                            'ips': [dict(ip=ip)
-                                    for ip in ips['privates']]})]
-
-        def floaters(*args, **kwargs):
-            # NOTE(jkoelker) floaters will get called multiple times
-            #                this makes sure it will only return data
-            #                once
-            pubs = list(ips['publics'])
-            ips['publics'] = []
-            return pubs
-
         new_return_server = return_server_with_attributes_by_uuid()
-        fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
-        fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
-                                                                floaters)
         self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
+
+        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+                                            floating_ips_per_fixed_ip=1,
+                                            spectacular=True)
         req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/ips' % FAKE_UUID)
         res_dict = self.ips_controller.index(req, FAKE_UUID)
 
         expected = {
             'addresses': {
-                'private': [
-                    {'version': 4, 'addr': '192.168.0.3'},
-                    {'version': 4, 'addr': '192.168.0.4'},
-                    {'version': 4, 'addr': '172.19.0.1'},
-                    {'version': 4, 'addr': '1.2.3.4'},
-                    {'version': 4, 'addr': '172.19.0.2'},
+                'test0': [
+                    {'version': 4, 'addr': '192.168.0.100'},
+                    {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'},
+                    {'version': 4, 'addr': '10.10.10.100'},
                 ],
             },
         }
@@ -620,37 +551,25 @@ class ServersControllerTest(test.TestCase):
 
     def test_get_server_addresses_single_network_from_nwinfo(self):
         self.flags(use_ipv6=True)
-        privates = ['192.168.0.3', '192.168.0.4']
-        publics = ['172.19.0.1', '1.2.3.4', '172.19.0.2']
-        public6s = ['b33f::fdee:ddff:fecc:bbaa']
-
-        def nw_info(*args, **kwargs):
-            return [(None, {'label': 'public',
-                            'ips': [dict(ip=ip) for ip in publics],
-                            'ip6s': [dict(ip=ip) for ip in public6s]}),
-                    (None, {'label': 'private',
-                            'ips': [dict(ip=ip) for ip in privates]})]
-
         def floaters(*args, **kwargs):
             return []
 
         new_return_server = return_server_with_attributes_by_uuid()
-        fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
+        fake_network.fake_get_instance_nw_info(self.stubs, num_networks=1)
         fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
                                                                 floaters)
         self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
 
-        url = '/v2/fake/servers/%s/ips/public' % FAKE_UUID
+        url = '/v2/fake/servers/%s/ips/test0' % FAKE_UUID
         req = fakes.HTTPRequest.blank(url)
-        res_dict = self.ips_controller.show(req, FAKE_UUID, 'public')
+        res_dict = self.ips_controller.show(req, FAKE_UUID, 'test0')
 
         expected = {
-            'public': [
-                {'version': 4, 'addr': '172.19.0.1'},
-                {'version': 4, 'addr': '1.2.3.4'},
-                {'version': 4, 'addr': '172.19.0.2'},
-                {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
-            ],
+            'test0': [
+                {'version': 4, 'addr': '192.168.0.100'},
+                {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+            ]
         }
         self.assertDictMatch(res_dict, expected)
 
@@ -1215,7 +1134,8 @@ class ServersControllerTest(test.TestCase):
     def test_rebuild_instance_with_access_ipv6_bad_format(self):
 
         def fake_get_instance(*args, **kwargs):
-            return fakes.stub_instance(1, vm_state=vm_states.ACTIVE)
+            return fakes.stub_instance(1, vm_state=vm_states.ACTIVE,
+                                       project_id='fake_project')
 
         self.stubs.Set(nova.db, 'instance_get', fake_get_instance)
         # proper local hrefs must start with 'http://localhost/v2/'
@@ -1493,7 +1413,6 @@ class ServersControllerCreateTest(test.TestCase):
         def queue_get_for(context, *args):
             return 'network_topic'
 
-        fakes.stub_out_networking(self.stubs)
         fakes.stub_out_rate_limiting(self.stubs)
         fakes.stub_out_key_pair_funcs(self.stubs)
         fakes.stub_out_image_service(self.stubs)
@@ -2672,13 +2591,10 @@ class ServersViewBuilderTest(test.TestCase):
|
|||||||
],
|
],
|
||||||
},
|
},
|
||||||
"addresses": {
|
"addresses": {
|
||||||
'private': [
|
'test0': [
|
||||||
{'version': 4, 'addr': '172.19.0.1'}
|
{'version': 4, 'addr': '192.168.0.100'},
|
||||||
],
|
{'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
|
||||||
'public': [
|
]
|
||||||
{'version': 4, 'addr': '192.168.0.3'},
|
|
||||||
{'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
|
|
||||||
],
|
|
||||||
},
|
},
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"config_drive": None,
|
"config_drive": None,
|
||||||
@@ -2744,13 +2660,10 @@ class ServersViewBuilderTest(test.TestCase):
|
|||||||
],
|
],
|
||||||
},
|
},
|
||||||
"addresses": {
|
"addresses": {
|
||||||
'private': [
|
'test0': [
|
||||||
{'version': 4, 'addr': '172.19.0.1'}
|
{'version': 4, 'addr': '192.168.0.100'},
|
||||||
],
|
{'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
|
||||||
'public': [
|
]
|
||||||
{'version': 4, 'addr': '192.168.0.3'},
|
|
||||||
{'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
|
|
||||||
],
|
|
||||||
},
|
},
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"config_drive": None,
|
"config_drive": None,
|
||||||
@@ -2824,13 +2737,10 @@ class ServersViewBuilderTest(test.TestCase):
|
|||||||
],
|
],
|
||||||
},
|
},
|
||||||
"addresses": {
|
"addresses": {
|
||||||
'private': [
|
'test0': [
|
||||||
{'version': 4, 'addr': '172.19.0.1'}
|
{'version': 4, 'addr': '192.168.0.100'},
|
||||||
],
|
{'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
|
||||||
'public': [
|
]
|
||||||
{'version': 4, 'addr': '192.168.0.3'},
|
|
||||||
{'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
|
|
||||||
],
|
|
||||||
},
|
},
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"config_drive": None,
|
"config_drive": None,
|
||||||
@@ -2891,13 +2801,10 @@ class ServersViewBuilderTest(test.TestCase):
                 ],
             },
             "addresses": {
-                'private': [
-                    {'version': 4, 'addr': '172.19.0.1'}
-                ],
-                'public': [
-                    {'version': 4, 'addr': '192.168.0.3'},
-                    {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
-                ],
+                'test0': [
+                    {'version': 4, 'addr': '192.168.0.100'},
+                    {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                ]
             },
             "metadata": {},
             "config_drive": None,
@@ -2956,13 +2863,10 @@ class ServersViewBuilderTest(test.TestCase):
                 ],
             },
             "addresses": {
-                'private': [
-                    {'version': 4, 'addr': '172.19.0.1'}
-                ],
-                'public': [
-                    {'version': 4, 'addr': '192.168.0.3'},
-                    {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
-                ],
+                'test0': [
+                    {'version': 4, 'addr': '192.168.0.100'},
+                    {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                ]
             },
             "metadata": {},
             "config_drive": None,
@@ -3023,12 +2927,9 @@ class ServersViewBuilderTest(test.TestCase):
                 ],
             },
             "addresses": {
-                'private': [
-                    {'version': 4, 'addr': '172.19.0.1'}
-                ],
-                'public': [
-                    {'version': 4, 'addr': '192.168.0.3'},
-                    {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
+                'test0': [
+                    {'version': 4, 'addr': '192.168.0.100'},
+                    {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
                 ]
             },
             "metadata": {},
@@ -3095,12 +2996,9 @@ class ServersViewBuilderTest(test.TestCase):
                 ],
             },
             "addresses": {
-                'private': [
-                    {'version': 4, 'addr': '172.19.0.1'}
-                ],
-                'public': [
-                    {'version': 4, 'addr': '192.168.0.3'},
-                    {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
+                'test0': [
+                    {'version': 4, 'addr': '192.168.0.100'},
+                    {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
                 ]
             },
             "metadata": {
@@ -40,6 +40,7 @@ from nova import context
 from nova.db.sqlalchemy import models
 from nova import exception as exc
 import nova.image.fake
+from nova.tests import fake_network
 from nova.tests.glance import stubs as glance_stubs
 from nova import utils
 from nova import wsgi
@@ -180,15 +181,9 @@ class stub_out_compute_api_backup(object):
         return dict(id='123', status='ACTIVE', name=name, properties=props)


-def stub_out_nw_api_get_instance_nw_info(stubs, func=None):
-    def get_instance_nw_info(self, context, instance):
-        return [(None, {'label': 'public',
-                        'ips': [{'ip': '192.168.0.3'}],
-                        'ip6s': []})]
-
-    if func is None:
-        func = get_instance_nw_info
-    stubs.Set(nova.network.API, 'get_instance_nw_info', func)
+def stub_out_nw_api_get_instance_nw_info(stubs, num_networks=1, func=None):
+    fake_network.stub_out_nw_api_get_instance_nw_info(stubs,
+                                                      spectacular=True)


 def stub_out_nw_api_get_floating_ips_by_fixed_address(stubs, func=None):
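Note on the hunk above: the OpenStack API fakes no longer hand-roll a legacy nw_info list; they delegate to the shared helper in the new fake_network module. A minimal sketch of how a test case now wires the stub -- the test class is hypothetical, and the import path for the fakes module is assumed from the imports in this file:

    # Sketch only: assumes the fakes module shown in this diff is importable
    # as nova.tests.api.openstack.fakes.
    from nova import test
    from nova.tests.api.openstack import fakes

    class ExampleServersTest(test.TestCase):
        def setUp(self):
            super(ExampleServersTest, self).setUp()
            # Replaces nova.network.API.get_instance_nw_info with the
            # shared fake, which serves the new network model.
            fakes.stub_out_nw_api_get_instance_nw_info(self.stubs)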
@@ -208,8 +203,7 @@ def stub_out_nw_api(stubs, cls=None, private=None, publics=None):

     class Fake:
         def get_instance_nw_info(*args, **kwargs):
-            return [(None, {'label': 'private',
-                            'ips': [{'ip': private}]})]
+            pass

         def get_floating_ips_by_fixed_address(*args, **kwargs):
             return publics
@@ -217,6 +211,7 @@ def stub_out_nw_api(stubs, cls=None, private=None, publics=None):
     if cls is None:
         cls = Fake
     stubs.Set(nova.network, 'API', cls)
+    fake_network.stub_out_nw_api_get_instance_nw_info(stubs, spectacular=True)


 def _make_image_fixtures():
@@ -473,7 +468,6 @@ def stub_instance(id, user_id='fake', project_id='fake', host=None,
                   auto_disk_config=False, display_name=None,
                   include_fake_metadata=True,
                   power_state=None, nw_cache=None):
-
     if include_fake_metadata:
         metadata = [models.InstanceMetadata(key='seq', value=id)]
     else:
@@ -518,6 +512,7 @@ def stub_instance(id, user_id='fake', project_id='fake', host=None,
         "ephemeral_gb": 0,
         "hostname": "",
         "host": host,
+        "instance_type_id": 1,
         "instance_type": dict(inst_type),
         "user_data": "",
         "reservation_id": reservation_id,
@@ -20,7 +20,10 @@ from nova import db
 from nova import exception
 from nova import flags
 from nova import utils
+import nova.compute.utils
 from nova.network import manager as network_manager
+from nova.network.quantum import nova_ipam_lib
+from nova.tests import fake_network_cache_model


 HOST = "testhost"
@@ -199,7 +202,6 @@ def vifs(n):
               'address': 'DE:AD:BE:EF:00:%02x' % x,
               'uuid': '00000000-0000-0000-0000-00000000000000%02d' % x,
               'network_id': x,
-              'network': FakeModel(**fake_network(x)),
               'instance_id': 0}

@@ -253,7 +255,8 @@ def ipv4_like(ip, match_string):


 def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
-                              floating_ips_per_fixed_ip=0):
+                              floating_ips_per_fixed_ip=0,
+                              spectacular=False):
     # stubs is the self.stubs from the test
     # ips_per_vif is the number of ips each vif will have
     # num_floating_ips is number of float ips for each fixed ip
@@ -261,22 +264,37 @@ def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
     network.db = db

     # reset the fixed and floating ip generators
-    global floating_ip_id, fixed_ip_id
+    global floating_ip_id, fixed_ip_id, fixed_ips
     floating_ip_id = floating_ip_ids()
     fixed_ip_id = fixed_ip_ids()
+    fixed_ips = []

     networks = [fake_network(x) for x in xrange(num_networks)]

     def fixed_ips_fake(*args, **kwargs):
-        return [next_fixed_ip(i, floating_ips_per_fixed_ip)
-                for i in xrange(num_networks) for j in xrange(ips_per_vif)]
+        global fixed_ips
+        ips = [next_fixed_ip(i, floating_ips_per_fixed_ip)
+               for i in xrange(num_networks) for j in xrange(ips_per_vif)]
+        fixed_ips = ips
+        return ips

-    def floating_ips_fake(*args, **kwargs):
+    def floating_ips_fake(context, address):
+        for ip in fixed_ips:
+            if address == ip['address']:
+                return ip['floating_ips']
         return []

     def virtual_interfaces_fake(*args, **kwargs):
         return [vif for vif in vifs(num_networks)]

+    def vif_by_uuid_fake(context, uuid):
+        return {'id': 1,
+                'address': 'DE:AD:BE:EF:00:01',
+                'uuid': uuid,
+                'network_id': 1,
+                'network': None,
+                'instance_id': 0}
+
     def instance_type_fake(*args, **kwargs):
         return flavor

@@ -289,25 +307,68 @@ def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
     def update_cache_fake(*args, **kwargs):
         pass

+    def get_subnets_by_net_id(self, context, project_id, network_uuid,
+                              vif_uuid):
+        subnet_v4 = dict(
+            cidr='192.168.0.0/24',
+            dns1='1.2.3.4',
+            dns2='2.3.4.5',
+            gateway='192.168.0.1')
+
+        subnet_v6 = dict(
+            cidr='fe80::/64',
+            gateway='fe80::def')
+        return [subnet_v4, subnet_v6]
+
+    def get_network_by_uuid(context, uuid):
+        return dict(id=1,
+                    cidr_v6='fe80::/64',
+                    bridge='br0',
+                    label='public')
+
+    def get_v4_fake(*args, **kwargs):
+        ips = fixed_ips_fake(*args, **kwargs)
+        return [ip['address'] for ip in ips]
+
     stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake)
     stubs.Set(db, 'floating_ip_get_by_fixed_address', floating_ips_fake)
+    stubs.Set(db, 'virtual_interface_get_by_uuid', vif_by_uuid_fake)
+    stubs.Set(db, 'network_get_by_uuid', get_network_by_uuid)
     stubs.Set(db, 'virtual_interface_get_by_instance', virtual_interfaces_fake)
     stubs.Set(db, 'instance_type_get', instance_type_fake)
     stubs.Set(db, 'network_get', network_get_fake)
     stubs.Set(db, 'instance_info_cache_update', update_cache_fake)

-    context = nova.context.RequestContext('testuser', 'testproject',
-                                          is_admin=False)
-    return network.get_instance_nw_info(context, 0, 0, 0, None)
+    stubs.Set(nova_ipam_lib.QuantumNovaIPAMLib, 'get_subnets_by_net_id',
+              get_subnets_by_net_id)
+    stubs.Set(nova_ipam_lib.QuantumNovaIPAMLib, 'get_v4_ips_by_interface',
+              get_v4_fake)
+
+    class FakeContext(nova.context.RequestContext):
+        def is_admin(self):
+            return True
+
+    nw_model = network.get_instance_nw_info(
+        FakeContext('fakeuser', 'fake_project'),
+        0, 0, 0, None)
+    if spectacular:
+        return nw_model
+    return nova.compute.utils.legacy_network_info(nw_model)


-def stub_out_nw_api_get_instance_nw_info(stubs, func=None):
+def stub_out_nw_api_get_instance_nw_info(stubs, func=None,
+                                         num_networks=1,
+                                         ips_per_vif=1,
+                                         floating_ips_per_fixed_ip=0,
+                                         spectacular=False):
     import nova.network

     def get_instance_nw_info(self, context, instance):
-        return [(None, {'label': 'public',
-                        'ips': [{'ip': '192.168.0.3'}],
-                        'ip6s': []})]
+        return fake_get_instance_nw_info(
+            stubs, num_networks=num_networks,
+            ips_per_vif=ips_per_vif,
+            floating_ips_per_fixed_ip=floating_ips_per_fixed_ip,
+            spectacular=spectacular)

     if func is None:
         func = get_instance_nw_info
     stubs.Set(nova.network.API, 'get_instance_nw_info', func)
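The `spectacular` flag threaded through fake_get_instance_nw_info above selects which of the two shapes a caller gets back: the new NetworkInfo model as-is, or the legacy list of (network, info) tuples produced by nova.compute.utils.legacy_network_info. A hedged sketch of consuming each shape -- the test class and method are hypothetical; `self.stubs` comes from nova's base TestCase:

    from nova import test
    from nova.tests import fake_network

    class ExampleNwInfoTest(test.TestCase):
        def test_both_shapes(self):
            # spectacular=True: the new model; each vif reports its own ips.
            nw_model = fake_network.fake_get_instance_nw_info(
                self.stubs, num_networks=1, ips_per_vif=1, spectacular=True)
            model_addrs = [ip['address'] for vif in nw_model
                           for ip in vif.fixed_ips()]

            # spectacular=False: the legacy (network, info) tuple list.
            legacy = fake_network.fake_get_instance_nw_info(
                self.stubs, num_networks=1, ips_per_vif=1, spectacular=False)
            legacy_addrs = [ip['ip'] for _net, info in legacy
                            for ip in info['ips']]
            self.assertTrue(model_addrs and legacy_addrs)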
@@ -38,14 +38,13 @@ def new_route(route_dict=None):

 def new_subnet(subnet_dict=None):
     new_subnet = dict(
-        cidr='255.255.255.0',
+        cidr='10.10.0.0/24',
         dns=[new_ip(dict(address='1.2.3.4')),
              new_ip(dict(address='2.3.4.5'))],
-        gateway=new_ip(dict(address='192.168.1.1')),
-        ips=[new_ip(dict(address='192.168.1.100')),
-             new_ip(dict(address='192.168.1.101'))],
-        routes=[new_route()],
-        version=4)
+        gateway=new_ip(dict(address='10.10.0.1')),
+        ips=[new_ip(dict(address='10.10.0.2')),
+             new_ip(dict(address='10.10.0.3'))],
+        routes=[new_route()])
     subnet_dict = subnet_dict or {}
     new_subnet.update(subnet_dict)
     return model.Subnet(**new_subnet)
@@ -134,5 +134,4 @@ class _IntegratedTestBase(test.TestCase):
         # Set a valid server name
         server_name = self.get_unused_server_name()
         server['name'] = server_name
-
         return server
@@ -29,10 +29,10 @@ LOG = logging.getLogger('nova.tests.integrated')

 class ServersTest(integrated_helpers._IntegratedTestBase):

-    def _wait_for_state_change(self, server, status):
+    def _wait_for_state_change(self, server, from_status):
         for i in xrange(0, 50):
             server = self.api.get_server(server['id'])
-            if server['status'] != status:
+            if server['status'] != from_status:
                 break
             time.sleep(.1)
@@ -129,7 +129,6 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
         self.assertTrue(created_server_id in server_ids)

         found_server = self._wait_for_state_change(found_server, 'BUILD')
-
         # It should be available...
         # TODO(justinsb): Mock doesn't yet do this...
         self.assertEqual('ACTIVE', found_server['status'])
@@ -113,6 +113,7 @@ class BaseTestCase(test.TestCase):
                    notification_driver='nova.notifier.test_notifier',
                    network_manager='nova.network.manager.FlatManager')
         self.compute = utils.import_object(FLAGS.compute_manager)
+
         self.user_id = 'fake'
         self.project_id = 'fake'
         self.context = context.RequestContext(self.user_id,
@@ -463,6 +464,12 @@ class ComputeTestCase(BaseTestCase):

     def test_rebuild(self):
         """Ensure instance can be rebuilt"""
+        def fake_get_nw_info(cls, ctxt, instance):
+            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+                                                          spectacular=True)
+
+        self.stubs.Set(nova.network.API, 'get_instance_nw_info',
+                       fake_get_nw_info)
         instance = self._create_fake_instance()
         instance_uuid = instance['uuid']
@@ -878,6 +885,12 @@ class ComputeTestCase(BaseTestCase):
         instance = self._create_fake_instance()
         instance_uuid = instance['uuid']

+        def fake_get_nw_info(cls, ctxt, instance):
+            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+                                                          spectacular=True)
+
+        self.stubs.Set(nova.network.API, 'get_instance_nw_info',
+                       fake_get_nw_info)
         self.mox.StubOutWithMock(self.compute.network_api,
                                  "allocate_for_instance")
         self.compute.network_api.allocate_for_instance(mox.IgnoreArg(),
@@ -1002,6 +1015,13 @@ class ComputeTestCase(BaseTestCase):

     def test_resize_instance_notification(self):
         """Ensure notifications on instance migrate/resize"""
+        def fake_get_nw_info(cls, ctxt, instance):
+            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+                                                          spectacular=True)
+
+        self.stubs.Set(nova.network.API, 'get_instance_nw_info',
+                       fake_get_nw_info)
+
         instance = self._create_fake_instance()
         instance_uuid = instance['uuid']
         context = self.context.elevated()
@@ -1212,6 +1232,14 @@ class ComputeTestCase(BaseTestCase):

     def test_pre_live_migration_works_correctly(self):
         """Confirm setup_compute_volume is called when volume is mounted."""
+        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+                                                          spectacular=True)
+
+        def stupid(*args, **kwargs):
+            return fake_network.fake_get_instance_nw_info(self.stubs,
+                                                          spectacular=True)
+        self.stubs.Set(nova.compute.manager.ComputeManager,
+                       '_get_instance_nw_info', stupid)
         # creating instance testdata
         inst_ref = self._create_fake_instance({'host': 'dummy'})
         c = context.get_admin_context()
@@ -1220,16 +1248,13 @@ class ComputeTestCase(BaseTestCase):
         # creating mocks
         self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
         self.compute.driver.pre_live_migration({'block_device_mapping': []})
-        dummy_nw_info = [[None, {'ips':'1.1.1.1'}]]
-        self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
-        self.compute._get_instance_nw_info(c, mox.IsA(inst_ref)
-                                           ).AndReturn(dummy_nw_info)
+        nw_info = fake_network.fake_get_instance_nw_info(self.stubs)
         self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
-        self.compute.driver.plug_vifs(mox.IsA(inst_ref), dummy_nw_info)
+        self.compute.driver.plug_vifs(mox.IsA(inst_ref), nw_info)
         self.mox.StubOutWithMock(self.compute.driver,
                                  'ensure_filtering_rules_for_instance')
         self.compute.driver.ensure_filtering_rules_for_instance(
-            mox.IsA(inst_ref), dummy_nw_info)
+            mox.IsA(inst_ref), nw_info)

         # start test
         self.mox.ReplayAll()
@@ -2239,11 +2264,10 @@ class ComputeAPITestCase(BaseTestCase):
                                             fixed_address):
             called['associate'] = True

-        nw_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
-
         def fake_get_nw_info(cls, ctxt, instance):
             self.assertTrue(ctxt.is_admin)
-            return nw_info
+            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+                                                          spectacular=True)

         self.stubs.Set(nova.network.API, 'associate_floating_ip',
                        fake_associate_ip_network_api)
@@ -2968,7 +2992,14 @@ class ComputeAPITestCase(BaseTestCase):
         self.assertTrue(self.compute_api.get_lock(self.context, instance))

     def test_add_remove_security_group(self):
+        def fake_get_nw_info(cls, ctxt, instance):
+            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+                                                          spectacular=True)
+
+        self.stubs.Set(nova.network.API, 'get_instance_nw_info',
+                       fake_get_nw_info)
         instance = self._create_fake_instance()
+
         self.compute.run_instance(self.context, instance['uuid'])
         instance = self.compute_api.get(self.context, instance['uuid'])
         security_group_name = self._create_group()['name']
@@ -61,12 +61,6 @@ class MetadataTestCase(test.TestCase):
                          'root_device_name': '/dev/sda1',
                          'hostname': 'test'})

-        def fake_get_instance_nw_info(self, context, instance):
-            return [(None, {'label': 'public',
-                            'ips': [{'ip': '192.168.0.3'},
-                                    {'ip': '192.168.0.4'}],
-                            'ip6s': [{'ip': 'fe80::beef'}]})]
-
        def fake_get_floating_ips_by_fixed_address(self, context, fixed_ip):
            return ['1.2.3.4', '5.6.7.8']
@@ -76,8 +70,8 @@ class MetadataTestCase(test.TestCase):
         def instance_get_list(*args, **kwargs):
             return [self.instance]

-        self.stubs.Set(network.API, 'get_instance_nw_info',
-                       fake_get_instance_nw_info)
+        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+                                                          spectacular=True)
         self.stubs.Set(network.API, 'get_floating_ips_by_fixed_address',
                        fake_get_floating_ips_by_fixed_address)
         self.stubs.Set(api, 'instance_get', instance_get)
@@ -106,16 +106,16 @@ class SubnetTests(test.TestCase):

         route1 = fake_network_cache_model.new_route()

-        self.assertEqual(subnet['cidr'], '255.255.255.0')
+        self.assertEqual(subnet['cidr'], '10.10.0.0/24')
         self.assertEqual(subnet['dns'],
                 [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
                  fake_network_cache_model.new_ip(dict(address='2.3.4.5'))])
-        self.assertEqual(subnet['gateway']['address'], '192.168.1.1')
+        self.assertEqual(subnet['gateway']['address'], '10.10.0.1')
         self.assertEqual(subnet['ips'],
                 [fake_network_cache_model.new_ip(
-                        dict(address='192.168.1.100')),
+                        dict(address='10.10.0.2')),
                  fake_network_cache_model.new_ip(
-                        dict(address='192.168.1.101'))])
+                        dict(address='10.10.0.3'))])
         self.assertEqual(subnet['routes'], [route1])
         self.assertEqual(subnet['version'], 4)

@@ -159,9 +159,9 @@ class SubnetTests(test.TestCase):
                 dict(address='192.168.1.102')))
         self.assertEqual(subnet['ips'],
                 [fake_network_cache_model.new_ip(
-                        dict(address='192.168.1.100')),
+                        dict(address='10.10.0.2')),
                  fake_network_cache_model.new_ip(
-                        dict(address='192.168.1.101')),
+                        dict(address='10.10.0.3')),
                  fake_network_cache_model.new_ip(
                         dict(address='192.168.1.102'))])

@@ -172,9 +172,9 @@ class SubnetTests(test.TestCase):
                 dict(address='192.168.1.102')))
         self.assertEqual(subnet['ips'],
                 [fake_network_cache_model.new_ip(
-                        dict(address='192.168.1.100')),
+                        dict(address='10.10.0.2')),
                  fake_network_cache_model.new_ip(
-                        dict(address='192.168.1.101')),
+                        dict(address='10.10.0.3')),
                  fake_network_cache_model.new_ip(
                         dict(address='192.168.1.102'))])

@@ -262,9 +262,9 @@ class VIFTests(test.TestCase):
     def test_vif_get_fixed_ips(self):
         vif = fake_network_cache_model.new_vif()
         fixed_ips = vif.fixed_ips()
-        ips = [fake_network_cache_model.new_ip(dict(address='192.168.1.100')),
+        ips = [fake_network_cache_model.new_ip(dict(address='10.10.0.2')),
                fake_network_cache_model.new_ip(
-                   dict(address='192.168.1.101'))] * 2
+                   dict(address='10.10.0.3'))] * 2
         self.assertEqual(fixed_ips, ips)

     def test_vif_get_floating_ips(self):
@@ -279,9 +279,9 @@ class VIFTests(test.TestCase):
         ip_dict = {
             'network_id': 1,
             'ips': [fake_network_cache_model.new_ip(
-                        {'address': '192.168.1.100'}),
+                        {'address': '10.10.0.2'}),
                     fake_network_cache_model.new_ip(
-                        {'address': '192.168.1.101'})] * 2,
+                        {'address': '10.10.0.3'})] * 2,
             'network_label': 'public'}
         self.assertEqual(labeled_ips, ip_dict)

@@ -303,9 +303,9 @@ class NetworkInfoTests(test.TestCase):
                 fake_network_cache_model.new_vif(
                         {'address':'bb:bb:bb:bb:bb:bb'})])
         self.assertEqual(ninfo.fixed_ips(),
-                [fake_network_cache_model.new_ip({'address': '192.168.1.100'}),
+                [fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
                  fake_network_cache_model.new_ip(
-                        {'address': '192.168.1.101'})] * 4)
+                        {'address': '10.10.0.3'})] * 4)

     def test_get_floating_ips(self):
         vif = fake_network_cache_model.new_vif()
@@ -321,6 +321,6 @@ class NetworkInfoTests(test.TestCase):
                         {'address':'bb:bb:bb:bb:bb:bb'})])
         deserialized = model.NetworkInfo.hydrate(ninfo)
         self.assertEqual(ninfo.fixed_ips(),
-                [fake_network_cache_model.new_ip({'address': '192.168.1.100'}),
+                [fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
                  fake_network_cache_model.new_ip(
-                        {'address': '192.168.1.101'})] * 4)
+                        {'address': '10.10.0.3'})] * 4)
@@ -275,32 +275,42 @@ class QuantumNovaIPAMTestCase(QuantumNovaTestCase):
         self.net_man.driver.update_dhcp_hostfile_with_text = func
         self.net_man.driver.restart_dhcp = func2
         self.net_man.driver.kill_dhcp = func1
-        nw_info = self.net_man.allocate_for_instance(ctx,
+        nw_info = self.net_man.allocate_for_instance(ctx.elevated(),
             instance_id=instance_ref['id'], host="",
             instance_type_id=instance_ref['instance_type_id'],
             project_id=project_id)

         self.assertEquals(len(nw_info), 2)

+        cidrs = ['10.', '192.']
+        addrs = ['10.', '192.']
+        cidrs_v6 = ['2001:1dba:', '2001:1db8:']
+        addrs_v6 = ['2001:1dba:', '2001:1db8:']
+
+        def check_for_startswith(choices, choice):
+            for v in choices:
+                if choice.startswith(v):
+                    choices.remove(v)
+                    return True
+            return False
+
         # we don't know which order the NICs will be in until we
         # introduce the notion of priority
-        # v4 cidr
-        self.assertTrue(nw_info[0][0]['cidr'].startswith("10."))
-        self.assertTrue(nw_info[1][0]['cidr'].startswith("192."))
-
-        # v4 address
-        self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("10."))
-        self.assertTrue(nw_info[1][1]['ips'][0]['ip'].startswith("192."))
-
-        # v6 cidr
-        self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dba:"))
-        self.assertTrue(nw_info[1][0]['cidr_v6'].startswith("2001:1db8:"))
-
-        # v6 address
-        self.assertTrue(
-            nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dba:"))
-        self.assertTrue(
-            nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db8:"))
+        for vif in nw_info:
+            for subnet in vif['network']['subnets']:
+                cidr = subnet['cidr'].lower()
+                if subnet['version'] == 4:
+                    # v4 cidr
+                    self.assertTrue(check_for_startswith(cidrs, cidr))
+                    # v4 address
+                    address = subnet['ips'][0]['address']
+                    self.assertTrue(check_for_startswith(addrs, address))
+                else:
+                    # v6 cidr
+                    self.assertTrue(check_for_startswith(cidrs_v6, cidr))
+                    # v6 address
+                    address = subnet['ips'][0]['address']
+                    self.assertTrue(check_for_startswith(addrs_v6, address))

         self.net_man.deallocate_for_instance(ctx,
                 instance_id=instance_ref['id'],
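The check_for_startswith helper introduced above exists because, per the comment in the hunk, NIC ordering is not deterministic: each expected prefix is consumed once when matched, so the two NICs may come back in either order but cannot both satisfy the same prefix. The same idea in isolation, as a runnable sketch:

    def check_for_startswith(choices, choice):
        # Consume a matching prefix so it cannot match a second NIC.
        for v in choices:
            if choice.startswith(v):
                choices.remove(v)
                return True
        return False

    cidrs = ['10.', '192.']
    assert check_for_startswith(cidrs, '10.0.0.0/24')
    assert not check_for_startswith(cidrs, '10.1.0.0/24')  # '10.' used up
    assert check_for_startswith(cidrs, '192.168.1.0/24')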
@@ -342,33 +352,34 @@ class QuantumNovaIPAMTestCase(QuantumNovaTestCase):

         self.assertEquals(len(nw_info), 2)

+        cidrs = ['9.', '192.']
+        addrs = ['9.', '192.']
+        cidrs_v6 = ['2001:1dbb:', '2001:1db9:']
+        addrs_v6 = ['2001:1dbb:', '2001:1db9:']
+
+        def check_for_startswith(choices, choice):
+            for v in choices:
+                if choice.startswith(v):
+                    choices.remove(v)
+                    return True
+            return False
+
         # we don't know which order the NICs will be in until we
         # introduce the notion of priority
-        # v4 cidr
-        self.assertTrue(nw_info[0][0]['cidr'].startswith("9.") or
-                        nw_info[1][0]['cidr'].startswith("9."))
-        self.assertTrue(nw_info[0][0]['cidr'].startswith("192.") or
-                        nw_info[1][0]['cidr'].startswith("192."))
-
-        # v4 address
-        self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("9.") or
-                        nw_info[1][1]['ips'][0]['ip'].startswith("9."))
-        self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("192.") or
-                        nw_info[1][1]['ips'][0]['ip'].startswith("192."))
-
-        # v6 cidr
-        self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dbb:") or
-                        nw_info[1][0]['cidr_v6'].startswith("2001:1dbb:"))
-        self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1db9:") or
-                        nw_info[1][0]['cidr_v6'].startswith("2001:1db9:"))
-
-        # v6 address
-        self.assertTrue(
-            nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dbb:") or
-            nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1dbb:"))
-        self.assertTrue(
-            nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1db9:") or
-            nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db9:"))
+        for vif in nw_info:
+            for subnet in vif['network']['subnets']:
+                cidr = subnet['cidr'].lower()
+                if subnet['version'] == 4:
+                    # v4 cidr
+                    self.assertTrue(check_for_startswith(cidrs, cidr))
+                    # v4 address
+                    address = subnet['ips'][0]['address']
+                    self.assertTrue(check_for_startswith(addrs, address))
+                else:
+                    # v6 cidr
+                    self.assertTrue(check_for_startswith(cidrs_v6, cidr))
+                    # v6 address
+                    address = subnet['ips'][0]['address']
+                    self.assertTrue(check_for_startswith(addrs_v6, address))

         self.net_man.deallocate_for_instance(ctx,
                 instance_id=instance_ref['id'],
@@ -402,7 +413,7 @@ class QuantumNovaMACGenerationTestCase(QuantumNovaTestCase):
             instance_type_id=instance_ref['instance_type_id'],
             project_id=project_id,
             requested_networks=requested_networks)
-        self.assertEqual(nw_info[0][1]['mac'], fake_mac)
+        self.assertEqual(nw_info[0]['address'], fake_mac)

     def test_melange_mac_address_creation(self):
         self.flags(use_melange_mac_generation=True)
@@ -423,7 +434,7 @@ class QuantumNovaMACGenerationTestCase(QuantumNovaTestCase):
             instance_type_id=instance_ref['instance_type_id'],
             project_id=project_id,
             requested_networks=requested_networks)
-        self.assertEqual(nw_info[0][1]['mac'], fake_mac)
+        self.assertEqual(nw_info[0]['address'], fake_mac)


 class QuantumNovaPortSecurityTestCase(QuantumNovaTestCase):
@@ -460,7 +471,7 @@ class QuantumNovaPortSecurityTestCase(QuantumNovaTestCase):
             instance_type_id=instance_ref['instance_type_id'],
             project_id=project_id,
             requested_networks=requested_networks)
-        self.assertEqual(nw_info[0][1]['mac'], fake_mac)
+        self.assertEqual(nw_info[0]['address'], fake_mac)

     def test_port_securty_negative(self):
         self.flags(use_melange_mac_generation=True)
@@ -494,4 +505,4 @@ class QuantumNovaPortSecurityTestCase(QuantumNovaTestCase):
             instance_type_id=instance_ref['instance_type_id'],
             project_id=project_id,
             requested_networks=requested_networks)
-        self.assertEqual(nw_info[0][1]['mac'], fake_mac)
+        self.assertEqual(nw_info[0]['address'], fake_mac)
@@ -414,13 +414,8 @@ def usage_from_instance(instance_ref, network_info=None, **kw):
             state_description=instance_ref['task_state'] \
                                if instance_ref['task_state'] else '')

-    # NOTE(jkoelker) This nastyness can go away once compute uses the
-    #                network model
     if network_info is not None:
-        fixed_ips = []
-        for network, info in network_info:
-            fixed_ips.extend([ip['ip'] for ip in info['ips']])
-        usage_info['fixed_ips'] = fixed_ips
+        usage_info['fixed_ips'] = network_info.fixed_ips()

     usage_info.update(kw)
     return usage_info
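With the model in place, usage_from_instance asks the NetworkInfo object for every fixed IP in one call instead of walking legacy tuples by hand. A rough before/after sketch -- the legacy sample data is illustrative, and the model call is shown as a comment since it needs a live NetworkInfo instance:

    # Before: flatten the legacy [(network, info), ...] structure manually.
    legacy_nw_info = [(None, {'label': 'public',
                              'ips': [{'ip': '192.168.0.3'}]})]
    fixed_ips = []
    for network, info in legacy_nw_info:
        fixed_ips.extend([ip['ip'] for ip in info['ips']])
    assert fixed_ips == ['192.168.0.3']

    # After: the model flattens across all vifs.
    # usage_info['fixed_ips'] = network_info.fixed_ips()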
@@ -605,3 +605,10 @@ class ComputeDriver(object):
         Note that this function takes an instance ID.
         """
         raise NotImplementedError()
+
+    def legacy_nwinfo(self):
+        """
+        Indicate if the driver requires the legacy network_info format.
+        """
+        # TODO(tr3buchet): update all subclasses and remove this
+        return True
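legacy_nwinfo() defaults to True so every existing virt driver keeps receiving the old format until it is converted (hence the TODO). A hedged sketch of the translation shim this enables -- the helper function below is hypothetical, but legacy_network_info is the converter added in nova/compute/utils by this change:

    import nova.compute.utils

    def nw_info_for_driver(driver, nw_model):
        # Hypothetical shim: hand each driver the format it declares.
        if driver.legacy_nwinfo():
            return nova.compute.utils.legacy_network_info(nw_model)
        return nw_model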
@@ -34,7 +34,6 @@ xenapi_ovs_integration_bridge_opt = \

 FLAGS = flags.FLAGS
 FLAGS.add_option(xenapi_ovs_integration_bridge_opt)
-
 LOG = logging.getLogger("nova.virt.xenapi.vif")

@@ -145,7 +144,7 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
         # with OVS model, always plug into an OVS integration bridge
         # that is already created
         network_ref = NetworkHelper.find_network_with_bridge(self._session,
                 FLAGS.xenapi_ovs_integration_bridge)
         vif_rec = {}
         vif_rec['device'] = str(device)
         vif_rec['network'] = network_ref