Port pci request from nova to zun

Change-Id: Iae65d7278e7f2f7c484a906f0101f1fbf0fdfd48
Depends-On: Ie9f278afb1af68379f8f641eba2e97dd582697cc
Partially-Implements: blueprint support-pcipassthroughfilter
ShunliZhou 2017-10-16 16:30:20 +08:00 committed by Hongbin Lu
parent f44af3de86
commit 34b132eaad
18 changed files with 451 additions and 85 deletions


@ -33,8 +33,10 @@ from zun.common import policy
from zun.common import utils
from zun.common import validation
import zun.conf
from zun.network import model as network_model
from zun.network import neutron
from zun import objects
from zun.pci import request as pci_request
from zun.volume import cinder_api as cinder
CONF = zun.conf.CONF
@ -242,6 +244,8 @@ class ContainersController(base.Controller):
nets = container_dict.get('nets', [])
requested_networks = self._build_requested_networks(context, nets)
pci_req = self._create_pci_requests_for_sriov_ports(context,
requested_networks)
mounts = container_dict.pop('mounts', [])
if mounts:
@ -288,6 +292,8 @@ class ContainersController(base.Controller):
kwargs['extra_spec'] = extra_spec
kwargs['requested_networks'] = requested_networks
kwargs['requested_volumes'] = requested_volumes
if pci_req.requests:
kwargs['pci_requests'] = pci_req
kwargs['run'] = run
compute_api.container_create(context, new_container, **kwargs)
# Set the HTTP Location Header
@ -296,6 +302,81 @@ class ContainersController(base.Controller):
pecan.response.status = 202
return view.format_container(pecan.request.host_url, new_container)
def _create_pci_requests_for_sriov_ports(self, context,
requested_networks):
pci_requests = objects.ContainerPCIRequests(requests=[])
if not requested_networks:
return pci_requests
neutron_api = neutron.NeutronAPI(context)
for request_net in requested_networks:
phynet_name = None
vnic_type = network_model.VNIC_TYPE_NORMAL
if request_net.get('port'):
vnic_type, phynet_name = self._get_port_vnic_info(
context, neutron_api, request_net['port'])
pci_request_id = None
if vnic_type in network_model.VNIC_TYPES_SRIOV:
spec = {pci_request.PCI_NET_TAG: phynet_name}
dev_type = pci_request.DEVICE_TYPE_FOR_VNIC_TYPE.get(vnic_type)
if dev_type:
spec[pci_request.PCI_DEVICE_TYPE_TAG] = dev_type
request = objects.ContainerPCIRequest(
count=1,
spec=[spec],
request_id=uuidutils.generate_uuid())
pci_requests.requests.append(request)
pci_request_id = request.request_id
request_net['pci_request_id'] = pci_request_id
return pci_requests
def _get_port_vnic_info(self, context, neutron, port_id):
"""Retrieve port vnic info
Invoked with a valid port_id.
Return vnic type and the attached physical network name.
"""
phynet_name = None
port = self._show_port(context, port_id, neutron_client=neutron,
fields=['binding:vnic_type', 'network_id'])
vnic_type = port.get('binding:vnic_type',
network_model.VNIC_TYPE_NORMAL)
if vnic_type in network_model.VNIC_TYPES_SRIOV:
net_id = port['network_id']
phynet_name = self._get_phynet_info(context, net_id)
return vnic_type, phynet_name
def _show_port(self, context, port_id, neutron_client=None, fields=None):
"""Return the port for the client given the port id.
:param context: Request context.
:param port_id: The id of port to be queried.
:param neutron_client: A neutron client.
:param fields: The condition fields to query port data.
:returns: A dict of port data.
e.g. {'port_id': 'abcd', 'fixed_ip_address': '1.2.3.4'}
"""
if not neutron_client:
neutron_client = neutron.NeutronAPI(context)
if fields:
result = neutron_client.show_port(port_id, fields=fields)
else:
result = neutron_client.show_port(port_id)
return result.get('port')
def _get_phynet_info(self, context, net_id):
phynet_name = None
# NOTE(hongbin): Use admin context here because non-admin users are
# unable to retrieve provider:* attributes.
admin_context = context.elevated()
neutron_api = neutron.NeutronAPI(admin_context)
network = neutron_api.show_network(
net_id, fields='provider:physical_network')
net = network.get('network')
phynet_name = net.get('provider:physical_network')
return phynet_name
def _check_external_network_attach(self, context, nets):
"""Check if attaching to external network is permitted."""
if not context.can(NETWORK_ATTACH_EXTERNAL,

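For reference, the controller change above translates each SR-IOV-capable port into a one-device PCI request whose spec carries the physical network and, for PF passthrough, a device type. Below is a minimal standalone sketch of that mapping; the constants mirror zun/network/model.py and zun/pci/request.py from this change, while the physnet name and example values are purely hypothetical.

# Illustrative sketch only -- not part of this change; example values are hypothetical.
import uuid

VNIC_TYPES_SRIOV = ('direct', 'macvtap', 'direct-physical', 'virtio-forwarder')
DEVICE_TYPE_FOR_VNIC_TYPE = {'direct-physical': 'type-PF'}  # PciDeviceType.SRIOV_PF

def sketch_pci_request(vnic_type, physnet_name):
    """Return the request dict an SR-IOV port would translate into, else None."""
    if vnic_type not in VNIC_TYPES_SRIOV:
        return None
    spec = {'physical_network': physnet_name}            # PCI_NET_TAG
    dev_type = DEVICE_TYPE_FOR_VNIC_TYPE.get(vnic_type)
    if dev_type:
        spec['dev_type'] = dev_type                      # PCI_DEVICE_TYPE_TAG
    return {'count': 1, 'spec': [spec], 'request_id': str(uuid.uuid4())}

print(sketch_pci_request('direct', 'physnet2'))
# -> {'count': 1, 'spec': [{'physical_network': 'physnet2'}], 'request_id': '...'}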

@ -29,7 +29,8 @@ class API(object):
super(API, self).__init__()
def container_create(self, context, new_container, extra_spec,
requested_networks, requested_volumes, run):
requested_networks, requested_volumes, run,
pci_requests=None):
host_state = None
try:
host_state = self._schedule_container(context, new_container,
@ -43,7 +44,7 @@ class API(object):
self.rpcapi.container_create(context, host_state['host'],
new_container, host_state['limits'],
requested_networks, requested_volumes,
run)
run, pci_requests)
def _schedule_container(self, context, new_container, extra_spec):
dests = self.scheduler_client.select_destinations(context,


@ -66,13 +66,15 @@ class Claim(NopClaim):
correct decisions with respect to host selection.
"""
def __init__(self, context, container, tracker, resources, limits=None):
def __init__(self, context, container, tracker, resources, pci_requests,
limits=None):
super(Claim, self).__init__()
# Stash a copy of the container at the current point of time
self.container = container.obj_clone()
self._numa_topology_loaded = False
self.tracker = tracker
self.context = context
self._pci_requests = pci_requests
# Check the claim in the constructor to avoid messy code.
# Raise ComputeResourcesUnavailable if the claim fails.
@ -92,7 +94,7 @@ class Claim(NopClaim):
def abort(self):
"""Requiring claimed resources has failed or been aborted."""
LOG.debug("Aborting claim: %s", self)
self.tracker.abort_container_claim(self.container)
self.tracker.abort_container_claim(self.context, self.container)
def _claim_test(self, resources, limits=None):
"""Test if this claim can be satisfied.
@ -118,7 +120,8 @@ class Claim(NopClaim):
{'memory': self.memory, 'cpu': self.cpu})
reasons = [self._test_memory(resources, memory_limit),
self._test_cpu(resources, cpu_limit)]
self._test_cpu(resources, cpu_limit),
self._test_pci()]
# TODO(Shunli): test numa here
reasons = [r for r in reasons if r is not None]
if len(reasons) > 0:
@ -126,6 +129,13 @@ class Claim(NopClaim):
LOG.info('Claim successful')
def _test_pci(self):
pci_requests = self._pci_requests
if pci_requests and pci_requests.requests:
stats = self.tracker.pci_tracker.stats
if not stats.support_requests(pci_requests.requests):
return _('Claim pci failed')
def _test_memory(self, resources, limit):
type_ = _("memory")
unit = "MB"

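The new _test_pci hook above simply asks the tracker's PCI stats whether the pending requests can be satisfied. Conceptually the stats keep per-device-pool counts, and a request is supportable when matching pools hold enough free devices; the simplified sketch below shows that idea (the real bookkeeping in zun.pci.stats is more involved, and the pool layout here is illustrative).

# Simplified sketch only; pool contents are hypothetical.
def supports_request(pools, request):
    """True if the free pools can cover one PCI request."""
    needed = request['count']
    for pool in pools:
        # A pool can serve the request if it matches any spec entry.
        if any(all(pool.get(k) == v for k, v in spec.items())
               for spec in request['spec']):
            needed -= min(needed, pool['count'])
            if needed <= 0:
                return True
    return needed <= 0

pools = [{'vendor_id': '8086', 'product_id': '10ed',      # hypothetical VF pool
          'physical_network': 'physnet2', 'count': 4}]
req = {'count': 1, 'spec': [{'physical_network': 'physnet2'}]}
print(supports_request(pools, req))                        # True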

@ -76,7 +76,7 @@ class ComputeNodeTracker(object):
{'host': self.host})
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def container_claim(self, context, container, hostname, limits=None):
def container_claim(self, context, container, pci_requests, limits=None):
"""Indicate resources are needed for an upcoming container build.
This should be called before the compute node is about to perform
@ -85,29 +85,31 @@ class ComputeNodeTracker(object):
:param context: security context
:param container: container to reserve resources for.
:type container: zun.objects.container.Container object
:param hostname: The zun hostname selected by the scheduler
:param pci_requests: PCI requests for SR-IOV ports.
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs.
:returns: A Claim ticket representing the reserved resources. It can
be used to revert the resource usage if an error occurs
during the container build.
"""
# No memory and cpu specified, no need to claim resource now.
if not (container.memory or container.cpu):
# No memory, cpu, or pci_request specified, no need to claim resource
# now.
if not (container.memory or container.cpu or pci_requests):
self._set_container_host(context, container)
return claims.NopClaim()
# We should have the compute node created here, just get it.
self.compute_node = self._get_compute_node(context)
if self.disabled(hostname):
self._set_container_host(context, container)
return claims.NopClaim()
claim = claims.Claim(context, container, self, self.compute_node,
limits=limits)
pci_requests, limits=limits)
if self.pci_tracker:
self.pci_tracker.claim_container(context, container.uuid,
pci_requests)
self._set_container_host(context, container)
self._update_usage_from_container(container)
self._update_usage_from_container(context, container)
# persist changes to the compute node:
self._update(self.compute_node)
@ -125,7 +127,8 @@ class ComputeNodeTracker(object):
container.host = self.host
container.save(context)
def _update_usage_from_container(self, container, is_removed=False):
def _update_usage_from_container(self, context, container,
is_removed=False):
"""Update usage for a single container."""
uuid = container.uuid
@ -142,7 +145,9 @@ class ComputeNodeTracker(object):
sign = -1
if is_new_container or is_removed_container:
# TODO(Shunli): Handle pci, scheduler allocation here.
if self.pci_tracker:
self.pci_tracker.update_pci_for_container(context, container,
sign=sign)
# new container, update compute node resource usage:
self._update_usage(self._get_usage_dict(container), sign=sign)
@ -164,7 +169,7 @@ class ComputeNodeTracker(object):
cn.running_containers = 0
for cnt in containers:
self._update_usage_from_container(cnt)
self._update_usage_from_container(context, cnt)
cn.mem_free = max(0, cn.mem_free)
@ -250,9 +255,9 @@ class ComputeNodeTracker(object):
return usage
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def abort_container_claim(self, container):
def abort_container_claim(self, context, container):
"""Remove usage from the given container."""
self._update_usage_from_container(container, is_removed=True)
self._update_usage_from_container(context, container, is_removed=True)
self._update(self.compute_node)
@ -263,5 +268,5 @@ class ComputeNodeTracker(object):
# We need to get the latest compute node info
self.compute_node = self._get_compute_node(context)
self._update_usage_from_container(container, is_removed)
self._update_usage_from_container(context, container, is_removed)
self._update(self.compute_node)

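As the container_claim docstring says, the returned claim is a ticket that can revert usage if the build fails; in practice it is used as a context manager, so an exception inside the guarded block triggers abort() and the claimed PCI devices and usage are released. A toy sketch of that pattern follows, assuming the NopClaim-style __enter__/__exit__ used by zun's claims.

# Toy sketch of the claim-as-context-manager pattern; not zun code.
class SketchClaim(object):
    def __init__(self, tracker):
        self.tracker = tracker

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            self.abort()                      # revert usage on failure

    def abort(self):
        self.tracker.abort_container_claim()  # frees claimed PCI devices/usage

class SketchTracker(object):
    def abort_container_claim(self):
        print('claim aborted, resources released')

try:
    with SketchClaim(SketchTracker()):
        raise RuntimeError('container build failed')   # hypothetical failure
except RuntimeError:
    pass                                               # claim already reverted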

@ -63,14 +63,14 @@ class Manager(periodic_task.PeriodicTasks):
container.save(context)
def container_create(self, context, limits, requested_networks,
requested_volumes, container, run):
requested_volumes, container, run, pci_requests=None):
@utils.synchronized(container.uuid)
def do_container_create():
if not self._attach_volumes(context, container, requested_volumes):
return
created_container = self._do_container_create(
context, container, requested_networks, requested_volumes,
limits)
pci_requests, limits)
if run and created_container:
self._do_container_start(context, created_container)
@ -92,8 +92,8 @@ class Manager(periodic_task.PeriodicTasks):
container.save(context)
def _do_container_create_base(self, context, container, requested_networks,
requested_volumes,
sandbox=None, limits=None, reraise=False):
requested_volumes, sandbox=None, limits=None,
reraise=False):
self._update_task_state(context, container, consts.IMAGE_PULLING)
repo, tag = utils.parse_image_name(container.image)
image_pull_policy = utils.get_image_pull_policy(
@ -130,17 +130,13 @@ class Manager(periodic_task.PeriodicTasks):
container.image_driver = image.get('driver')
container.save(context)
try:
limits = limits
rt = self._get_resource_tracker()
if image['driver'] == 'glance':
self.driver.read_tar_image(image)
with rt.container_claim(context, container, container.host,
limits):
container = self.driver.create(context, container, image,
requested_networks,
requested_volumes)
self._update_task_state(context, container, None)
return container
container = self.driver.create(context, container, image,
requested_networks,
requested_volumes)
self._update_task_state(context, container, None)
return container
except exception.DockerError as e:
with excutils.save_and_reraise_exception(reraise=reraise):
LOG.error("Error occurred while calling Docker create API: %s",
@ -159,26 +155,37 @@ class Manager(periodic_task.PeriodicTasks):
return
def _do_container_create(self, context, container, requested_networks,
requested_volumes,
requested_volumes, pci_requests=None,
limits=None, reraise=False):
LOG.debug('Creating container: %s', container.uuid)
sandbox = None
if self.use_sandbox:
sandbox = self._create_sandbox(context, container,
requested_networks,
requested_volumes,
reraise)
if sandbox is None:
return
try:
rt = self._get_resource_tracker()
# As SR-IOV ports also need to be claimed, claim the PCI devices
# before creating the sandbox.
with rt.container_claim(context, container, pci_requests, limits):
sandbox = None
if self.use_sandbox:
sandbox = self._create_sandbox(context, container,
requested_networks,
requested_volumes,
reraise)
if sandbox is None:
return
created_container = self._do_container_create_base(context,
container,
requested_networks,
requested_volumes,
sandbox, limits,
reraise)
return created_container
created_container = self._do_container_create_base(
context, container, requested_networks, requested_volumes,
sandbox, limits, reraise)
return created_container
except Exception as e:
# Other exceptions are handled in create sandbox and create base; an
# exception occurring here can only mean the claim failed.
with excutils.save_and_reraise_exception(reraise=reraise):
LOG.exception("Container resource claim failed: %s",
six.text_type(e))
self._fail_container(context, container, six.text_type(e),
unset_host=True)
return
def _attach_volumes(self, context, container, volumes):
try:
@ -811,8 +818,8 @@ class Manager(periodic_task.PeriodicTasks):
self._do_container_create_base(context,
capsule.containers[k],
requested_networks,
sandbox,
limits)
sandbox=sandbox,
limits=limits)
if created_container:
self._do_container_start(context, created_container)


@ -56,12 +56,14 @@ class API(rpc_service.API):
transport, context, topic=zun.conf.CONF.compute.topic)
def container_create(self, context, host, container, limits,
requested_networks, requested_volumes, run):
requested_networks, requested_volumes, run,
pci_requests):
self._cast(host, 'container_create', limits=limits,
requested_networks=requested_networks,
requested_volumes=requested_volumes,
container=container,
run=run)
run=run,
pci_requests=pci_requests)
@check_container_host
def container_delete(self, context, container, force):


@ -12,6 +12,7 @@
import ipaddress
import six
import time
from neutronclient.common import exceptions
from oslo_log import log as logging
@ -22,18 +23,29 @@ from zun.common.i18n import _
import zun.conf
from zun.network import network
from zun.network import neutron
from zun.objects import fields as obj_fields
from zun.pci import manager as pci_manager
from zun.pci import utils as pci_utils
from zun.pci import whitelist as pci_whitelist
CONF = zun.conf.CONF
LOG = logging.getLogger(__name__)
BINDING_PROFILE = 'binding:profile'
BINDING_HOST_ID = 'binding:host_id'
class KuryrNetwork(network.Network):
def init(self, context, docker_api):
self.docker = docker_api
self.neutron_api = neutron.NeutronAPI(context)
self.context = context
self.pci_whitelist = pci_whitelist.Whitelist(
CONF.pci.passthrough_whitelist)
self.last_neutron_extension_sync = None
self.extensions = {}
def create_network(self, name, neutron_net_id):
"""Create a docker network with Kuryr driver.
@ -144,6 +156,24 @@ class KuryrNetwork(network.Network):
# We might revisit this behaviour later. Alternatively, we could
# either throw an exception or overwrite the port's security
# groups.
# If there is pci_request_id, it should be a sriov port.
# populate pci related info.
pci_request_id = requested_network.get('pci_request_id')
if pci_request_id:
port_req_body = {'port': {'device_id': container.uuid}}
self._populate_neutron_extension_values(container,
pci_request_id,
port_req_body)
self._populate_pci_mac_address(container,
pci_request_id,
port_req_body)
# NOTE(hongbin): Use admin context here because non-admin
# context might not be able to update some attributes
# (i.e. binding:profile).
admin_context = self.neutron_api.context.elevated()
neutron_api = neutron.NeutronAPI(admin_context)
neutron_api.update_port(neutron_port_id, port_req_body)
else:
network = self.inspect_network(network_name)
neutron_net_id = network['Options']['neutron.net.uuid']
@ -246,3 +276,89 @@ class KuryrNetwork(network.Network):
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Neutron Error:")
def _refresh_neutron_extensions_cache(self):
"""Refresh the neutron extensions cache when necessary."""
if (not self.last_neutron_extension_sync or
((time.time() - self.last_neutron_extension_sync)
>= CONF.neutron.extension_sync_interval)):
extensions_list = self.neutron_api.list_extensions()['extensions']
self.last_neutron_extension_sync = time.time()
self.extensions.clear()
self.extensions = {ext['name']: ext for ext in extensions_list}
def _has_port_binding_extension(self, refresh_cache=False):
if refresh_cache:
self._refresh_neutron_extensions_cache()
return "Port Binding" in self.extensions
def _populate_neutron_extension_values(self, container,
pci_request_id,
port_req_body):
"""Populate neutron extension values for the instance.
If the extensions loaded contain QOS_QUEUE then pass the rxtx_factor.
"""
self._refresh_neutron_extensions_cache()
has_port_binding_extension = (
self._has_port_binding_extension())
if has_port_binding_extension:
self._populate_neutron_binding_profile(container,
pci_request_id,
port_req_body)
def _populate_neutron_binding_profile(self, container, pci_request_id,
port_req_body):
"""Populate neutron binding:profile.
Populate it with SR-IOV related information
"""
if pci_request_id:
pci_dev = pci_manager.get_container_pci_devs(
container, pci_request_id).pop()
profile = self._get_pci_device_profile(pci_dev)
port_req_body['port'][BINDING_PROFILE] = profile
def _populate_pci_mac_address(self, container, pci_request_id,
port_req_body):
"""Add the updated MAC address value to the update_port request body.
Currently this is done only for PF passthrough.
"""
if pci_request_id is not None:
pci_devs = pci_manager.get_container_pci_devs(
container, pci_request_id)
if len(pci_devs) != 1:
# NOTE(ndipanov): We shouldn't ever get here since
# ContainerPCIRequest instances built from network requests
# only ever index a single device, which needs to be
# successfully claimed for this to be called as part of
# allocate_networks method
LOG.error("PCI request %(pci_request_id)s does not have a "
"unique device associated with it. Unable to "
"determine MAC address",
{'pci_request_id': pci_request_id},
container=container)
return
pci_dev = pci_devs[0]
if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_PF:
try:
mac = pci_utils.get_mac_by_pci_address(pci_dev.address)
except exception.PciDeviceNotFoundById as e:
LOG.error("Could not determine MAC address for %(addr)s, "
"error: %(e)s",
{"addr": pci_dev.address, "e": e},
container=container)
else:
port_req_body['port']['mac_address'] = mac
def _get_pci_device_profile(self, pci_dev):
dev_spec = self.pci_whitelist.get_devspec(pci_dev)
if dev_spec:
return {'pci_vendor_info': "%s:%s" % (pci_dev.vendor_id,
pci_dev.product_id),
'pci_slot': pci_dev.address,
'physical_network':
dev_spec.get_tags().get('physical_network')}
raise exception.PciDeviceNotFound(node_id=pci_dev.compute_node_uuid,
address=pci_dev.address)
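For an SR-IOV port, the code above ends up sending Neutron a port update whose binding:profile describes the claimed PCI device (and, for PF passthrough, also rewrites the MAC address). A rough sketch of the body handed to update_port, with hypothetical device and uuid values:

# Illustrative body only; device and uuid values are hypothetical.
pci_dev = {'vendor_id': '8086', 'product_id': '10ed',
           'address': '0000:05:10.1', 'dev_type': 'type-VF'}

port_req_body = {
    'port': {
        'device_id': '<container-uuid>',
        'binding:profile': {
            'pci_vendor_info': '%s:%s' % (pci_dev['vendor_id'],
                                          pci_dev['product_id']),
            'pci_slot': pci_dev['address'],
            'physical_network': 'physnet2',
        },
    },
}
# For dev_type 'type-PF' the driver would additionally set
# port_req_body['port']['mac_address'] before calling
# neutron_api.update_port(neutron_port_id, port_req_body) with an admin context.
print(port_req_body)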

zun/network/model.py (new file, 36 lines added)

@ -0,0 +1,36 @@
# Copyright 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Define supported virtual NIC types. VNIC_TYPE_DIRECT and VNIC_TYPE_MACVTAP
# are used for SR-IOV ports
VNIC_TYPE_NORMAL = 'normal'
VNIC_TYPE_DIRECT = 'direct'
VNIC_TYPE_MACVTAP = 'macvtap'
VNIC_TYPE_DIRECT_PHYSICAL = 'direct-physical'
VNIC_TYPE_BAREMETAL = 'baremetal'
VNIC_TYPE_VIRTIO_FORWARDER = 'virtio-forwarder'
# Define the list of port VNIC types which need a PCI request.
# Note: The macvtap port needs a PCI request as it is a tap interface
# with VF as the lower physical interface.
# Note: Currently, VNIC_TYPE_VIRTIO_FORWARDER assumes a 1:1
# relationship with a VF. This is expected to change in the future.
VNIC_TYPES_SRIOV = (VNIC_TYPE_DIRECT, VNIC_TYPE_MACVTAP,
VNIC_TYPE_DIRECT_PHYSICAL, VNIC_TYPE_VIRTIO_FORWARDER)
# Define list of ports which are passthrough to the guest
# and need a special treatment on snapshot and suspend/resume
VNIC_TYPES_DIRECT_PASSTHROUGH = (VNIC_TYPE_DIRECT,
VNIC_TYPE_DIRECT_PHYSICAL)


@ -13,6 +13,7 @@
from zun.objects import capsule
from zun.objects import compute_node
from zun.objects import container
from zun.objects import container_pci_requests
from zun.objects import image
from zun.objects import numa
from zun.objects import pci_device
@ -34,6 +35,8 @@ ComputeNode = compute_node.ComputeNode
Capsule = capsule.Capsule
PciDevice = pci_device.PciDevice
PciDevicePool = pci_device_pool.PciDevicePool
ContainerPCIRequest = container_pci_requests.ContainerPCIRequest
ContainerPCIRequests = container_pci_requests.ContainerPCIRequests
__all__ = (
Container,
@ -48,4 +51,6 @@ __all__ = (
Capsule,
PciDevice,
PciDevicePool,
ContainerPCIRequest,
ContainerPCIRequests,
)


@ -51,7 +51,8 @@ class Container(base.ZunPersistentObject, base.ZunObject):
# Version 1.20: Change runtime to String type
# Version 1.21: Add pci_device attribute
# Version 1.22: Add 'Deleting' to ContainerStatus
VERSION = '1.22'
# Version 1.23: Add the missing 'pci_devices' attribute
VERSION = '1.23'
fields = {
'id': fields.IntegerField(),
@ -84,13 +85,17 @@ class Container(base.ZunPersistentObject, base.ZunObject):
'websocket_url': fields.StringField(nullable=True),
'websocket_token': fields.StringField(nullable=True),
'security_groups': fields.ListOfStringsField(nullable=True),
'runtime': fields.StringField(nullable=True)
'runtime': fields.StringField(nullable=True),
'pci_devices': fields.ListOfObjectsField('PciDevice',
nullable=True)
}
@staticmethod
def _from_db_object(container, db_container):
"""Converts a database entity to a formal object."""
for field in container.fields:
if field in ['pci_devices']:
continue
setattr(container, field, db_container[field])
container.obj_reset_changes()


@ -0,0 +1,67 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from oslo_versionedobjects import fields
from zun.objects import base
@base.ZunObjectRegistry.register
class ContainerPCIRequest(base.ZunPersistentObject, base.ZunObject):
# Version 1.0: Add request_id
VERSION = '1.0'
fields = {
'count': fields.IntegerField(),
'spec': fields.ListOfDictOfNullableStringsField(),
'alias_name': fields.StringField(nullable=True),
# Note(moshele): is_new is deprecated and should be removed
# on major version bump
'is_new': fields.BooleanField(default=False),
'request_id': fields.UUIDField(nullable=True),
}
def obj_load_attr(self, attr):
setattr(self, attr, None)
def obj_make_compatible(self, primitive, target_version):
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 1) and 'request_id' in primitive:
del primitive['request_id']
@base.ZunObjectRegistry.register
class ContainerPCIRequests(base.ZunPersistentObject, base.ZunObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'container_uuid': fields.UUIDField(),
'requests': fields.ListOfObjectsField('ContainerPCIRequest'),
}
def to_json(self):
blob = [{'count': x.count,
'spec': x.spec,
'alias_name': x.alias_name,
'is_new': x.is_new,
'request_id': x.request_id} for x in self.requests]
return jsonutils.dumps(blob)
@classmethod
def from_request_spec_container_props(cls, pci_requests):
objs = [ContainerPCIRequest(**request)
for request in pci_requests['requests']]
return cls(requests=objs,
container_uuid=pci_requests['container_uuid'])
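ContainerPCIRequests serializes to a plain JSON list so it can travel over RPC and be stored; a quick sketch of that round trip using bare dicts in place of the versioned objects (spec values are hypothetical):

# Sketch of the to_json()-style blob; plain dicts stand in for the objects.
import json
import uuid

request = {'count': 1,
           'spec': [{'physical_network': 'physnet2'}],
           'alias_name': None,
           'is_new': False,
           'request_id': str(uuid.uuid4())}

blob = json.dumps([request])          # shape of ContainerPCIRequests.to_json()
restored = json.loads(blob)
assert restored[0]['spec'][0]['physical_network'] == 'physnet2'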


@ -276,11 +276,11 @@ class PciDevice(base.ZunPersistentObject, base.ZunObject):
address=self.address, status=self.status,
hopestatus=ok_statuses)
if (self.status == z_fields.PciDeviceStatus.CLAIMED and
self.container_uuid != container['uuid']):
self.container_uuid != container.uuid):
raise exception.PciDeviceInvalidOwner(
compute_node_uuid=self.compute_node_uuid,
address=self.address, owner=self.container_uuid,
hopeowner=container['uuid'])
hopeowner=container.uuid)
if self.dev_type == z_fields.PciDeviceType.SRIOV_PF:
vfs_list = self.child_devices
if not all([vf.status in dependants_ok_statuses for
@ -309,9 +309,9 @@ class PciDevice(base.ZunPersistentObject, base.ZunObject):
'vf_addr': self.address})
self.status = z_fields.PciDeviceStatus.ALLOCATED
self.container_uuid = container['uuid']
self.container_uuid = container.uuid
container.pci_devices.objects.append(copy.copy(self))
container.pci_devices.append(copy.copy(self))
def remove(self):
if self.status != z_fields.PciDeviceStatus.AVAILABLE:
@ -363,7 +363,7 @@ class PciDevice(base.ZunPersistentObject, base.ZunObject):
self.container_uuid = None
self.request_id = None
if old_status == z_fields.PciDeviceStatus.ALLOCATED and container:
existed = next((dev for dev in container['pci_devices']
existed = next((dev for dev in container.pci_devices
if dev.id == self.id))
container.pci_devices.objects.remove(existed)
return free_devs


@ -209,12 +209,11 @@ class PciDevTracker(object):
self._build_device_tree(self.pci_devs)
def _claim_container(self, context, pci_requests):
def _claim_container(self, context, container_uuid, pci_requests):
devs = self.stats.consume_requests(pci_requests.requests)
if not devs:
return None
container_uuid = pci_requests.container_uuid
for dev in devs:
dev.claim(container_uuid)
return devs
@ -224,17 +223,15 @@ class PciDevTracker(object):
dev.allocate(container)
def allocate_container(self, container):
devs = self.claims.pop(container['uuid'], [])
devs = self.claims.pop(container.uuid, [])
self._allocate_container(container, devs)
if devs:
self.allocations[container['uuid']] += devs
self.allocations[container.uuid] += devs
def claim_container(self, context, pci_requests, container_numa_topology):
def claim_container(self, context, container_uuid, pci_requests):
devs = []
if self.pci_devs and pci_requests.requests:
container_uuid = pci_requests.container_uuid
devs = self._claim_container(context, pci_requests,
container_numa_topology)
if self.pci_devs and pci_requests and pci_requests.requests:
devs = self._claim_container(context, container_uuid, pci_requests)
if devs:
self.claims[container_uuid] = devs
return devs
@ -250,11 +247,11 @@ class PciDevTracker(object):
# Find the matching pci device in the pci resource tracker.
# Once found, free it.
if (dev.id == pci_dev.id and
dev.container_uuid == container['uuid']):
dev.container_uuid == container.uuid):
self._remove_device_from_pci_mapping(
container['uuid'], pci_dev, self.allocations)
container.uuid, pci_dev, self.allocations)
self._remove_device_from_pci_mapping(
container['uuid'], pci_dev, self.claims)
container.uuid, pci_dev, self.claims)
self._free_device(pci_dev)
break
@ -282,13 +279,13 @@ class PciDevTracker(object):
for dev in self.pci_devs:
if dev.status in (fields.PciDeviceStatus.CLAIMED,
fields.PciDeviceStatus.ALLOCATED):
if dev.container_uuid == container['uuid']:
if dev.container_uuid == container.uuid:
self._free_device(dev)
def free_container(self, context, container):
if self.allocations.pop(container['uuid'], None):
if self.allocations.pop(container.uuid, None):
self._free_container(container)
elif self.claims.pop(container['uuid'], None):
elif self.claims.pop(container.uuid, None):
self._free_container(container)
def update_pci_for_container(self, context, container, sign):
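The tracker above keeps two maps: devices land in self.claims when a request is claimed and move to self.allocations once the container is actually created; freeing a container checks both. A minimal sketch of that bookkeeping with plain dicts (device addresses are hypothetical):

# Minimal sketch of the claims -> allocations bookkeeping; not zun code.
claims, allocations = {}, {}

def claim(container_uuid, devs):
    claims[container_uuid] = list(devs)

def allocate(container_uuid):
    devs = claims.pop(container_uuid, [])
    if devs:
        allocations.setdefault(container_uuid, []).extend(devs)

def free(container_uuid):
    return allocations.pop(container_uuid, None) or claims.pop(container_uuid, None)

claim('c1', ['0000:05:10.1'])
allocate('c1')
print(free('c1'))                     # ['0000:05:10.1']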

zun/pci/request.py (new file, 24 lines added)

@ -0,0 +1,24 @@
# Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zun.network import model as network_model
from zun.objects import fields as obj_fields
PCI_NET_TAG = 'physical_network'
PCI_DEVICE_TYPE_TAG = 'dev_type'
DEVICE_TYPE_FOR_VNIC_TYPE = {
network_model.VNIC_TYPE_DIRECT_PHYSICAL: obj_fields.PciDeviceType.SRIOV_PF
}


@ -597,6 +597,7 @@ class TestContainerController(api_base.FunctionalTest):
self.assertEqual(fake_network['id'], requested_networks[0]['network'])
@patch('zun.common.policy.enforce')
@patch('neutronclient.v2_0.client.Client.show_port')
@patch('zun.network.neutron.NeutronAPI.get_neutron_network')
@patch('zun.network.neutron.NeutronAPI.get_neutron_port')
@patch('zun.network.neutron.NeutronAPI.ensure_neutron_port_usable')
@ -607,13 +608,14 @@ class TestContainerController(api_base.FunctionalTest):
def test_create_container_with_requested_neutron_port(
self, mock_search, mock_container_delete, mock_container_create,
mock_container_show, mock_ensure_port_usable, mock_get_port,
mock_get_network, mock_policy):
mock_get_network, mock_show_port, mock_policy):
mock_policy.return_value = True
mock_container_create.side_effect = lambda x, y, **z: y
fake_port = {'network_id': 'foo', 'id': 'bar'}
fake_private_network = {'router:external': False, 'shared': False}
mock_get_port.return_value = fake_port
mock_get_network.return_value = fake_private_network
mock_show_port.return_value = {'port': fake_port}
# Create a container with a command
params = ('{"name": "MyDocker", "image": "ubuntu",'
'"command": "env", "memory": "512",'


@ -19,7 +19,6 @@ from six import StringIO
from zun.common import consts
from zun.common import exception
from zun.compute import claims
from zun.compute import compute_node_tracker
from zun.compute import manager
import zun.conf
from zun.objects.container import Container
@ -32,9 +31,12 @@ from zun.tests.unit.db import utils
class FakeResourceTracker(object):
def container_claim(self, context, container, host, limits):
def container_claim(self, context, container, pci_requests, limits):
return claims.NopClaim()
def remove_usage_from_container(self, context, container, is_removed=True):
pass
class FakeVolumeMapping(object):
@ -65,6 +67,7 @@ class TestManager(base.TestCase):
'container_driver',
'zun.tests.unit.container.fake_driver.FakeDriver')
self.compute_manager = manager.Manager()
self.compute_manager._resource_tracker = FakeResourceTracker()
@mock.patch.object(Container, 'save')
def test_fail_container(self, mock_save):
@ -376,7 +379,7 @@ class TestManager(base.TestCase):
mock_detach_volume.assert_called_once()
self.assertEqual(0, len(FakeVolumeMapping.volumes))
@mock.patch.object(compute_node_tracker.ComputeNodeTracker,
@mock.patch.object(FakeResourceTracker,
'remove_usage_from_container')
@mock.patch.object(Container, 'destroy')
@mock.patch.object(Container, 'save')
@ -395,7 +398,7 @@ class TestManager(base.TestCase):
mock_remove_usage.assert_called_once_with(self.context, container,
True)
@mock.patch.object(compute_node_tracker.ComputeNodeTracker,
@mock.patch.object(FakeResourceTracker,
'remove_usage_from_container')
@mock.patch.object(Container, 'destroy')
@mock.patch.object(manager.Manager, '_fail_container')
@ -416,7 +419,7 @@ class TestManager(base.TestCase):
mock_destroy.assert_not_called()
mock_remove_usage.assert_not_called()
@mock.patch.object(compute_node_tracker.ComputeNodeTracker,
@mock.patch.object(FakeResourceTracker,
'remove_usage_from_container')
@mock.patch.object(Container, 'destroy')
@mock.patch.object(manager.Manager, '_fail_container')
@ -441,7 +444,7 @@ class TestManager(base.TestCase):
mock_remove_usage.assert_called_once_with(self.context, container,
True)
@mock.patch.object(compute_node_tracker.ComputeNodeTracker,
@mock.patch.object(FakeResourceTracker,
'remove_usage_from_container')
@mock.patch.object(Container, 'destroy')
@mock.patch.object(manager.Manager, '_fail_container')
@ -466,7 +469,7 @@ class TestManager(base.TestCase):
mock_destroy.assert_not_called()
mock_remove_usage.assert_not_called()
@mock.patch.object(compute_node_tracker.ComputeNodeTracker,
@mock.patch.object(FakeResourceTracker,
'remove_usage_from_container')
@mock.patch.object(Container, 'destroy')
@mock.patch.object(manager.Manager, '_fail_container')


@ -43,6 +43,9 @@ class FakeNeutronClient(object):
return {'fixed_ips': [{'ip_address': '192.168.2.22'}],
'id': '1234567'}
def list_extensions(self):
return {'extensions': []}
class FakeDockerClient(object):


@ -344,7 +344,7 @@ class TestObject(test_base.TestCase, _TestObject):
# For more information on object version testing, read
# https://docs.openstack.org/zun/latest/
object_data = {
'Container': '1.22-ce965ef78647d4d20dd7c60b2eb1c19a',
'Container': '1.23-4469205888f8aec51af98375eef6b81a',
'VolumeMapping': '1.0-187aeb163610315595be729df1c642fc',
'Image': '1.0-0b976be24f4f6ee0d526e5c981ce0633',
'MyObj': '1.0-34c4b1aadefd177b13f9a2f894cc23cd',
@ -357,7 +357,9 @@ object_data = {
'PciDevice': '1.1-6e3f0851ad1cf12583e6af4df1883979',
'ComputeNode': '1.9-e8536102d3b28cb3378e9e26f508cd72',
'PciDevicePool': '1.0-3f5ddc3ff7bfa14da7f6c7e9904cc000',
'PciDevicePoolList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e'
'PciDevicePoolList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'ContainerPCIRequest': '1.0-b060f9f9f734bedde79a71a4d3112ee0',
'ContainerPCIRequests': '1.0-7b8f7f044661fe4e24e6949c035af2c4'
}