Add vnf packages RestFul APIs

Implemented following APIs:-

* GET    /vnf_packages
* POST   /vnf_packages/
* GET    /vnf_packages/{vnfPkgId}
* DELETE /vnf_packages/{vnfPkgId}
* PUT    /vnf_packages/{vnfPkgId}/package_content
* POST   /vnf_packages/{vnfPkgId}/package_content/upload_from_uri

Partial-Implements: blueprint tosca-csar-mgmt-driver

Depends-On: If8155399df12a96cb86631dfa22eaca7a5a8d398

Co-Authored-By: Neha Alhat <neha.alhat@nttdata.com>
Change-Id: Id3b4812e24a1ed84fe94429e074f96ae11530517
This commit is contained in:
Niraj Singh 2019-08-08 10:09:11 +00:00 committed by nirajsingh
parent 8cdf3e543b
commit 4fa204b370
43 changed files with 2369 additions and 30 deletions

View File

@ -1,6 +1,7 @@
[DEFAULT]
output_file = etc/tacker/tacker.conf.sample
wrap_width = 79
namespace = glance.store
namespace = tacker.common.config
namespace = tacker.conf
namespace = tacker.wsgi

View File

@ -33,6 +33,7 @@ fixtures==3.0.0
flake8==2.5.5
future==0.16.0
futurist==1.6.0
glance-store==0.26.1
google-auth==1.4.1
greenlet==0.4.13
hacking==0.12.0
@ -151,7 +152,7 @@ tenacity==4.9.0
testresources==2.0.1
testscenarios==0.5.0
testtools==2.2.0
tosca-parser==0.8.1
tosca-parser==1.6.0
traceback2==1.4.0
unittest2==1.1.0
urllib3==1.22

View File

@ -39,7 +39,7 @@ oslo.versionedobjects>=1.33.3 # Apache-2.0
openstackdocstheme>=1.20.0 # Apache-2.0
python-neutronclient>=6.7.0 # Apache-2.0
python-novaclient>=9.1.0 # Apache-2.0
tosca-parser>=0.8.1 # Apache-2.0
tosca-parser>=1.6.0 # Apache-2.0
heat-translator>=1.3.1 # Apache-2.0
cryptography>=2.1 # BSD/Apache-2.0
paramiko>=2.0.0 # LGPLv2.1+
@ -50,3 +50,6 @@ castellan>=0.16.0 # Apache-2.0
kubernetes>=5.0.0 # Apache-2.0
setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=21.0.0 # PSF/ZPL
PyYAML>=3.12 # MIT
# Glance Store
glance-store>=0.26.1 # Apache-2.0

Binary file not shown.

View File

@ -0,0 +1,105 @@
# Copyright (C) 2019 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.objects import fields
class ViewBuilder(object):
    """Build JSON response bodies for the vnfpkgm vnf_packages API."""

    def _get_links(self, vnf_package):
        """HATEOAS links for a package: itself and its content sub-resource."""
        base_href = '/vnfpkgm/v1/vnf_packages/%s' % vnf_package.id
        return {
            "_links": {
                "self": {"href": base_href},
                "packageContent": {"href": base_href + '/package_content'}
            }
        }

    def _get_software_images(self, vnf_deployment_flavours):
        """Flatten software images across all deployment flavours."""
        def _image_view(img):
            return {
                "id": img.software_image_id,
                "name": img.name,
                # NOTE(review): literal placeholder, not img.provider —
                # confirm whether the software image's provider attribute
                # was intended here.
                "provider": "provider",
                "version": img.version,
                "checksum": {
                    "algorithm": img.algorithm,
                    "hash": img.hash
                },
                "containerFormat": img.container_format,
                "diskFormat": img.disk_format,
                "minDisk": img.min_disk,
                "minRam": img.min_ram,
                "size": img.size,
                "imagePath": img.image_path,
                "userMetadata": img.metadata
            }

        images = [_image_view(img)
                  for flavour in vnf_deployment_flavours
                  for img in flavour.software_images]
        return {'softwareImages': images}

    def _get_vnfd(self, vnf_package):
        """VNFD summary attributes exposed once a package is onboarded."""
        vnfd = vnf_package.vnfd
        return {
            'vnfdId': vnfd.vnfd_id,
            'vnfProvider': vnfd.vnf_provider,
            'vnfProductName': vnfd.vnf_product_name,
            'vnfSoftwareVersion': vnfd.vnf_software_version,
            'vnfdVersion': vnfd.vnfd_version
        }

    def _basic_vnf_package_info(self, vnf_package):
        """Attributes present on every package regardless of state."""
        return {
            'id': vnf_package.id,
            'onboardingState': vnf_package.onboarding_state,
            'operationalState': vnf_package.operational_state,
            'usageState': vnf_package.usage_state,
            'userDefinedData': vnf_package.user_data,
        }

    def _get_vnf_package(self, vnf_package):
        """Assemble the full response document for one package."""
        response = self._basic_vnf_package_info(vnf_package)
        response.update(self._get_links(vnf_package))
        # Software image and VNFD details only exist once onboarded.
        if (vnf_package.onboarding_state ==
                fields.PackageOnboardingStateType.ONBOARDED):
            response.update(self._get_software_images(
                vnf_package.vnf_deployment_flavours))
            response.update(self._get_vnfd(vnf_package))
        return response

    def create(self, request, vnf_package):
        return self._get_vnf_package(vnf_package)

    def show(self, request, vnf_package):
        return self._get_vnf_package(vnf_package)

    def index(self, request, vnf_packages):
        return {'vnf_packages': [self._get_vnf_package(pkg)
                                 for pkg in vnf_packages]}

View File

@ -13,30 +13,224 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
from six.moves import http_client
from six.moves import urllib
import webob
from tacker._i18n import _
from tacker.api.schemas import vnf_packages
from tacker.api import validation
from tacker.api.views import vnf_packages as vnf_packages_view
from tacker.common import exceptions
from tacker.conductor.conductorrpc import vnf_pkgm_rpc
from tacker.glance_store import store as glance_store
from tacker.objects import fields
from tacker.objects import vnf_package as vnf_package_obj
from tacker.policies import vnf_package as vnf_package_policies
from tacker import wsgi
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VnfPkgmController(wsgi.Controller):
    """Controller for the ETSI NFV-SOL005 VNF package management API."""

    _view_builder_class = vnf_packages_view.ViewBuilder

    def __init__(self):
        super(VnfPkgmController, self).__init__()
        self.rpc_api = vnf_pkgm_rpc.VNFPackageRPCAPI()
        glance_store.initialize_glance_store()

    def _get_vnf_package(self, request, id, expected_attrs=None):
        """Fetch a VnfPackage by uuid, raising HTTPNotFound on failure.

        A non-uuid id is treated the same way as a missing package.
        """
        if not uuidutils.is_uuid_like(id):
            msg = _("Can not find requested vnf package: %s") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        kwargs = {'expected_attrs': expected_attrs} if expected_attrs else {}
        try:
            return vnf_package_obj.VnfPackage.get_by_id(
                request.context, id, **kwargs)
        except exceptions.VnfPackageNotFound:
            msg = _("Can not find requested vnf package: %s") % id
            raise webob.exc.HTTPNotFound(explanation=msg)

    def _check_onboarding_state_is_created(self, id, vnf_package):
        """Raise HTTPConflict unless the package is still in CREATED state."""
        if (vnf_package.onboarding_state !=
                fields.PackageOnboardingStateType.CREATED):
            msg = _("VNF Package %(id)s onboarding state is not "
                    "%(onboarding)s")
            raise webob.exc.HTTPConflict(explanation=msg % {
                "id": id,
                "onboarding": fields.PackageOnboardingStateType.CREATED})

    @wsgi.response(http_client.CREATED)
    @wsgi.expected_errors((http_client.BAD_REQUEST, http_client.FORBIDDEN))
    @validation.schema(vnf_packages.create)
    def create(self, request, body):
        """Create a new VNF package record.

        The package starts in CREATED/DISABLED/NOT_IN_USE state; content
        is uploaded afterwards via package_content.
        """
        context = request.environ['tacker.context']
        context.can(vnf_package_policies.VNFPKGM % 'create')

        vnf_package = vnf_package_obj.VnfPackage(context=request.context)
        vnf_package.onboarding_state = (
            fields.PackageOnboardingStateType.CREATED)
        vnf_package.operational_state = (
            fields.PackageOperationalStateType.DISABLED)
        vnf_package.usage_state = fields.PackageUsageStateType.NOT_IN_USE
        vnf_package.user_data = body.get('userDefinedData', dict())
        vnf_package.tenant_id = request.context.project_id
        vnf_package.create()

        return self._view_builder.create(request, vnf_package)

    @wsgi.response(http_client.OK)
    @wsgi.expected_errors((http_client.FORBIDDEN, http_client.NOT_FOUND))
    def show(self, request, id):
        """Return details of a single VNF package."""
        context = request.environ['tacker.context']
        context.can(vnf_package_policies.VNFPKGM % 'show')
        vnf_package = self._get_vnf_package(
            request, id,
            expected_attrs=["vnf_deployment_flavours", "vnfd"])
        return self._view_builder.show(request, vnf_package)

    @wsgi.response(http_client.OK)
    @wsgi.expected_errors((http_client.FORBIDDEN))
    def index(self, request):
        """List all VNF packages."""
        context = request.environ['tacker.context']
        context.can(vnf_package_policies.VNFPKGM % 'index')
        vnf_packages = vnf_package_obj.VnfPackagesList.get_all(
            request.context,
            expected_attrs=["vnf_deployment_flavours", "vnfd"])
        return self._view_builder.index(request, vnf_packages)

    @wsgi.response(http_client.NO_CONTENT)
    @wsgi.expected_errors((http_client.FORBIDDEN, http_client.NOT_FOUND,
                           http_client.CONFLICT))
    def delete(self, request, id):
        """Delete a VNF package, unless it is still in use."""
        context = request.environ['tacker.context']
        context.can(vnf_package_policies.VNFPKGM % 'delete')
        vnf_package = self._get_vnf_package(request, id)
        # NOTE: check usage_state against the usage-state enum (the original
        # compared operational_state to PackageUsageStateType, which can
        # never match); an IN_USE package must not be deleted.
        if (vnf_package.usage_state ==
                fields.PackageUsageStateType.IN_USE):
            msg = _("VNF Package %(id)s usage state is %(state)s")
            raise webob.exc.HTTPConflict(
                explanation=msg % {
                    "id": id,
                    "state": fields.PackageUsageStateType.IN_USE})
        # Delete vnf_package
        self.rpc_api.delete_vnf_package(context, vnf_package)

    @wsgi.response(http_client.ACCEPTED)
    @wsgi.expected_errors((http_client.FORBIDDEN, http_client.NOT_FOUND,
                           http_client.CONFLICT))
    def upload_vnf_package_content(self, request, id, body):
        """Store raw CSAR content for a package in CREATED state."""
        context = request.environ['tacker.context']
        context.can(vnf_package_policies.VNFPKGM % 'upload_package_content')
        vnf_package = self._get_vnf_package(request, id)
        self._check_onboarding_state_is_created(id, vnf_package)

        vnf_package.onboarding_state = (
            fields.PackageOnboardingStateType.UPLOADING)
        vnf_package.save()

        try:
            (location, size, checksum, multihash,
             loc_meta) = glance_store.store_csar(context, id, body)
        except exceptions.UploadFailedToGlanceStore:
            # Roll the state back to CREATED so the upload can be retried.
            with excutils.save_and_reraise_exception():
                vnf_package.onboarding_state = (
                    fields.PackageOnboardingStateType.CREATED)
                vnf_package.save()

        vnf_package.onboarding_state = (
            fields.PackageOnboardingStateType.PROCESSING)
        vnf_package.algorithm = CONF.vnf_package.hashing_algorithm
        vnf_package.hash = multihash
        vnf_package.location_glance_store = location
        vnf_package.save()

        # process vnf_package
        self.rpc_api.upload_vnf_package_content(context, vnf_package)

    @wsgi.response(http_client.ACCEPTED)
    @wsgi.expected_errors((http_client.BAD_REQUEST, http_client.FORBIDDEN,
                           http_client.NOT_FOUND, http_client.CONFLICT))
    @validation.schema(vnf_packages.upload_from_uri)
    def upload_vnf_package_from_uri(self, request, id, body):
        """Trigger an asynchronous CSAR download from a remote URI."""
        context = request.environ['tacker.context']
        context.can(vnf_package_policies.VNFPKGM % 'upload_from_uri')

        # Keep the original check order: 404 on a malformed id comes before
        # the URL probe, which comes before the package lookup.
        if not uuidutils.is_uuid_like(id):
            msg = _("Can not find requested vnf package: %s") % id
            raise webob.exc.HTTPNotFound(explanation=msg)

        # Probe the URL up front so an unreachable address fails fast
        # with 400 before any package state is touched.
        url = body['addressInformation']
        try:
            data_iter = urllib.request.urlopen(url)
        except Exception:
            data_iter = None
            msg = _("Failed to open URL %s")
            raise webob.exc.HTTPBadRequest(explanation=msg % url)
        finally:
            if hasattr(data_iter, 'close'):
                data_iter.close()

        vnf_package = self._get_vnf_package(request, id)
        self._check_onboarding_state_is_created(id, vnf_package)

        vnf_package.onboarding_state = (
            fields.PackageOnboardingStateType.UPLOADING)
        vnf_package.save()

        # The actual download and processing happens in the conductor.
        self.rpc_api.upload_vnf_package_from_uri(context, vnf_package,
                                                 body['addressInformation'],
                                                 user_name=body.get(
                                                     'userName'),
                                                 password=body.get(
                                                     'password'))
def create_resource():

310
tacker/common/csar_utils.py Normal file
View File

@ -0,0 +1,310 @@
# Copyright (C) 2019 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from toscaparser.tosca_template import ToscaTemplate
import zipfile
from tacker.common import exceptions
import tacker.conf
CONF = tacker.conf.CONF
LOG = logging.getLogger(__name__)
def _check_type(custom_def, node_type, type_list):
for node_data_type, node_data_type_value in custom_def.items():
if node_data_type == node_type and node_type in type_list:
return True, node_data_type_value
for k, v in node_data_type_value.items():
if k == 'derived_from':
if v in type_list and node_type == node_data_type:
return True, node_data_type_value
return False, None
def _get_sw_image_artifact(artifacts):
if not artifacts:
return
for artifact_value in artifacts.values():
if 'type' in artifact_value:
if artifact_value['type'] == 'tosca.artifacts.nfv.SwImage':
return artifact_value
def _update_default_vnfd_data(node_value, node_type_value):
vnf_properties = node_value['properties']
type_properties = node_type_value['properties']
for property_key, property_value in type_properties.items():
if property_key == 'descriptor_id':
# if descriptor_id is parameterized, then get the value from the
# default property and set it in the vnf_properties.
if vnf_properties and isinstance(
vnf_properties.get('descriptor_id'), dict):
vnf_properties['descriptor_id'] = property_value.get("default")
return vnf_properties
def _get_vnf_data(nodetemplates):
    """Return properties of the first tosca.nodes.nfv.VNF node, if any."""
    wanted_types = ['tosca.nodes.nfv.VNF']
    for node_template in nodetemplates:
        for tpl_name, tpl_value in node_template.templates.items():
            matched, type_value = _check_type(
                node_template.custom_def, tpl_value['type'], wanted_types)
            if matched and type_value:
                return _update_default_vnfd_data(tpl_value, type_value)
def _get_instantiation_levels(policies):
if policies:
for policy in policies:
if policy.type_definition.type == \
'tosca.policies.nfv.InstantiationLevels':
return policy.properties
def _update_flavour_data_from_vnf(custom_defs, node_tpl, flavour):
    """Copy flavour_description/flavour_id from a VNF node into flavour.

    Mutates *flavour* in place; does nothing for non-VNF nodes or nodes
    without properties.
    """
    matched, _unused = _check_type(custom_defs, node_tpl['type'],
                                   ['tosca.nodes.nfv.VNF'])
    if not (matched and node_tpl['properties']):
        return
    vnf_properties = node_tpl['properties']
    if 'flavour_description' in vnf_properties:
        flavour['flavour_description'] = vnf_properties['flavour_description']
    if 'flavour_id' in vnf_properties:
        flavour['flavour_id'] = vnf_properties['flavour_id']
def _get_software_image(custom_defs, nodetemplate_name, node_tpl):
    """Return sw_image_data for a compute/block-storage node, if present.

    Only nodes of (or derived from) Vdu.Compute/Vdu.VirtualBlockStorage
    with a SwImage artifact are considered. The node's own sw_image_data
    property dict is mutated in place and returned; returns None otherwise.
    """
    type_list = ['tosca.nodes.nfv.Vdu.Compute',
                 'tosca.nodes.nfv.Vdu.VirtualBlockStorage']
    type_status, _ = _check_type(custom_defs, node_tpl['type'], type_list)
    if type_status:
        properties = node_tpl['properties']
        sw_image_artifact = _get_sw_image_artifact(node_tpl.get('artifacts'))
        if sw_image_artifact:
            # Tag the image data with the template name so it can be
            # correlated back to its VDU later.
            properties['sw_image_data'].update(
                {'software_image_id': nodetemplate_name})
            sw_image_data = properties['sw_image_data']
            # Artifact-level metadata (if any) is folded into the image data.
            if 'metadata' in sw_image_artifact:
                sw_image_data.update({'metadata':
                                      sw_image_artifact['metadata']})
            return sw_image_data
def _populate_flavour_data(tosca):
    """Collect deployment-flavour dicts from each nested topology template.

    Each flavour may carry flavour_id, flavour_description,
    instantiation_levels and the list of software images found on its
    VDU nodes. Returns a (possibly empty) list of flavour dicts.
    """
    flavours = []
    for tp in tosca.nested_tosca_templates_with_topology:
        sw_image_list = []

        # Setting up flavour data
        flavour_id = tp.substitution_mappings.properties.get('flavour_id')
        if flavour_id:
            flavour = {'flavour_id': flavour_id}
        else:
            flavour = {}
        instantiation_levels = _get_instantiation_levels(tp.policies)
        if instantiation_levels:
            flavour.update({'instantiation_levels': instantiation_levels})
        for template_name, node_tpl in tp.tpl.get('node_templates').items():
            # check the flavour property in vnf data
            _update_flavour_data_from_vnf(tp.custom_defs, node_tpl, flavour)

            # Update the software image data
            sw_image = _get_software_image(tp.custom_defs, template_name,
                                           node_tpl)
            if sw_image:
                sw_image_list.append(sw_image)

        # Add software images for flavour
        if sw_image_list:
            flavour.update({'sw_images': sw_image_list})
        if flavour:
            flavours.append(flavour)
    return flavours
def _get_instantiation_levels_from_policy(tpl_policies):
"""Get defined instantiation levels
Getting instantiation levels defined under policy type
'tosca.policies.nfv.InstantiationLevels'.
"""
levels = []
for policy in tpl_policies:
for key, value in policy.items():
if value.get('type') == 'tosca.policies.nfv.InstantiationLevels'\
and value.get('properties', {}).get('levels', {}):
levels = value.get('properties').get('levels').keys()
default_level = value.get(
'properties').get('default_level')
if default_level and default_level not in levels:
error_msg = "Level {} not found in defined levels" \
" {}".format(default_level,
",".join(sorted(levels)))
raise exceptions.InvalidCSAR(error_msg)
return levels
def _validate_instantiation_levels(policy, instantiation_levels):
expected_policy_type = ['tosca.policies.nfv.VduInstantiationLevels',
'tosca.policies.nfv.'
'VirtualLinkInstantiationLevels']
for policy_name, policy_tpl in policy.items():
if policy_tpl.get('type') not in expected_policy_type:
return
if not instantiation_levels:
msg = ('Policy of type'
' "tosca.policies.nfv.InstantiationLevels is not defined.')
raise exceptions.InvalidCSAR(msg)
if policy_tpl.get('properties'):
levels_in_policy = policy_tpl.get(
'properties').get('levels')
if levels_in_policy:
invalid_levels = set(levels_in_policy.keys()) - set(
instantiation_levels)
else:
invalid_levels = set()
if invalid_levels:
error_msg = "Level(s) {} not found in defined levels" \
" {}".format(",".join(sorted(invalid_levels)),
",".join(sorted(instantiation_levels)
))
raise exceptions.InvalidCSAR(error_msg)
def _validate_sw_image_data_for_artifact(node_tpl, template_name):
artifact_type = []
artifacts = node_tpl.get('artifacts')
if artifacts:
for key, value in artifacts.items():
if value.get('type') == 'tosca.artifacts.nfv.SwImage':
artifact_type.append(value.get('type'))
if len(artifact_type) > 1:
error_msg = ('artifacts of type "tosca.artifacts.nfv.SwImage"'
' is added more than one time for'
' node %(node)s.') % {'node': template_name}
raise exceptions.InvalidCSAR(error_msg)
if artifact_type and node_tpl.get('properties'):
if not node_tpl.get('properties').get('sw_image_data'):
error_msg = ('Node property "sw_image_data" is missing for'
' artifact type %(type)s for '
'node %(node)s.') % {
'type': artifact_type[0], 'node': template_name}
raise exceptions.InvalidCSAR(error_msg)
def _validate_sw_image_data_for_artifacts(tosca):
    """Run the per-node SwImage validation over every node template."""
    # Nodes inside nested topology templates...
    for nested in tosca.nested_tosca_templates_with_topology:
        for tpl_name, node_tpl in nested.tpl.get('node_templates').items():
            _validate_sw_image_data_for_artifact(node_tpl, tpl_name)
    # ...and nodes of the top-level template.
    for template in tosca.nodetemplates:
        _validate_sw_image_data_for_artifact(
            template.entity_tpl, template.name)
def _get_data_from_csar(tosca, context, id):
    """Validate the parsed CSAR and return (vnf_data, flavours).

    :param tosca: parsed ToscaTemplate of the CSAR
    :raises InvalidCSAR: when VNF properties or flavours are missing, or
        instantiation-level policies are inconsistent.
    """
    for tp in tosca.nested_tosca_templates_with_topology:
        # NOTE(review): assumes each nested template defines "policies";
        # a template without them would make this iteration fail — confirm.
        levels = _get_instantiation_levels_from_policy(tp.tpl.get("policies"))
        for policy_tpl in tp.tpl.get("policies"):
            _validate_instantiation_levels(policy_tpl, levels)
    _validate_sw_image_data_for_artifacts(tosca)

    vnf_data = _get_vnf_data(tosca.nodetemplates)
    if not vnf_data:
        error_msg = "VNF properties are mandatory"
        raise exceptions.InvalidCSAR(error_msg)

    flavours = _populate_flavour_data(tosca)
    if not flavours:
        error_msg = "No VNF flavours are available"
        raise exceptions.InvalidCSAR(error_msg)

    return vnf_data, flavours
def _extract_csar_zip_file(file_path, extract_path):
try:
with zipfile.ZipFile(file_path, 'r') as zf:
zf.extractall(extract_path)
except (RuntimeError, zipfile.BadZipfile) as exp:
with excutils.save_and_reraise_exception():
LOG.error("Error encountered while extracting "
"csar zip file %(path)s. Error: %(error)s.",
{'path': file_path,
'error': encodeutils.exception_to_unicode(exp)})
exp.reraise = False
raise exceptions.InvalidZipFile(path=file_path)
def load_csar_data(context, package_uuid, zip_path):
    """Extract and parse a CSAR zip, returning (vnf_data, flavours).

    The zip is extracted under the configured vnf_package_csar_path, then
    parsed with tosca-parser; any parser failure is converted to
    InvalidCSAR after logging.
    """
    extract_zip_path = os.path.join(CONF.vnf_package.vnf_package_csar_path,
                                    package_uuid)
    _extract_csar_zip_file(zip_path, extract_zip_path)

    try:
        tosca = ToscaTemplate(zip_path, None, True)
        return _get_data_from_csar(tosca, context, package_uuid)
    except exceptions.InvalidCSAR as exp:
        # Already the right exception type: log and let it re-raise as is.
        with excutils.save_and_reraise_exception():
            LOG.error("Error processing CSAR file %(path)s for vnf package"
                      " %(uuid)s: Error: %(error)s. ",
                      {'path': zip_path, 'uuid': package_uuid,
                       'error': encodeutils.exception_to_unicode(exp)})
    except Exception as exp:
        # Wrap any other parser failure in InvalidCSAR.
        with excutils.save_and_reraise_exception():
            LOG.error("Tosca parser failed for vnf package %(uuid)s: "
                      "Error: %(error)s. ", {'uuid': package_uuid,
                      'error': encodeutils.exception_to_unicode(exp)})
            exp.reraise = False
            raise exceptions.InvalidCSAR(encodeutils.exception_to_unicode
                                         (exp))
def delete_csar_data(package_uuid):
    """Remove the extracted CSAR directory and stored zip for a package.

    Best-effort cleanup: failures are logged at warning level, never raised.
    """
    # Remove zip and folder from the vnf_package_csar_path
    csar_zip_temp_path = os.path.join(CONF.vnf_package.vnf_package_csar_path,
                                      package_uuid)
    csar_path = os.path.join(CONF.vnf_package.vnf_package_csar_path,
                             package_uuid + ".zip")
    try:
        shutil.rmtree(csar_zip_temp_path)
        os.remove(csar_path)
    except OSError as exc:
        exc_message = encodeutils.exception_to_unicode(exc)
        # NOTE: the original wrapped this message in _(), but this module
        # never imports the i18n marker, so the error path raised NameError
        # instead of logging. Log the plain message instead.
        msg = ('Failed to delete csar folder: '
               '%(csar_path)s, Error: %(exc)s')
        LOG.warning(msg, {'csar_path': csar_path, 'exc': exc_message})

View File

@ -136,9 +136,7 @@ class MalformedRequestBody(BadRequest):
class Invalid(TackerException):
    """Base for 400-family errors raised on invalid client input.

    NOTE: the rendered diff interleaved the removed __init__ with the
    added message attribute; only the class attribute belongs in the
    post-change code.
    """
    message = _("Bad Request - Invalid Parameters")
class InvalidInput(BadRequest):
@ -219,3 +217,35 @@ class VnfSoftwareImageNotFound(NotFound):
class OrphanedObjectError(TackerException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class CSARFileSizeLimitExceeded(TackerException):
    """Raised when an uploaded CSAR exceeds the allowed size limit."""
    message = _("The provided CSAR file is too large.")
class VNFPackageURLInvalid(Invalid):
    """Raised when the upload-from-uri address cannot be opened."""
    message = _("Failed to open URL %(url)s")
class InvalidZipFile(Invalid):
    """Raised when a CSAR archive cannot be extracted as a zip file."""
    message = _("Invalid zip file : %(path)s")
class UploadFailedToGlanceStore(Invalid):
    """Raised when storing CSAR content in the glance store backend fails."""
    message = _("Failed to upload vnf package %(uuid)s to glance store: "
                "%(error)s")
class InvalidCSAR(Invalid):
    """Raised when CSAR contents fail validation."""
    message = _("Invalid csar: %(error)s")
class LimitExceeded(TackerException):
    """Raised when a request trips a rate limit or quota (HTTP 413)."""
    message = _("The request returned a 413 Request Entity Too Large. This "
                "generally means that rate limiting or a quota threshold was "
                "breached.\n\nThe response body:\n%(body)s")

    def __init__(self, *args, **kwargs):
        retry = kwargs.get('retry')
        # Seconds the client should wait before retrying, when provided.
        self.retry_after = int(retry) if retry else None
        super(LimitExceeded, self).__init__(*args, **kwargs)

View File

@ -27,6 +27,7 @@ from oslo_utils import excutils
from tacker.common import exceptions
from tacker import context
from tacker.objects import base as objects_base
LOG = logging.getLogger(__name__)
@ -60,7 +61,8 @@ def init(conf):
allowed_remote_exmods=exmods)
NOTIFICATION_TRANSPORT = oslo_messaging.get_notification_transport(
conf, allowed_remote_exmods=exmods)
serializer = RequestContextSerializer()
json_serializer = oslo_messaging.JsonPayloadSerializer()
serializer = RequestContextSerializer(json_serializer)
NOTIFIER = oslo_messaging.Notifier(NOTIFICATION_TRANSPORT,
serializer=serializer)
@ -298,7 +300,8 @@ class Connection(object):
target = oslo_messaging.Target(
topic=topic, server=host or cfg.CONF.host, fanout=fanout,
exchange=exchange)
server = get_server(target, endpoints)
serializer = objects_base.TackerObjectSerializer()
server = get_server(target, endpoints, serializer)
self.servers.append(server)
def consume_in_threads(self):

View File

@ -0,0 +1,41 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions that won't produce circular imports."""
def get_wrapped_function(function):
    """Get the method at the bottom of a stack of decorators."""
    # Fast path: an undecorated function has no closure to unwrap.
    if not hasattr(function, '__closure__') or not function.__closure__:
        return function

    def _unwrap(func):
        # Returns the innermost callable found via closure cells,
        # or None when func itself has no closure.
        if not hasattr(func, '__closure__') or not func.__closure__:
            return None
        for cell in func.__closure__:
            inner = cell.cell_contents
            deeper = _unwrap(inner)
            if deeper:
                return deeper
            elif hasattr(inner, '__call__'):
                return inner
        return func

    return _unwrap(function)

View File

@ -18,6 +18,8 @@
"""Utilities and helper functions."""
import functools
import inspect
import logging as std_logging
import os
import random
@ -32,11 +34,18 @@ import netaddr
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from stevedore import driver
try:
from eventlet import sleep
except ImportError:
from time import sleep
from tacker._i18n import _
from tacker.common import constants as q_const
from tacker.common import exceptions
from tacker.common import safe_utils
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
@ -66,6 +75,12 @@ MEM_UNITS = {
}
CONF = cfg.CONF
synchronized = lockutils.synchronized_with_prefix(SYNCHRONIZED_PREFIX)
MAX_COOP_READER_BUFFER_SIZE = 134217728
if hasattr(inspect, 'getfullargspec'):
getargspec = inspect.getfullargspec
else:
getargspec = inspect.getargspec
def find_config_file(options, config_file):
@ -231,3 +246,213 @@ def none_from_string(orig_str):
return None
else:
return orig_str
def expects_func_args(*args):
    """Decorator-decorator: require the decorated target to accept *args.

    Produces a checker that, when applied to a decorator, verifies the
    eventual target function (unwrapped through any decorator stack)
    declares every named argument, raising TypeError otherwise.
    """
    def _decorator_checker(dec):
        @functools.wraps(dec)
        def _decorator(f):
            base_f = safe_utils.get_wrapped_function(f)
            argspec = getargspec(base_f)
            accepts_varargs = argspec[1] or argspec[2]
            # NOTE (nirajsingh): with *args/**kwargs we cannot verify
            # statically which names are accepted, so carry on and hope
            # for the best.
            if accepts_varargs or set(args) <= set(argspec[0]):
                return dec(f)
            raise TypeError("Decorated function %(f_name)s does not "
                            "have the arguments expected by the "
                            "decorator %(d_name)s" %
                            {'f_name': base_f.__name__,
                             'd_name': dec.__name__})
        return _decorator
    return _decorator_checker
def cooperative_iter(iter):
    """Yield items from *iter*, sleeping between chunks.

    The sleep(0) after each chunk yields control so other eventlet
    threads get scheduled, preventing starvation during long iterations.

    :param iter: an iterator to wrap
    """
    try:
        for chunk in iter:
            sleep(0)
            yield chunk
    except Exception as err:
        with excutils.save_and_reraise_exception():
            msg = _("Error: cooperative_iter exception %s") % err
            LOG.error(msg)
def cooperative_read(fd):
    """Wrap fd.read so each read yields control to other eventlet threads.

    Returns a read-like callable that delegates to fd.read and then
    sleeps(0) to allow a co-operative scheduling yield.

    :param fd: a file descriptor to wrap
    """
    def _read(*args):
        data = fd.read(*args)
        sleep(0)
        return data
    return _read
def chunkreadable(iter, chunk_size=65536):
    """Wrap a readable iterator in a fixed-size chunk reader.

    If *iter* exposes read(), return a chunkiter over it; otherwise
    return the iterator unchanged.

    :param iter: an iter which may also be readable
    :param chunk_size: maximum size of chunk
    """
    if hasattr(iter, 'read'):
        return chunkiter(iter, chunk_size)
    return iter
def chunkiter(fp, chunk_size=65536):
    """Yield fixed-size chunks read from a file-like object.

    :param fp: a file-like object
    :param chunk_size: maximum size of chunk
    """
    while True:
        data = fp.read(chunk_size)
        if not data:
            break
        yield data
class CooperativeReader(object):
    """An eventlet thread friendly class for reading in image data.
    When accessing data either through the iterator or the read method
    we perform a sleep to allow a co-operative yield. When there is more than
    one image being uploaded/downloaded this prevents eventlet thread
    starvation, ie allows all threads to be scheduled periodically rather than
    having the same thread be continuously active.
    """
    def __init__(self, fd):
        """Construct an CooperativeReader object.
        :param fd: Underlying image file object
        """
        self.fd = fd
        self.iterator = None
        # NOTE(nirajsingh): if the underlying supports read(), overwrite the
        # default iterator-based implementation with cooperative_read which
        # is more straightforward
        if hasattr(fd, 'read'):
            self.read = cooperative_read(fd)
        else:
            self.iterator = None
            # Leftover bytes from the last iterator chunk, and the offset
            # of the first unconsumed byte within that buffer.
            self.buffer = b''
            self.position = 0
    def read(self, length=None):
        """Return the requested amount of bytes.
        Fetching the next chunk of the underlying iterator when needed.
        This is replaced with cooperative_read in __init__ if the underlying
        fd already supports read().
        """
        if length is None:
            if len(self.buffer) - self.position > 0:
                # if no length specified but some data exists in buffer,
                # return that data and clear the buffer
                result = self.buffer[self.position:]
                self.buffer = b''
                self.position = 0
                return bytes(result)
            else:
                # otherwise read the next chunk from the underlying iterator
                # and return it as a whole. Reset the buffer, as subsequent
                # calls may specify the length
                try:
                    if self.iterator is None:
                        self.iterator = self.__iter__()
                    return next(self.iterator)
                except StopIteration:
                    return b''
                finally:
                    self.buffer = b''
                    self.position = 0
        else:
            result = bytearray()
            while len(result) < length:
                if self.position < len(self.buffer):
                    # Serve as much as possible from the buffered chunk.
                    to_read = length - len(result)
                    chunk = self.buffer[self.position:self.position + to_read]
                    result.extend(chunk)
                    # This check is here to prevent potential OOM issues if
                    # this code is called with unreasonably high values of read
                    # size. Currently it is only called from the HTTP clients
                    # of Glance backend stores, which use httplib for data
                    # streaming, which has readsize hardcoded to 8K, so this
                    # check should never fire. Regardless it still worths to
                    # make the check, as the code may be reused somewhere else.
                    if len(result) >= MAX_COOP_READER_BUFFER_SIZE:
                        raise exceptions.LimitExceeded()
                    self.position += len(chunk)
                else:
                    # Buffer exhausted: pull the next chunk from the iterator.
                    try:
                        if self.iterator is None:
                            self.iterator = self.__iter__()
                        self.buffer = next(self.iterator)
                        self.position = 0
                    except StopIteration:
                        self.buffer = b''
                        self.position = 0
                        # Source exhausted: return a short read (possibly
                        # fewer than `length` bytes).
                        return bytes(result)
            return bytes(result)
    def __iter__(self):
        return cooperative_iter(self.fd.__iter__())
class LimitingReader(object):
    """Reader that fails once more than a configured number of bytes is read.

    Wraps an underlying data object and raises the configured exception as
    soon as cumulative reads exceed the limit.
    """
    def __init__(self, data, limit,
                 exception_class=exceptions.CSARFileSizeLimitExceeded):
        """Construct a LimitingReader.

        :param data: Underlying image data object
        :param limit: maximum number of bytes the reader should allow
        :param exception_class: Type of exception to be raised
        """
        self.data = data
        self.limit = limit
        self.bytes_read = 0
        self.exception_class = exception_class

    def __iter__(self):
        for chunk in self.data:
            self.bytes_read += len(chunk)
            if self.bytes_read > self.limit:
                raise self.exception_class()
            yield chunk

    def read(self, i):
        chunk = self.data.read(i)
        self.bytes_read += len(chunk)
        if self.bytes_read > self.limit:
            raise self.exception_class()
        return chunk

View File

@ -13,38 +13,111 @@
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import functools
import inspect
import os
import shutil
import sys
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import periodic_task
from oslo_service import service
from oslo_utils import excutils
from oslo_utils import timeutils
from sqlalchemy.orm import exc as orm_exc
from tacker.common import csar_utils
from tacker.common import exceptions
from tacker.common import safe_utils
from tacker.common import topics
from tacker.common import utils
import tacker.conf
from tacker import context as t_context
from tacker.db.common_services import common_services_db
from tacker.db.nfvo import nfvo_db
from tacker.extensions import nfvo
from tacker.glance_store import store as glance_store
from tacker import manager
from tacker import objects
from tacker.objects import fields
from tacker.objects.vnf_package import VnfPackagesList
from tacker.plugins.common import constants
from tacker import service as tacker_service
from tacker import version
CONF = tacker.conf.CONF
LOG = logging.getLogger(__name__)
def _delete_csar(context, vnf_package):
    """Remove a package's csar from the glance store and local disk."""
    package_uuid = vnf_package.id
    # Drop the stored csar zip from the glance store backend first, then
    # clean up the locally extracted csar data for the same package.
    glance_store.delete_csar(context, package_uuid,
                             vnf_package.location_glance_store)
    csar_utils.delete_csar_data(package_uuid)
@utils.expects_func_args('vnf_package')
def revert_upload_vnf_package(function):
    """Decorator to revert upload_vnf_package on failure."""

    @functools.wraps(function)
    def decorated_function(self, context, *args, **kwargs):
        try:
            return function(self, context, *args, **kwargs)
        except Exception as exp:
            # Perform cleanup, then re-raise the original exception.
            with excutils.save_and_reraise_exception():
                # Recover the actual 'context'/'vnf_package' arguments of
                # the wrapped call regardless of how they were passed
                # (positionally or by keyword).
                wrapped_func = safe_utils.get_wrapped_function(function)
                keyed_args = inspect.getcallargs(wrapped_func, self, context,
                                                 *args, **kwargs)
                context = keyed_args['context']
                vnf_package = keyed_args['vnf_package']
                # If the failure happened while uploading to the glance
                # store itself, nothing was stored, so skip store cleanup.
                if not isinstance(exp, exceptions.UploadFailedToGlanceStore):
                    # Delete the csar file from the glance store.
                    glance_store.delete_csar(context, vnf_package.id,
                                             vnf_package.location_glance_store)
                    csar_utils.delete_csar_data(vnf_package.id)

                # Delete the vnf_deployment_flavour if created.
                if vnf_package.vnf_deployment_flavours:
                    for flavour in vnf_package.vnf_deployment_flavours:
                        flavour.destroy(context)

                # Set the vnf package onboarding status to created,
                # so that user can retry uploading vnf package
                # after correcting the csar zip file.
                vnf_package.onboarding_state = (
                    fields.PackageOnboardingStateType.CREATED)
                vnf_package.save()

    return decorated_function
class Conductor(manager.Manager):
def __init__(self, host, conf=None):
    """Initialize the conductor manager.

    :param host: hostname this conductor service runs on
    :param conf: optional config object; the module-level CONF is used
        when not supplied
    """
    # Bug fix: an unconditional ``self.conf = CONF`` following the
    # if/else made the ``conf`` argument dead — a caller-supplied config
    # was silently overwritten. Honor the argument when given.
    self.conf = conf if conf else CONF
    super(Conductor, self).__init__(host=self.conf.host)
def init_host(self):
    # Called once at service startup: prepare the glance store backend
    # and abort early if mandatory configuration is broken.
    glance_store.initialize_glance_store()
    self._basic_config_check()
def _basic_config_check(self):
    """Exit the process if the csar extraction path is not a directory."""
    csar_path = CONF.vnf_package.vnf_package_csar_path
    if os.path.isdir(csar_path):
        return
    LOG.error("Config option 'vnf_package_csar_path' is not "
              "configured correctly. VNF package CSAR path directory"
              " %s doesn't exist", csar_path)
    sys.exit(1)
def update_vim(self, context, vim_id, status):
t_admin_context = t_context.get_admin_context()
update_time = timeutils.utcnow()
@ -67,27 +140,171 @@ class Conductor(manager.Manager):
t_admin_context.session.add(event_db)
return status
def _create_software_images(self, context, sw_image, flavour_uuid):
    """Persist one software image record for a deployment flavour.

    :param context: request context used for the DB object
    :param sw_image: SwImageData dict parsed from the csar
    :param flavour_uuid: id of the owning VnfDeploymentFlavour row
    """
    image = objects.VnfSoftwareImage(context=context)
    image.flavour_uuid = flavour_uuid
    image.name = sw_image.get('name')
    # TODO(nirajsingh) Provider is mandatory as per SOL005 but it's not
    # a required parameter in SwImageData as per SOL001. SOL001 will be
    # amended to make `provider` a required parameter as per
    # 'https://docbox.etsi.org/ISG/NFV/SOL/05-CONTRIBUTIONS/2019/
    # NFVSOL000338_SOL001ed271_SwImage_Provider.docx'.
    image.provider = sw_image.get('provider', "")
    image.version = sw_image.get('version')

    checksum = sw_image.get('checksum')
    if checksum:
        algorithm = checksum.get('algorithm')
        if algorithm:
            image.algorithm = algorithm
        digest = checksum.get('hash')
        if digest:
            image.hash = digest

    image.container_format = sw_image.get('container_format')
    image.disk_format = sw_image.get('disk_format')
    # Size fields arrive as strings (e.g. "2 GB"); only the leading
    # numeric token is stored.
    min_ram = sw_image.get('min_ram')
    image.min_ram = int(min_ram.split()[0]) if min_ram else 0
    image.min_disk = int(sw_image.get('min_disk').split()[0])
    image.size = int(sw_image.get('size').split()[0])
    image.image_path = ''
    image.software_image_id = sw_image['software_image_id']
    image.metadata = sw_image.get('metadata', dict())
    image.create()
def _create_flavour(self, context, package_uuid, flavour):
    """Persist one deployment flavour and its software images.

    :param context: request context used for the DB objects
    :param package_uuid: uuid of the owning vnf package
    :param flavour: flavour dict parsed from the csar
    """
    flavour_obj = objects.VnfDeploymentFlavour(context=context)
    flavour_obj.package_uuid = package_uuid
    flavour_obj.flavour_id = flavour['flavour_id']
    flavour_obj.flavour_description = flavour['flavour_description']
    flavour_obj.instantiation_levels = flavour['instantiation_levels']
    flavour_obj.create()

    # Software images are optional on a flavour.
    for sw_image in flavour.get('sw_images') or []:
        self._create_software_images(context, sw_image, flavour_obj.id)
def _onboard_vnf_package(self, context, vnf_package, vnf_data, flavours):
    """Store the parsed vnfd record and flavours for an uploaded package."""
    package_vnfd = objects.VnfPackageVnfd(context=context)
    package_vnfd.package_uuid = vnf_package.id
    # Copy the vnfd attributes parsed out of the csar onto the DB object.
    for attr, key in (('vnfd_id', 'descriptor_id'),
                      ('vnf_provider', 'provider'),
                      ('vnf_product_name', 'product_name'),
                      ('vnf_software_version', 'software_version'),
                      ('vnfd_version', 'descriptor_version')):
        setattr(package_vnfd, attr, vnf_data.get(key))
    package_vnfd.create()

    for flavour in flavours:
        self._create_flavour(context, vnf_package.id, flavour)
@revert_upload_vnf_package
def upload_vnf_package_content(self, context, vnf_package):
    """Parse an already-stored csar and onboard the vnf package.

    On any failure the ``revert_upload_vnf_package`` decorator rolls the
    package back so the upload can be retried.
    """
    package_uuid = vnf_package.id
    zip_path = glance_store.load_csar(
        package_uuid, vnf_package.location_glance_store)
    vnf_data, flavours = csar_utils.load_csar_data(
        context.elevated(), package_uuid, zip_path)
    self._onboard_vnf_package(context, vnf_package, vnf_data, flavours)

    # Onboarding succeeded: mark the package usable.
    vnf_package.onboarding_state = (
        fields.PackageOnboardingStateType.ONBOARDED)
    vnf_package.operational_state = (
        fields.PackageOperationalStateType.ENABLED)
    vnf_package.save()
@revert_upload_vnf_package
def upload_vnf_package_from_uri(self, context, vnf_package,
                                address_information, user_name=None,
                                password=None):
    """Fetch a csar from a remote uri, store it and onboard the package.

    ``user_name``/``password`` are accepted for API parity but are not
    used in this method body.
    """
    body = {"address_information": address_information}
    location, _size, _checksum, multihash, _loc_meta = (
        glance_store.store_csar(context, vnf_package.id, body))

    # Record where the csar landed before parsing it, so a later revert
    # can find and delete it.
    vnf_package.onboarding_state = (
        fields.PackageOnboardingStateType.PROCESSING)
    vnf_package.algorithm = CONF.vnf_package.hashing_algorithm
    vnf_package.hash = multihash
    vnf_package.location_glance_store = location
    vnf_package.save()

    zip_path = glance_store.load_csar(vnf_package.id, location)
    vnf_data, flavours = csar_utils.load_csar_data(
        context.elevated(), vnf_package.id, zip_path)
    self._onboard_vnf_package(context, vnf_package, vnf_data, flavours)

    vnf_package.onboarding_state = (
        fields.PackageOnboardingStateType.ONBOARDED)
    vnf_package.operational_state = (
        fields.PackageOperationalStateType.ENABLED)
    vnf_package.save()
def delete_vnf_package(self, context, vnf_package):
    """Delete a vnf package, removing its csar data when onboarded."""
    onboarded = (vnf_package.onboarding_state ==
                 fields.PackageOnboardingStateType.ONBOARDED)
    # Only an onboarded package has csar data in the glance store.
    if onboarded:
        _delete_csar(context, vnf_package)
    vnf_package.destroy(context)
@periodic_task.periodic_task(spacing=CONF.vnf_package_delete_interval)
def _run_cleanup_vnf_packages(self, context):
    """Delete orphan extracted csar zip and files from extracted path

    This periodic task will get all deleted packages for the period
    (now - CONF.vnf_package_delete_interval) and delete any left out
    csar zip files and vnf packages files from the extracted path.
    """
    time_duration = datetime.datetime.utcnow() - datetime.timedelta(
        seconds=CONF.vnf_package_delete_interval)
    filters = {'deleted_at': time_duration}
    deleted_vnf_packages = VnfPackagesList.get_by_filters(
        context.elevated(), read_deleted='only', **filters)
    for vnf_pack in deleted_vnf_packages:
        # os.path.join instead of string concatenation so the configured
        # path works with or without a trailing separator.
        csar_zip_temp_path = os.path.join(
            CONF.vnf_package.vnf_package_csar_path, vnf_pack.id)
        csar_path = os.path.join(
            CONF.vnf_package.vnf_package_csar_path, vnf_pack.id + '.zip')
        try:
            if os.path.exists(csar_zip_temp_path):
                shutil.rmtree(csar_zip_temp_path)
            # Guard the zip removal too; previously a missing zip raised
            # OSError and produced a spurious warning.
            if os.path.exists(csar_path):
                os.remove(csar_path)
        except OSError:
            # Bug fix: '$(folder)s' was not a valid %-format placeholder,
            # so the folder path never appeared in the log message.
            LOG.warning("Failed to delete csar zip %(zip)s and"
                        " folder %(folder)s for vnf package %(uuid)s.",
                        {'zip': csar_path, 'folder': csar_zip_temp_path,
                         'uuid': vnf_pack.id})
def init(args, **kwargs):
    """Parse configuration and initialize RPC.

    :param args: command line arguments (typically sys.argv[1:])
    """
    # Bug fix: the configuration was parsed twice (once via cfg.CONF and
    # once via CONF) and n_rpc.init was called twice — merge-artifact
    # duplicates; oslo.config raises on a second parse. A single
    # CONF-based parse/init is the correct behavior.
    CONF(args=args, project='tacker',
         version='%%prog %s' % version.version_info.release_string(),
         **kwargs)

    # FIXME(ihrachys): if import is put in global, circular import
    # failure occurs
    from tacker.common import rpc as n_rpc
    n_rpc.init(CONF)
def main(manager='tacker.conductor.conductor_server.Conductor'):
    """Entry point for the tacker-conductor service."""
    init(sys.argv[1:])
    objects.register_all()
    # Bug fix: logging.setup, log_opt_values and service.launch were each
    # duplicated (cfg.CONF and CONF variants) — merge artifacts. Keep a
    # single CONF-based call for each; launching twice would block on the
    # first wait() and leak a second server.
    logging.setup(CONF, "tacker")
    oslo_messaging.set_transport_defaults(control_exchange='tacker')
    CONF.log_opt_values(LOG, logging.DEBUG)
    server = tacker_service.Service.create(
        binary='tacker-conductor',
        topic=topics.TOPIC_CONDUCTOR,
        manager=manager)
    service.launch(CONF, server, restart_method='mutate').wait()

View File

@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import glance_store
from oslo_config import cfg
from tacker.conf import conductor
@ -23,3 +24,4 @@ CONF = cfg.CONF
vnf_package.register_opts(CONF)
conductor.register_opts(CONF)
glance_store.register_opts(CONF)

View File

View File

@ -0,0 +1,117 @@
# Copyright (C) 2019 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import glance_store
from glance_store import exceptions as store_exceptions
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import units
from six.moves import urllib