Device refactor Part 3: Rename device in code

The previous patch, https://review.openstack.org/#/c/349776/,
renamed the DB part.

tacker/vm/plugin.py and tacker/db/vm/vm_db.py are not renamed
in this patch because of a git limitation: the change would always
be recorded as 'delete/add' rather than 'rename'.
They will be renamed in a later patch together with their unit tests.

Change-Id: I334e0e79c8bdba4a10d97ab691b1e6b242a0f1c5
Partial-bug: #1589018
gong yong sheng 2016-08-08 11:46:13 +08:00
parent f9c0e7a1a6
commit 3c422ddc3e
49 changed files with 814 additions and 855 deletions


@ -39,19 +39,19 @@ Following methods need to be overridden in the new driver:
This method must return the type of driver. ex: ping
``def get_name(self)``
This method must return the symbolic name of the device monitor plugin.
This method must return the symbolic name of the vnf monitor plugin.
``def get_description(self)``
This method must return the description for the monitor driver.
``def monitor_get_config(self, plugin, context, device)``
``def monitor_get_config(self, plugin, context, vnf)``
This method must return dictionary of configuration data for the monitor
driver.
``def monitor_url(self, plugin, context, device)``
This method must return the url of device to monitor.
``def monitor_url(self, plugin, context, vnf)``
This method must return the url of vnf to monitor.
``def monitor_call(self, device, kwargs)``
``def monitor_call(self, vnf, kwargs)``
This method must either return boolean value 'True', if VNF is healthy.
Otherwise it should return an event string like 'failure' or
'calls-capacity-reached' based on specific VNF health condition. More
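For reference, a minimal sketch of a monitor driver implementing the renamed interface might look as follows. This is an illustration only, not code from this change: the class name VNFMonitorDummy and its return values are assumptions, and a real driver would subclass the abstract monitor driver shipped with tacker rather than plain object.

class VNFMonitorDummy(object):
    # Illustrative sketch of the renamed monitor driver interface.

    def get_type(self):
        # driver type referenced from the VNFD monitoring policy, e.g. 'ping'
        return 'dummy'

    def get_name(self):
        # symbolic name of the vnf monitor plugin
        return 'dummy'

    def get_description(self):
        return 'Dummy VNF health monitor'

    def monitor_get_config(self, plugin, context, vnf):
        # dictionary of configuration data for the monitor driver
        return {}

    def monitor_url(self, plugin, context, vnf):
        # url (or address) of the vnf to monitor
        return vnf.get('mgmt_url')

    def monitor_call(self, vnf, kwargs):
        # True when the VNF is healthy, otherwise an event string
        # such as 'failure'
        return True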


@ -6,10 +6,10 @@ namespace = tacker.wsgi
namespace = tacker.service
namespace = tacker.nfvo.nfvo_plugin
namespace = tacker.nfvo.drivers.vim.openstack_driver
namespace = tacker.vm.monitor
namespace = tacker.vm.plugin
namespace = tacker.vm.vim_client
namespace = tacker.vm.infra_drivers.heat.heat
namespace = tacker.vm.mgmt_drivers.openwrt.openwrt
namespace = tacker.vm.monitor_drivers.http_ping.http_ping
namespace = tacker.vm.monitor_drivers.ping.ping
namespace = tacker.vnfm.monitor
namespace = tacker.vnfm.plugin
namespace = tacker.vnfm.vim_client
namespace = tacker.vnfm.infra_drivers.heat.heat
namespace = tacker.vnfm.mgmt_drivers.openwrt.openwrt
namespace = tacker.vnfm.monitor_drivers.http_ping.http_ping
namespace = tacker.vnfm.monitor_drivers.ping.ping


@ -48,28 +48,28 @@ tacker.nfvo.vim.drivers =
tacker.openstack.common.cache.backends =
memory = tacker.openstack.common.cache._backends.memory:MemoryBackend
tacker.tacker.device.drivers =
noop = tacker.vm.infra_drivers.noop:DeviceNoop
nova = tacker.vm.infra_drivers.nova.nova:DeviceNova
heat = tacker.vm.infra_drivers.heat.heat:DeviceHeat
noop = tacker.vnfm.infra_drivers.noop:DeviceNoop
nova = tacker.vnfm.infra_drivers.nova.nova:DeviceNova
heat = tacker.vnfm.infra_drivers.heat.heat:DeviceHeat
tacker.tacker.mgmt.drivers =
noop = tacker.vm.mgmt_drivers.noop:DeviceMgmtNoop
openwrt = tacker.vm.mgmt_drivers.openwrt.openwrt:DeviceMgmtOpenWRT
noop = tacker.vnfm.mgmt_drivers.noop:DeviceMgmtNoop
openwrt = tacker.vnfm.mgmt_drivers.openwrt.openwrt:DeviceMgmtOpenWRT
tacker.tacker.monitor.drivers =
ping = tacker.vm.monitor_drivers.ping.ping:VNFMonitorPing
http_ping = tacker.vm.monitor_drivers.http_ping.http_ping:VNFMonitorHTTPPing
ping = tacker.vnfm.monitor_drivers.ping.ping:VNFMonitorPing
http_ping = tacker.vnfm.monitor_drivers.http_ping.http_ping:VNFMonitorHTTPPing
oslo.config.opts =
tacker.common.config = tacker.common.config:config_opts
tacker.wsgi = tacker.wsgi:config_opts
tacker.service = tacker.service:config_opts
tacker.nfvo.nfvo_plugin = tacker.nfvo.nfvo_plugin:config_opts
tacker.nfvo.drivers.vim.openstack_driver = tacker.nfvo.drivers.vim.openstack_driver:config_opts
tacker.vm.monitor = tacker.vm.monitor:config_opts
tacker.vm.plugin = tacker.vm.plugin:config_opts
tacker.vm.vim_client = tacker.vm.vim_client:config_opts
tacker.vm.infra_drivers.heat.heat= tacker.vm.infra_drivers.heat.heat:config_opts
tacker.vm.mgmt_drivers.openwrt.openwrt = tacker.vm.mgmt_drivers.openwrt.openwrt:config_opts
tacker.vm.monitor_drivers.http_ping.http_ping = tacker.vm.monitor_drivers.http_ping.http_ping:config_opts
tacker.vm.monitor_drivers.ping.ping = tacker.vm.monitor_drivers.ping.ping:config_opts
tacker.vnfm.monitor = tacker.vnfm.monitor:config_opts
tacker.vnfm.plugin = tacker.vm.plugin:config_opts
tacker.vnfm.vim_client = tacker.vnfm.vim_client:config_opts
tacker.vnfm.infra_drivers.heat.heat= tacker.vnfm.infra_drivers.heat.heat:config_opts
tacker.vnfm.mgmt_drivers.openwrt.openwrt = tacker.vnfm.mgmt_drivers.openwrt.openwrt:config_opts
tacker.vnfm.monitor_drivers.http_ping.http_ping = tacker.vnfm.monitor_drivers.http_ping.http_ping:config_opts
tacker.vnfm.monitor_drivers.ping.ping = tacker.vnfm.monitor_drivers.ping.ping:config_opts
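These entry points are what the plugin's stevedore-based driver managers resolve at runtime. As an illustration only (not part of this change), one of the monitor drivers registered above could be loaded directly with stevedore roughly as below; the zero-argument construction of the driver class is an assumption.

from stevedore import driver

# Resolve the 'ping' entry point from the tacker.tacker.monitor.drivers group,
# which now points at tacker.vnfm.monitor_drivers.ping.ping:VNFMonitorPing.
mgr = driver.DriverManager(
    namespace='tacker.tacker.monitor.drivers',
    name='ping',
    invoke_on_load=True)  # instantiate the driver (assumed zero-arg)

print(mgr.driver.get_type())  # expected: 'ping'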


@ -11,7 +11,7 @@
# under the License.
from heatclient import client as heatclient
from tacker.vm import keystone
from tacker.vnfm import keystone
class OpenstackClients(object):


@ -167,11 +167,11 @@ class NfvoPluginDb(nfvo.NFVOPluginBase, db_base.CommonDbMixin):
def is_vim_still_in_use(self, context, vim_id):
with context.session.begin(subtransactions=True):
devices_db = self._model_query(context, vm_db.VNF).filter_by(
vnfs_db = self._model_query(context, vm_db.VNF).filter_by(
vim_id=vim_id).first()
if devices_db is not None:
if vnfs_db is not None:
raise nfvo.VimInUseException(vim_id=vim_id)
return devices_db
return vnfs_db
def get_vim(self, context, vim_id, fields=None, mask_password=True):
vim_db = self._get_resource(context, Vim, vim_id)


@ -57,9 +57,9 @@ class VNFD(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
# service type that this service vm provides.
# At first phase, this includes only single service
# In future, single service VM may accomodate multiple services.
service_types = orm.relationship('ServiceType', backref='template')
service_types = orm.relationship('ServiceType', backref='vnfd')
# driver to create hosting device. e.g. noop, nova, heat, etc...
# driver to create hosting vnf. e.g. noop, nova, heat, etc...
infra_driver = sa.Column(sa.String(255))
# driver to communicate with service managment
@ -67,13 +67,13 @@ class VNFD(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
# (key, value) pair to spin up
attributes = orm.relationship('VNFDAttribute',
backref='template')
backref='vnfd')
class ServiceType(model_base.BASE, models_v1.HasId, models_v1.HasTenant):
"""Represents service type which hosting device provides.
"""Represents service type which hosting vnf provides.
Since a device may provide many services, This is one-to-many
Since a vnf may provide many services, This is one-to-many
relationship.
"""
vnfd_id = sa.Column(types.Uuid, sa.ForeignKey('vnfd.id'),
@ -86,7 +86,7 @@ class VNFDAttribute(model_base.BASE, models_v1.HasId):
key value pair is adopted for being agnostic to actuall manager of VMs
like nova, heat or others. e.g. image-id, flavor-id for Nova.
The interpretation is up to actual driver of hosting device.
The interpretation is up to actual driver of hosting vnf.
"""
__tablename__ = 'vnfd_attribute'
@ -98,7 +98,7 @@ class VNFDAttribute(model_base.BASE, models_v1.HasId):
class VNF(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
models_v1.Audit):
"""Represents devices that hosts services.
"""Represents vnfs that hosts services.
Here the term, 'VM', is intentionally avoided because it can be
VM or other container.
@ -111,15 +111,15 @@ class VNF(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
name = sa.Column(sa.String(255), nullable=False)
description = sa.Column(sa.Text, nullable=True)
# sufficient information to uniquely identify hosting device.
# sufficient information to uniquely identify hosting vnf.
# In case of service VM, it's UUID of nova VM.
instance_id = sa.Column(sa.String(64), nullable=True)
# For a management tool to talk to manage this hosting device.
# For a management tool to talk to manage this hosting vnf.
# opaque string.
# e.g. (driver, mgmt_url) = (ssh, ip address), ...
mgmt_url = sa.Column(sa.String(255), nullable=True)
attributes = orm.relationship("VNFAttribute", backref="device")
attributes = orm.relationship("VNFAttribute", backref="vnf")
status = sa.Column(sa.String(64), nullable=False)
vim_id = sa.Column(types.Uuid, sa.ForeignKey('vims.id'), nullable=False)
@ -133,7 +133,7 @@ class VNFAttribute(model_base.BASE, models_v1.HasId):
key value pair is adopted for being agnostic to actuall manager of VMs
like nova, heat or others. e.g. image-id, flavor-id for Nova.
The interpretation is up to actual driver of hosting device.
The interpretation is up to actual driver of hosting vnf.
"""
__tablename__ = 'vnf_attribute'
@ -164,11 +164,11 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
return self._get_by_id(context, model, id)
except orm_exc.NoResultFound:
if issubclass(model, VNFD):
raise vnfm.DeviceTemplateNotFound(device_template_id=id)
raise vnfm.VNFDNotFound(vnfd_id=id)
elif issubclass(model, ServiceType):
raise vnfm.ServiceTypeNotFound(service_type_id=id)
if issubclass(model, VNF):
raise vnfm.DeviceNotFound(device_id=id)
raise vnfm.VNFNotFound(vnf_id=id)
else:
raise
@ -180,57 +180,57 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
'service_type': service_type.service_type}
for service_type in service_types]
def _make_template_dict(self, template, fields=None):
def _make_vnfd_dict(self, vnfd, fields=None):
res = {
'attributes': self._make_attributes_dict(template['attributes']),
'attributes': self._make_attributes_dict(vnfd['attributes']),
'service_types': self._make_service_types_list(
template.service_types)
vnfd.service_types)
}
key_list = ('id', 'tenant_id', 'name', 'description',
'infra_driver', 'mgmt_driver',
'created_at', 'updated_at')
res.update((key, template[key]) for key in key_list)
res.update((key, vnfd[key]) for key in key_list)
return self._fields(res, fields)
def _make_dev_attrs_dict(self, dev_attrs_db):
return dict((arg.key, arg.value) for arg in dev_attrs_db)
def _make_device_dict(self, device_db, fields=None):
LOG.debug(_('device_db %s'), device_db)
LOG.debug(_('device_db attributes %s'), device_db.attributes)
def _make_vnf_dict(self, vnf_db, fields=None):
LOG.debug(_('vnf_db %s'), vnf_db)
LOG.debug(_('vnf_db attributes %s'), vnf_db.attributes)
res = {
'device_template':
self._make_template_dict(device_db.vnfd),
'attributes': self._make_dev_attrs_dict(device_db.attributes),
'vnfd':
self._make_vnfd_dict(vnf_db.vnfd),
'attributes': self._make_dev_attrs_dict(vnf_db.attributes),
}
key_list = ('id', 'tenant_id', 'name', 'description', 'instance_id',
'vim_id', 'placement_attr', 'vnfd_id', 'status',
'mgmt_url', 'error_reason', 'created_at', 'updated_at')
res.update((key, device_db[key]) for key in key_list)
res.update((key, vnf_db[key]) for key in key_list)
return self._fields(res, fields)
@staticmethod
def _infra_driver_name(device_dict):
return device_dict['device_template']['infra_driver']
def _infra_driver_name(vnf_dict):
return vnf_dict['vnfd']['infra_driver']
@staticmethod
def _mgmt_driver_name(device_dict):
return device_dict['device_template']['mgmt_driver']
def _mgmt_driver_name(vnf_dict):
return vnf_dict['vnfd']['mgmt_driver']
@staticmethod
def _instance_id(device_dict):
return device_dict['instance_id']
def _instance_id(vnf_dict):
return vnf_dict['instance_id']
def create_device_template(self, context, device_template):
template = device_template['device_template']
LOG.debug(_('template %s'), template)
tenant_id = self._get_tenant_id_for_create(context, template)
infra_driver = template.get('infra_driver')
mgmt_driver = template.get('mgmt_driver')
service_types = template.get('service_types')
def create_vnfd(self, context, vnfd):
vnfd = vnfd['vnfd']
LOG.debug(_('vnfd %s'), vnfd)
tenant_id = self._get_tenant_id_for_create(context, vnfd)
infra_driver = vnfd.get('infra_driver')
mgmt_driver = vnfd.get('mgmt_driver')
service_types = vnfd.get('service_types')
if (not attributes.is_attr_set(infra_driver)):
LOG.debug(_('hosting device driver unspecified'))
LOG.debug(_('hosting vnf driver unspecified'))
raise vnfm.InfraDriverNotSpecified()
if (not attributes.is_attr_set(mgmt_driver)):
LOG.debug(_('mgmt driver unspecified'))
@ -240,35 +240,35 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
raise vnfm.ServiceTypesNotSpecified()
with context.session.begin(subtransactions=True):
template_id = str(uuid.uuid4())
template_db = VNFD(
id=template_id,
vnfd_id = str(uuid.uuid4())
vnfd_db = VNFD(
id=vnfd_id,
tenant_id=tenant_id,
name=template.get('name'),
description=template.get('description'),
name=vnfd.get('name'),
description=vnfd.get('description'),
infra_driver=infra_driver,
mgmt_driver=mgmt_driver)
context.session.add(template_db)
for (key, value) in template.get('attributes', {}).items():
context.session.add(vnfd_db)
for (key, value) in vnfd.get('attributes', {}).items():
attribute_db = VNFDAttribute(
id=str(uuid.uuid4()),
vnfd_id=template_id,
vnfd_id=vnfd_id,
key=key,
value=value)
context.session.add(attribute_db)
for service_type in (item['service_type']
for item in template['service_types']):
for item in vnfd['service_types']):
service_type_db = ServiceType(
id=str(uuid.uuid4()),
tenant_id=tenant_id,
vnfd_id=template_id,
vnfd_id=vnfd_id,
service_type=service_type)
context.session.add(service_type_db)
LOG.debug(_('template_db %(template_db)s %(attributes)s '),
{'template_db': template_db,
'attributes': template_db.attributes})
vnfd_dict = self._make_template_dict(template_db)
LOG.debug(_('vnfd_db %(vnfd_db)s %(attributes)s '),
{'vnfd_db': vnfd_db,
'attributes': vnfd_db.attributes})
vnfd_dict = self._make_vnfd_dict(vnfd_db)
LOG.debug(_('vnfd_dict %s'), vnfd_dict)
self._cos_db_plg.create_event(
context, res_id=vnfd_dict['id'],
@ -278,14 +278,14 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
tstamp=vnfd_dict[constants.RES_EVT_CREATED_FLD])
return vnfd_dict
def update_device_template(self, context, device_template_id,
device_template):
def update_vnfd(self, context, vnfd_id,
vnfd):
with context.session.begin(subtransactions=True):
template_db = self._get_resource(context, VNFD,
device_template_id)
template_db.update(device_template['device_template'])
template_db.update({'updated_at': timeutils.utcnow()})
vnfd_dict = self._make_template_dict(template_db)
vnfd_db = self._get_resource(context, VNFD,
vnfd_id)
vnfd_db.update(vnfd['vnfd'])
vnfd_db.update({'updated_at': timeutils.utcnow()})
vnfd_dict = self._make_vnfd_dict(vnfd_db)
self._cos_db_plg.create_event(
context, res_id=vnfd_dict['id'],
res_type=constants.RES_TYPE_VNFD,
@ -294,48 +294,47 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
tstamp=vnfd_dict[constants.RES_EVT_UPDATED_FLD])
return vnfd_dict
def delete_device_template(self,
context,
device_template_id,
soft_delete=True):
def delete_vnfd(self,
context,
vnfd_id,
soft_delete=True):
with context.session.begin(subtransactions=True):
# TODO(yamahata): race. prevent from newly inserting hosting device
# that refers to this template
devices_db = context.session.query(VNF).filter_by(
vnfd_id=device_template_id).first()
if devices_db is not None and devices_db.deleted_at is None:
raise vnfm.DeviceTemplateInUse(
device_template_id=device_template_id)
# TODO(yamahata): race. prevent from newly inserting hosting vnf
# that refers to this vnfd
vnfs_db = context.session.query(VNF).filter_by(
vnfd_id=vnfd_id).first()
if vnfs_db is not None and vnfs_db.deleted_at is None:
raise vnfm.VNFDInUse(
vnfd_id=vnfd_id)
template_db = self._get_resource(context, VNFD,
device_template_id)
vnfd_db = self._get_resource(context, VNFD,
vnfd_id)
if soft_delete:
template_db.update({'deleted_at': timeutils.utcnow()})
vnfd_db.update({'deleted_at': timeutils.utcnow()})
self._cos_db_plg.create_event(
context, res_id=template_db['id'],
context, res_id=vnfd_db['id'],
res_type=constants.RES_TYPE_VNFD,
res_state=constants.RES_EVT_VNFD_NA_STATE,
evt_type=constants.RES_EVT_DELETE,
tstamp=template_db[constants.RES_EVT_DELETED_FLD])
tstamp=vnfd_db[constants.RES_EVT_DELETED_FLD])
else:
context.session.query(ServiceType).filter_by(
vnfd_id=device_template_id).delete()
vnfd_id=vnfd_id).delete()
context.session.query(VNFDAttribute).filter_by(
vnfd_id=device_template_id).delete()
context.session.delete(template_db)
vnfd_id=vnfd_id).delete()
context.session.delete(vnfd_db)
def get_device_template(self, context, device_template_id, fields=None):
template_db = self._get_resource(context, VNFD,
device_template_id)
return self._make_template_dict(template_db)
def get_vnfd(self, context, vnfd_id, fields=None):
vnfd_db = self._get_resource(context, VNFD, vnfd_id)
return self._make_vnfd_dict(vnfd_db)
def get_device_templates(self, context, filters, fields=None):
def get_vnfds(self, context, filters, fields=None):
return self._get_collection(context, VNFD,
self._make_template_dict,
self._make_vnfd_dict,
filters=filters, fields=fields)
def choose_device_template(self, context, service_type,
required_attributes=None):
def choose_vnfd(self, context, service_type,
required_attributes=None):
required_attributes = required_attributes or []
LOG.debug(_('required_attributes %s'), required_attributes)
with context.session.begin(subtransactions=True):
@ -354,115 +353,115 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
VNFDAttribute.vnfd_id,
VNFDAttribute.key == key)))
LOG.debug(_('statements %s'), query)
template_db = query.first()
if template_db:
return self._make_template_dict(template_db)
vnfd_db = query.first()
if vnfd_db:
return self._make_vnfd_dict(vnfd_db)
def _device_attribute_update_or_create(
self, context, device_id, key, value):
def _vnf_attribute_update_or_create(
self, context, vnf_id, key, value):
arg = (self._model_query(context, VNFAttribute).
filter(VNFAttribute.vnf_id == device_id).
filter(VNFAttribute.vnf_id == vnf_id).
filter(VNFAttribute.key == key).first())
if arg:
arg.value = value
else:
arg = VNFAttribute(
id=str(uuid.uuid4()), vnf_id=device_id,
id=str(uuid.uuid4()), vnf_id=vnf_id,
key=key, value=value)
context.session.add(arg)
# called internally, not by REST API
def _create_device_pre(self, context, device):
LOG.debug(_('device %s'), device)
tenant_id = self._get_tenant_id_for_create(context, device)
template_id = device['template_id']
name = device.get('name')
device_id = str(uuid.uuid4())
attributes = device.get('attributes', {})
vim_id = device.get('vim_id')
placement_attr = device.get('placement_attr', {})
def _create_vnf_pre(self, context, vnf):
LOG.debug(_('vnf %s'), vnf)
tenant_id = self._get_tenant_id_for_create(context, vnf)
vnfd_id = vnf['vnfd_id']
name = vnf.get('name')
vnf_id = str(uuid.uuid4())
attributes = vnf.get('attributes', {})
vim_id = vnf.get('vim_id')
placement_attr = vnf.get('placement_attr', {})
with context.session.begin(subtransactions=True):
template_db = self._get_resource(context, VNFD,
template_id)
device_db = VNF(id=device_id,
tenant_id=tenant_id,
name=name,
description=template_db.description,
instance_id=None,
vnfd_id=template_id,
vim_id=vim_id,
placement_attr=placement_attr,
status=constants.PENDING_CREATE,
error_reason=None)
context.session.add(device_db)
vnfd_db = self._get_resource(context, VNFD,
vnfd_id)
vnf_db = VNF(id=vnf_id,
tenant_id=tenant_id,
name=name,
description=vnfd_db.description,
instance_id=None,
vnfd_id=vnfd_id,
vim_id=vim_id,
placement_attr=placement_attr,
status=constants.PENDING_CREATE,
error_reason=None)
context.session.add(vnf_db)
for key, value in attributes.items():
arg = VNFAttribute(
id=str(uuid.uuid4()), vnf_id=device_id,
id=str(uuid.uuid4()), vnf_id=vnf_id,
key=key, value=value)
context.session.add(arg)
self._cos_db_plg.create_event(
context, res_id=device_id,
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=constants.PENDING_CREATE,
evt_type=constants.RES_EVT_CREATE,
tstamp=timeutils.utcnow(),
details="VNF UUID assigned")
return self._make_device_dict(device_db)
return self._make_vnf_dict(vnf_db)
# called internally, not by REST API
# intsance_id = None means error on creation
def _create_device_post(self, context, device_id, instance_id,
mgmt_url, device_dict):
LOG.debug(_('device_dict %s'), device_dict)
def _create_vnf_post(self, context, vnf_id, instance_id,
mgmt_url, vnf_dict):
LOG.debug(_('vnf_dict %s'), vnf_dict)
with context.session.begin(subtransactions=True):
query = (self._model_query(context, VNF).
filter(VNF.id == device_id).
filter(VNF.id == vnf_id).
filter(VNF.status.in_(CREATE_STATES)).
one())
query.update({'instance_id': instance_id, 'mgmt_url': mgmt_url})
if instance_id is None or device_dict['status'] == constants.ERROR:
if instance_id is None or vnf_dict['status'] == constants.ERROR:
query.update({'status': constants.ERROR})
for (key, value) in device_dict['attributes'].items():
# do not store decrypted vim auth in device attr table
for (key, value) in vnf_dict['attributes'].items():
# do not store decrypted vim auth in vnf attr table
if 'vim_auth' not in key:
self._device_attribute_update_or_create(context, device_id,
key, value)
self._vnf_attribute_update_or_create(context, vnf_id,
key, value)
evt_details = ("Infra Instance ID created: %s and "
"Mgmt URL set: %s") % (instance_id, mgmt_url)
self._cos_db_plg.create_event(
context, res_id=device_dict['id'],
context, res_id=vnf_dict['id'],
res_type=constants.RES_TYPE_VNF,
res_state=device_dict['status'],
res_state=vnf_dict['status'],
evt_type=constants.RES_EVT_CREATE,
tstamp=timeutils.utcnow(), details=evt_details)
def _create_device_status(self, context, device_id, new_status):
def _create_vnf_status(self, context, vnf_id, new_status):
with context.session.begin(subtransactions=True):
query = (self._model_query(context, VNF).
filter(VNF.id == device_id).
filter(VNF.id == vnf_id).
filter(VNF.status.in_(CREATE_STATES)).one())
query.update({'status': new_status})
self._cos_db_plg.create_event(
context, res_id=device_id,
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=new_status,
evt_type=constants.RES_EVT_CREATE,
tstamp=timeutils.utcnow(), details="VNF status updated")
def _get_device_db(self, context, device_id, current_statuses, new_status):
def _get_vnf_db(self, context, vnf_id, current_statuses, new_status):
try:
device_db = (
vnf_db = (
self._model_query(context, VNF).
filter(VNF.id == device_id).
filter(VNF.id == vnf_id).
filter(VNF.status.in_(current_statuses)).
with_lockmode('update').one())
except orm_exc.NoResultFound:
raise vnfm.DeviceNotFound(device_id=device_id)
if device_db.status == constants.PENDING_UPDATE:
raise vnfm.DeviceInUse(device_id=device_id)
device_db.update({'status': new_status})
return device_db
raise vnfm.VNFNotFound(vnf_id=vnf_id)
if vnf_db.status == constants.PENDING_UPDATE:
raise vnfm.VNFInUse(vnf_id=vnf_id)
vnf_db.update({'status': new_status})
return vnf_db
def _update_vnf_scaling_status(self,
context,
@ -471,75 +470,75 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
status,
mgmt_url=None):
with context.session.begin(subtransactions=True):
device_db = self._get_device_db(
vnf_db = self._get_vnf_db(
context, policy['vnf']['id'], previous_statuses, status)
if mgmt_url:
device_db.update({'mgmt_url': mgmt_url})
return self._make_device_dict(device_db)
vnf_db.update({'mgmt_url': mgmt_url})
return self._make_vnf_dict(vnf_db)
def _update_device_pre(self, context, device_id):
def _update_vnf_pre(self, context, vnf_id):
with context.session.begin(subtransactions=True):
device_db = self._get_device_db(
context, device_id, _ACTIVE_UPDATE, constants.PENDING_UPDATE)
updated_device_dict = self._make_device_dict(device_db)
vnf_db = self._get_vnf_db(
context, vnf_id, _ACTIVE_UPDATE, constants.PENDING_UPDATE)
updated_vnf_dict = self._make_vnf_dict(vnf_db)
self._cos_db_plg.create_event(
context, res_id=device_id,
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=updated_device_dict['status'],
res_state=updated_vnf_dict['status'],
evt_type=constants.RES_EVT_UPDATE,
tstamp=timeutils.utcnow())
return updated_device_dict
return updated_vnf_dict
def _update_device_post(self, context, device_id, new_status,
new_device_dict=None):
def _update_vnf_post(self, context, vnf_id, new_status,
new_vnf_dict=None):
with context.session.begin(subtransactions=True):
(self._model_query(context, VNF).
filter(VNF.id == device_id).
filter(VNF.id == vnf_id).
filter(VNF.status == constants.PENDING_UPDATE).
update({'status': new_status,
'updated_at': timeutils.utcnow()}))
dev_attrs = new_device_dict.get('attributes', {})
dev_attrs = new_vnf_dict.get('attributes', {})
(context.session.query(VNFAttribute).
filter(VNFAttribute.vnf_id == device_id).
filter(VNFAttribute.vnf_id == vnf_id).
filter(~VNFAttribute.key.in_(dev_attrs.keys())).
delete(synchronize_session='fetch'))
for (key, value) in dev_attrs.items():
if 'vim_auth' not in key:
self._device_attribute_update_or_create(context, device_id,
key, value)
self._vnf_attribute_update_or_create(context, vnf_id,
key, value)
self._cos_db_plg.create_event(
context, res_id=device_id,
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=new_device_dict['status'],
res_state=new_vnf_dict['status'],
evt_type=constants.RES_EVT_UPDATE,
tstamp=new_device_dict[constants.RES_EVT_UPDATED_FLD])
tstamp=new_vnf_dict[constants.RES_EVT_UPDATED_FLD])
def _delete_device_pre(self, context, device_id):
def _delete_vnf_pre(self, context, vnf_id):
with context.session.begin(subtransactions=True):
device_db = self._get_device_db(
context, device_id, _ACTIVE_UPDATE_ERROR_DEAD,
vnf_db = self._get_vnf_db(
context, vnf_id, _ACTIVE_UPDATE_ERROR_DEAD,
constants.PENDING_DELETE)
deleted_device_db = self._make_device_dict(device_db)
deleted_vnf_db = self._make_vnf_dict(vnf_db)
self._cos_db_plg.create_event(
context, res_id=device_id,
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=deleted_device_db['status'],
res_state=deleted_vnf_db['status'],
evt_type=constants.RES_EVT_DELETE,
tstamp=timeutils.utcnow(), details="VNF delete initiated")
return deleted_device_db
return deleted_vnf_db
def _delete_device_post(self, context, device_id, error, soft_delete=True):
def _delete_vnf_post(self, context, vnf_id, error, soft_delete=True):
with context.session.begin(subtransactions=True):
query = (
self._model_query(context, VNF).
filter(VNF.id == device_id).
filter(VNF.id == vnf_id).
filter(VNF.status == constants.PENDING_DELETE))
if error:
query.update({'status': constants.ERROR})
self._cos_db_plg.create_event(
context, res_id=device_id,
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=constants.ERROR,
evt_type=constants.RES_EVT_DELETE,
@ -550,7 +549,7 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
deleted_time_stamp = timeutils.utcnow()
query.update({'deleted_at': deleted_time_stamp})
self._cos_db_plg.create_event(
context, res_id=device_id,
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=constants.PENDING_DELETE,
evt_type=constants.RES_EVT_DELETE,
@ -558,78 +557,78 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
details="VNF Delete Complete")
else:
(self._model_query(context, VNFAttribute).
filter(VNFAttribute.vnf_id == device_id).delete())
filter(VNFAttribute.vnf_id == vnf_id).delete())
query.delete()
# reference implementation. needs to be overrided by subclass
def create_device(self, context, device):
device_dict = self._create_device_pre(context, device)
# start actual creation of hosting device.
def create_vnf(self, context, vnf):
vnf_dict = self._create_vnf_pre(context, vnf)
# start actual creation of hosting vnf.
# Waiting for completion of creation should be done backgroundly
# by another thread if it takes a while.
instance_id = str(uuid.uuid4())
device_dict['instance_id'] = instance_id
self._create_device_post(context, device_dict['id'], instance_id, None,
device_dict)
self._create_device_status(context, device_dict['id'],
constants.ACTIVE)
return device_dict
vnf_dict['instance_id'] = instance_id
self._create_vnf_post(context, vnf_dict['id'], instance_id, None,
vnf_dict)
self._create_vnf_status(context, vnf_dict['id'],
constants.ACTIVE)
return vnf_dict
# reference implementation. needs to be overrided by subclass
def update_device(self, context, device_id, device):
device_dict = self._update_device_pre(context, device_id)
# start actual update of hosting device
def update_vnf(self, context, vnf_id, vnf):
vnf_dict = self._update_vnf_pre(context, vnf_id)
# start actual update of hosting vnf
# waiting for completion of update should be done backgroundly
# by another thread if it takes a while
self._update_device_post(context, device_id, constants.ACTIVE)
return device_dict
self._update_vnf_post(context, vnf_id, constants.ACTIVE)
return vnf_dict
# reference implementation. needs to be overrided by subclass
def delete_device(self, context, device_id, soft_delete=True):
self._delete_device_pre(context, device_id)
# start actual deletion of hosting device.
def delete_vnf(self, context, vnf_id, soft_delete=True):
self._delete_vnf_pre(context, vnf_id)
# start actual deletion of hosting vnf.
# Waiting for completion of deletion should be done backgroundly
# by another thread if it takes a while.
self._delete_device_post(context,
device_id,
False,
soft_delete=soft_delete)
self._delete_vnf_post(context,
vnf_id,
False,
soft_delete=soft_delete)
def get_device(self, context, device_id, fields=None):
device_db = self._get_resource(context, VNF, device_id)
return self._make_device_dict(device_db, fields)
def get_vnf(self, context, vnf_id, fields=None):
vnf_db = self._get_resource(context, VNF, vnf_id)
return self._make_vnf_dict(vnf_db, fields)
def get_devices(self, context, filters=None, fields=None):
return self._get_collection(context, VNF, self._make_device_dict,
def get_vnfs(self, context, filters=None, fields=None):
return self._get_collection(context, VNF, self._make_vnf_dict,
filters=filters, fields=fields)
def set_device_error_status_reason(self, context, device_id, new_reason):
def set_vnf_error_status_reason(self, context, vnf_id, new_reason):
with context.session.begin(subtransactions=True):
(self._model_query(context, VNF).
filter(VNF.id == device_id).
filter(VNF.id == vnf_id).
update({'error_reason': new_reason}))
def _mark_device_status(self, device_id, exclude_status, new_status):
def _mark_vnf_status(self, vnf_id, exclude_status, new_status):
context = t_context.get_admin_context()
with context.session.begin(subtransactions=True):
try:
device_db = (
vnf_db = (
self._model_query(context, VNF).
filter(VNF.id == device_id).
filter(VNF.id == vnf_id).
filter(~VNF.status.in_(exclude_status)).
with_lockmode('update').one())
except orm_exc.NoResultFound:
LOG.warning(_('no device found %s'), device_id)
LOG.warning(_('no vnf found %s'), vnf_id)
return False
device_db.update({'status': new_status})
vnf_db.update({'status': new_status})
return True
def _mark_device_error(self, device_id):
return self._mark_device_status(
device_id, [constants.DEAD], constants.ERROR)
def _mark_vnf_error(self, vnf_id):
return self._mark_vnf_status(
vnf_id, [constants.DEAD], constants.ERROR)
def _mark_device_dead(self, device_id):
def _mark_vnf_dead(self, vnf_id):
exclude_status = [
constants.DOWN,
constants.PENDING_CREATE,
@ -637,20 +636,5 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
constants.PENDING_DELETE,
constants.INACTIVE,
constants.ERROR]
return self._mark_device_status(
device_id, exclude_status, constants.DEAD)
def get_vnfs(self, context, filters=None, fields=None):
return self.get_devices(context, filters, fields)
def get_vnf(self, context, vnf_id, fields=None):
return self.get_device(context, vnf_id, fields)
def delete_vnfd(self, context, vnfd_id):
self.delete_device_template(context, vnfd_id)
def get_vnfd(self, context, vnfd_id, fields=None):
return self.get_device_template(context, vnfd_id, fields)
def get_vnfds(self, context, filters=None, fields=None):
return self.get_device_templates(context, filters, fields)
return self._mark_vnf_status(
vnf_id, exclude_status, constants.DEAD)


@ -40,19 +40,19 @@ class MGMTDriverNotSpecified(exceptions.InvalidInput):
class MultipleMGMTDriversSpecified(exceptions.InvalidInput):
message = _('More than one MGMT Driver per template is not supported')
message = _('More than one MGMT Driver per vnfd is not supported')
class ServiceTypesNotSpecified(exceptions.InvalidInput):
message = _('service types are not specified')
class DeviceTemplateInUse(exceptions.InUse):
message = _('VNFD %(device_template_id)s is still in use')
class VNFDInUse(exceptions.InUse):
message = _('VNFD %(vnfd_id)s is still in use')
class DeviceInUse(exceptions.InUse):
message = _('VNF %(device_id)s is still in use')
class VNFInUse(exceptions.InUse):
message = _('VNF %(vnf_id)s is still in use')
class InvalidInfraDriver(exceptions.InvalidInput):
@ -63,28 +63,28 @@ class InvalidServiceType(exceptions.InvalidInput):
message = _('invalid service type %(service_type)s')
class DeviceCreateFailed(exceptions.TackerException):
message = _('creating VNF based on %(device_template_id)s failed')
class VNFCreateFailed(exceptions.TackerException):
message = _('creating VNF based on %(vnfd_id)s failed')
class DeviceCreateWaitFailed(exceptions.TackerException):
class VNFCreateWaitFailed(exceptions.TackerException):
message = _('%(reason)s')
class DeviceDeleteFailed(exceptions.TackerException):
message = _('deleting VNF %(device_id)s failed')
class VNFDeleteFailed(exceptions.TackerException):
message = _('deleting VNF %(vnf_id)s failed')
class DeviceTemplateNotFound(exceptions.NotFound):
message = _('VNFD template %(device_template_id)s could not be found')
class VNFDNotFound(exceptions.NotFound):
message = _('VNFD %(vnfd_id)s could not be found')
class ServiceTypeNotFound(exceptions.NotFound):
message = _('service type %(service_type_id)s could not be found')
class DeviceNotFound(exceptions.NotFound):
message = _('VNF %(device_id)s could not be found')
class VNFNotFound(exceptions.NotFound):
message = _('VNF %(vnf_id)s could not be found')
class ParamYAMLNotWellFormed(exceptions.InvalidInput):


@ -25,7 +25,7 @@ from tacker.agent.linux import utils as linux_utils
from tacker.common import log
from tacker.extensions import nfvo
from tacker.nfvo.drivers.vim import abstract_vim_driver
from tacker.vm import keystone
from tacker.vnfm import keystone
LOG = logging.getLogger(__name__)


@ -19,7 +19,7 @@ from tacker.common import utils
from tacker.tests import constants
from tacker.tests.functional import base
from tacker.tests.utils import read_file
from tacker.vm.tosca import utils as toscautils
from tacker.vnfm.tosca import utils as toscautils
CONF = cfg.CONF


@ -61,8 +61,9 @@ def get_dummy_vnf_config_obj():
def get_dummy_device_obj():
return {'status': 'PENDING_CREATE', 'instance_id': None, 'name':
u'test_openwrt', 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
'template_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
'device_template': {'service_types': [{'service_type': u'vnfd',
'vnfd_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
'vnfd': {
'service_types': [{'service_type': u'vnfd',
'id': u'4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
'description': u'OpenWRT with services',
'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
@ -80,8 +81,8 @@ def get_dummy_device_obj():
def get_dummy_device_obj_config_attr():
return {'status': 'PENDING_CREATE', 'instance_id': None, 'name':
u'test_openwrt', 'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
'template_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
'device_template': {'service_types': [{'service_type': u'vnfd',
'vnfd_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
'vnfd': {'service_types': [{'service_type': u'vnfd',
'id': u'4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
'description': u'OpenWRT with services',
'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
@ -96,22 +97,22 @@ def get_dummy_device_obj_config_attr():
def get_dummy_device_update_config_attr():
return {'device': {u'attributes': {u'config': u"vdus:\n vdu1:\n "
u"config:\n firewall: |"
u"\n package firewall"
u"\n\n config default"
u"s\n "
u"option syn_flood '10'\n "
u" option input "
u"'REJECT'\n "
u"option output 'REJECT'\n "
u" option "
u"forward 'REJECT'\n"}}}
return {'vnf': {u'attributes': {u'config': u"vdus:\n vdu1:\n "
u"config:\n firewall: |"
u"\n package firewall"
u"\n\n config default"
u"s\n "
u"option syn_flood '10'\n "
u" option input "
u"'REJECT'\n "
u"option output 'REJECT'\n "
u" option "
u"forward 'REJECT'\n"}}}
def get_dummy_device_obj_ipaddr_attr():
return {'status': 'PENDING_CREATE',
'device_template': {'service_types':
'vnfd': {'service_types':
[{'service_type': u'vnfd', 'id':
u'16f8b3f7-a9ff-4338-bbe5-eee48692c468'}, {'service_type':
u'router', 'id': u'58878cb7-689f-47a5-9c2d-654e49e2357f'},
@ -129,7 +130,7 @@ def get_dummy_device_obj_ipaddr_attr():
'instance_id': None, 'mgmt_url': None, 'service_context': [],
'services': [],
'attributes': {u'param_values': ipparams},
'template_id': u'24c31ea1-2e28-4de2-a6cb-8d389a502c75',
'vnfd_id': u'24c31ea1-2e28-4de2-a6cb-8d389a502c75',
'description': u'Parameterized VNF descriptor for IP addresses'}
@ -137,8 +138,8 @@ def get_dummy_device_obj_userdata_attr():
return {'status': 'PENDING_CREATE', 'instance_id': None,
'name': u'test_userdata',
'tenant_id': u'8273659b56fc46b68bd05856d1f08d14',
'template_id': u'206e343f-c580-4494-a739-525849edab7f',
'device_template': {'service_types': [{'service_type': u'firewall',
'vnfd_id': u'206e343f-c580-4494-a739-525849edab7f',
'vnfd': {'service_types': [{'service_type': u'firewall',
'id': u'1fcc2d7c-a6b6-4263-8cac-9590f059a555'}, {'service_type':
u'router', 'id': u'8c99106d-826f-46eb-91a1-08dfdc78c04b'},
{'service_type': u'vnfd', 'id':


@ -22,7 +22,7 @@ import yaml
from tacker import context
from tacker.tests.unit import base
from tacker.tests.unit.db import utils
from tacker.vm.infra_drivers.heat import heat
from tacker.vnfm.infra_drivers.heat import heat
class FakeHeatClient(mock.Mock):
@ -65,45 +65,45 @@ class TestDeviceHeat(base.TestCase):
fake_heat_client = mock.Mock()
fake_heat_client.return_value = self.heat_client
self._mock(
'tacker.vm.infra_drivers.heat.heat.HeatClient', fake_heat_client)
'tacker.vnfm.infra_drivers.heat.heat.HeatClient', fake_heat_client)
def _mock(self, target, new=mock.DEFAULT):
patcher = mock.patch(target, new)
return patcher.start()
def _get_device_template(self, template):
return {'device_template': {'attributes': {'vnfd': template}}}
def _get_vnfd(self, template):
return {'vnfd': {'attributes': {'vnfd': template}}}
def _get_expected_device_template(self, template):
return {'device_template': {'attributes': {'vnfd': template},
'description': 'OpenWRT with services',
'mgmt_driver': 'openwrt',
'name': 'OpenWRT'}}
def _get_expected_vnfd(self, template):
return {'vnfd': {'attributes': {'vnfd': template},
'description': 'OpenWRT with services',
'mgmt_driver': 'openwrt',
'name': 'OpenWRT'}}
def _get_expected_fields(self):
return {'stack_name':
'tacker.vm.infra_drivers.heat.heat_DeviceHeat-eb84260e'
'tacker.vnfm.infra_drivers.heat.heat_DeviceHeat-eb84260e'
'-5ff7-4332-b032-50a14d6c1123', 'template': self.hot_template}
def _get_expected_fields_user_data(self):
return {'stack_name':
'tacker.vm.infra_drivers.heat.heat_DeviceHeat-18685f68'
'tacker.vnfm.infra_drivers.heat.heat_DeviceHeat-18685f68'
'-2b2a-4185-8566-74f54e548811',
'template': self.hot_param_template}
def _get_expected_fields_ipaddr_data(self):
return {'stack_name':
'tacker.vm.infra_drivers.heat.heat_DeviceHeat-d1337add'
'tacker.vnfm.infra_drivers.heat.heat_DeviceHeat-d1337add'
'-d5a1-4fd4-9447-bb9243c8460b',
'template': self.hot_ipparam_template}
def _get_expected_device_wait_obj(self, param_values=''):
def _get_expected_vnf_wait_obj(self, param_values=''):
return {'status': 'PENDING_CREATE',
'instance_id': None,
'name': u'test_openwrt',
'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
'template_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
'device_template': {
'vnfd_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
'vnfd': {
'service_types': [{
'service_type': u'vnfd',
'id': u'4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
@ -120,11 +120,11 @@ class TestDeviceHeat(base.TestCase):
'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
'description': u'OpenWRT with services'}
def _get_expected_device_update_obj(self):
def _get_expected_vnf_update_obj(self):
return {'status': 'PENDING_CREATE', 'instance_id': None, 'name':
u'test_openwrt', 'tenant_id':
u'ad7ebc56538745a08ef7c5e97f8bd437', 'template_id':
u'eb094833-995e-49f0-a047-dfb56aaf7c4e', 'device_template': {
u'ad7ebc56538745a08ef7c5e97f8bd437', 'vnfd_id':
u'eb094833-995e-49f0-a047-dfb56aaf7c4e', 'vnfd': {
'service_types': [{'service_type': u'vnfd', 'id':
u'4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}], 'description':
u'OpenWRT with services', 'tenant_id':
@ -144,87 +144,87 @@ class TestDeviceHeat(base.TestCase):
u'OpenWRT with services'}
def test_create(self):
device_obj = utils.get_dummy_device_obj()
vnf_obj = utils.get_dummy_device_obj()
expected_result = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
expected_fields = self._get_expected_fields()
result = self.heat_driver.create(plugin=None, context=self.context,
device=device_obj,
vnf=vnf_obj,
auth_attr=utils.get_vim_auth_obj())
self.heat_client.create.assert_called_once_with(expected_fields)
self.assertEqual(expected_result, result)
def test_create_user_data_param_attr(self):
device_obj = utils.get_dummy_device_obj_userdata_attr()
vnf_obj = utils.get_dummy_device_obj_userdata_attr()
expected_result = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
expected_fields = self._get_expected_fields_user_data()
result = self.heat_driver.create(plugin=None, context=self.context,
device=device_obj,
vnf=vnf_obj,
auth_attr=utils.get_vim_auth_obj())
self.heat_client.create.assert_called_once_with(expected_fields)
self.assertEqual(expected_result, result)
def test_create_ip_addr_param_attr(self):
device_obj = utils.get_dummy_device_obj_ipaddr_attr()
vnf_obj = utils.get_dummy_device_obj_ipaddr_attr()
expected_result = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
expected_fields = self._get_expected_fields_ipaddr_data()
result = self.heat_driver.create(plugin=None, context=self.context,
device=device_obj,
vnf=vnf_obj,
auth_attr=utils.get_vim_auth_obj())
self.heat_client.create.assert_called_once_with(expected_fields)
self.assertEqual(expected_result, result)
def test_create_wait(self):
device_obj = utils.get_dummy_device_obj()
expected_result = self._get_expected_device_wait_obj()
device_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
vnf_obj = utils.get_dummy_device_obj()
expected_result = self._get_expected_vnf_wait_obj()
vnf_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
self.heat_driver.create_wait(plugin=None,
context=self.context,
device_dict=device_obj,
device_id=device_id,
vnf_dict=vnf_obj,
vnf_id=vnf_id,
auth_attr=utils.get_vim_auth_obj())
self.assertEqual(expected_result, device_obj)
self.assertEqual(expected_result, vnf_obj)
def test_delete(self):
device_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
vnf_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
self.heat_driver.delete(plugin=None, context=self.context,
device_id=device_id,
vnf_id=vnf_id,
auth_attr=utils.get_vim_auth_obj())
self.heat_client.delete.assert_called_once_with(device_id)
self.heat_client.delete.assert_called_once_with(vnf_id)
def test_update(self):
device_obj = utils.get_dummy_device_obj_config_attr()
device_config_obj = utils.get_dummy_device_update_config_attr()
expected_device_update = self._get_expected_device_update_obj()
device_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
vnf_obj = utils.get_dummy_device_obj_config_attr()
vnf_config_obj = utils.get_dummy_device_update_config_attr()
expected_vnf_update = self._get_expected_vnf_update_obj()
vnf_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
self.heat_driver.update(plugin=None, context=self.context,
device_id=device_id, device_dict=device_obj,
device=device_config_obj,
vnf_id=vnf_id, vnf_dict=vnf_obj,
vnf=vnf_config_obj,
auth_attr=utils.get_vim_auth_obj())
self.assertEqual(expected_device_update, device_obj)
self.assertEqual(expected_vnf_update, vnf_obj)
def test_create_device_template_pre_tosca(self):
def test_create_vnfd_pre_tosca(self):
tosca_tpl = _get_template('test_tosca_openwrt.yaml')
dtemplate = self._get_device_template(tosca_tpl)
exp_tmpl = self._get_expected_device_template(tosca_tpl)
self.heat_driver.create_device_template_pre(None, None, dtemplate)
dtemplate = self._get_vnfd(tosca_tpl)
exp_tmpl = self._get_expected_vnfd(tosca_tpl)
self.heat_driver.create_vnfd_pre(None, None, dtemplate)
self.assertEqual(exp_tmpl, dtemplate)
def _get_expected_fields_tosca(self, template):
return {'stack_name':
'tacker.vm.infra_drivers.heat.heat_DeviceHeat-eb84260e'
'tacker.vnfm.infra_drivers.heat.heat_DeviceHeat-eb84260e'
'-5ff7-4332-b032-50a14d6c1123',
'template': _get_template(template)}
def _get_expected_tosca_device(self,
tosca_tpl_name,
hot_tpl_name,
param_values='',
is_monitor=True):
def _get_expected_tosca_vnf(self,
tosca_tpl_name,
hot_tpl_name,
param_values='',
is_monitor=True):
tosca_tpl = _get_template(tosca_tpl_name)
exp_tmpl = self._get_expected_device_template(tosca_tpl)
exp_tmpl = self._get_expected_vnfd(tosca_tpl)
tosca_hw_dict = yaml.safe_load(_get_template(hot_tpl_name))
dvc = {
'device_template': exp_tmpl['device_template'],
'vnfd': exp_tmpl['vnfd'],
'description': u'OpenWRT with services',
'attributes': {
'heat_template': tosca_hw_dict,
@ -236,7 +236,7 @@ class TestDeviceHeat(base.TestCase):
'name': u'test_openwrt',
'service_context': [],
'status': 'PENDING_CREATE',
'template_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
'vnfd_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437'
}
# Add montitoring attributes for those yaml, which are having it
@ -250,18 +250,18 @@ class TestDeviceHeat(base.TestCase):
return dvc
def _get_dummy_tosca_device(self, template, input_params=''):
def _get_dummy_tosca_vnf(self, template, input_params=''):
tosca_template = _get_template(template)
device = utils.get_dummy_device_obj()
dtemplate = self._get_expected_device_template(tosca_template)
vnf = utils.get_dummy_device_obj()
dtemplate = self._get_expected_vnfd(tosca_template)
dtemplate['service_types'] = [{'service_type': 'vnfd', 'id':
'4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}]
dtemplate['tenant_id'] = 'ad7ebc56538745a08ef7c5e97f8bd437'
device['device_template'] = dtemplate['device_template']
device['attributes'] = {}
device['attributes']['param_values'] = input_params
return device
vnf['vnfd'] = dtemplate['vnfd']
vnf['attributes'] = {}
vnf['attributes']['param_values'] = input_params
return vnf
def _test_assert_equal_for_tosca_templates(self,
tosca_tpl_name,
@ -269,15 +269,15 @@ class TestDeviceHeat(base.TestCase):
input_params='',
files=None,
is_monitor=True):
device = self._get_dummy_tosca_device(tosca_tpl_name, input_params)
vnf = self._get_dummy_tosca_vnf(tosca_tpl_name, input_params)
expected_result = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
expected_fields = self._get_expected_fields_tosca(hot_tpl_name)
expected_device = self._get_expected_tosca_device(tosca_tpl_name,
hot_tpl_name,
input_params,
is_monitor)
expected_vnf = self._get_expected_tosca_vnf(tosca_tpl_name,
hot_tpl_name,
input_params,
is_monitor)
result = self.heat_driver.create(plugin=None, context=self.context,
device=device,
vnf=vnf,
auth_attr=utils.get_vim_auth_obj())
actual_fields = self.heat_client.create.call_args[0][0]
actual_fields["template"] = yaml.safe_load(actual_fields["template"])
@ -293,24 +293,24 @@ class TestDeviceHeat(base.TestCase):
expected_fields["files"][k] = yaml.safe_load(_get_template(v))
self.assertEqual(expected_fields, actual_fields)
device["attributes"]["heat_template"] = yaml.safe_load(
device["attributes"]["heat_template"])
vnf["attributes"]["heat_template"] = yaml.safe_load(
vnf["attributes"]["heat_template"])
self.heat_client.create.assert_called_once_with(expected_fields)
self.assertEqual(expected_result, result)
if files:
expected_fields["files"] = {}
for k, v in files.items():
expected_device["attributes"][k] = yaml.safe_load(
expected_vnf["attributes"][k] = yaml.safe_load(
_get_template(v))
device["attributes"][k] = yaml.safe_load(
device["attributes"][k])
expected_device["attributes"]['scaling_group_names'] = {
vnf["attributes"][k] = yaml.safe_load(
vnf["attributes"][k])
expected_vnf["attributes"]['scaling_group_names'] = {
'SP1': 'G1'}
device["attributes"]['scaling_group_names'] = json.loads(
device["attributes"]['scaling_group_names']
vnf["attributes"]['scaling_group_names'] = json.loads(
vnf["attributes"]['scaling_group_names']
)
self.assertEqual(expected_device, device)
self.assertEqual(expected_vnf, vnf)
def test_create_tosca(self):
# self.skipTest("Not ready yet")


@ -16,7 +16,7 @@ import mock
import six.moves.urllib.error as urlerr
import testtools
from tacker.vm.monitor_drivers.http_ping import http_ping
from tacker.vnfm.monitor_drivers.http_ping import http_ping
class TestVNFMonitorHTTPPing(testtools.TestCase):


@ -15,7 +15,7 @@
import mock
import testtools
from tacker.vm.monitor_drivers.ping import ping
from tacker.vnfm.monitor_drivers.ping import ping
class TestVNFMonitorPing(testtools.TestCase):


@ -58,7 +58,7 @@ class TestOpenstack_Driver(base.TestCase):
fake_keystone = mock.Mock()
fake_keystone.return_value = self.keystone
self._mock(
'tacker.vm.keystone.Keystone', fake_keystone)
'tacker.vnfm.keystone.Keystone', fake_keystone)
def get_vim_obj(self):
return {'id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff', 'type':


@ -18,7 +18,7 @@ import mock
from oslo_utils import timeutils
import testtools
from tacker.vm.monitor import VNFMonitor
from tacker.vnfm.monitor import VNFMonitor
MOCK_DEVICE_ID = 'a737497c-761c-11e5-89c3-9cb6541d805d'
MOCK_VNF_DEVICE = {
@ -72,14 +72,14 @@ class TestVNFMonitor(testtools.TestCase):
'management_ip_addresses': {
'vdu1': 'a.b.c.d'
},
'device': test_device_dict,
'vnf': test_device_dict,
'monitoring_policy': MOCK_VNF_DEVICE['monitoring_policy']
}
output_dict = VNFMonitor.to_hosting_vnf(test_device_dict,
action_cb)
self.assertEqual(expected_output, output_dict)
@mock.patch('tacker.vm.monitor.VNFMonitor.__run__')
@mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
def test_add_hosting_vnf(self, mock_monitor_run):
test_device_dict = MOCK_VNF_DEVICE
test_boot_wait = 30
@ -88,10 +88,10 @@ class TestVNFMonitor(testtools.TestCase):
test_device_id = list(test_vnfmonitor._hosting_vnfs.keys())[0]
self.assertEqual(MOCK_DEVICE_ID, test_device_id)
@mock.patch('tacker.vm.monitor.VNFMonitor.__run__')
@mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
def test_run_monitor(self, mock_monitor_run):
test_hosting_vnf = MOCK_VNF_DEVICE
test_hosting_vnf['device'] = {}
test_hosting_vnf['vnf'] = {}
test_boot_wait = 30
mock_kwargs = {
'count': 1,
@ -105,5 +105,5 @@ class TestVNFMonitor(testtools.TestCase):
test_vnfmonitor._monitor_manager = self.mock_monitor_manager
test_vnfmonitor.run_monitor(test_hosting_vnf)
self.mock_monitor_manager\
.invoke.assert_called_once_with('ping', 'monitor_call', device={},
.invoke.assert_called_once_with('ping', 'monitor_call', vnf={},
kwargs=mock_kwargs)


@ -77,7 +77,7 @@ class TestVNFMPlugin(db_base.SqlTestCase):
fake_vim_client = mock.Mock()
fake_vim_client.return_value = self.vim_client
self._mock(
'tacker.vm.vim_client.VimClient', fake_vim_client)
'tacker.vnfm.vim_client.VimClient', fake_vim_client)
def _stub_get_vim(self):
vim_obj = {'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
@ -99,7 +99,7 @@ class TestVNFMPlugin(db_base.SqlTestCase):
fake_vnf_monitor = mock.Mock()
fake_vnf_monitor.return_value = self._vnf_monitor
self._mock(
'tacker.vm.monitor.VNFMonitor', fake_vnf_monitor)
'tacker.vnfm.monitor.VNFMonitor', fake_vnf_monitor)
def _insert_dummy_device_template(self):
session = self.context.session
@ -165,7 +165,7 @@ class TestVNFMPlugin(db_base.SqlTestCase):
mock.ANY,
plugin=mock.ANY,
context=mock.ANY,
device_template=mock.ANY)
vnfd=mock.ANY)
self._cos_db_plugin.create_event.assert_called_once_with(
self.context, evt_type=constants.RES_EVT_CREATE, res_id=mock.ANY,
res_state=constants.RES_EVT_VNFD_NA_STATE,
@ -200,7 +200,7 @@ class TestVNFMPlugin(db_base.SqlTestCase):
self._device_manager.invoke.assert_called_with(mock.ANY, mock.ANY,
plugin=mock.ANY,
context=mock.ANY,
device=mock.ANY,
vnf=mock.ANY,
auth_attr=mock.ANY)
self._pool.spawn_n.assert_called_once_with(mock.ANY)
self._cos_db_plugin.create_event.assert_called_with(
@ -216,7 +216,7 @@ class TestVNFMPlugin(db_base.SqlTestCase):
self._device_manager.invoke.assert_called_with(mock.ANY, mock.ANY,
plugin=mock.ANY,
context=mock.ANY,
device_id=mock.ANY,
vnf_id=mock.ANY,
auth_attr=mock.ANY,
region_name=mock.ANY)
self._vnf_monitor.delete_hosting_vnf.assert_called_with(mock.ANY)


@ -18,7 +18,7 @@ import testtools
from toscaparser import tosca_template
from toscaparser.utils import yamlparser
from tacker.vm.tosca import utils
from tacker.vnfm.tosca import utils
from translator.hot import tosca_translator


@ -16,7 +16,7 @@ import os
import testtools
import yaml
from tacker.vm.tosca import utils as toscautils
from tacker.vnfm.tosca import utils as toscautils
from toscaparser.tosca_template import ToscaTemplate
from translator.hot.tosca_translator import TOSCATranslator
@ -41,7 +41,7 @@ class TestToscaUtils(testtools.TestCase):
super(TestToscaUtils, self).setUp()
def test_updateimport(self):
importspath = os.path.abspath('./tacker/vm/tosca/lib/')
importspath = os.path.abspath('./tacker/vnfm/tosca/lib/')
file1 = importspath + '/tacker_defs.yaml'
file2 = importspath + '/tacker_nfv_defs.yaml'
expected_imports = [file1, file2]


@ -18,7 +18,7 @@ from sqlalchemy.orm import exc as orm_exc
from tacker.extensions import nfvo
from tacker import manager
from tacker.tests.unit import base
from tacker.vm import vim_client
from tacker.vnfm import vim_client
class TestVIMClient(base.TestCase):


@ -14,7 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import inspect
import six
import yaml
@ -32,9 +31,9 @@ from tacker.common import exceptions
from tacker.db.vm import vm_db
from tacker.extensions import vnfm
from tacker.plugins.common import constants
from tacker.vm.mgmt_drivers import constants as mgmt_constants
from tacker.vm import monitor
from tacker.vm import vim_client
from tacker.vnfm.mgmt_drivers import constants as mgmt_constants
from tacker.vnfm import monitor
from tacker.vnfm import vim_client
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -50,7 +49,7 @@ class VNFMMgmtMixin(object):
cfg.ListOpt(
'mgmt_driver', default=['noop', 'openwrt'],
help=_('MGMT driver to communicate with '
'Hosting Device/logical service '
'Hosting VNF/logical service '
'instance tacker plugin will use')),
cfg.IntOpt('boot_wait', default=30,
help=_('Time interval to wait for VM to boot'))
@ -62,46 +61,46 @@ class VNFMMgmtMixin(object):
self._mgmt_manager = driver_manager.DriverManager(
'tacker.tacker.mgmt.drivers', cfg.CONF.tacker.mgmt_driver)
def _invoke(self, device_dict, **kwargs):
def _invoke(self, vnf_dict, **kwargs):
method = inspect.stack()[1][3]
return self._mgmt_manager.invoke(
self._mgmt_driver_name(device_dict), method, **kwargs)
self._mgmt_driver_name(vnf_dict), method, **kwargs)
def mgmt_create_pre(self, context, device_dict):
def mgmt_create_pre(self, context, vnf_dict):
return self._invoke(
device_dict, plugin=self, context=context, device=device_dict)
vnf_dict, plugin=self, context=context, vnf=vnf_dict)
def mgmt_create_post(self, context, device_dict):
def mgmt_create_post(self, context, vnf_dict):
return self._invoke(
device_dict, plugin=self, context=context, device=device_dict)
vnf_dict, plugin=self, context=context, vnf=vnf_dict)
def mgmt_update_pre(self, context, device_dict):
def mgmt_update_pre(self, context, vnf_dict):
return self._invoke(
device_dict, plugin=self, context=context, device=device_dict)
vnf_dict, plugin=self, context=context, vnf=vnf_dict)
def mgmt_update_post(self, context, device_dict):
def mgmt_update_post(self, context, vnf_dict):
return self._invoke(
device_dict, plugin=self, context=context, device=device_dict)
vnf_dict, plugin=self, context=context, vnf=vnf_dict)
def mgmt_delete_pre(self, context, device_dict):
def mgmt_delete_pre(self, context, vnf_dict):
return self._invoke(
device_dict, plugin=self, context=context, device=device_dict)
vnf_dict, plugin=self, context=context, vnf=vnf_dict)
def mgmt_delete_post(self, context, device_dict):
def mgmt_delete_post(self, context, vnf_dict):
return self._invoke(
device_dict, plugin=self, context=context, device=device_dict)
vnf_dict, plugin=self, context=context, vnf=vnf_dict)
def mgmt_get_config(self, context, device_dict):
def mgmt_get_config(self, context, vnf_dict):
return self._invoke(
device_dict, plugin=self, context=context, device=device_dict)
vnf_dict, plugin=self, context=context, vnf=vnf_dict)
def mgmt_url(self, context, device_dict):
def mgmt_url(self, context, vnf_dict):
return self._invoke(
device_dict, plugin=self, context=context, device=device_dict)
vnf_dict, plugin=self, context=context, vnf=vnf_dict)
def mgmt_call(self, context, device_dict, kwargs):
def mgmt_call(self, context, vnf_dict, kwargs):
return self._invoke(
device_dict, plugin=self, context=context, device=device_dict,
vnf_dict, plugin=self, context=context, vnf=vnf_dict,
kwargs=kwargs)
@ -113,7 +112,7 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
OPTS = [
cfg.ListOpt(
'infra_driver', default=['nova', 'heat', 'noop'],
help=_('Hosting device drivers tacker plugin will use')),
help=_('Hosting vnf drivers tacker plugin will use')),
]
cfg.CONF.register_opts(OPTS, 'tacker')
supported_extension_aliases = ['vnfm']
@ -123,7 +122,7 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
self._pool = eventlet.GreenPool()
self.boot_wait = cfg.CONF.tacker.boot_wait
self.vim_client = vim_client.VimClient()
self._device_manager = driver_manager.DriverManager(
self._vnf_manager = driver_manager.DriverManager(
'tacker.tacker.device.drivers',
cfg.CONF.tacker.infra_driver)
self._vnf_monitor = monitor.VNFMonitor(self.boot_wait)
@ -131,30 +130,27 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
def spawn_n(self, function, *args, **kwargs):
self._pool.spawn_n(function, *args, **kwargs)
###########################################################################
# hosting device template
def create_device_template(self, context, device_template):
template = device_template['device_template']
if "tosca_definitions_version" not in template['attributes']['vnfd']:
versionutils.report_deprecated_feature(LOG, 'VNFD legacy templates'
def create_vnfd(self, context, vnfd):
vnfd_data = vnfd['vnfd']
if "tosca_definitions_version" not in vnfd_data['attributes']['vnfd']:
versionutils.report_deprecated_feature(LOG, 'Legacy VNFD templates'
' are deprecated since Mitaka release and will be removed in'
' Ocata release. Please use NFV TOSCA templates.')
' Ocata release. Please use NFV TOSCA based VNFD templates.')
LOG.debug(_('template %s'), template)
LOG.debug(_('vnfd %s'), vnfd_data)
infra_driver = template.get('infra_driver')
infra_driver = vnfd_data.get('infra_driver')
if not attributes.is_attr_set(infra_driver):
LOG.debug(_('hosting device driver must be specified'))
LOG.debug(_('hosting vnf driver must be specified'))
raise vnfm.InfraDriverNotSpecified()
if infra_driver not in self._device_manager:
LOG.debug(_('unknown hosting device driver '
if infra_driver not in self._vnf_manager:
LOG.debug(_('unknown hosting vnf driver '
'%(infra_driver)s in %(drivers)s'),
{'infra_driver': infra_driver,
'drivers': cfg.CONF.tacker.infra_driver})
raise vnfm.InvalidInfraDriver(infra_driver=infra_driver)
service_types = template.get('service_types')
service_types = vnfd_data.get('service_types')
if not attributes.is_attr_set(service_types):
LOG.debug(_('service type must be specified'))
raise vnfm.ServiceTypesNotSpecified()
@ -164,257 +160,255 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
# so doesn't check it here yet.
pass
self._device_manager.invoke(
infra_driver, 'create_device_template_pre', plugin=self,
context=context, device_template=device_template)
self._vnf_manager.invoke(
infra_driver, 'create_vnfd_pre', plugin=self,
context=context, vnfd=vnfd)
return super(VNFMPlugin, self).create_device_template(
context, device_template)
return super(VNFMPlugin, self).create_vnfd(
context, vnfd)
###########################################################################
# hosting device
def add_device_to_monitor(self, device_dict, vim_auth):
dev_attrs = device_dict['attributes']
mgmt_url = device_dict['mgmt_url']
def add_vnf_to_monitor(self, vnf_dict, vim_auth):
dev_attrs = vnf_dict['attributes']
mgmt_url = vnf_dict['mgmt_url']
if 'monitoring_policy' in dev_attrs and mgmt_url:
def action_cb(hosting_vnf_, action):
action_cls = monitor.ActionPolicy.get_policy(action,
device_dict)
vnf_dict)
if action_cls:
action_cls.execute_action(self, hosting_vnf['device'],
action_cls.execute_action(self, hosting_vnf['vnf'],
vim_auth)
hosting_vnf = self._vnf_monitor.to_hosting_vnf(
device_dict, action_cb)
vnf_dict, action_cb)
LOG.debug('hosting_vnf: %s', hosting_vnf)
self._vnf_monitor.add_hosting_vnf(hosting_vnf)
def config_device(self, context, device_dict):
config = device_dict['attributes'].get('config')
def config_vnf(self, context, vnf_dict):
config = vnf_dict['attributes'].get('config')
if not config:
return
eventlet.sleep(self.boot_wait) # wait for vm to be ready
device_id = device_dict['id']
vnf_id = vnf_dict['id']
update = {
'device': {
'id': device_id,
'vnf': {
'id': vnf_id,
'attributes': {'config': config},
}
}
self.update_device(context, device_id, update)
self.update_vnf(context, vnf_id, update)
def _create_device_wait(self, context, device_dict, auth_attr):
driver_name = self._infra_driver_name(device_dict)
device_id = device_dict['id']
instance_id = self._instance_id(device_dict)
def _create_vnf_wait(self, context, vnf_dict, auth_attr):
driver_name = self._infra_driver_name(vnf_dict)
vnf_id = vnf_dict['id']
instance_id = self._instance_id(vnf_dict)
create_failed = False
try:
self._device_manager.invoke(
self._vnf_manager.invoke(
driver_name, 'create_wait', plugin=self, context=context,
device_dict=device_dict, device_id=instance_id,
vnf_dict=vnf_dict, vnf_id=instance_id,
auth_attr=auth_attr)
except vnfm.DeviceCreateWaitFailed as e:
LOG.error(_LE("VNF Create failed for vnf_id %s"), device_id)
except vnfm.VNFCreateWaitFailed as e:
LOG.error(_LE("VNF Create failed for vnf_id %s"), vnf_id)
create_failed = True
device_dict['status'] = constants.ERROR
self.set_device_error_status_reason(context, device_id,
six.text_type(e))
vnf_dict['status'] = constants.ERROR
self.set_vnf_error_status_reason(context, vnf_id,
six.text_type(e))
if instance_id is None or create_failed:
mgmt_url = None
else:
# mgmt_url = self.mgmt_url(context, device_dict)
# mgmt_url = self.mgmt_url(context, vnf_dict)
# FIXME(yamahata):
mgmt_url = device_dict['mgmt_url']
mgmt_url = vnf_dict['mgmt_url']
self._create_device_post(
context, device_id, instance_id, mgmt_url, device_dict)
self.mgmt_create_post(context, device_dict)
self._create_vnf_post(
context, vnf_id, instance_id, mgmt_url, vnf_dict)
self.mgmt_create_post(context, vnf_dict)
if instance_id is None or create_failed:
return
device_dict['mgmt_url'] = mgmt_url
vnf_dict['mgmt_url'] = mgmt_url
kwargs = {
mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_CREATE_DEVICE,
mgmt_constants.KEY_KWARGS: {'device': device_dict},
mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_CREATE_VNF,
mgmt_constants.KEY_KWARGS: {'vnf': vnf_dict},
}
new_status = constants.ACTIVE
try:
self.mgmt_call(context, device_dict, kwargs)
self.mgmt_call(context, vnf_dict, kwargs)
except exceptions.MgmtDriverException:
LOG.error(_('VNF configuration failed'))
new_status = constants.ERROR
self.set_device_error_status_reason(context, device_id,
self.set_vnf_error_status_reason(context, vnf_id,
'Unable to configure VDU')
device_dict['status'] = new_status
self._create_device_status(context, device_id, new_status)
vnf_dict['status'] = new_status
self._create_vnf_status(context, vnf_id, new_status)
def get_vim(self, context, device):
region_name = device.setdefault('placement_attr', {}).get(
def get_vim(self, context, vnf):
region_name = vnf.setdefault('placement_attr', {}).get(
'region_name', None)
vim_res = self.vim_client.get_vim(context, device['vim_id'],
vim_res = self.vim_client.get_vim(context, vnf['vim_id'],
region_name)
device['placement_attr']['vim_name'] = vim_res['vim_name']
device['vim_id'] = vim_res['vim_id']
vnf['placement_attr']['vim_name'] = vim_res['vim_name']
vnf['vim_id'] = vim_res['vim_id']
return vim_res['vim_auth']
def _create_device(self, context, device, vim_auth):
device_dict = self._create_device_pre(
context, device) if not device.get('id') else device
device_id = device_dict['id']
driver_name = self._infra_driver_name(device_dict)
LOG.debug(_('device_dict %s'), device_dict)
self.mgmt_create_pre(context, device_dict)
def _create_vnf(self, context, vnf, vim_auth):
vnf_dict = self._create_vnf_pre(
context, vnf) if not vnf.get('id') else vnf
vnf_id = vnf_dict['id']
driver_name = self._infra_driver_name(vnf_dict)
LOG.debug(_('vnf_dict %s'), vnf_dict)
self.mgmt_create_pre(context, vnf_dict)
try:
instance_id = self._device_manager.invoke(
instance_id = self._vnf_manager.invoke(
driver_name, 'create', plugin=self,
context=context, device=device_dict, auth_attr=vim_auth)
context=context, vnf=vnf_dict, auth_attr=vim_auth)
except Exception:
with excutils.save_and_reraise_exception():
self.delete_device(context, device_id)
self.delete_vnf(context, vnf_id)
if instance_id is None:
self._create_device_post(context, device_id, None, None,
device_dict)
self._create_vnf_post(context, vnf_id, None, None,
vnf_dict)
return
device_dict['instance_id'] = instance_id
return device_dict
vnf_dict['instance_id'] = instance_id
return vnf_dict
def create_device(self, context, device):
device_info = device['device']
vim_auth = self.get_vim(context, device_info)
device_dict = self._create_device(context, device_info, vim_auth)
def create_vnf(self, context, vnf):
vnf_info = vnf['vnf']
vim_auth = self.get_vim(context, vnf_info)
vnf_dict = self._create_vnf(context, vnf_info, vim_auth)
def create_device_wait():
self._create_device_wait(context, device_dict, vim_auth)
self.add_device_to_monitor(device_dict, vim_auth)
self.config_device(context, device_dict)
self.spawn_n(create_device_wait)
return device_dict
def create_vnf_wait():
self._create_vnf_wait(context, vnf_dict, vim_auth)
self.add_vnf_to_monitor(vnf_dict, vim_auth)
self.config_vnf(context, vnf_dict)
self.spawn_n(create_vnf_wait)
return vnf_dict
# not for wsgi, but for service to create hosting device
# the device is NOT added to monitor.
def create_device_sync(self, context, device):
vim_auth = self.get_vim(context, device)
device_dict = self._create_device(context, device, vim_auth)
self._create_device_wait(context, device_dict, vim_auth)
return device_dict
# not for wsgi, but for service to create hosting vnf
# the vnf is NOT added to monitor.
def create_vnf_sync(self, context, vnf):
vim_auth = self.get_vim(context, vnf)
vnf_dict = self._create_vnf(context, vnf, vim_auth)
self._create_vnf_wait(context, vnf_dict, vim_auth)
return vnf_dict
def _update_device_wait(self, context, device_dict, vim_auth):
driver_name = self._infra_driver_name(device_dict)
instance_id = self._instance_id(device_dict)
def _update_vnf_wait(self, context, vnf_dict, vim_auth):
driver_name = self._infra_driver_name(vnf_dict)
instance_id = self._instance_id(vnf_dict)
kwargs = {
mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_UPDATE_DEVICE,
mgmt_constants.KEY_KWARGS: {'device': device_dict},
mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_UPDATE_VNF,
mgmt_constants.KEY_KWARGS: {'vnf': vnf_dict},
}
new_status = constants.ACTIVE
placement_attr = device_dict['placement_attr']
placement_attr = vnf_dict['placement_attr']
region_name = placement_attr.get('region_name')
try:
self._device_manager.invoke(
self._vnf_manager.invoke(
driver_name, 'update_wait', plugin=self,
context=context, device_id=instance_id, auth_attr=vim_auth,
context=context, vnf_id=instance_id, auth_attr=vim_auth,
region_name=region_name)
self.mgmt_call(context, device_dict, kwargs)
self.mgmt_call(context, vnf_dict, kwargs)
except exceptions.MgmtDriverException as e:
LOG.error(_('VNF configuration failed'))
new_status = constants.ERROR
self.set_device_error_status_reason(context, device_dict['id'],
six.text_type(e))
device_dict['status'] = new_status
self.mgmt_update_post(context, device_dict)
self.set_vnf_error_status_reason(context, vnf_dict['id'],
six.text_type(e))
vnf_dict['status'] = new_status
self.mgmt_update_post(context, vnf_dict)
self._update_device_post(context, device_dict['id'],
new_status, device_dict)
self._update_vnf_post(context, vnf_dict['id'],
new_status, vnf_dict)
def update_device(self, context, device_id, device):
device_dict = self._update_device_pre(context, device_id)
vim_auth = self.get_vim(context, device_dict)
driver_name = self._infra_driver_name(device_dict)
instance_id = self._instance_id(device_dict)
def update_vnf(self, context, vnf_id, vnf):
vnf_dict = self._update_vnf_pre(context, vnf_id)
vim_auth = self.get_vim(context, vnf_dict)
driver_name = self._infra_driver_name(vnf_dict)
instance_id = self._instance_id(vnf_dict)
try:
self.mgmt_update_pre(context, device_dict)
self._device_manager.invoke(
self.mgmt_update_pre(context, vnf_dict)
self._vnf_manager.invoke(
driver_name, 'update', plugin=self, context=context,
device_id=instance_id, device_dict=device_dict,
device=device, auth_attr=vim_auth)
vnf_id=instance_id, vnf_dict=vnf_dict,
vnf=vnf, auth_attr=vim_auth)
except Exception as e:
with excutils.save_and_reraise_exception():
device_dict['status'] = constants.ERROR
self.set_device_error_status_reason(context,
device_dict['id'],
six.text_type(e))
self.mgmt_update_post(context, device_dict)
self._update_device_post(context, device_id, constants.ERROR)
vnf_dict['status'] = constants.ERROR
self.set_vnf_error_status_reason(context,
vnf_dict['id'],
six.text_type(e))
self.mgmt_update_post(context, vnf_dict)
self._update_vnf_post(context, vnf_id, constants.ERROR)
self.spawn_n(self._update_device_wait, context, device_dict, vim_auth)
return device_dict
self.spawn_n(self._update_vnf_wait, context, vnf_dict, vim_auth)
return vnf_dict
def _delete_device_wait(self, context, device_dict, auth_attr):
driver_name = self._infra_driver_name(device_dict)
instance_id = self._instance_id(device_dict)
def _delete_vnf_wait(self, context, vnf_dict, auth_attr):
driver_name = self._infra_driver_name(vnf_dict)
instance_id = self._instance_id(vnf_dict)
e = None
if instance_id:
placement_attr = device_dict['placement_attr']
placement_attr = vnf_dict['placement_attr']
region_name = placement_attr.get('region_name')
try:
self._device_manager.invoke(
self._vnf_manager.invoke(
driver_name,
'delete_wait',
plugin=self,
context=context,
device_id=instance_id,
vnf_id=instance_id,
auth_attr=auth_attr,
region_name=region_name)
except Exception as e_:
e = e_
device_dict['status'] = constants.ERROR
device_dict['error_reason'] = six.text_type(e)
LOG.exception(_('_delete_device_wait'))
vnf_dict['status'] = constants.ERROR
vnf_dict['error_reason'] = six.text_type(e)
LOG.exception(_('_delete_vnf_wait'))
self.mgmt_delete_post(context, device_dict)
device_id = device_dict['id']
self._delete_device_post(context, device_id, e)
self.mgmt_delete_post(context, vnf_dict)
vnf_id = vnf_dict['id']
self._delete_vnf_post(context, vnf_id, e)
def delete_device(self, context, device_id):
device_dict = self._delete_device_pre(context, device_id)
vim_auth = self.get_vim(context, device_dict)
self._vnf_monitor.delete_hosting_vnf(device_id)
driver_name = self._infra_driver_name(device_dict)
instance_id = self._instance_id(device_dict)
placement_attr = device_dict['placement_attr']
def delete_vnf(self, context, vnf_id):
vnf_dict = self._delete_vnf_pre(context, vnf_id)
vim_auth = self.get_vim(context, vnf_dict)
self._vnf_monitor.delete_hosting_vnf(vnf_id)
driver_name = self._infra_driver_name(vnf_dict)
instance_id = self._instance_id(vnf_dict)
placement_attr = vnf_dict['placement_attr']
region_name = placement_attr.get('region_name')
kwargs = {
mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_DELETE_DEVICE,
mgmt_constants.KEY_KWARGS: {'device': device_dict},
mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_DELETE_VNF,
mgmt_constants.KEY_KWARGS: {'vnf': vnf_dict},
}
try:
self.mgmt_delete_pre(context, device_dict)
self.mgmt_call(context, device_dict, kwargs)
self.mgmt_delete_pre(context, vnf_dict)
self.mgmt_call(context, vnf_dict, kwargs)
if instance_id:
self._device_manager.invoke(driver_name,
'delete',
plugin=self,
context=context,
device_id=instance_id,
auth_attr=vim_auth,
region_name=region_name)
self._vnf_manager.invoke(driver_name,
'delete',
plugin=self,
context=context,
vnf_id=instance_id,
auth_attr=vim_auth,
region_name=region_name)
except Exception as e:
# TODO(yamahata): when the device is already deleted, mask
# the error and delete the row in the db.
# Otherwise mark it as error.
with excutils.save_and_reraise_exception():
device_dict['status'] = constants.ERROR
device_dict['error_reason'] = six.text_type(e)
self.mgmt_delete_post(context, device_dict)
self._delete_device_post(context, device_id, e)
vnf_dict['status'] = constants.ERROR
vnf_dict['error_reason'] = six.text_type(e)
self.mgmt_delete_post(context, vnf_dict)
self._delete_vnf_post(context, vnf_id, e)
self.spawn_n(self._delete_device_wait, context, device_dict, vim_auth)
self.spawn_n(self._delete_vnf_wait, context, vnf_dict, vim_auth)
def _handle_vnf_scaling(self, context, policy):
# validate
@ -474,7 +468,7 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
# action
def _vnf_policy_action():
try:
self._device_manager.invoke(
self._vnf_manager.invoke(
infra_driver,
'scale',
plugin=self,
@ -490,7 +484,7 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
policy)
with excutils.save_and_reraise_exception():
vnf['status'] = constants.ERROR
self.set_device_error_status_reason(
self.set_vnf_error_status_reason(
context,
policy['vnf_id'],
six.text_type(e))
@ -501,7 +495,7 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
try:
LOG.debug(_("Policy %s action is in progress") %
policy)
mgmt_url = self._device_manager.invoke(
mgmt_url = self._vnf_manager.invoke(
infra_driver,
'scale_wait',
plugin=self,
@ -518,7 +512,7 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
LOG.error(_("Policy %s action is failed to complete") %
policy)
with excutils.save_and_reraise_exception():
self.set_device_error_status_reason(
self.set_vnf_error_status_reason(
context,
policy['vnf_id'],
six.text_type(e))
@ -537,28 +531,6 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
return policy
def create_vnf(self, context, vnf):
vnf['device'] = vnf.pop('vnf')
vnf_attributes = vnf['device']
vnf_attributes['template_id'] = vnf_attributes.pop('vnfd_id')
vnf_dict = self.create_device(context, vnf)
vnf_response = copy.deepcopy(vnf_dict)
vnf_response['vnfd_id'] = vnf_response.pop('vnfd_id')
return vnf_response
def update_vnf(
self, context, vnf_id, vnf):
vnf['device'] = vnf.pop('vnf')
return self.update_device(context, vnf_id, vnf)
def delete_vnf(self, context, vnf_id):
self.delete_device(context, vnf_id)
def create_vnfd(self, context, vnfd):
vnfd['device_template'] = vnfd.pop('vnfd')
new_dict = self.create_device_template(context, vnfd)
return new_dict
def _make_policy_dict(self, vnf, name, policy):
p = {}
p['type'] = policy['type']
@ -570,8 +542,8 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
def get_vnf_policies(
self, context, vnf_id, filters=None, fields=None):
vnf = self.get_device(context, vnf_id)
vnfd_tmpl = yaml.load(vnf['device_template']['attributes']['vnfd'])
vnf = self.get_vnf(context, vnf_id)
vnfd_tmpl = yaml.load(vnf['vnfd']['attributes']['vnfd'])
policy_list = []
if vnfd_tmpl.get('tosca_definitions_version'):
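After this rename the plugin exposes only the vnf-named entry points, and the old create_device/update_device/delete_device wrappers above are removed. The following is a minimal sketch, not part of this change, of how calling code would drive the renamed API; the plugin and context objects, the vnfd id and every attribute value are placeholders assumed for illustration.
def vnf_lifecycle_example(plugin, context, vnfd_id):
    # 'vnf' is now the only request-body key; 'vnfd_id' replaces the
    # 'template_id' mapping that the removed wrapper used to perform.
    vnf_body = {'vnf': {
        'name': 'sample-vnf',            # placeholder values
        'vnfd_id': vnfd_id,
        'vim_id': '',                    # assumed: empty selects the default VIM
        'attributes': {},
    }}
    vnf_dict = plugin.create_vnf(context, vnf_body)

    # update_vnf() reads the new config from vnf['vnf']['attributes']['config']
    update_body = {'vnf': {'attributes': {'config': 'vdus:\n  vdu1: {}\n'}}}
    plugin.update_vnf(context, vnf_dict['id'], update_body)

    plugin.delete_vnf(context, vnf_dict['id'])
    return vnf_dict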

View File

@ -18,13 +18,13 @@
SVC_TYPE_ROUTER = 'router'
SVC_TYPE_LOADBALANCER = 'loadbalancer'
# attribute key for service to spin up device
# attribute key for service to spin up vnf
# for nova driver. novaclient library uses those
ATTR_KEY_IMAGE = 'image'
ATTR_KEY_FLAVOR = 'flavor'
ATTR_KEY_MGMT_NETWORK = 'mgmt-network'
# attribute key for device template for heat
# attribute key for vnf template for heat
ATTR_KEY_HEAT_STACK_NAME = 'stack_name'
ATTR_KEY_HEAT_TEMPLATE_URL = 'template_url'
ATTR_KEY_HEAT_TEMPLATE = 'template'

View File

@ -26,7 +26,7 @@ class DeviceAbstractDriver(extensions.PluginInterface):
@abc.abstractmethod
def get_type(self):
"""Return one of predefined type of the hosting device drivers."""
"""Return one of predefined type of the hosting vnf drivers."""
pass
@abc.abstractmethod
@ -39,32 +39,32 @@ class DeviceAbstractDriver(extensions.PluginInterface):
pass
# @abc.abstractmethod
def create_device_template_pre(self, plugin, context, device_template):
"""Called before creating device template."""
def create_vnfd_pre(self, plugin, context, vnfd):
"""Called before creating vnf template."""
pass
@abc.abstractmethod
def create(self, plugin, context, device):
"""Create device and return its id."""
def create(self, plugin, context, vnf):
"""Create vnf and return its id."""
@abc.abstractmethod
def create_wait(self, plugin, context, device_dict, device_id):
"""wait for device creation to complete."""
def create_wait(self, plugin, context, vnf_dict, vnf_id):
"""wait for vnf creation to complete."""
@abc.abstractmethod
def update(self, plugin, context, device_id, device_dict, device):
# device_dict: old device_dict to be updated
# device: update with device dict
def update(self, plugin, context, vnf_id, vnf_dict, vnf):
# vnf_dict: old vnf_dict to be updated
# vnf: update with vnf dict
pass
@abc.abstractmethod
def update_wait(self, plugin, context, device_id):
def update_wait(self, plugin, context, vnf_id):
pass
@abc.abstractmethod
def delete(self, plugin, context, device_id):
def delete(self, plugin, context, vnf_id):
pass
@abc.abstractmethod
def delete_wait(self, plugin, context, device_id):
def delete_wait(self, plugin, context, vnf_id):
pass

View File

@ -30,9 +30,9 @@ import yaml
from tacker.common import clients
from tacker.common import log
from tacker.extensions import vnfm
from tacker.vm.infra_drivers import abstract_driver
from tacker.vm.infra_drivers import scale_driver
from tacker.vm.tosca import utils as toscautils
from tacker.vnfm.infra_drivers import abstract_driver
from tacker.vnfm.infra_drivers import scale_driver
from tacker.vnfm.tosca import utils as toscautils
LOG = logging.getLogger(__name__)
@ -85,7 +85,7 @@ def get_scaling_policy_name(action, policy_name):
class DeviceHeat(abstract_driver.DeviceAbstractDriver,
scale_driver.VnfScaleAbstractDriver):
"""Heat driver of hosting device."""
"""Heat driver of hosting vnf."""
def __init__(self):
super(DeviceHeat, self).__init__()
@ -100,61 +100,63 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
return 'Heat infra driver'
@log.log
def create_device_template_pre(self, plugin, context, device_template):
device_template_dict = device_template['device_template']
vnfd_yaml = device_template_dict['attributes'].get('vnfd')
def create_vnfd_pre(self, plugin, context, vnfd):
vnfd_dict = vnfd['vnfd']
vnfd_yaml = vnfd_dict['attributes'].get('vnfd')
if vnfd_yaml is None:
return
vnfd_dict = yaml.load(vnfd_yaml)
LOG.debug(_('vnfd_dict: %s'), vnfd_dict)
inner_vnfd_dict = yaml.load(vnfd_yaml)
LOG.debug(_('vnfd_dict: %s'), inner_vnfd_dict)
if 'tosca_definitions_version' in vnfd_dict:
if 'tosca_definitions_version' in inner_vnfd_dict:
# Prepend the tacker_defs.yaml import file with the full
# path to the file
toscautils.updateimports(vnfd_dict)
toscautils.updateimports(inner_vnfd_dict)
try:
tosca = ToscaTemplate(a_file=False, yaml_dict_tpl=vnfd_dict)
tosca = ToscaTemplate(a_file=False,
yaml_dict_tpl=inner_vnfd_dict)
except Exception as e:
LOG.exception(_("tosca-parser error: %s"), str(e))
raise vnfm.ToscaParserFailed(error_msg_details=str(e))
if ('description' not in device_template_dict or
device_template_dict['description'] == ''):
device_template_dict['description'] = vnfd_dict.get(
if ('description' not in vnfd_dict or
vnfd_dict['description'] == ''):
vnfd_dict['description'] = inner_vnfd_dict.get(
'description', '')
if (('name' not in device_template_dict or
not len(device_template_dict['name'])) and
'metadata' in vnfd_dict):
device_template_dict['name'] = vnfd_dict['metadata'].get(
if (('name' not in vnfd_dict or
not len(vnfd_dict['name'])) and
'metadata' in inner_vnfd_dict):
vnfd_dict['name'] = inner_vnfd_dict['metadata'].get(
'template_name', '')
device_template_dict['mgmt_driver'] = toscautils.get_mgmt_driver(
vnfd_dict['mgmt_driver'] = toscautils.get_mgmt_driver(
tosca)
else:
KEY_LIST = (('name', 'template_name'),
('description', 'description'))
device_template_dict.update(
dict((key, vnfd_dict[vnfd_key]) for (key, vnfd_key) in KEY_LIST
if ((key not in device_template_dict or
device_template_dict[key] == '') and
vnfd_key in vnfd_dict and
vnfd_dict[vnfd_key] != '')))
vnfd_dict.update(
dict((key, inner_vnfd_dict[vnfd_key]) for (key, vnfd_key)
in KEY_LIST
if ((key not in vnfd_dict or
vnfd_dict[key] == '') and
vnfd_key in inner_vnfd_dict and
inner_vnfd_dict[vnfd_key] != '')))
service_types = vnfd_dict.get('service_properties', {}).get('type',
[])
service_types = inner_vnfd_dict.get(
'service_properties', {}).get('type', [])
if service_types:
device_template_dict.setdefault('service_types', []).extend(
vnfd_dict.setdefault('service_types', []).extend(
[{'service_type': service_type}
for service_type in service_types])
# TODO(anyone) - this code assumes one mgmt_driver per VNFD???
for vdu in vnfd_dict.get('vdus', {}).values():
for vdu in inner_vnfd_dict.get('vdus', {}).values():
mgmt_driver = vdu.get('mgmt_driver')
if mgmt_driver:
device_template_dict['mgmt_driver'] = mgmt_driver
LOG.debug(_('device_template %s'), device_template)
vnfd_dict['mgmt_driver'] = mgmt_driver
LOG.debug(_('vnfd %s'), vnfd)
@log.log
def _update_params(self, original, paramvalues, match=False):
@ -265,15 +267,15 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
return unsupported_resource_prop
@log.log
def create(self, plugin, context, device, auth_attr):
LOG.debug(_('device %s'), device)
def create(self, plugin, context, vnf, auth_attr):
LOG.debug(_('vnf %s'), vnf)
attributes = device['device_template']['attributes'].copy()
attributes = vnf['vnfd']['attributes'].copy()
vnfd_yaml = attributes.pop('vnfd', None)
if vnfd_yaml is None:
# TODO(kangaraj-manickam) raise user level exception
LOG.info(_("VNFD is not provided, so no device is created !!"))
LOG.info(_("VNFD is not provided, so no vnf is created !!"))
return
LOG.debug('vnfd_yaml %s', vnfd_yaml)
@ -286,8 +288,8 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
if key in attributes:
fields[key] = jsonutils.loads(attributes.pop(key))
# overwrite parameters with given dev_attrs for device creation
dev_attrs = device['attributes'].copy()
# overwrite parameters with given dev_attrs for vnf creation
dev_attrs = vnf['attributes'].copy()
fields.update(dict((key, dev_attrs.pop(key)) for key
in ('stack_name', 'template_url', 'template')
if key in dev_attrs))
@ -300,7 +302,7 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
fields, dev_attrs = update_fields()
region_name = device.get('placement_attr', {}).get('region_name', None)
region_name = vnf.get('placement_attr', {}).get('region_name', None)
heatclient_ = HeatClient(auth_attr, region_name)
unsupported_res_prop = self.fetch_unsupported_resource_prop(
heatclient_)
@ -524,7 +526,7 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
# to pass necessary parameters to plugin upwards.
for key in ('service_type',):
if key in vdu_dict:
device.setdefault(
vnf.setdefault(
'attributes', {})[vdu_id] = jsonutils.dumps(
{key: vdu_dict[key]})
@ -559,23 +561,23 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
main_yaml = yaml.dump(main_dict)
fields['template'] = main_yaml
fields['files'] = {'scaling.yaml': heat_template_yaml}
device['attributes']['heat_template'] = main_yaml
vnf['attributes']['heat_template'] = main_yaml
# TODO(kanagaraj-manickam) when multiple groups are
# supported, make this scaling attribute as
# scaling name vs scaling template map and remove
# scaling_group_names
device['attributes']['scaling.yaml'] = heat_template_yaml
device['attributes'][
vnf['attributes']['scaling.yaml'] = heat_template_yaml
vnf['attributes'][
'scaling_group_names'] = jsonutils.dumps(
scaling_group_names
)
else:
if not device['attributes'].get('heat_template'):
device['attributes'][
if not vnf['attributes'].get('heat_template'):
vnf['attributes'][
'heat_template'] = fields['template']
if monitoring_dict:
device['attributes']['monitoring_policy'] = \
vnf['attributes']['monitoring_policy'] = \
jsonutils.dumps(monitoring_dict)
generate_hot()
@ -583,15 +585,15 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
def create_stack():
if 'stack_name' not in fields:
name = (__name__ + '_' + self.__class__.__name__ + '-' +
device['id'])
if device['attributes'].get('failure_count'):
name += ('-RESPAWN-%s') % str(device['attributes'][
vnf['id'])
if vnf['attributes'].get('failure_count'):
name += ('-RESPAWN-%s') % str(vnf['attributes'][
'failure_count'])
fields['stack_name'] = name
# service context is ignored
LOG.debug(_('service_context: %s'),
device.get('service_context', []))
vnf.get('service_context', []))
LOG.debug(_('fields: %s'), fields)
LOG.debug(_('template: %s'), fields['template'])
stack = heatclient_.create(fields)
@ -601,24 +603,24 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
stack = create_stack()
return stack['stack']['id']
def create_wait(self, plugin, context, device_dict, device_id, auth_attr):
region_name = device_dict.get('placement_attr', {}).get(
def create_wait(self, plugin, context, vnf_dict, vnf_id, auth_attr):
region_name = vnf_dict.get('placement_attr', {}).get(
'region_name', None)
heatclient_ = HeatClient(auth_attr, region_name)
stack = heatclient_.get(device_id)
stack = heatclient_.get(vnf_id)
status = stack.stack_status
stack_retries = STACK_RETRIES
error_reason = None
while status == 'CREATE_IN_PROGRESS' and stack_retries > 0:
time.sleep(STACK_RETRY_WAIT)
try:
stack = heatclient_.get(device_id)
stack = heatclient_.get(vnf_id)
except Exception:
LOG.exception(_("Device Instance cleanup may not have "
LOG.exception(_("VNF Instance cleanup may not have "
"happened because Heat API request failed "
"while waiting for the stack %(stack)s to be "
"deleted"), {'stack': device_id})
"deleted"), {'stack': vnf_id})
break
status = stack.stack_status
LOG.debug(_('status: %s'), status)
@ -631,16 +633,16 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(STACK_RETRIES * STACK_RETRY_WAIT),
stack=device_id)
stack=vnf_id)
LOG.warning(_("VNF Creation failed: %(reason)s"),
{'reason': error_reason})
raise vnfm.DeviceCreateWaitFailed(device_id=device_id,
reason=error_reason)
raise vnfm.VNFCreateWaitFailed(vnf_id=vnf_id,
reason=error_reason)
elif stack_retries != 0 and status != 'CREATE_COMPLETE':
error_reason = stack.stack_status_reason
raise vnfm.DeviceCreateWaitFailed(device_id=device_id,
reason=error_reason)
raise vnfm.VNFCreateWaitFailed(vnf_id=vnf_id,
reason=error_reason)
def _find_mgmt_ips(outputs):
LOG.debug(_('outputs %s'), outputs)
@ -652,29 +654,29 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
return mgmt_ips
# scaling enabled
if device_dict['attributes'].get('scaling_group_names'):
if vnf_dict['attributes'].get('scaling_group_names'):
group_names = jsonutils.loads(
device_dict['attributes'].get('scaling_group_names')).values()
vnf_dict['attributes'].get('scaling_group_names')).values()
mgmt_ips = self._find_mgmt_ips_from_groups(heatclient_,
device_id,
vnf_id,
group_names)
else:
mgmt_ips = _find_mgmt_ips(stack.outputs)
if mgmt_ips:
device_dict['mgmt_url'] = jsonutils.dumps(mgmt_ips)
vnf_dict['mgmt_url'] = jsonutils.dumps(mgmt_ips)
@log.log
def update(self, plugin, context, device_id, device_dict, device,
def update(self, plugin, context, vnf_id, vnf_dict, vnf,
auth_attr):
region_name = device_dict.get('placement_attr', {}).get(
region_name = vnf_dict.get('placement_attr', {}).get(
'region_name', None)
heatclient_ = HeatClient(auth_attr, region_name)
heatclient_.get(device_id)
heatclient_.get(vnf_id)
# update config attribute
config_yaml = device_dict.get('attributes', {}).get('config', '')
update_yaml = device['device'].get('attributes', {}).get('config', '')
config_yaml = vnf_dict.get('attributes', {}).get('config', '')
update_yaml = vnf['vnf'].get('attributes', {}).get('config', '')
LOG.debug('yaml orig %(orig)s update %(update)s',
{'orig': config_yaml, 'update': update_yaml})
@ -704,59 +706,59 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
LOG.debug('dict new %(new)s update %(update)s',
{'new': config_dict, 'update': update_dict})
new_yaml = yaml.dump(config_dict)
device_dict.setdefault('attributes', {})['config'] = new_yaml
vnf_dict.setdefault('attributes', {})['config'] = new_yaml
def update_wait(self, plugin, context, device_id, auth_attr,
def update_wait(self, plugin, context, vnf_id, auth_attr,
region_name=None):
# do nothing but check whether the stack exists at the moment
heatclient_ = HeatClient(auth_attr, region_name)
heatclient_.get(device_id)
heatclient_.get(vnf_id)
def delete(self, plugin, context, device_id, auth_attr, region_name=None):
def delete(self, plugin, context, vnf_id, auth_attr, region_name=None):
heatclient_ = HeatClient(auth_attr, region_name)
heatclient_.delete(device_id)
heatclient_.delete(vnf_id)
@log.log
def delete_wait(self, plugin, context, device_id, auth_attr,
def delete_wait(self, plugin, context, vnf_id, auth_attr,
region_name=None):
heatclient_ = HeatClient(auth_attr, region_name)
stack = heatclient_.get(device_id)
stack = heatclient_.get(vnf_id)
status = stack.stack_status
error_reason = None
stack_retries = STACK_RETRIES
while (status == 'DELETE_IN_PROGRESS' and stack_retries > 0):
time.sleep(STACK_RETRY_WAIT)
try:
stack = heatclient_.get(device_id)
stack = heatclient_.get(vnf_id)
except heatException.HTTPNotFound:
return
except Exception:
LOG.exception(_("Device Instance cleanup may not have "
LOG.exception(_("VNF Instance cleanup may not have "
"happened because Heat API request failed "
"while waiting for the stack %(stack)s to be "
"deleted"), {'stack': device_id})
"deleted"), {'stack': vnf_id})
break
status = stack.stack_status
stack_retries = stack_retries - 1
if stack_retries == 0 and status != 'DELETE_COMPLETE':
error_reason = _("Resource cleanup for device is"
error_reason = _("Resource cleanup for vnf is"
" not completed within {wait} seconds as "
"deletion of Stack {stack} is "
"not completed").format(stack=device_id,
"not completed").format(stack=vnf_id,
wait=(STACK_RETRIES * STACK_RETRY_WAIT))
LOG.warning(error_reason)
raise vnfm.DeviceCreateWaitFailed(device_id=device_id,
reason=error_reason)
raise vnfm.VNFCreateWaitFailed(vnf_id=vnf_id,
reason=error_reason)
if stack_retries != 0 and status != 'DELETE_COMPLETE':
error_reason = _("device {device_id} deletion is not completed. "
"{stack_status}").format(device_id=device_id,
error_reason = _("vnf {vnf_id} deletion is not completed. "
"{stack_status}").format(vnf_id=vnf_id,
stack_status=status)
LOG.warning(error_reason)
raise vnfm.DeviceCreateWaitFailed(device_id=device_id,
reason=error_reason)
raise vnfm.VNFCreateWaitFailed(vnf_id=vnf_id,
reason=error_reason)
@classmethod
def _find_mgmt_ips_from_groups(cls,
@ -826,7 +828,7 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
get_scaling_policy_name(policy_name=policy['id'],
action=policy['action']))
except Exception:
LOG.exception(_("Device scaling may not have "
LOG.exception(_("VNF scaling may not have "
"happened because Heat API request failed "
"while waiting for the stack %(stack)s to be "
"scaled"), {'stack': policy['instance_id']})

View File

@ -20,7 +20,7 @@ import uuid
from oslo_log import log as logging
from tacker.common import log
from tacker.vm.infra_drivers import abstract_driver
from tacker.vnfm.infra_drivers import abstract_driver
LOG = logging.getLogger(__name__)
@ -28,7 +28,7 @@ LOG = logging.getLogger(__name__)
class DeviceNoop(abstract_driver.DeviceAbstractDriver):
"""Noop driver of hosting device for tests."""
"""Noop driver of hosting vnf for tests."""
def __init__(self):
super(DeviceNoop, self).__init__()
@ -50,23 +50,23 @@ class DeviceNoop(abstract_driver.DeviceAbstractDriver):
return instance_id
@log.log
def create_wait(self, plugin, context, device_dict, device_id):
def create_wait(self, plugin, context, vnf_dict, vnf_id):
pass
@log.log
def update(self, plugin, context, device_id, device_dict, device):
if device_id not in self._instances:
def update(self, plugin, context, vnf_id, vnf_dict, vnf):
if vnf_id not in self._instances:
LOG.debug(_('not found'))
raise ValueError('No instance %s' % device_id)
raise ValueError('No instance %s' % vnf_id)
@log.log
def update_wait(self, plugin, context, device_id):
def update_wait(self, plugin, context, vnf_id):
pass
@log.log
def delete(self, plugin, context, device_id):
self._instances.remove(device_id)
def delete(self, plugin, context, vnf_id):
self._instances.remove(vnf_id)
@log.log
def delete_wait(self, plugin, context, device_id):
def delete_wait(self, plugin, context, vnf_id):
pass

View File

@ -27,7 +27,7 @@ from six import iteritems
from tacker.api.v1 import attributes
from tacker._i18n import _LE, _LW
from tacker.vm.infra_drivers import abstract_driver
from tacker.vnfm.infra_drivers import abstract_driver
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -70,7 +70,7 @@ class DefaultAuthPlugin(v2_auth.Password):
class DeviceNova(abstract_driver.DeviceAbstractDriver):
"""Nova driver of hosting device."""
"""Nova driver of hosting vnf."""
@versionutils.deprecated(
versionutils.deprecated.NEWTON,
@ -118,7 +118,7 @@ class DeviceNova(abstract_driver.DeviceAbstractDriver):
return 'nova'
def get_description(self):
return 'Nuetron Device Nova driver'
return 'VNF Nova driver'
@staticmethod
def _safe_pop(d, name_list):
@ -160,7 +160,7 @@ class DeviceNova(abstract_driver.DeviceAbstractDriver):
LOG.debug(_('port %s'), port)
return port['id']
def create(self, plugin, context, device):
def create(self, plugin, context, vnf):
# typical required arguments are
# 'name': name string
# 'image': uuid
@ -169,27 +169,27 @@ class DeviceNova(abstract_driver.DeviceAbstractDriver):
# for details, see the signature of
# novaclient.v<version>.servers.SeverManager.create()
LOG.debug(_('device %s'), device)
LOG.debug(_('vnf %s'), vnf)
# flavor and image are specially treated by novaclient
attributes = device['device_template']['attributes'].copy()
attributes.update(device['kwargs'])
attributes = vnf['vnfd']['attributes'].copy()
attributes.update(vnf['kwargs'])
name = self._safe_pop(attributes, ('name', ))
if name is None:
# TODO(yamahata): appropriate way to generate instance name
name = (__name__ + ':' + self.__class__.__name__ + '-' +
device['id'])
vnf['id'])
image = self._safe_pop(attributes, ('image', 'imageRef'))
flavor = self._safe_pop(attributes, ('flavor', 'flavorRef'))
files = plugin.mgmt_get_config(context, device)
files = plugin.mgmt_get_config(context, vnf)
if files:
attributes[_FILES] = files
LOG.debug(_('service_context: %s'), device.get('service_context', []))
tenant_id = device['tenant_id']
LOG.debug(_('service_context: %s'), vnf.get('service_context', []))
tenant_id = vnf['tenant_id']
nics = []
for sc_entry in device.get('service_context', []):
for sc_entry in vnf.get('service_context', []):
LOG.debug(_('sc_entry: %s'), sc_entry)
# nova API doesn't return tacker port_id.
@ -227,9 +227,9 @@ class DeviceNova(abstract_driver.DeviceAbstractDriver):
instance = nova.servers.create(name, image, flavor, **attributes)
return instance.id
def create_wait(self, plugin, context, device_dict, device_id):
def create_wait(self, plugin, context, vnf_dict, vnf_id):
nova = self._nova_client()
instance = nova.servers.get(device_id)
instance = nova.servers.get(vnf_id)
status = instance.status
# TODO(yamahata): timeout and error
while status == 'BUILD':
@ -240,38 +240,38 @@ class DeviceNova(abstract_driver.DeviceAbstractDriver):
LOG.debug(_('status: %s'), status)
if status == 'ERROR':
raise RuntimeError(_("creation of server %s faild") % device_id)
raise RuntimeError(_("creation of server %s faild") % vnf_id)
def update(self, plugin, context, device_id, device_dict, device):
def update(self, plugin, context, vnf_id, vnf_dict, vnf):
# do nothing but check whether the instance exists at the moment
nova = self._nova_client()
nova.servers.get(device_id)
nova.servers.get(vnf_id)
def update_wait(self, plugin, context, device_id):
def update_wait(self, plugin, context, vnf_id):
# do nothing but check whether the instance exists at the moment
nova = self._nova_client()
nova.servers.get(device_id)
nova.servers.get(vnf_id)
def delete(self, plugin, context, device_id):
def delete(self, plugin, context, vnf_id):
nova = self._nova_client()
try:
instance = nova.servers.get(device_id)
instance = nova.servers.get(vnf_id)
except self._novaclient.exceptions.NotFound:
LOG.error(_LE("server %s is not found") %
device_id)
vnf_id)
return
instance.delete()
def delete_wait(self, plugin, context, device_id):
def delete_wait(self, plugin, context, vnf_id):
nova = self._nova_client()
# TODO(yamahata): timeout and error
while True:
try:
instance = nova.servers.get(device_id)
instance = nova.servers.get(vnf_id)
LOG.debug(_('instance status %s'), instance.status)
except self._novaclient.exceptions.NotFound:
break
if instance.status == 'ERROR':
raise RuntimeError(_("deletion of server %s faild") %
device_id)
vnf_id)
time.sleep(5)

View File

@ -20,7 +20,7 @@ from oslo_serialization import jsonutils
import six
from tacker.api import extensions
from tacker.vm import constants
from tacker.vnfm import constants
@six.add_metaclass(abc.ABCMeta)
@ -28,7 +28,7 @@ class DeviceMGMTAbstractDriver(extensions.PluginInterface):
@abc.abstractmethod
def get_type(self):
"""Return one of predefined type of the hosting device drivers."""
"""Return one of predefined type of the hosting vnf drivers."""
pass
@abc.abstractmethod
@ -40,29 +40,29 @@ class DeviceMGMTAbstractDriver(extensions.PluginInterface):
def get_description(self):
pass
def mgmt_create_pre(self, plugin, context, device):
def mgmt_create_pre(self, plugin, context, vnf):
pass
def mgmt_create_post(self, plugin, context, device):
def mgmt_create_post(self, plugin, context, vnf):
pass
def mgmt_update_pre(self, plugin, context, device):
def mgmt_update_pre(self, plugin, context, vnf):
pass
def mgmt_update_post(self, plugin, context, device):
def mgmt_update_post(self, plugin, context, vnf):
pass
def mgmt_delete_pre(self, plugin, context, device):
def mgmt_delete_pre(self, plugin, context, vnf):
pass
def mgmt_delete_post(self, plugin, context, device):
def mgmt_delete_post(self, plugin, context, vnf):
pass
def mgmt_get_config(self, plugin, context, device):
def mgmt_get_config(self, plugin, context, vnf):
"""Get a dict of objects.
Returns dict of file-like objects which will be passed to hosting
device.
vnf.
How it is used depends on the driver.
For the nova case, it can be used for metadata, file injection or the
config drive
@ -75,17 +75,17 @@ class DeviceMGMTAbstractDriver(extensions.PluginInterface):
return {}
@abc.abstractmethod
def mgmt_url(self, plugin, context, device):
def mgmt_url(self, plugin, context, vnf):
pass
@abc.abstractmethod
def mgmt_call(self, plugin, context, device, kwargs):
def mgmt_call(self, plugin, context, vnf, kwargs):
pass
class DeviceMGMTByNetwork(DeviceMGMTAbstractDriver):
def mgmt_url(self, plugin, context, device):
mgmt_entries = [sc_entry for sc_entry in device.service_context
def mgmt_url(self, plugin, context, vnf):
mgmt_entries = [sc_entry for sc_entry in vnf.service_context
if (sc_entry.role == constants.ROLE_MGMT and
sc_entry.port_id)]
if not mgmt_entries:

View File

@ -19,9 +19,9 @@ KEY_ACTION = 'action'
KEY_KWARGS = 'kwargs'
# ACTION type
ACTION_CREATE_DEVICE = 'create_device'
ACTION_UPDATE_DEVICE = 'update_device'
ACTION_DELETE_DEVICE = 'delete_device'
ACTION_CREATE_VNF = 'create_vnf'
ACTION_UPDATE_VNF = 'update_vnf'
ACTION_DELETE_VNF = 'delete_vnf'
ACTION_CREATE_SERVICE = 'create_service'
ACTION_UPDATE_SERVICE = 'update_service'
ACTION_DELETE_SERVICE = 'delete_service'
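A minimal sketch, not part of this change, of a custom mgmt driver dispatching on the renamed action constants; the class name, driver name and log message are hypothetical, while the method signatures and the 'vnf' kwarg follow the abstract mgmt driver and plugin code shown earlier in this patch.
from oslo_log import log as logging

from tacker.vnfm.mgmt_drivers import abstract_driver
from tacker.vnfm.mgmt_drivers import constants as mgmt_constants

LOG = logging.getLogger(__name__)


class VNFMgmtExample(abstract_driver.DeviceMGMTAbstractDriver):
    def get_type(self):
        return 'example'

    def get_name(self):
        return 'example'

    def get_description(self):
        return 'Example VNF mgmt driver (sketch)'

    def mgmt_url(self, plugin, context, vnf):
        # 'vnf' replaces the old 'device' argument
        return vnf.get('mgmt_url', '')

    def mgmt_call(self, plugin, context, vnf, kwargs):
        # the plugin now sends ACTION_*_VNF constants and a 'vnf' kwarg
        if (kwargs[mgmt_constants.KEY_ACTION] !=
                mgmt_constants.ACTION_UPDATE_VNF):
            return
        LOG.debug('applying config to vnf %s',
                  kwargs[mgmt_constants.KEY_KWARGS]['vnf']['id'])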

View File

@ -16,7 +16,7 @@
from oslo_log import log as logging
from tacker.vm.mgmt_drivers import abstract_driver
from tacker.vnfm.mgmt_drivers import abstract_driver
LOG = logging.getLogger(__name__)
@ -30,12 +30,12 @@ class DeviceMgmtNoop(abstract_driver.DeviceMGMTAbstractDriver):
return 'noop'
def get_description(self):
return 'Tacker DeviceMgmt Noop Driver'
return 'Tacker VNFMgmt Noop Driver'
def mgmt_url(self, plugin, context, device):
LOG.debug(_('mgmt_url %s'), device)
def mgmt_url(self, plugin, context, vnf):
LOG.debug(_('mgmt_url %s'), vnf)
return 'noop-mgmt-url'
def mgmt_call(self, plugin, context, device, kwargs):
LOG.debug(_('mgmt_device_call %(device)s %(kwargs)s'),
{'device': device, 'kwargs': kwargs})
def mgmt_call(self, plugin, context, vnf, kwargs):
LOG.debug(_('mgmt_call %(vnf)s %(kwargs)s'),
{'vnf': vnf, 'kwargs': kwargs})

View File

@ -22,8 +22,8 @@ import yaml
from tacker.common import cmd_executer
from tacker.common.exceptions import MgmtDriverException
from tacker.common import log
from tacker.vm.mgmt_drivers import abstract_driver
from tacker.vm.mgmt_drivers import constants as mgmt_constants
from tacker.vnfm.mgmt_drivers import abstract_driver
from tacker.vnfm.mgmt_drivers import constants as mgmt_constants
LOG = logging.getLogger(__name__)
@ -46,11 +46,11 @@ class DeviceMgmtOpenWRT(abstract_driver.DeviceMGMTAbstractDriver):
return 'openwrt'
def get_description(self):
return 'Tacker DeviceMgmt OpenWRT Driver'
return 'Tacker VNFMgmt OpenWRT Driver'
def mgmt_url(self, plugin, context, device):
LOG.debug(_('mgmt_url %s'), device)
return device.get('mgmt_url', '')
def mgmt_url(self, plugin, context, vnf):
LOG.debug(_('mgmt_url %s'), vnf)
return vnf.get('mgmt_url', '')
@log.log
def _config_service(self, mgmt_ip_address, service, config):
@ -68,13 +68,13 @@ class DeviceMgmtOpenWRT(abstract_driver.DeviceMGMTAbstractDriver):
raise MgmtDriverException()
@log.log
def mgmt_call(self, plugin, context, device, kwargs):
def mgmt_call(self, plugin, context, vnf, kwargs):
if (kwargs[mgmt_constants.KEY_ACTION] !=
mgmt_constants.ACTION_UPDATE_DEVICE):
mgmt_constants.ACTION_UPDATE_VNF):
return
dev_attrs = device.get('attributes', {})
dev_attrs = vnf.get('attributes', {})
mgmt_url = jsonutils.loads(device.get('mgmt_url', '{}'))
mgmt_url = jsonutils.loads(vnf.get('mgmt_url', '{}'))
if not mgmt_url:
return

View File

@ -28,7 +28,7 @@ import six
from tacker.common import clients
from tacker.common import driver_manager
from tacker import context as t_context
from tacker.vm.infra_drivers.heat import heat
from tacker.vnfm.infra_drivers.heat import heat
LOG = logging.getLogger(__name__)
@ -49,7 +49,7 @@ class VNFMonitor(object):
"""VNF Monitor."""
_instance = None
_hosting_vnfs = dict() # device_id => dict of parameters
_hosting_vnfs = dict() # vnf_id => dict of parameters
_status_check_intvl = 0
_lock = threading.RLock()
@ -91,32 +91,32 @@ class VNFMonitor(object):
self.run_monitor(hosting_vnf)
@staticmethod
def to_hosting_vnf(device_dict, action_cb):
def to_hosting_vnf(vnf_dict, action_cb):
return {
'id': device_dict['id'],
'id': vnf_dict['id'],
'management_ip_addresses': jsonutils.loads(
device_dict['mgmt_url']),
vnf_dict['mgmt_url']),
'action_cb': action_cb,
'device': device_dict,
'vnf': vnf_dict,
'monitoring_policy': jsonutils.loads(
device_dict['attributes']['monitoring_policy'])
vnf_dict['attributes']['monitoring_policy'])
}
def add_hosting_vnf(self, new_device):
def add_hosting_vnf(self, new_vnf):
LOG.debug('Adding host %(id)s, Mgmt IP %(ips)s',
{'id': new_device['id'],
'ips': new_device['management_ip_addresses']})
new_device['boot_at'] = timeutils.utcnow()
{'id': new_vnf['id'],
'ips': new_vnf['management_ip_addresses']})
new_vnf['boot_at'] = timeutils.utcnow()
with self._lock:
self._hosting_vnfs[new_device['id']] = new_device
self._hosting_vnfs[new_vnf['id']] = new_vnf
def delete_hosting_vnf(self, device_id):
LOG.debug('deleting device_id %(device_id)s', {'device_id': device_id})
def delete_hosting_vnf(self, vnf_id):
LOG.debug('deleting vnf_id %(vnf_id)s', {'vnf_id': vnf_id})
with self._lock:
hosting_vnf = self._hosting_vnfs.pop(device_id, None)
hosting_vnf = self._hosting_vnfs.pop(vnf_id, None)
if hosting_vnf:
LOG.debug('deleting device_id %(device_id)s, Mgmt IP %(ips)s',
{'device_id': device_id,
LOG.debug('deleting vnf_id %(vnf_id)s, Mgmt IP %(ips)s',
{'vnf_id': vnf_id,
'ips': hosting_vnf['management_ip_addresses']})
def run_monitor(self, hosting_vnf):
@ -146,7 +146,7 @@ class VNFMonitor(object):
params['mgmt_ip'] = mgmt_ips[vdu]
driver_return = self.monitor_call(driver,
hosting_vnf['device'],
hosting_vnf['vnf'],
params)
LOG.debug('driver_return %s', driver_return)
@ -155,32 +155,32 @@ class VNFMonitor(object):
action = actions[driver_return]
hosting_vnf['action_cb'](hosting_vnf, action)
def mark_dead(self, device_id):
self._hosting_vnfs[device_id]['dead'] = True
def mark_dead(self, vnf_id):
self._hosting_vnfs[vnf_id]['dead'] = True
def _invoke(self, driver, **kwargs):
method = inspect.stack()[1][3]
return self._monitor_manager.invoke(
driver, method, **kwargs)
def monitor_get_config(self, device_dict):
def monitor_get_config(self, vnf_dict):
return self._invoke(
device_dict, monitor=self, device=device_dict)
vnf_dict, monitor=self, vnf=vnf_dict)
def monitor_url(self, device_dict):
def monitor_url(self, vnf_dict):
return self._invoke(
device_dict, monitor=self, device=device_dict)
vnf_dict, monitor=self, vnf=vnf_dict)
def monitor_call(self, driver, device_dict, kwargs):
def monitor_call(self, driver, vnf_dict, kwargs):
return self._invoke(driver,
device=device_dict, kwargs=kwargs)
vnf=vnf_dict, kwargs=kwargs)
@six.add_metaclass(abc.ABCMeta)
class ActionPolicy(object):
@classmethod
@abc.abstractmethod
def execute_action(cls, plugin, device_dict):
def execute_action(cls, plugin, vnf_dict):
pass
_POLICIES = {}
@ -193,11 +193,11 @@ class ActionPolicy(object):
return _register
@classmethod
def get_policy(cls, policy, device):
def get_policy(cls, policy, vnf):
action_clses = cls._POLICIES.get(policy)
if not action_clses:
return None
infra_driver = device['device_template'].get('infra_driver')
infra_driver = vnf['vnfd'].get('infra_driver')
cls = action_clses.get(infra_driver)
if cls:
return cls
@ -211,17 +211,17 @@ class ActionPolicy(object):
@ActionPolicy.register('respawn')
class ActionRespawn(ActionPolicy):
@classmethod
def execute_action(cls, plugin, device_dict):
LOG.error(_('device %s dead'), device_dict['id'])
if plugin._mark_device_dead(device_dict['id']):
plugin._vnf_monitor.mark_dead(device_dict['id'])
def execute_action(cls, plugin, vnf_dict):
LOG.error(_('vnf %s dead'), vnf_dict['id'])
if plugin._mark_vnf_dead(vnf_dict['id']):
plugin._vnf_monitor.mark_dead(vnf_dict['id'])
attributes = device_dict['attributes'].copy()
attributes['dead_device_id'] = device_dict['id']
new_device = {'attributes': attributes}
for key in ('tenant_id', 'template_id', 'name'):
new_device[key] = device_dict[key]
LOG.debug(_('new_device %s'), new_device)
attributes = vnf_dict['attributes'].copy()
attributes['dead_vnf_id'] = vnf_dict['id']
new_vnf = {'attributes': attributes}
for key in ('tenant_id', 'vnfd_id', 'name'):
new_vnf[key] = vnf_dict[key]
LOG.debug(_('new_vnf %s'), new_vnf)
# keystone v2.0 specific
authtoken = CONF.keystone_authtoken
@ -233,54 +233,54 @@ class ActionRespawn(ActionPolicy):
context.auth_token = token['id']
context.tenant_id = token['tenant_id']
context.user_id = token['user_id']
new_device_dict = plugin.create_device(context,
{'device': new_device})
LOG.info(_('respawned new device %s'), new_device_dict['id'])
new_vnf_dict = plugin.create_vnf(context,
{'vnf': new_vnf})
LOG.info(_('respawned new vnf %s'), new_vnf_dict['id'])
@ActionPolicy.register('respawn', 'heat')
class ActionRespawnHeat(ActionPolicy):
@classmethod
def execute_action(cls, plugin, device_dict, auth_attr):
device_id = device_dict['id']
LOG.error(_('device %s dead'), device_id)
if plugin._mark_device_dead(device_dict['id']):
plugin._vnf_monitor.mark_dead(device_dict['id'])
attributes = device_dict['attributes']
def execute_action(cls, plugin, vnf_dict, auth_attr):
vnf_id = vnf_dict['id']
LOG.error(_('vnf %s dead'), vnf_id)
if plugin._mark_vnf_dead(vnf_dict['id']):
plugin._vnf_monitor.mark_dead(vnf_dict['id'])
attributes = vnf_dict['attributes']
failure_count = int(attributes.get('failure_count', '0')) + 1
failure_count_str = str(failure_count)
attributes['failure_count'] = failure_count_str
attributes['dead_instance_id_' + failure_count_str] = device_dict[
attributes['dead_instance_id_' + failure_count_str] = vnf_dict[
'instance_id']
placement_attr = device_dict.get('placement_attr', {})
placement_attr = vnf_dict.get('placement_attr', {})
region_name = placement_attr.get('region_name')
# kill heat stack
heatclient = heat.HeatClient(auth_attr=auth_attr,
region_name=region_name)
heatclient.delete(device_dict['instance_id'])
heatclient.delete(vnf_dict['instance_id'])
# TODO(anyone) set the current request ctxt instead of admin ctxt
context = t_context.get_admin_context()
update_device_dict = plugin.create_device_sync(context,
device_dict)
plugin.config_device(context, update_device_dict)
plugin.add_device_to_monitor(update_device_dict, auth_attr)
update_vnf_dict = plugin.create_vnf_sync(context,
vnf_dict)
plugin.config_vnf(context, update_vnf_dict)
plugin.add_vnf_to_monitor(update_vnf_dict, auth_attr)
@ActionPolicy.register('log')
class ActionLogOnly(ActionPolicy):
@classmethod
def execute_action(cls, plugin, device_dict):
device_id = device_dict['id']
LOG.error(_('device %s dead'), device_id)
def execute_action(cls, plugin, vnf_dict):
vnf_id = vnf_dict['id']
LOG.error(_('vnf %s dead'), vnf_id)
@ActionPolicy.register('log_and_kill')
class ActionLogAndKill(ActionPolicy):
@classmethod
def execute_action(cls, plugin, device_dict):
device_id = device_dict['id']
if plugin._mark_device_dead(device_dict['id']):
plugin._vnf_monitor.mark_dead(device_dict['id'])
plugin.delete_device(t_context.get_admin_context(), device_id)
LOG.error(_('device %s dead'), device_id)
def execute_action(cls, plugin, vnf_dict):
vnf_id = vnf_dict['id']
if plugin._mark_vnf_dead(vnf_dict['id']):
plugin._vnf_monitor.mark_dead(vnf_dict['id'])
plugin.delete_vnf(t_context.get_admin_context(), vnf_id)
LOG.error(_('vnf %s dead'), vnf_id)
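A minimal sketch, not part of this change, of how an additional failure policy would be registered against the renamed monitor hooks; the policy name 'notify' and the log message are hypothetical, while the register() decorator and the vnf_dict argument follow the code above.
from oslo_log import log as logging

from tacker.vnfm import monitor

LOG = logging.getLogger(__name__)


@monitor.ActionPolicy.register('notify')
class ActionNotify(monitor.ActionPolicy):
    @classmethod
    def execute_action(cls, plugin, vnf_dict):
        # vnf_dict replaces the old device_dict argument after this rename
        LOG.warning('vnf %s reported a monitoring event', vnf_dict['id'])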

View File

@ -25,7 +25,7 @@ class VNFMonitorAbstractDriver(extensions.PluginInterface):
@abc.abstractmethod
def get_type(self):
"""Return one of predefined type of the hosting device drivers."""
"""Return one of predefined type of the hosting vnf drivers."""
pass
@abc.abstractmethod
@ -38,45 +38,45 @@ class VNFMonitorAbstractDriver(extensions.PluginInterface):
"""Return description of VNF Monitor plugin."""
pass
def monitor_get_config(self, plugin, context, device):
def monitor_get_config(self, plugin, context, vnf):
"""Return dict of monitor configuration data.
:param plugin:
:param context:
:param device:
:param vnf:
:returns: dict
:returns: dict of monitor configuration data
"""
return {}
@abc.abstractmethod
def monitor_url(self, plugin, context, device):
"""Return the url of device to monitor.
def monitor_url(self, plugin, context, vnf):
"""Return the url of vnf to monitor.
:param plugin:
:param context:
:param device:
:param vnf:
:returns: string
:returns: url of device to monitor
:returns: url of vnf to monitor
"""
pass
@abc.abstractmethod
def monitor_call(self, device, kwargs):
def monitor_call(self, vnf, kwargs):
"""Monitor.
Return boolean value True if VNF is healthy
or return an event string like 'failure' or 'calls-capacity-reached'
for specific VNF health condition.
:param device:
:param vnf:
:param kwargs:
:returns: boolean
:returns: True if VNF is healthy
"""
pass
def monitor_service_driver(self, plugin, context, device,
def monitor_service_driver(self, plugin, context, vnf,
service_instance):
# use same monitor driver to communicate with service
return self.get_name()

View File

@ -19,7 +19,7 @@ import six.moves.urllib.request as urlreq
from tacker._i18n import _LW
from tacker.common import log
from tacker.vm.monitor_drivers import abstract_driver
from tacker.vnfm.monitor_drivers import abstract_driver
LOG = logging.getLogger(__name__)
@ -48,9 +48,9 @@ class VNFMonitorHTTPPing(abstract_driver.VNFMonitorAbstractDriver):
def get_description(self):
return 'Tacker HTTP Ping Driver for VNF'
def monitor_url(self, plugin, context, device):
LOG.debug(_('monitor_url %s'), device)
return device.get('monitor_url', '')
def monitor_url(self, plugin, context, vnf):
LOG.debug(_('monitor_url %s'), vnf)
return vnf.get('monitor_url', '')
def _is_pingable(self, mgmt_ip='', retry=5, timeout=5, port=80, **kwargs):
"""Checks whether the server is reachable by using urllib.
@ -74,7 +74,7 @@ class VNFMonitorHTTPPing(abstract_driver.VNFMonitorAbstractDriver):
return 'failure'
@log.log
def monitor_call(self, device, kwargs):
def monitor_call(self, vnf, kwargs):
if not kwargs['mgmt_ip']:
return

View File

@ -18,7 +18,7 @@ from oslo_log import log as logging
from tacker._i18n import _LW
from tacker.agent.linux import utils as linux_utils
from tacker.common import log
from tacker.vm.monitor_drivers import abstract_driver
from tacker.vnfm.monitor_drivers import abstract_driver
LOG = logging.getLogger(__name__)
@ -47,9 +47,9 @@ class VNFMonitorPing(abstract_driver.VNFMonitorAbstractDriver):
def get_description(self):
return 'Tacker VNFMonitor Ping Driver'
def monitor_url(self, plugin, context, device):
LOG.debug(_('monitor_url %s'), device)
return device.get('monitor_url', '')
def monitor_url(self, plugin, context, vnf):
LOG.debug(_('monitor_url %s'), vnf)
return vnf.get('monitor_url', '')
def _is_pingable(self, mgmt_ip="", count=5, timeout=1, interval='0.2',
**kwargs):
@ -75,7 +75,7 @@ class VNFMonitorPing(abstract_driver.VNFMonitorAbstractDriver):
return 'failure'
@log.log
def monitor_call(self, device, kwargs):
def monitor_call(self, vnf, kwargs):
if not kwargs['mgmt_ip']:
return

View File