OpenStack Networking (Neutron)

securitygroups_db.py 40KB

# Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api import validators
from neutron_lib.callbacks import events
from neutron_lib.callbacks import exceptions
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context as context_lib
from neutron_lib.db import api as db_api
from neutron_lib.db import model_query
from neutron_lib.db import resource_extend
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
from neutron_lib.utils import helpers
from neutron_lib.utils import net
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
from sqlalchemy.orm import scoped_session

from neutron._i18n import _
from neutron.common import _constants as const
from neutron.db.models import securitygroup as sg_models
from neutron.db import rbac_db_mixin as rbac_mixin
from neutron.extensions import securitygroup as ext_sg
from neutron.objects import base as base_obj
from neutron.objects import ports as port_obj
from neutron.objects import securitygroup as sg_obj

LOG = logging.getLogger(__name__)

@resource_extend.has_resource_extenders
@registry.has_registry_receivers
class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
                           rbac_mixin.RbacPluginMixin):
    """Mixin class to add security group to db_base_plugin_v2."""

    __native_bulk_support = True

    def create_security_group_bulk(self, context, security_groups):
        return self._create_bulk('security_group', context,
                                 security_groups)

    def _registry_notify(self, res, event, id=None, exc_cls=None, **kwargs):
        # NOTE(armax): a callback exception here will prevent the request
        # from being processed. This is a hook point for backend's validation;
        # we raise to propagate the reason for the failure.
        try:
            if 'payload' in kwargs:
                # TODO(boden): remove shim once all callbacks use payloads
                registry.publish(res, event, self, payload=kwargs['payload'])
            else:
                registry.notify(res, event, self, **kwargs)
        except exceptions.CallbackFailure as e:
            if exc_cls:
                reason = (_('cannot perform %(event)s due to %(reason)s') %
                          {'event': event, 'reason': e})
                raise exc_cls(reason=reason, id=id)

    @db_api.retry_if_session_inactive()
    def create_security_group(self, context, security_group, default_sg=False):
        """Create security group.

        If default_sg is true we are creating the default security group for
        the given tenant if it does not already exist.
        """
        s = security_group['security_group']
        kwargs = {
            'context': context,
            'security_group': s,
            'is_default': default_sg,
        }

        self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE,
                              exc_cls=ext_sg.SecurityGroupConflict,
                              payload=events.DBEventPayload(
                                  context, metadata={'is_default': default_sg},
                                  request_body=security_group,
                                  desired_state=s))

        tenant_id = s['tenant_id']

        if not default_sg:
            self._ensure_default_security_group(context, tenant_id)
        else:
            existing_def_sg_id = self._get_default_sg_id(context, tenant_id)
            if existing_def_sg_id is not None:
                # default already exists, return it
                return self.get_security_group(context, existing_def_sg_id)

        with db_api.CONTEXT_WRITER.using(context):
            sg = sg_obj.SecurityGroup(
                context, id=s.get('id') or uuidutils.generate_uuid(),
                description=s['description'], project_id=tenant_id,
                name=s['name'], is_default=default_sg)
            sg.create()

            for ethertype in ext_sg.sg_supported_ethertypes:
                if default_sg:
                    # Allow intercommunication
                    ingress_rule = sg_obj.SecurityGroupRule(
                        context, id=uuidutils.generate_uuid(),
                        project_id=tenant_id, security_group_id=sg.id,
                        direction='ingress', ethertype=ethertype,
                        remote_group_id=sg.id)
                    ingress_rule.create()
                    sg.rules.append(ingress_rule)

                egress_rule = sg_obj.SecurityGroupRule(
                    context, id=uuidutils.generate_uuid(),
                    project_id=tenant_id, security_group_id=sg.id,
                    direction='egress', ethertype=ethertype)
                egress_rule.create()
                sg.rules.append(egress_rule)
            sg.obj_reset_changes(['rules'])

            # fetch sg from db to load the sg rules with sg model.
            sg = sg_obj.SecurityGroup.get_object(context, id=sg.id)
            secgroup_dict = self._make_security_group_dict(sg)
            kwargs['security_group'] = secgroup_dict
            self._registry_notify(resources.SECURITY_GROUP,
                                  events.PRECOMMIT_CREATE,
                                  exc_cls=ext_sg.SecurityGroupConflict,
                                  **kwargs)

        registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
                        **kwargs)
        return secgroup_dict

    @db_api.retry_if_session_inactive()
    def get_security_groups(self, context, filters=None, fields=None,
                            sorts=None, limit=None,
                            marker=None, page_reverse=False, default_sg=False):
        # If default_sg is True do not call _ensure_default_security_group()
        # so this can be done recursively. Context.tenant_id is checked
        # because all the unit tests do not explicitly set the context on
        # GETS. TODO(arosen) context handling can probably be improved here.
        filters = filters or {}
        if not default_sg and context.tenant_id:
            tenant_id = filters.get('tenant_id')
            if tenant_id:
                tenant_id = tenant_id[0]
            else:
                tenant_id = context.tenant_id
            self._ensure_default_security_group(context, tenant_id)

        pager = base_obj.Pager(
            sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse)

        sg_objs = sg_obj.SecurityGroup.get_objects(
            context, _pager=pager, validate_filters=False,
            fields=fields, **filters)
        return [self._make_security_group_dict(obj, fields) for obj in sg_objs]

    @db_api.retry_if_session_inactive()
    def get_security_groups_count(self, context, filters=None):
        filters = filters or {}
        return sg_obj.SecurityGroup.count(
            context, validate_filters=False, **filters)

    @db_api.retry_if_session_inactive()
    def get_security_group(self, context, id, fields=None, tenant_id=None):
        """Tenant id is given to handle the case when creating a security
        group rule on behalf of another user.
        """
        if tenant_id:
            tmp_context_tenant_id = context.tenant_id
            context.tenant_id = tenant_id

        try:
            with db_api.CONTEXT_READER.using(context):
                ret = self._make_security_group_dict(self._get_security_group(
                    context, id,
                    fields=fields),
                    fields)
                if (fields is None or len(fields) == 0 or
                        'security_group_rules' in fields):
                    rules = self.get_security_group_rules(
                        context_lib.get_admin_context(),
                        {'security_group_id': [id]})
                    ret['security_group_rules'] = rules
        finally:
            if tenant_id:
                context.tenant_id = tmp_context_tenant_id
        return ret

    def _get_security_group(self, context, id, fields=None):
        sg = sg_obj.SecurityGroup.get_object(context, fields=fields, id=id)
        if sg is None:
            raise ext_sg.SecurityGroupNotFound(id=id)
        return sg

    def _check_security_group(self, context, id, tenant_id=None):
        if tenant_id:
            tmp_context_tenant_id = context.tenant_id
            context.tenant_id = tenant_id

        try:
            if not sg_obj.SecurityGroup.objects_exist(context, id=id):
                raise ext_sg.SecurityGroupNotFound(id=id)
        finally:
            if tenant_id:
                context.tenant_id = tmp_context_tenant_id

    @db_api.retry_if_session_inactive()
    def delete_security_group(self, context, id):
        filters = {'security_group_id': [id]}
        with db_api.CONTEXT_READER.using(context):
            ports = self._get_port_security_group_bindings(context, filters)
            if ports:
                raise ext_sg.SecurityGroupInUse(id=id)
            # confirm security group exists
            sg = self._get_security_group(context, id, fields=['id', 'name'])

            if sg['name'] == 'default' and not context.is_admin:
                raise ext_sg.SecurityGroupCannotRemoveDefault()

        kwargs = {
            'context': context,
            'security_group_id': id,
            'security_group': sg,
        }
        self._registry_notify(resources.SECURITY_GROUP,
                              events.BEFORE_DELETE,
                              exc_cls=ext_sg.SecurityGroupInUse, id=id,
                              payload=events.DBEventPayload(
                                  context, states=(sg,), resource_id=id))

        with db_api.CONTEXT_WRITER.using(context):
            # pass security_group_rule_ids to ensure
            # consistency with deleted rules
            # get security_group_bindings and security_group one more time
            # so that they will be attached for session where sg will be
            # deleted
            ports = self._get_port_security_group_bindings(context, filters)
            sg = self._get_security_group(context, id)
            kwargs['security_group_rule_ids'] = [r['id'] for r in sg.rules]
            kwargs['security_group'] = self._make_security_group_dict(sg)
            self._registry_notify(resources.SECURITY_GROUP,
                                  events.PRECOMMIT_DELETE,
                                  exc_cls=ext_sg.SecurityGroupInUse, id=id,
                                  **kwargs)
            sg.delete()

        kwargs.pop('security_group')
        registry.notify(resources.SECURITY_GROUP, events.AFTER_DELETE,
                        self, **kwargs)

    @db_api.retry_if_session_inactive()
    def update_security_group(self, context, id, security_group):
        s = security_group['security_group']

        kwargs = {
            'context': context,
            'security_group_id': id,
            'security_group': s,
        }
        self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_UPDATE,
                              exc_cls=ext_sg.SecurityGroupConflict, **kwargs)

        with db_api.CONTEXT_WRITER.using(context):
            sg = self._get_security_group(context, id)
            if sg.name == 'default' and 'name' in s:
                raise ext_sg.SecurityGroupCannotUpdateDefault()
            sg_dict = self._make_security_group_dict(sg)
            kwargs['original_security_group'] = sg_dict
            sg.update_fields(s)
            sg.update()
            sg_dict = self._make_security_group_dict(sg)
            kwargs['security_group'] = sg_dict
            self._registry_notify(
                resources.SECURITY_GROUP,
                events.PRECOMMIT_UPDATE,
                exc_cls=ext_sg.SecurityGroupConflict,
                payload=events.DBEventPayload(
                    context, request_body=s,
                    states=(kwargs['original_security_group'],),
                    resource_id=id, desired_state=sg_dict))

        registry.notify(resources.SECURITY_GROUP, events.AFTER_UPDATE, self,
                        **kwargs)
        return sg_dict

    def _make_security_group_dict(self, security_group, fields=None):
        res = {'id': security_group['id'],
               'name': security_group['name'],
               'tenant_id': security_group['tenant_id'],
               'description': security_group['description']}
        if security_group.rules:
            res['security_group_rules'] = [
                self._make_security_group_rule_dict(r.db_obj)
                for r in security_group.rules
            ]
        else:
            res['security_group_rules'] = []
        resource_extend.apply_funcs(ext_sg.SECURITYGROUPS, res,
                                    security_group.db_obj)
        return db_utils.resource_fields(res, fields)

    @staticmethod
    def _make_security_group_binding_dict(security_group, fields=None):
        res = {'port_id': security_group['port_id'],
               'security_group_id': security_group['security_group_id']}
        return db_utils.resource_fields(res, fields)

    @db_api.retry_if_session_inactive()
    def _create_port_security_group_binding(self, context, port_id,
                                            security_group_id):
        with db_api.CONTEXT_WRITER.using(context):
            db = sg_models.SecurityGroupPortBinding(
                port_id=port_id, security_group_id=security_group_id)
            context.session.add(db)

    def _get_port_security_group_bindings(self, context,
                                          filters=None, fields=None):
        return model_query.get_collection(
            context, sg_models.SecurityGroupPortBinding,
            self._make_security_group_binding_dict,
            filters=filters, fields=fields)

    @db_api.retry_if_session_inactive()
    def _delete_port_security_group_bindings(self, context, port_id):
        with db_api.CONTEXT_WRITER.using(context):
            query = model_query.query_with_hooks(
                context, sg_models.SecurityGroupPortBinding)
            bindings = query.filter(
                sg_models.SecurityGroupPortBinding.port_id == port_id)
            for binding in bindings:
                context.session.delete(binding)

    @db_api.retry_if_session_inactive()
    def create_security_group_rule_bulk(self, context, security_group_rules):
        return self._create_bulk('security_group_rule', context,
                                 security_group_rules)

    @db_api.retry_if_session_inactive()
    def create_security_group_rule_bulk_native(self, context,
                                               security_group_rules):
        rules = security_group_rules['security_group_rules']
        scoped_session(context.session)
        security_group_id = self._validate_security_group_rules(
            context, security_group_rules)
        with db_api.CONTEXT_WRITER.using(context):
            self._check_for_duplicate_rules(context, security_group_id, rules)
            ret = []
            for rule_dict in rules:
                res_rule_dict = self._create_security_group_rule(
                    context, rule_dict, validate=False)
                ret.append(res_rule_dict)
        for rdict in ret:
            registry.notify(
                resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, self,
                context=context, security_group_rule=rdict)
        return ret

    @db_api.retry_if_session_inactive()
    def create_security_group_rule(self, context, security_group_rule):
        res = self._create_security_group_rule(context, security_group_rule)
        registry.notify(
            resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, self,
            context=context, security_group_rule=res)
        return res

    def _create_security_group_rule(self, context, security_group_rule,
                                    validate=True):
        if validate:
            sg_id = self._validate_security_group_rule(context,
                                                       security_group_rule)
        rule_dict = security_group_rule['security_group_rule']
        remote_ip_prefix = rule_dict.get('remote_ip_prefix')
        if remote_ip_prefix:
            remote_ip_prefix = net.AuthenticIPNetwork(remote_ip_prefix)

        protocol = rule_dict.get('protocol')
        if protocol:
            # object expects strings only
            protocol = six.text_type(protocol)

        args = {
            'id': (rule_dict.get('id') or uuidutils.generate_uuid()),
            'project_id': rule_dict['tenant_id'],
            'security_group_id': rule_dict['security_group_id'],
            'direction': rule_dict['direction'],
            'remote_group_id': rule_dict.get('remote_group_id'),
            'ethertype': rule_dict['ethertype'],
            'protocol': protocol,
            'remote_ip_prefix': remote_ip_prefix,
            'description': rule_dict.get('description'),
        }

        port_range_min = self._safe_int(rule_dict['port_range_min'])
        if port_range_min is not None:
            args['port_range_min'] = port_range_min

        port_range_max = self._safe_int(rule_dict['port_range_max'])
        if port_range_max is not None:
            args['port_range_max'] = port_range_max

        kwargs = {
            'context': context,
            'security_group_rule': args
        }
        self._registry_notify(resources.SECURITY_GROUP_RULE,
                              events.BEFORE_CREATE,
                              exc_cls=ext_sg.SecurityGroupConflict, **kwargs)
        with db_api.CONTEXT_WRITER.using(context):
            if validate:
                self._check_for_duplicate_rules(context, sg_id,
                                                [security_group_rule])
            sg_rule = sg_obj.SecurityGroupRule(context, **args)
            sg_rule.create()

            # fetch sg_rule from db to load the sg rules with sg model
            # otherwise a DetachedInstanceError can occur for model extensions
            sg_rule = sg_obj.SecurityGroupRule.get_object(context,
                                                          id=sg_rule.id)
            res_rule_dict = self._make_security_group_rule_dict(sg_rule.db_obj)
            kwargs['security_group_rule'] = res_rule_dict
            self._registry_notify(
                resources.SECURITY_GROUP_RULE,
                events.PRECOMMIT_CREATE,
                exc_cls=ext_sg.SecurityGroupConflict, **kwargs)
        return res_rule_dict

    def _get_ip_proto_number(self, protocol):
        if protocol is None:
            return
        # According to bug 1381379, protocol is always set to string to avoid
        # problems with comparing int and string in PostgreSQL. Here this
        # string is converted to int to give an opportunity to use it as
        # before.
        if protocol in constants.IP_PROTOCOL_NAME_ALIASES:
            protocol = constants.IP_PROTOCOL_NAME_ALIASES[protocol]
        return int(constants.IP_PROTOCOL_MAP.get(protocol, protocol))

    def _get_ip_proto_name_and_num(self, protocol, ethertype=None):
        if protocol is None:
            return
        protocol = str(protocol)
        # Force all legacy IPv6 ICMP protocol names to be 'ipv6-icmp', and
        # protocol number 1 to be 58
        if ethertype == constants.IPv6:
            if protocol in const.IPV6_ICMP_LEGACY_PROTO_LIST:
                protocol = constants.PROTO_NAME_IPV6_ICMP
            elif protocol == str(constants.PROTO_NUM_ICMP):
                protocol = str(constants.PROTO_NUM_IPV6_ICMP)
        if protocol in constants.IP_PROTOCOL_MAP:
            return [protocol, str(constants.IP_PROTOCOL_MAP.get(protocol))]
        elif protocol in constants.IP_PROTOCOL_NUM_TO_NAME_MAP:
            return [constants.IP_PROTOCOL_NUM_TO_NAME_MAP.get(protocol),
                    protocol]
        return [protocol, protocol]

    def _safe_int(self, port_range):
        if port_range is None:
            return
        try:
            return int(port_range)
        except (ValueError, TypeError):
            msg = "port range must be an integer"
            raise n_exc.InvalidInput(error_message=msg)

    def _validate_port_range(self, rule):
        """Check that port_range is valid."""
        if rule['port_range_min'] is None and rule['port_range_max'] is None:
            return
        if not rule['protocol']:
            raise ext_sg.SecurityGroupProtocolRequiredWithPorts()
        ip_proto = self._get_ip_proto_number(rule['protocol'])
        # Not all firewall_driver support all these protocols,
        # but being strict here doesn't hurt.
        if (ip_proto in const.SG_PORT_PROTO_NUMS or
                ip_proto in const.SG_PORT_PROTO_NAMES):
            if rule['port_range_min'] == 0 or rule['port_range_max'] == 0:
                raise ext_sg.SecurityGroupInvalidPortValue(port=0)
            elif (rule['port_range_min'] is not None and
                  rule['port_range_max'] is not None and
                  rule['port_range_min'] <= rule['port_range_max']):
                # When min/max are the same it is just a single port
                pass
            else:
                raise ext_sg.SecurityGroupInvalidPortRange()
        elif ip_proto in [constants.PROTO_NUM_ICMP,
                          constants.PROTO_NUM_IPV6_ICMP]:
            for attr, field in [('port_range_min', 'type'),
                                ('port_range_max', 'code')]:
                if rule[attr] is not None and not (0 <= rule[attr] <= 255):
                    raise ext_sg.SecurityGroupInvalidIcmpValue(
                        field=field, attr=attr, value=rule[attr])
            if (rule['port_range_min'] is None and
                    rule['port_range_max'] is not None):
                raise ext_sg.SecurityGroupMissingIcmpType(
                    value=rule['port_range_max'])
        else:
            # Only the protocols above support ports, raise otherwise.
            if (rule['port_range_min'] is not None or
                    rule['port_range_max'] is not None):
                port_protocols = (
                    ', '.join(s.upper() for s in const.SG_PORT_PROTO_NAMES))
                raise ext_sg.SecurityGroupInvalidProtocolForPort(
                    protocol=ip_proto, valid_port_protocols=port_protocols)

    def _validate_ethertype_and_protocol(self, rule):
        """Check if given ethertype and protocol are valid or not"""
        if rule['protocol'] in [constants.PROTO_NAME_IPV6_ENCAP,
                                constants.PROTO_NAME_IPV6_FRAG,
                                constants.PROTO_NAME_IPV6_ICMP,
                                constants.PROTO_NAME_IPV6_ICMP_LEGACY,
                                constants.PROTO_NAME_IPV6_NONXT,
                                constants.PROTO_NAME_IPV6_OPTS,
                                constants.PROTO_NAME_IPV6_ROUTE,
                                str(constants.PROTO_NUM_IPV6_ENCAP),
                                str(constants.PROTO_NUM_IPV6_FRAG),
                                str(constants.PROTO_NUM_IPV6_ICMP),
                                str(constants.PROTO_NUM_IPV6_NONXT),
                                str(constants.PROTO_NUM_IPV6_OPTS),
                                str(constants.PROTO_NUM_IPV6_ROUTE)]:
            if rule['ethertype'] == constants.IPv4:
                raise ext_sg.SecurityGroupEthertypeConflictWithProtocol(
                    ethertype=rule['ethertype'], protocol=rule['protocol'])

    def _validate_single_tenant_and_group(self, security_group_rules):
        """Check that all rules belong to the same security group and tenant
        """
        sg_groups = set()
        tenants = set()
        for rule_dict in security_group_rules['security_group_rules']:
            rule = rule_dict['security_group_rule']
            sg_groups.add(rule['security_group_id'])
            if len(sg_groups) > 1:
                raise ext_sg.SecurityGroupNotSingleGroupRules()

            tenants.add(rule['tenant_id'])
            if len(tenants) > 1:
                raise ext_sg.SecurityGroupRulesNotSingleTenant()
        return sg_groups.pop()

    def _make_canonical_ipv6_icmp_protocol(self, rule):
        if rule.get('ethertype') == constants.IPv6:
            if rule.get('protocol') in const.IPV6_ICMP_LEGACY_PROTO_LIST:
                LOG.info('Project %(project)s added a security group rule '
                         'with legacy IPv6 ICMP protocol name %(protocol)s, '
                         '%(new_protocol)s should be used instead. It was '
                         'automatically converted.',
                         {'project': rule['tenant_id'],
                          'protocol': rule['protocol'],
                          'new_protocol': constants.PROTO_NAME_IPV6_ICMP})
                rule['protocol'] = constants.PROTO_NAME_IPV6_ICMP
            elif rule.get('protocol') == str(constants.PROTO_NUM_ICMP):
                LOG.info('Project %(project)s added a security group rule '
                         'with legacy IPv6 ICMP protocol number %(protocol)s, '
                         '%(new_protocol)s should be used instead. It was '
                         'automatically converted.',
                         {'project': rule['tenant_id'],
                          'protocol': rule['protocol'],
                          'new_protocol': str(constants.PROTO_NUM_IPV6_ICMP)})
                rule['protocol'] = str(constants.PROTO_NUM_IPV6_ICMP)

    def _validate_security_group_rule(self, context, security_group_rule):
        rule = security_group_rule['security_group_rule']
        self._make_canonical_ipv6_icmp_protocol(rule)
        self._validate_port_range(rule)
        self._validate_ip_prefix(rule)
        self._validate_ethertype_and_protocol(rule)

        if rule['remote_ip_prefix'] and rule['remote_group_id']:
            raise ext_sg.SecurityGroupRemoteGroupAndRemoteIpPrefix()

        remote_group_id = rule['remote_group_id']
        # Check that remote_group_id exists for tenant
        if remote_group_id:
            self._check_security_group(context, remote_group_id,
                                       tenant_id=rule['tenant_id'])

        security_group_id = rule['security_group_id']

        # Confirm that the tenant has permission
        # to add rules to this security group.
        self._check_security_group(context, security_group_id,
                                   tenant_id=rule['tenant_id'])
        return security_group_id

    def _validate_security_group_rules(self, context, security_group_rules):
        sg_id = self._validate_single_tenant_and_group(security_group_rules)
        for rule in security_group_rules['security_group_rules']:
            self._validate_security_group_rule(context, rule)
        return sg_id

    def _make_security_group_rule_dict(self, security_group_rule, fields=None):
        res = {'id': security_group_rule['id'],
               'tenant_id': security_group_rule['tenant_id'],
               'security_group_id': security_group_rule['security_group_id'],
               'ethertype': security_group_rule['ethertype'],
               'direction': security_group_rule['direction'],
               'protocol': security_group_rule['protocol'],
               'port_range_min': security_group_rule['port_range_min'],
               'port_range_max': security_group_rule['port_range_max'],
               'remote_ip_prefix': security_group_rule['remote_ip_prefix'],
               'remote_group_id': security_group_rule['remote_group_id']}

        resource_extend.apply_funcs(ext_sg.SECURITYGROUPRULES, res,
                                    security_group_rule)
        return db_utils.resource_fields(res, fields)

    def _rule_to_key(self, rule):
        def _normalize_rule_value(key, value):
            # This string is used as a placeholder for str(None), but shorter.
            none_char = '+'

            if key == 'remote_ip_prefix':
                all_address = ['0.0.0.0/0', '::/0', None]
                if value in all_address:
                    return none_char
            elif value is None:
                return none_char
            elif key == 'protocol':
                return str(self._get_ip_proto_name_and_num(
                    value, ethertype=rule.get('ethertype')))
            return str(value)

        comparison_keys = [
            'direction',
            'ethertype',
            'port_range_max',
            'port_range_min',
            'protocol',
            'remote_group_id',
            'remote_ip_prefix',
            'security_group_id'
        ]
        return '_'.join([_normalize_rule_value(x, rule.get(x))
                         for x in comparison_keys])

    def _check_for_duplicate_rules(self, context, security_group_id,
                                   new_security_group_rules):
        # First up, check for any duplicates in the new rules.
        new_rules_set = set()
        for i in new_security_group_rules:
            rule_key = self._rule_to_key(i['security_group_rule'])
            if rule_key in new_rules_set:
                raise ext_sg.DuplicateSecurityGroupRuleInPost(rule=i)
            new_rules_set.add(rule_key)

        # Now, let's make sure none of the new rules conflict with
        # existing rules; note that we do *not* store the db rules
        # in the set, as we assume they were already checked,
        # when added.
        sg = self.get_security_group(context, security_group_id)
        if sg:
            for i in sg['security_group_rules']:
                rule_key = self._rule_to_key(i)
                if rule_key in new_rules_set:
                    raise ext_sg.SecurityGroupRuleExists(rule_id=i.get('id'))

    def _validate_ip_prefix(self, rule):
        """Check that a valid cidr was specified as remote_ip_prefix

        No need to check that it is in fact an IP address as this is already
        validated by attribute validators.
        Check that rule ethertype is consistent with remote_ip_prefix ip type.
        Add mask to ip_prefix if absent (192.168.1.10 -> 192.168.1.10/32).
        """
        input_prefix = rule['remote_ip_prefix']
        if input_prefix:
            addr = netaddr.IPNetwork(input_prefix)
            # set input_prefix to always include the netmask:
            rule['remote_ip_prefix'] = str(addr)
            # check consistency of ethertype with addr version
            if rule['ethertype'] != "IPv%d" % (addr.version):
                raise ext_sg.SecurityGroupRuleParameterConflict(
                    ethertype=rule['ethertype'], cidr=input_prefix)

    @db_api.retry_if_session_inactive()
    def get_security_group_rules(self, context, filters=None, fields=None,
                                 sorts=None, limit=None, marker=None,
                                 page_reverse=False):
        filters = filters or {}
        pager = base_obj.Pager(
            sorts=sorts, marker=marker, limit=limit, page_reverse=page_reverse)

        rule_objs = sg_obj.SecurityGroupRule.get_objects(
            context, _pager=pager, validate_filters=False, **filters
        )
        return [
            self._make_security_group_rule_dict(obj.db_obj, fields)
            for obj in rule_objs
        ]

    @db_api.retry_if_session_inactive()
    def get_security_group_rules_count(self, context, filters=None):
        filters = filters or {}
        return sg_obj.SecurityGroupRule.count(
            context, validate_filters=False, **filters)

    @db_api.retry_if_session_inactive()
    def get_security_group_rule(self, context, id, fields=None):
        security_group_rule = self._get_security_group_rule(context, id)
        return self._make_security_group_rule_dict(
            security_group_rule.db_obj, fields)

    def _get_security_group_rule(self, context, id):
        sgr = sg_obj.SecurityGroupRule.get_object(context, id=id)
        if sgr is None:
            raise ext_sg.SecurityGroupRuleNotFound(id=id)
        return sgr

    @db_api.retry_if_session_inactive()
    def delete_security_group_rule(self, context, id):
        kwargs = {
            'context': context,
            'security_group_rule_id': id
        }
        self._registry_notify(resources.SECURITY_GROUP_RULE,
                              events.BEFORE_DELETE, id=id,
                              exc_cls=ext_sg.SecurityGroupRuleInUse, **kwargs)

        with db_api.CONTEXT_WRITER.using(context):
            sgr = self._get_security_group_rule(context, id)
            kwargs['security_group_id'] = sgr['security_group_id']
            self._registry_notify(resources.SECURITY_GROUP_RULE,
                                  events.PRECOMMIT_DELETE,
                                  exc_cls=ext_sg.SecurityGroupRuleInUse, id=id,
                                  **kwargs)
            sgr.delete()

        registry.notify(
            resources.SECURITY_GROUP_RULE, events.AFTER_DELETE, self,
            **kwargs)

    @staticmethod
    @resource_extend.extends([port_def.COLLECTION_NAME])
    def _extend_port_dict_security_group(port_res, port_db):
        # Security group bindings will be retrieved from the SQLAlchemy
        # model. As they're loaded eagerly with ports because of the
        # joined load they will not cause an extra query.
        if isinstance(port_db, port_obj.Port):
            port_res[ext_sg.SECURITYGROUPS] = port_db.security_group_ids
        else:
            security_group_ids = [sec_group_mapping['security_group_id'] for
                                  sec_group_mapping in port_db.security_groups]
            port_res[ext_sg.SECURITYGROUPS] = security_group_ids
        return port_res

    def _process_port_create_security_group(self, context, port,
                                            security_group_ids):
        if validators.is_attr_set(security_group_ids):
            for security_group_id in security_group_ids:
                self._create_port_security_group_binding(context, port['id'],
                                                         security_group_id)
        # Convert to list as a set might be passed here and
        # this has to be serialized
        port[ext_sg.SECURITYGROUPS] = (security_group_ids and
                                       list(security_group_ids) or [])

    def _get_default_sg_id(self, context, tenant_id):
        default_group = sg_obj.DefaultSecurityGroup.get_object(
            context,
            project_id=tenant_id,
        )
        if default_group:
            return default_group.security_group_id

    @registry.receives(resources.PORT, [events.BEFORE_CREATE,
                                        events.BEFORE_UPDATE])
    @registry.receives(resources.NETWORK, [events.BEFORE_CREATE])
    def _ensure_default_security_group_handler(self, resource, event, trigger,
                                               context, **kwargs):
        if event == events.BEFORE_UPDATE:
            tenant_id = kwargs['original_' + resource]['tenant_id']
        else:
            tenant_id = kwargs[resource]['tenant_id']
        if tenant_id:
            self._ensure_default_security_group(context, tenant_id)

    def _ensure_default_security_group(self, context, tenant_id):
        """Create a default security group if one doesn't exist.

        :returns: the default security group id for given tenant.
        """
        default_group_id = self._get_default_sg_id(context, tenant_id)
        if default_group_id:
            return default_group_id

        security_group = {
            'security_group':
                {'name': 'default',
                 'tenant_id': tenant_id,
                 'description': _('Default security group')}
        }
        return self.create_security_group(context, security_group,
                                          default_sg=True)['id']

    def _get_security_groups_on_port(self, context, port):
        """Check that all security groups on port belong to tenant.

        :returns: all security groups IDs on port belonging to tenant.
        """
        port = port['port']
        if not validators.is_attr_set(port.get(ext_sg.SECURITYGROUPS)):
            return
        if port.get('device_owner') and net.is_port_trusted(port):
            return

        port_sg = port.get(ext_sg.SECURITYGROUPS, [])
        tenant_id = port.get('tenant_id')

        sg_objs = sg_obj.SecurityGroup.get_objects(context, id=port_sg)

        valid_groups = set(
            g.id for g in sg_objs
            if (not tenant_id or g.tenant_id == tenant_id or
                sg_obj.SecurityGroup.is_shared_with_tenant(
                    context, g.id, tenant_id))
        )

        requested_groups = set(port_sg)
        port_sg_missing = requested_groups - valid_groups
        if port_sg_missing:
            raise ext_sg.SecurityGroupNotFound(id=', '.join(port_sg_missing))

        return list(requested_groups)

    def _ensure_default_security_group_on_port(self, context, port):
        # we don't apply security groups for dhcp, router
        port = port['port']
        if port.get('device_owner') and net.is_port_trusted(port):
            return
        port_sg = port.get(ext_sg.SECURITYGROUPS)
        if port_sg is None or not validators.is_attr_set(port_sg):
            port_project = port.get('tenant_id')
            default_sg = self._ensure_default_security_group(context,
                                                             port_project)
            port[ext_sg.SECURITYGROUPS] = [default_sg]

    def _check_update_deletes_security_groups(self, port):
        """Return True if port has a security_groups attribute and its value
        is either [] or not is_attr_set, otherwise return False
        """
        if (ext_sg.SECURITYGROUPS in port['port'] and
                not (validators.is_attr_set(
                    port['port'][ext_sg.SECURITYGROUPS]) and
                     port['port'][ext_sg.SECURITYGROUPS] != [])):
            return True
        return False

    def _check_update_has_security_groups(self, port):
        """Return True if port has a security_groups attribute set and it is
        not empty, or False otherwise.

        This method is called both for port create and port update.
        """
        if (ext_sg.SECURITYGROUPS in port['port'] and
                (validators.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and
                 port['port'][ext_sg.SECURITYGROUPS] != [])):
            return True
        return False

    def update_security_group_on_port(self, context, id, port,
                                      original_port, updated_port):
        """Update security groups on port.

        This method returns a flag which indicates whether a request
        notification is required; it does not perform the notification itself,
        because other changes to the port may also require notification.
        """
        need_notify = False
        port_updates = port['port']
        if (ext_sg.SECURITYGROUPS in port_updates and
                not helpers.compare_elements(
                    original_port.get(ext_sg.SECURITYGROUPS),
                    port_updates[ext_sg.SECURITYGROUPS])):
            # delete the port bindings and re-add them with the new groups
            port_updates[ext_sg.SECURITYGROUPS] = (
                self._get_security_groups_on_port(context, port))
            self._delete_port_security_group_bindings(context, id)
            self._process_port_create_security_group(
                context,
                updated_port,
                port_updates[ext_sg.SECURITYGROUPS])
            need_notify = True
        else:
            updated_port[ext_sg.SECURITYGROUPS] = (
                original_port[ext_sg.SECURITYGROUPS])
        return need_notify
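
The duplicate detection in _check_for_duplicate_rules relies on _rule_to_key collapsing equivalent rule values into one canonical string: for example, a remote_ip_prefix of '0.0.0.0/0', '::/0', or None all normalize to the same placeholder, so two rules that differ only in how "any address" is expressed are treated as the same rule. The following is a minimal standalone sketch of that normalization; it uses a simplified stand-in helper (no protocol canonicalization and no Neutron imports), not the module's own code.

    # Standalone illustration of the rule-key normalization used for duplicate
    # detection above; rule_to_key below is a simplified stand-in, not Neutron
    # code.
    COMPARISON_KEYS = ['direction', 'ethertype', 'port_range_max',
                       'port_range_min', 'protocol', 'remote_group_id',
                       'remote_ip_prefix', 'security_group_id']


    def rule_to_key(rule):
        def normalize(key, value):
            none_char = '+'  # placeholder for "unset / match everything"
            if key == 'remote_ip_prefix' and value in ('0.0.0.0/0', '::/0',
                                                       None):
                return none_char
            if value is None:
                return none_char
            return str(value)
        return '_'.join(normalize(k, rule.get(k)) for k in COMPARISON_KEYS)


    # Two requests that differ only in how "any address" is expressed produce
    # the same key, so the second would be rejected as a duplicate.
    rule_a = {'direction': 'ingress', 'ethertype': 'IPv4', 'protocol': 'tcp',
              'port_range_min': 22, 'port_range_max': 22,
              'remote_ip_prefix': '0.0.0.0/0', 'security_group_id': 'sg-1'}
    rule_b = dict(rule_a, remote_ip_prefix=None)
    assert rule_to_key(rule_a) == rule_to_key(rule_b)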