Make py file line endings unix style

The line endings were CRLF (dos/windows style). The standard is to use
UNIX style endings.
No other change was made to the file.
This is the result of running the following command in the dragonflow folder.

find ./ -name '*.py' -type f -exec dos2unix {} \;

Change-Id: Id4667317e03e52f2385b031b28e32bee765ea26d
This commit is contained in:
Hong Hui Xiao
2016-09-12 09:36:39 +08:00
parent 1c4290da02
commit 18f8353e80
7 changed files with 1446 additions and 1446 deletions

View File

@@ -1,128 +1,128 @@
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
from oslo_log import log
import ryu.app.ofctl.api as ofctl_api
from ryu.app.ofctl import service
from ryu.base import app_manager
import ryu.exception as ryu_exc
from dragonflow._i18n import _LE
from dragonflow.controller.common import constants as const
# Module logger shared by the mixin below.
LOG = log.getLogger("dragonflow.controller.ofswitch")
class OpenFlowSwitchMixin(object):
    """
    Mixin to provide a convenient way to use OpenFlow messages synchronously
    """

    def __init__(self, ryu_app):
        """Start the ofctl service app used to correlate request/replies.

        :param ryu_app: the Ryu application on whose behalf OpenFlow
                        messages are sent.
        """
        app_mgr = app_manager.AppManager.get_instance()
        self.ofctl_app = app_mgr.instantiate(service.OfctlService)
        self.ofctl_app.start()
        self._app = ryu_app

    def _send_msg(self, msg, reply_cls=None, reply_multi=False):
        """Send an OpenFlow message and synchronously wait for the reply.

        :returns: the reply message (a list of replies when reply_multi
                  is True), or None on error or timeout.
        """
        timeout_sec = 20  # TODO(heshan) should be configured in cfg file
        timeout = eventlet.timeout.Timeout(seconds=timeout_sec)
        result = None
        try:
            result = ofctl_api.send_msg(self._app, msg, reply_cls,
                                        reply_multi)
        except ryu_exc.RyuException as e:
            m = _LE("ofctl request %(request)s error %(error)s") % {
                "request": msg,
                "error": e,
            }
            LOG.error(_LE("exception occurred, %s"), m)
        except eventlet.timeout.Timeout as e:
            LOG.error(_LE("exception occurred, %s"), e)
        finally:
            # Always disarm the timer, whether or not it fired.
            timeout.cancel()
        LOG.debug("ofctl request %(request)s result %(result)s",
                  {"request": msg, "result": result})
        return result

    def _get_dp(self):
        """Return (datapath, ofproto, ofproto_parser) for the switch."""
        dp = self._app.datapath
        return dp, dp.ofproto, dp.ofproto_parser

    def dump_flows(self, table_id=None):
        """Return the flow entries of table_id (all tables when None)."""
        (dp, ofp, ofpp) = self._get_dp()
        if table_id is None:
            table_id = ofp.OFPTT_ALL
        msg = ofpp.OFPFlowStatsRequest(dp, table_id=table_id)
        replies = self._send_msg(msg,
                                 reply_cls=ofpp.OFPFlowStatsReply,
                                 reply_multi=True)
        if replies is None:
            LOG.error(_LE("_send_msg failed when dump_flows"))
            return []
        flows = []
        for rep in replies:
            flows += rep.body
        # %s already stringifies lazily; no explicit str() needed.
        LOG.debug("flows is: %s", flows)
        return flows

    def cleanup_flows(self, match_c, match_cmask):
        """Best-effort removal of all flows matching cookie/mask."""
        try:
            self.delete_flows(cookie=match_c, cookie_mask=match_cmask)
        except Exception as e:
            LOG.error(_LE("exception occurred when cleanup_flows %s"), e)

    @staticmethod
    def _match(ofpp, match, **match_kwargs):
        """Return match unchanged, or build an OFPMatch from kwargs."""
        if match is not None:
            return match
        return ofpp.OFPMatch(**match_kwargs)

    def delete_flows(
            self, table_id=None, strict=False, priority=0,
            cookie=0, cookie_mask=0, match=None, **match_kwargs):
        """Delete the matching flows (all tables when table_id is None)."""
        (dp, ofp, ofpp) = self._get_dp()
        if table_id is None:
            table_id = ofp.OFPTT_ALL
        match = self._match(ofpp, match, **match_kwargs)
        if strict:
            cmd = ofp.OFPFC_DELETE_STRICT
        else:
            cmd = ofp.OFPFC_DELETE
        msg = ofpp.OFPFlowMod(dp,
                              command=cmd,
                              cookie=cookie,
                              cookie_mask=cookie_mask,
                              table_id=table_id,
                              match=match,
                              priority=priority,
                              out_group=ofp.OFPG_ANY,
                              out_port=ofp.OFPP_ANY)
        self._send_msg(msg)

    def add_canary_flow(self, cookie):
        """Install the aging canary flow carrying the given cookie."""
        (dp, ofp, ofpp) = self._get_dp()
        msg = ofpp.OFPFlowMod(dp,
                              command=ofp.OFPFC_ADD,
                              cookie=cookie,
                              cookie_mask=const.GLOBAL_AGING_COOKIE_MASK,
                              table_id=const.CANARY_TABLE)
        self._send_msg(msg)

    def get_canary_flow(self):
        """Return the first canary flow, or None when none is installed."""
        canary_flow = self.dump_flows(table_id=const.CANARY_TABLE)
        if not canary_flow:
            return None
        return canary_flow[0]
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
from oslo_log import log
import ryu.app.ofctl.api as ofctl_api
from ryu.app.ofctl import service
from ryu.base import app_manager
import ryu.exception as ryu_exc
from dragonflow._i18n import _LE
from dragonflow.controller.common import constants as const
# Module logger shared by the mixin below.
LOG = log.getLogger("dragonflow.controller.ofswitch")
class OpenFlowSwitchMixin(object):
    """
    Mixin to provide a convenient way to use OpenFlow messages synchronously
    """
    def __init__(self, ryu_app):
        # Start an ofctl service app instance; ofctl_api.send_msg() uses
        # it to deliver replies back to us synchronously.
        app_mgr = app_manager.AppManager.get_instance()
        self.ofctl_app = app_mgr.instantiate(service.OfctlService)
        self.ofctl_app.start()
        self._app = ryu_app
    def _send_msg(self, msg, reply_cls=None, reply_multi=False):
        """Send an OpenFlow message and wait synchronously for the reply.

        Returns the reply (a list of replies when reply_multi is True),
        or None on error or timeout.
        """
        timeout_sec = 20  # TODO(heshan) should be configured in cfg file
        timeout = eventlet.timeout.Timeout(seconds=timeout_sec)
        result = None
        try:
            result = ofctl_api.send_msg(self._app, msg, reply_cls, reply_multi)
        except ryu_exc.RyuException as e:
            m = _LE("ofctl request %(request)s error %(error)s") % {
                "request": msg,
                "error": e,
            }
            LOG.error(_LE("exception occurred, %s"), m)
        except eventlet.timeout.Timeout as e:
            LOG.error(_LE("exception occurred, %s"), e)
        finally:
            # Disarm the timer whether or not it fired.
            timeout.cancel()
        LOG.debug("ofctl request %(request)s result %(result)s",
                  {"request": msg, "result": result})
        return result
    def _get_dp(self):
        """Return (datapath, ofproto, ofproto_parser) for the switch."""
        dp = self._app.datapath
        return dp, dp.ofproto, dp.ofproto_parser
    def dump_flows(self, table_id=None):
        """Return flow entries of table_id (all tables when None)."""
        (dp, ofp, ofpp) = self._get_dp()
        if table_id is None:
            table_id = ofp.OFPTT_ALL
        msg = ofpp.OFPFlowStatsRequest(dp, table_id=table_id)
        replies = self._send_msg(msg,
                                 reply_cls=ofpp.OFPFlowStatsReply,
                                 reply_multi=True)
        if replies is None:
            LOG.error(_LE("_send_msg failed when dump_flows"))
            return []
        flows = []
        for rep in replies:
            flows += rep.body
        LOG.debug("flows is: %s", str(flows))
        return flows
    def cleanup_flows(self, match_c, match_cmask):
        """Best-effort removal of all flows matching cookie/mask."""
        try:
            self.delete_flows(cookie=match_c, cookie_mask=match_cmask)
        except Exception as e:
            LOG.error(_LE("exception occurred when cleanup_flows %s"), e)
    @staticmethod
    def _match(ofpp, match, **match_kwargs):
        # Pass an explicit match through; otherwise build one from kwargs.
        if match is not None:
            return match
        return ofpp.OFPMatch(**match_kwargs)
    def delete_flows(
            self, table_id=None, strict=False, priority=0,
            cookie=0, cookie_mask=0, match=None, **match_kwargs):
        """Delete matching flows (all tables when table_id is None)."""
        (dp, ofp, ofpp) = self._get_dp()
        if table_id is None:
            table_id = ofp.OFPTT_ALL
        match = self._match(ofpp, match, **match_kwargs)
        if strict:
            cmd = ofp.OFPFC_DELETE_STRICT
        else:
            cmd = ofp.OFPFC_DELETE
        msg = ofpp.OFPFlowMod(dp,
                              command=cmd,
                              cookie=cookie,
                              cookie_mask=cookie_mask,
                              table_id=table_id,
                              match=match,
                              priority=priority,
                              out_group=ofp.OFPG_ANY,
                              out_port=ofp.OFPP_ANY)
        self._send_msg(msg)
    def add_canary_flow(self, cookie):
        """Install the aging canary flow carrying the given cookie."""
        (dp, ofp, ofpp) = self._get_dp()
        msg = ofpp.OFPFlowMod(dp,
                              command=ofp.OFPFC_ADD,
                              cookie=cookie,
                              cookie_mask=const.GLOBAL_AGING_COOKIE_MASK,
                              table_id=const.CANARY_TABLE)
        self._send_msg(msg)
    def get_canary_flow(self):
        """Return the first canary flow, or None when none is installed."""
        canary_flow = self.dump_flows(table_id=const.CANARY_TABLE)
        if len(canary_flow) == 0:
            return None
        return canary_flow[0]

View File

@@ -1,406 +1,406 @@
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron.agent.common import config
from oslo_log import log
from ryu.ofproto import ether
from dragonflow._i18n import _LI
from dragonflow.controller.common import constants as const
from dragonflow.controller import df_base_app
# Initialize oslo logging for this module.
config.setup_logging()
LOG = log.getLogger(__name__)
# ARP opcode for a request packet.
ARP_OP_TYPE_REQUEST = 1
# IP protocol number of UDP.
UDP_PROTOCOL_NUMBER = 17
# Standard DHCP UDP ports (client and server).
DHCP_CLIENT_PORT = 68
DHCP_SERVER_PORT = 67
class PortSecApp(df_base_app.DFlowApp):
    """Enforce port security on egress traffic.

    Only packets sourced from a port's own MAC/IP (or one of its
    allowed-address-pairs) may leave that port; everything else is
    dropped in the egress port-security table.
    """

    def _add_flow_drop(self, priority, match):
        """Install a drop flow (no instructions) in the egress table."""
        self.mod_flow(
            self.get_datapath(),
            inst=None,
            table_id=const.EGRESS_PORT_SECURITY_TABLE,
            priority=priority,
            match=match)

    def _get_allow_ip_mac_pairs(self, lport):
        """Return the ip/mac pairs allowed to send from lport.

        Combines the port's fixed IPs (each paired with the fixed MAC)
        with the configured allowed-address-pairs.
        """
        allowed_ip_mac_pairs = []
        fixed_ips = lport.get_ip_list()
        fixed_mac = lport.get_mac()
        if (fixed_ips is not None) and (fixed_mac is not None):
            for fixed_ip in fixed_ips:
                allowed_ip_mac_pairs.append(
                    {'ip_address': fixed_ip,
                     'mac_address': fixed_mac})
        allow_address_pairs = lport.get_allow_address_pairs()
        if allow_address_pairs is not None:
            allowed_ip_mac_pairs.extend(allow_address_pairs)
        return allowed_ip_mac_pairs

    def _get_allow_macs(self, lport):
        """Return the set of source MACs allowed for lport."""
        allowed_macs = set()
        fixed_mac = lport.get_mac()
        if fixed_mac is not None:
            allowed_macs.add(fixed_mac)
        allow_address_pairs = lport.get_allow_address_pairs()
        if allow_address_pairs is not None:
            for allow_address_pair in allow_address_pairs:
                allowed_macs.add(allow_address_pair['mac_address'])
        return allowed_macs

    def _install_flows_check_valid_ip_and_mac(self, datapath, ofport, ip, mac):
        """Allow IP and ARP traffic sourced from a valid ip/mac pair."""
        if netaddr.IPNetwork(ip).version == 6:
            LOG.info(_LI("IPv6 addresses are not supported yet"))
            return
        parser = datapath.ofproto_parser
        # Valid ip mac pair pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=mac,
                                eth_type=ether.ETH_TYPE_IP,
                                ipv4_src=ip)
        self.add_flow_go_to_table(datapath,
                                  const.EGRESS_PORT_SECURITY_TABLE,
                                  const.PRIORITY_HIGH,
                                  const.EGRESS_CONNTRACK_TABLE,
                                  match=match)
        # Valid arp request/reply pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=mac,
                                eth_type=ether.ETH_TYPE_ARP,
                                arp_spa=ip,
                                arp_sha=mac)
        self.add_flow_go_to_table(datapath,
                                  const.EGRESS_PORT_SECURITY_TABLE,
                                  const.PRIORITY_HIGH,
                                  const.SERVICES_CLASSIFICATION_TABLE,
                                  match=match)

    def _uninstall_flows_check_valid_ip_and_mac(self, datapath, ofport,
                                                ip, mac):
        """Remove the flows installed for a valid ip/mac pair."""
        if netaddr.IPNetwork(ip).version == 6:
            LOG.info(_LI("IPv6 addresses are not supported yet"))
            return
        parser = datapath.ofproto_parser
        # Remove valid ip mac pair pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=mac,
                                eth_type=ether.ETH_TYPE_IP,
                                ipv4_src=ip)
        self._remove_one_port_security_flow(datapath,
                                            const.PRIORITY_HIGH,
                                            match)
        # Remove valid arp request/reply pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=mac,
                                eth_type=ether.ETH_TYPE_ARP,
                                arp_spa=ip,
                                arp_sha=mac)
        self._remove_one_port_security_flow(datapath,
                                            const.PRIORITY_HIGH,
                                            match)

    def _install_flows_check_valid_mac(self, datapath, ofport, mac):
        """Allow non-IP/ARP traffic with a valid source MAC."""
        parser = datapath.ofproto_parser
        # Other packets with valid source mac pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=mac)
        self.add_flow_go_to_table(datapath,
                                  const.EGRESS_PORT_SECURITY_TABLE,
                                  const.PRIORITY_LOW,
                                  const.SERVICES_CLASSIFICATION_TABLE,
                                  match=match)

    def _uninstall_flows_check_valid_mac(self, datapath, ofport, mac):
        """Remove the valid-source-MAC pass flow."""
        parser = datapath.ofproto_parser
        # Remove other packets with valid source mac pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=mac)
        self._remove_one_port_security_flow(datapath,
                                            const.PRIORITY_LOW,
                                            match)

    def _install_flows_check_only_vm_mac(self, datapath, ofport, vm_mac):
        """Allow DHCP discovery and ARP probes carrying only the VM MAC.

        These packets legitimately have no usable source IP yet, so only
        the source MAC is validated.
        """
        parser = datapath.ofproto_parser
        # DHCP packets with the vm mac pass.  Use the module-level DHCP
        # port constants instead of repeating the magic numbers 68/67.
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=vm_mac,
                                eth_dst="ff:ff:ff:ff:ff:ff",
                                eth_type=ether.ETH_TYPE_IP,
                                ip_proto=UDP_PROTOCOL_NUMBER,
                                udp_src=DHCP_CLIENT_PORT,
                                udp_dst=DHCP_SERVER_PORT)
        self.add_flow_go_to_table(datapath,
                                  const.EGRESS_PORT_SECURITY_TABLE,
                                  const.PRIORITY_HIGH,
                                  const.EGRESS_CONNTRACK_TABLE,
                                  match=match)
        # Arp probe packets with the vm mac pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=vm_mac,
                                eth_type=ether.ETH_TYPE_ARP,
                                arp_op=ARP_OP_TYPE_REQUEST,
                                arp_spa=0,
                                arp_sha=vm_mac)
        self.add_flow_go_to_table(datapath,
                                  const.EGRESS_PORT_SECURITY_TABLE,
                                  const.PRIORITY_HIGH,
                                  const.SERVICES_CLASSIFICATION_TABLE,
                                  match=match)

    def _uninstall_flows_check_only_vm_mac(self, datapath, ofport, vm_mac):
        """Remove the DHCP / ARP-probe flows for the VM MAC."""
        parser = datapath.ofproto_parser
        # Remove DHCP packets with the vm mac pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=vm_mac,
                                eth_dst="ff:ff:ff:ff:ff:ff",
                                eth_type=ether.ETH_TYPE_IP,
                                ip_proto=UDP_PROTOCOL_NUMBER,
                                udp_src=DHCP_CLIENT_PORT,
                                udp_dst=DHCP_SERVER_PORT)
        self._remove_one_port_security_flow(datapath,
                                            const.PRIORITY_HIGH,
                                            match)
        # Remove arp probe packets with the vm mac pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=vm_mac,
                                eth_type=ether.ETH_TYPE_ARP,
                                arp_op=ARP_OP_TYPE_REQUEST,
                                arp_spa=0,
                                arp_sha=vm_mac)
        self._remove_one_port_security_flow(datapath,
                                            const.PRIORITY_HIGH,
                                            match)

    def _install_port_security_flows(self, datapath, lport):
        """Install all port-security flows for a secured lport."""
        ofport = lport.get_external_value('ofport')
        # install ip and mac check flows
        allowed_ip_mac_pairs = self._get_allow_ip_mac_pairs(lport)
        for ip_mac_pair in allowed_ip_mac_pairs:
            self._install_flows_check_valid_ip_and_mac(
                datapath, ofport, ip_mac_pair['ip_address'],
                ip_mac_pair['mac_address']
            )
        # install vm mac and allowed address pairs mac check flows
        allowed_macs = self._get_allow_macs(lport)
        for allowed_mac in allowed_macs:
            self._install_flows_check_valid_mac(
                datapath, ofport, allowed_mac
            )
        # install only vm mac check flows
        vm_mac = lport.get_mac()
        self._install_flows_check_only_vm_mac(datapath, ofport, vm_mac)

    def _update_port_security_flows(self, datapath, lport, original_lport):
        """Reconcile flows after an lport update (diff old vs. new)."""
        ofport = lport.get_external_value('ofport')
        # update ip and mac check flows
        added_ip_mac_pairs, removed_ip_mac_pairs = \
            self._get_added_and_removed_ip_mac_pairs(lport,
                                                     original_lport)
        for item in added_ip_mac_pairs:
            self._install_flows_check_valid_ip_and_mac(
                datapath, ofport, item['ip_address'],
                item['mac_address'])
        for item in removed_ip_mac_pairs:
            self._uninstall_flows_check_valid_ip_and_mac(
                datapath, ofport, item['ip_address'],
                item['mac_address'])
        # update vm mac and allowed address pairs mac check flows
        added_valid_macs, removed_valid_macs = \
            self._get_added_and_removed_valid_macs(lport,
                                                   original_lport)
        for item in added_valid_macs:
            self._install_flows_check_valid_mac(
                datapath, ofport, item)
        for item in removed_valid_macs:
            self._uninstall_flows_check_valid_mac(
                datapath, ofport, item)
        # update only vm mac check flows
        new_vm_mac = lport.get_mac()
        old_vm_mac = original_lport.get_mac()
        if new_vm_mac != old_vm_mac:
            self._install_flows_check_only_vm_mac(datapath, ofport,
                                                  new_vm_mac)
            self._uninstall_flows_check_only_vm_mac(datapath, ofport,
                                                    old_vm_mac)

    def _remove_one_port_security_flow(self, datapath, priority, match):
        """Strict-delete a single flow from the egress security table."""
        ofproto = datapath.ofproto
        self.mod_flow(datapath=datapath,
                      table_id=const.EGRESS_PORT_SECURITY_TABLE,
                      priority=priority,
                      match=match,
                      command=ofproto.OFPFC_DELETE_STRICT,
                      out_port=ofproto.OFPP_ANY,
                      out_group=ofproto.OFPG_ANY)

    def _uninstall_port_security_flows(self, datapath, lport):
        """Remove all port-security flows for a secured lport."""
        ofport = lport.get_external_value('ofport')
        # uninstall ip and mac check flows
        allowed_ip_mac_pairs = self._get_allow_ip_mac_pairs(lport)
        for ip_mac_pair in allowed_ip_mac_pairs:
            self._uninstall_flows_check_valid_ip_and_mac(
                datapath, ofport, ip_mac_pair['ip_address'],
                ip_mac_pair['mac_address']
            )
        # uninstall vm mac and allowed address pairs mac check flows
        allowed_macs = self._get_allow_macs(lport)
        for allowed_mac in allowed_macs:
            self._uninstall_flows_check_valid_mac(
                datapath, ofport, allowed_mac
            )
        # uninstall only vm mac check flows
        vm_mac = lport.get_mac()
        self._uninstall_flows_check_only_vm_mac(datapath, ofport, vm_mac)

    def _install_disable_flow(self, datapath, lport):
        """Bypass port security for an lport with security disabled."""
        ofport = lport.get_external_value('ofport')
        parser = datapath.ofproto_parser
        # Send packets to next table directly
        match = parser.OFPMatch(in_port=ofport)
        self.add_flow_go_to_table(datapath,
                                  const.EGRESS_PORT_SECURITY_TABLE,
                                  const.PRIORITY_HIGH,
                                  const.EGRESS_CONNTRACK_TABLE,
                                  match=match)

    def _uninstall_disable_flow(self, datapath, lport):
        """Remove the bypass flow installed by _install_disable_flow."""
        ofport = lport.get_external_value('ofport')
        parser = datapath.ofproto_parser
        # Remove send packets to next table directly
        match = parser.OFPMatch(in_port=ofport)
        self._remove_one_port_security_flow(datapath,
                                            const.PRIORITY_HIGH,
                                            match)

    def _subtract_lists(self, list1, list2):
        """Return (items only in list1, items only in list2)."""
        list1_subtract_list2 = [item for item in list1 if item not in list2]
        list2_subtract_list1 = [item for item in list2 if item not in list1]
        return list1_subtract_list2, list2_subtract_list1

    def _get_added_and_removed_ip_mac_pairs(self, lport, original_lport):
        """Return (added, removed) ip/mac pairs between port versions."""
        new_pairs = self._get_allow_ip_mac_pairs(lport)
        old_pairs = self._get_allow_ip_mac_pairs(original_lport)
        added_pairs, removed_pairs = self._subtract_lists(new_pairs, old_pairs)
        return added_pairs, removed_pairs

    def _get_added_and_removed_valid_macs(self, lport, original_lport):
        """Return (added, removed) valid MACs between port versions."""
        new_valid_macs = self._get_allow_macs(lport)
        old_valid_macs = self._get_allow_macs(original_lport)
        added_valid_macs, removed_valid_macs = \
            self._subtract_lists(new_valid_macs, old_valid_macs)
        return added_valid_macs, removed_valid_macs

    def switch_features_handler(self, ev):
        """Install the default drop flows when the switch connects."""
        datapath = self.get_datapath()
        if datapath is None:
            return
        parser = datapath.ofproto_parser
        # Ip default drop
        match = parser.OFPMatch(eth_type=ether.ETH_TYPE_IP)
        self._add_flow_drop(const.PRIORITY_MEDIUM, match)
        # Arp default drop
        match = parser.OFPMatch(eth_type=ether.ETH_TYPE_ARP)
        self._add_flow_drop(const.PRIORITY_MEDIUM, match)
        # Default drop
        self._add_flow_drop(const.PRIORITY_VERY_LOW, None)

    def add_local_port(self, lport):
        """Install flows for a newly added local port."""
        datapath = self.get_datapath()
        if datapath is None:
            return
        enable = lport.get_port_security_enable()
        if enable:
            self._install_port_security_flows(datapath, lport)
        else:
            self._install_disable_flow(datapath, lport)

    def update_local_port(self, lport, original_lport):
        """Reconcile flows when a local port is updated."""
        datapath = self.get_datapath()
        if datapath is None:
            return
        enable = lport.get_port_security_enable()
        original_enable = original_lport.get_port_security_enable()
        if enable:
            if original_enable:
                self._update_port_security_flows(datapath, lport,
                                                 original_lport)
            else:
                self._install_port_security_flows(datapath, lport)
                self._uninstall_disable_flow(datapath, original_lport)
        else:
            if original_enable:
                self._install_disable_flow(datapath, lport)
                self._uninstall_port_security_flows(datapath, original_lport)

    def remove_local_port(self, lport):
        """Remove the flows of a local port that went away."""
        datapath = self.get_datapath()
        if datapath is None:
            return
        enable = lport.get_port_security_enable()
        if enable:
            self._uninstall_port_security_flows(datapath, lport)
        else:
            self._uninstall_disable_flow(datapath, lport)
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron.agent.common import config
from oslo_log import log
from ryu.ofproto import ether
from dragonflow._i18n import _LI
from dragonflow.controller.common import constants as const
from dragonflow.controller import df_base_app
# Initialize oslo logging for this module.
config.setup_logging()
LOG = log.getLogger(__name__)
# ARP opcode for a request packet.
ARP_OP_TYPE_REQUEST = 1
# IP protocol number of UDP.
UDP_PROTOCOL_NUMBER = 17
# Standard DHCP UDP ports (client and server).
DHCP_CLIENT_PORT = 68
DHCP_SERVER_PORT = 67
class PortSecApp(df_base_app.DFlowApp):
    """Enforce port security on egress traffic.

    Only packets sourced from a port's own MAC/IP (or one of its
    allowed-address-pairs) may leave that port; everything else is
    dropped in the egress port-security table.
    """
    def _add_flow_drop(self, priority, match):
        """Install a drop flow (no instructions) in the egress table."""
        drop_inst = None
        self.mod_flow(
            self.get_datapath(),
            inst=drop_inst,
            table_id=const.EGRESS_PORT_SECURITY_TABLE,
            priority=priority,
            match=match)
    def _get_allow_ip_mac_pairs(self, lport):
        """Return the ip/mac pairs allowed to send from lport."""
        allowed_ip_mac_pairs = []
        fixed_ips = lport.get_ip_list()
        fixed_mac = lport.get_mac()
        if (fixed_ips is not None) and (fixed_mac is not None):
            for fixed_ip in fixed_ips:
                allowed_ip_mac_pairs.append(
                    {'ip_address': fixed_ip,
                     'mac_address': fixed_mac})
        allow_address_pairs = lport.get_allow_address_pairs()
        if allow_address_pairs is not None:
            allowed_ip_mac_pairs.extend(allow_address_pairs)
        return allowed_ip_mac_pairs
    def _get_allow_macs(self, lport):
        """Return the set of source MACs allowed for lport."""
        allowed_macs = set()
        fixed_mac = lport.get_mac()
        if fixed_mac is not None:
            allowed_macs.add(fixed_mac)
        allow_address_pairs = lport.get_allow_address_pairs()
        if allow_address_pairs is not None:
            for allow_address_pair in allow_address_pairs:
                allowed_macs.add(allow_address_pair['mac_address'])
        return allowed_macs
    def _install_flows_check_valid_ip_and_mac(self, datapath, ofport, ip, mac):
        """Allow IP and ARP traffic sourced from a valid ip/mac pair."""
        if netaddr.IPNetwork(ip).version == 6:
            LOG.info(_LI("IPv6 addresses are not supported yet"))
            return
        parser = datapath.ofproto_parser
        # Valid ip mac pair pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=mac,
                                eth_type=ether.ETH_TYPE_IP,
                                ipv4_src=ip)
        self.add_flow_go_to_table(datapath,
                                  const.EGRESS_PORT_SECURITY_TABLE,
                                  const.PRIORITY_HIGH,
                                  const.EGRESS_CONNTRACK_TABLE,
                                  match=match)
        # Valid arp request/reply pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=mac,
                                eth_type=ether.ETH_TYPE_ARP,
                                arp_spa=ip,
                                arp_sha=mac)
        self.add_flow_go_to_table(datapath,
                                  const.EGRESS_PORT_SECURITY_TABLE,
                                  const.PRIORITY_HIGH,
                                  const.SERVICES_CLASSIFICATION_TABLE,
                                  match=match)
    def _uninstall_flows_check_valid_ip_and_mac(self, datapath, ofport,
                                                ip, mac):
        """Remove the flows installed for a valid ip/mac pair."""
        if netaddr.IPNetwork(ip).version == 6:
            LOG.info(_LI("IPv6 addresses are not supported yet"))
            return
        parser = datapath.ofproto_parser
        # Remove valid ip mac pair pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=mac,
                                eth_type=ether.ETH_TYPE_IP,
                                ipv4_src=ip)
        self._remove_one_port_security_flow(datapath,
                                            const.PRIORITY_HIGH,
                                            match)
        # Remove valid arp request/reply pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=mac,
                                eth_type=ether.ETH_TYPE_ARP,
                                arp_spa=ip,
                                arp_sha=mac)
        self._remove_one_port_security_flow(datapath,
                                            const.PRIORITY_HIGH,
                                            match)
    def _install_flows_check_valid_mac(self, datapath, ofport, mac):
        """Allow non-IP/ARP traffic with a valid source MAC."""
        parser = datapath.ofproto_parser
        # Other packets with valid source mac pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=mac)
        self.add_flow_go_to_table(datapath,
                                  const.EGRESS_PORT_SECURITY_TABLE,
                                  const.PRIORITY_LOW,
                                  const.SERVICES_CLASSIFICATION_TABLE,
                                  match=match)
    def _uninstall_flows_check_valid_mac(self, datapath, ofport, mac):
        """Remove the valid-source-MAC pass flow."""
        parser = datapath.ofproto_parser
        # Remove other packets with valid source mac pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=mac)
        self._remove_one_port_security_flow(datapath,
                                            const.PRIORITY_LOW,
                                            match)
    def _install_flows_check_only_vm_mac(self, datapath, ofport, vm_mac):
        """Allow DHCP discovery and ARP probes carrying only the VM MAC."""
        parser = datapath.ofproto_parser
        # DHCP packets with the vm mac pass
        # NOTE(review): 68/67 duplicate the module-level DHCP_CLIENT_PORT
        # and DHCP_SERVER_PORT constants.
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=vm_mac,
                                eth_dst="ff:ff:ff:ff:ff:ff",
                                eth_type=ether.ETH_TYPE_IP,
                                ip_proto=UDP_PROTOCOL_NUMBER,
                                udp_src=68,
                                udp_dst=67)
        self.add_flow_go_to_table(datapath,
                                  const.EGRESS_PORT_SECURITY_TABLE,
                                  const.PRIORITY_HIGH,
                                  const.EGRESS_CONNTRACK_TABLE,
                                  match=match)
        # Arp probe packets with the vm mac pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=vm_mac,
                                eth_type=ether.ETH_TYPE_ARP,
                                arp_op=ARP_OP_TYPE_REQUEST,
                                arp_spa=0,
                                arp_sha=vm_mac)
        self.add_flow_go_to_table(datapath,
                                  const.EGRESS_PORT_SECURITY_TABLE,
                                  const.PRIORITY_HIGH,
                                  const.SERVICES_CLASSIFICATION_TABLE,
                                  match=match)
    def _uninstall_flows_check_only_vm_mac(self, datapath, ofport, vm_mac):
        """Remove the DHCP / ARP-probe flows for the VM MAC."""
        parser = datapath.ofproto_parser
        # Remove DHCP packets with the vm mac pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=vm_mac,
                                eth_dst="ff:ff:ff:ff:ff:ff",
                                eth_type=ether.ETH_TYPE_IP,
                                ip_proto=UDP_PROTOCOL_NUMBER,
                                udp_src=68,
                                udp_dst=67)
        self._remove_one_port_security_flow(datapath,
                                            const.PRIORITY_HIGH,
                                            match)
        # Remove arp probe packets with the vm mac pass
        match = parser.OFPMatch(in_port=ofport,
                                eth_src=vm_mac,
                                eth_type=ether.ETH_TYPE_ARP,
                                arp_op=ARP_OP_TYPE_REQUEST,
                                arp_spa=0,
                                arp_sha=vm_mac)
        self._remove_one_port_security_flow(datapath,
                                            const.PRIORITY_HIGH,
                                            match)
    def _install_port_security_flows(self, datapath, lport):
        """Install all port-security flows for a secured lport."""
        ofport = lport.get_external_value('ofport')
        # install ip and mac check flows
        allowed_ip_mac_pairs = self._get_allow_ip_mac_pairs(lport)
        for ip_mac_pair in allowed_ip_mac_pairs:
            self._install_flows_check_valid_ip_and_mac(
                datapath, ofport, ip_mac_pair['ip_address'],
                ip_mac_pair['mac_address']
            )
        # install vm mac and allowed address pairs mac check flows
        allowed_macs = self._get_allow_macs(lport)
        for allowed_mac in allowed_macs:
            self._install_flows_check_valid_mac(
                datapath, ofport, allowed_mac
            )
        # install only vm mac check flows
        vm_mac = lport.get_mac()
        self._install_flows_check_only_vm_mac(datapath, ofport, vm_mac)
    def _update_port_security_flows(self, datapath, lport, original_lport):
        """Reconcile flows after an lport update (diff old vs. new)."""
        ofport = lport.get_external_value('ofport')
        # update ip and mac check flows
        added_ip_mac_pairs, removed_ip_mac_pairs = \
            self._get_added_and_removed_ip_mac_pairs(lport,
                                                     original_lport)
        for item in added_ip_mac_pairs:
            self._install_flows_check_valid_ip_and_mac(
                datapath, ofport, item['ip_address'],
                item['mac_address'])
        for item in removed_ip_mac_pairs:
            self._uninstall_flows_check_valid_ip_and_mac(
                datapath, ofport, item['ip_address'],
                item['mac_address'])
        # update vm mac and allowed address pairs mac check flows
        added_valid_macs, removed_valid_macs = \
            self._get_added_and_removed_valid_macs(lport,
                                                   original_lport)
        for item in added_valid_macs:
            self._install_flows_check_valid_mac(
                datapath, ofport, item)
        for item in removed_valid_macs:
            self._uninstall_flows_check_valid_mac(
                datapath, ofport, item)
        # update only vm mac check flows
        new_vm_mac = lport.get_mac()
        old_vm_mac = original_lport.get_mac()
        if new_vm_mac != old_vm_mac:
            self._install_flows_check_only_vm_mac(datapath, ofport,
                                                  new_vm_mac)
            self._uninstall_flows_check_only_vm_mac(datapath, ofport,
                                                    old_vm_mac)
    def _remove_one_port_security_flow(self, datapath, priority, match):
        """Strict-delete a single flow from the egress security table."""
        ofproto = datapath.ofproto
        self.mod_flow(datapath=datapath,
                      table_id=const.EGRESS_PORT_SECURITY_TABLE,
                      priority=priority,
                      match=match,
                      command=ofproto.OFPFC_DELETE_STRICT,
                      out_port=ofproto.OFPP_ANY,
                      out_group=ofproto.OFPG_ANY)
    def _uninstall_port_security_flows(self, datapath, lport):
        """Remove all port-security flows for a secured lport."""
        ofport = lport.get_external_value('ofport')
        # uninstall ip and mac check flows
        allowed_ip_mac_pairs = self._get_allow_ip_mac_pairs(lport)
        for ip_mac_pair in allowed_ip_mac_pairs:
            self._uninstall_flows_check_valid_ip_and_mac(
                datapath, ofport, ip_mac_pair['ip_address'],
                ip_mac_pair['mac_address']
            )
        # uninstall vm mac and allowed address pairs mac check flows
        allowed_macs = self._get_allow_macs(lport)
        for allowed_mac in allowed_macs:
            self._uninstall_flows_check_valid_mac(
                datapath, ofport, allowed_mac
            )
        # uninstall only vm mac check flows
        vm_mac = lport.get_mac()
        self._uninstall_flows_check_only_vm_mac(datapath, ofport, vm_mac)
    def _install_disable_flow(self, datapath, lport):
        """Bypass port security for an lport with security disabled."""
        ofport = lport.get_external_value('ofport')
        parser = datapath.ofproto_parser
        # Send packets to next table directly
        match = parser.OFPMatch(in_port=ofport)
        self.add_flow_go_to_table(datapath,
                                  const.EGRESS_PORT_SECURITY_TABLE,
                                  const.PRIORITY_HIGH,
                                  const.EGRESS_CONNTRACK_TABLE,
                                  match=match)
    def _uninstall_disable_flow(self, datapath, lport):
        """Remove the bypass flow installed by _install_disable_flow."""
        ofport = lport.get_external_value('ofport')
        parser = datapath.ofproto_parser
        # Remove send packets to next table directly
        match = parser.OFPMatch(in_port=ofport)
        self._remove_one_port_security_flow(datapath,
                                            const.PRIORITY_HIGH,
                                            match)
    def _subtract_lists(self, list1, list2):
        """Return (items only in list1, items only in list2)."""
        list1_subtract_list2 = [item for item in list1 if item not in list2]
        list2_subtract_list1 = [item for item in list2 if item not in list1]
        return list1_subtract_list2, list2_subtract_list1
    def _get_added_and_removed_ip_mac_pairs(self, lport, original_lport):
        """Return (added, removed) ip/mac pairs between port versions."""
        new_pairs = self._get_allow_ip_mac_pairs(lport)
        old_pairs = self._get_allow_ip_mac_pairs(original_lport)
        added_pairs, removed_pairs = self._subtract_lists(new_pairs, old_pairs)
        return added_pairs, removed_pairs
    def _get_added_and_removed_valid_macs(self, lport, original_lport):
        """Return (added, removed) valid MACs between port versions."""
        new_valid_macs = self._get_allow_macs(lport)
        old_valid_macs = self._get_allow_macs(original_lport)
        added_valid_macs, removed_valid_macs = \
            self._subtract_lists(new_valid_macs, old_valid_macs)
        return added_valid_macs, removed_valid_macs
    def switch_features_handler(self, ev):
        """Install the default drop flows when the switch connects."""
        datapath = self.get_datapath()
        if datapath is None:
            return
        parser = datapath.ofproto_parser
        # Ip default drop
        match = parser.OFPMatch(eth_type=ether.ETH_TYPE_IP)
        self._add_flow_drop(const.PRIORITY_MEDIUM, match)
        # Arp default drop
        match = parser.OFPMatch(eth_type=ether.ETH_TYPE_ARP)
        self._add_flow_drop(const.PRIORITY_MEDIUM, match)
        # Default drop
        self._add_flow_drop(const.PRIORITY_VERY_LOW, None)
    def add_local_port(self, lport):
        """Install flows for a newly added local port."""
        datapath = self.get_datapath()
        if datapath is None:
            return
        enable = lport.get_port_security_enable()
        if enable:
            self._install_port_security_flows(datapath, lport)
        else:
            self._install_disable_flow(datapath, lport)
    def update_local_port(self, lport, original_lport):
        """Reconcile flows when a local port is updated."""
        datapath = self.get_datapath()
        if datapath is None:
            return
        enable = lport.get_port_security_enable()
        original_enable = original_lport.get_port_security_enable()
        if enable:
            if original_enable:
                self._update_port_security_flows(datapath, lport,
                                                 original_lport)
            else:
                self._install_port_security_flows(datapath, lport)
                self._uninstall_disable_flow(datapath, original_lport)
        else:
            if original_enable:
                self._install_disable_flow(datapath, lport)
                self._uninstall_port_security_flows(datapath, original_lport)
    def remove_local_port(self, lport):
        """Remove the flows of a local port that went away."""
        datapath = self.get_datapath()
        if datapath is None:
            return
        enable = lport.get_port_security_enable()
        if enable:
            self._uninstall_port_security_flows(datapath, lport)
        else:
            self._uninstall_disable_flow(datapath, lport)

View File

@@ -1,35 +1,35 @@
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Dragonflow versioned objects
Revision ID: d82aa951c391
Revises: f03c862d2645
Create Date: 2016-05-14 15:03:16.672521
"""
# revision identifiers, used by Alembic.
revision = 'd82aa951c391'
down_revision = 'f03c862d2645'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the dfversionobjects table (object_uuid PK, version counter)."""
    object_uuid_col = sa.Column('object_uuid', sa.String(36),
                                primary_key=True)
    version_col = sa.Column('version', sa.BigInteger, default=0)
    op.create_table('dfversionobjects', object_uuid_col, version_col)
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Dragonflow versioned objects
Revision ID: d82aa951c391
Revises: f03c862d2645
Create Date: 2016-05-14 15:03:16.672521
"""
# revision identifiers, used by Alembic.
revision = 'd82aa951c391'
down_revision = 'f03c862d2645'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``dfversionobjects`` table used for DF object versioning."""
    columns = (
        sa.Column('object_uuid', sa.String(36), primary_key=True),
        sa.Column('version', sa.BigInteger, default=0),
    )
    op.create_table('dfversionobjects', *columns)

View File

@@ -1,65 +1,65 @@
# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_log import log
from sqlalchemy.orm import exc as orm_exc
from dragonflow._i18n import _LW
from dragonflow.db.neutron import models
import sys
LOG = log.getLogger(__name__)
def _create_db_version_row(session, obj_id):
    """Insert a version row for *obj_id* starting at version 0.

    :param session: active SQLAlchemy session
    :param obj_id: UUID of the Neutron object being versioned
    :returns: 0 (the initial version), also when the row already exists

    A concurrent writer may have inserted the row first; in that case the
    DBDuplicateEntry is logged and 0 is returned, since the existing row
    also started at version 0.
    """
    try:
        row = models.DFVersionObjects(object_uuid=obj_id,
                                      version=0)
        session.add(row)
        session.flush()
        return 0
    except db_exc.DBDuplicateEntry:
        # Fix: the adjacent string literals previously concatenated to
        # "...whencreate version..." — a space was missing at the join.
        LOG.warning(_LW('DuplicateEntry in Neutron DB when '
                        'create version for object_id:%(id)s'), {'id': obj_id})
        return 0
def _update_db_version_row(session, obj_id):
    """Atomically bump the version counter for *obj_id*.

    :param session: active SQLAlchemy session
    :param obj_id: UUID of the Neutron object being versioned
    :returns: the new version number

    The counter wraps back to 0 when it would reach ``sys.maxsize``.
    If no row exists yet (NoResultFound), one is created at version 0
    via ``_create_db_version_row``.
    """
    try:
        row = session.query(models.DFVersionObjects).filter_by(
            object_uuid=obj_id).one()
        new_version = row.version + 1
        # Wrap around rather than overflow the BigInteger column.
        if new_version == sys.maxsize:
            new_version = 0
        row.version = new_version
        session.merge(row)
        session.flush()
        return new_version
    except orm_exc.NoResultFound:
        # Fix: the adjacent string literals previously concatenated to
        # "...whenupdate version..." — a space was missing at the join.
        LOG.warning(_LW('NoResultFound in Neutron DB when '
                        'update version for object_id:%(id)s'), {'id': obj_id})
        return _create_db_version_row(session, obj_id)
def _delete_db_version_row(session, obj_id):
    """Remove the version row for *obj_id*; a missing row is ignored."""
    query = session.query(models.DFVersionObjects)
    try:
        version_row = query.filter_by(object_uuid=obj_id).one()
    except orm_exc.NoResultFound:
        # Nothing to delete — the row never existed or is already gone.
        return
    session.delete(version_row)
    session.flush()
# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_log import log
from sqlalchemy.orm import exc as orm_exc
from dragonflow._i18n import _LW
from dragonflow.db.neutron import models
import sys
LOG = log.getLogger(__name__)
def _create_db_version_row(session, obj_id):
    """Insert a version row for *obj_id* starting at version 0.

    :param session: active SQLAlchemy session
    :param obj_id: UUID of the Neutron object being versioned
    :returns: 0 (the initial version), also when the row already exists

    A concurrent writer may have inserted the row first; in that case the
    DBDuplicateEntry is logged and 0 is returned, since the existing row
    also started at version 0.
    """
    try:
        row = models.DFVersionObjects(object_uuid=obj_id,
                                      version=0)
        session.add(row)
        session.flush()
        return 0
    except db_exc.DBDuplicateEntry:
        # Fix: the adjacent string literals previously concatenated to
        # "...whencreate version..." — a space was missing at the join.
        LOG.warning(_LW('DuplicateEntry in Neutron DB when '
                        'create version for object_id:%(id)s'), {'id': obj_id})
        return 0
def _update_db_version_row(session, obj_id):
    """Atomically bump the version counter for *obj_id*.

    :param session: active SQLAlchemy session
    :param obj_id: UUID of the Neutron object being versioned
    :returns: the new version number

    The counter wraps back to 0 when it would reach ``sys.maxsize``.
    If no row exists yet (NoResultFound), one is created at version 0
    via ``_create_db_version_row``.
    """
    try:
        row = session.query(models.DFVersionObjects).filter_by(
            object_uuid=obj_id).one()
        new_version = row.version + 1
        # Wrap around rather than overflow the BigInteger column.
        if new_version == sys.maxsize:
            new_version = 0
        row.version = new_version
        session.merge(row)
        session.flush()
        return new_version
    except orm_exc.NoResultFound:
        # Fix: the adjacent string literals previously concatenated to
        # "...whenupdate version..." — a space was missing at the join.
        LOG.warning(_LW('NoResultFound in Neutron DB when '
                        'update version for object_id:%(id)s'), {'id': obj_id})
        return _create_db_version_row(session, obj_id)
def _delete_db_version_row(session, obj_id):
    """Remove the version row for *obj_id*; a missing row is ignored."""
    query = session.query(models.DFVersionObjects)
    try:
        version_row = query.filter_by(object_uuid=obj_id).one()
    except orm_exc.NoResultFound:
        # Nothing to delete — the row never existed or is already gone.
        return
    session.delete(version_row)
    session.flush()

View File

@@ -1,188 +1,188 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent.linux.utils import wait_until_true
from dragonflow.db.drivers import ovsdb_vswitch_impl
from dragonflow.tests.common import utils
from dragonflow.tests.fullstack import test_base
from dragonflow.tests.fullstack import test_objects as objects
class TestOvsdbMonitor(test_base.DFTestBase):
    """Fullstack tests: OVSDB monitor events for VM interfaces must reach
    the NB API update queue when VMs are created and deleted.
    """

    def setUp(self):
        super(TestOvsdbMonitor, self).setUp()
        # MACs already observed online while waiting for several VMs at once
        # (accumulated across calls to _get_all_wanted_vms_online).
        self.set_wanted_vms = set()
        self.vswitch_api = ovsdb_vswitch_impl.OvsdbSwitchApi(
            self.local_ip, self.nb_api)
        self.vswitch_api.initialize()

    def tearDown(self):
        super(TestOvsdbMonitor, self).tearDown()

    def _check_wanted_vm_online(self, update, mac):
        """Return True if *update* reports the VM with *mac* fully online.

        Online means: an "ovsinterface" create/set event whose interface
        value is of type "vm", carries the expected attached MAC, has a
        non-None iface id, a valid (> 0) ofport and admin state "up".
        """
        if update.table != "ovsinterface":
            return False
        if update.action != "create" and update.action != "set":
            return False
        _interface = update.value
        if _interface is None:
            return False
        elif _interface.get_attached_mac() != mac:
            return False
        elif _interface.get_type() != "vm":
            return False
        elif _interface.get_iface_id() is None:
            return False
        elif _interface.get_ofport() <= 0:
            return False
        elif _interface.get_admin_state() != "up":
            return False
        else:
            return True

    def _check_wanted_vm_offline(self, update, mac):
        """Return True if *update* is a delete event for the VM with *mac*."""
        if update.table != "ovsinterface":
            return False
        if update.action != "delete":
            return False
        _interface = update.value
        if _interface is None:
            return False
        elif _interface.get_attached_mac() != mac:
            return False
        elif _interface.get_type() != "vm":
            return False
        elif _interface.get_iface_id() is None:
            return False
        else:
            return True

    def _get_vm_port_by_mac_address(self, mac):
        """Return the logical port whose MAC equals *mac*, or None."""
        lports = self.nb_api.get_all_logical_ports()
        for lport in lports:
            if lport.get_mac() == mac:
                return lport
        return None

    def _get_wanted_vm_online(self, mac):
        """Drain the NB API queue; True if an online event for *mac* appears.

        NOTE(review): reads the private self.nb_api._queue directly —
        presumably there is no public accessor; confirm against nb_api.
        """
        while self.nb_api._queue.qsize() > 0:
            self.next_update = self.nb_api._queue.get()
            if self._check_wanted_vm_online(self.next_update, mac):
                return True
        return False

    def _get_wanted_vm_offline(self, mac):
        """Drain the NB API queue; True if an offline event for *mac* appears."""
        while self.nb_api._queue.qsize() > 0:
            self.next_update = self.nb_api._queue.get()
            if self._check_wanted_vm_offline(self.next_update, mac):
                return True
        return False

    def _get_all_wanted_vms_online(self, mac1, mac2):
        """Drain the queue; True once online events for both MACs were seen.

        Seen MACs accumulate in self.set_wanted_vms between calls, so
        repeated invocation (e.g. under wait_until_true) makes progress
        even when the two events arrive in different drain rounds.
        """
        while self.nb_api._queue.qsize() > 0:
            self.next_update = self.nb_api._queue.get()
            if self._check_wanted_vm_online(self.next_update, mac1):
                self.set_wanted_vms.add(mac1)
                if len(self.set_wanted_vms) == 2:
                    return True
            elif self._check_wanted_vm_online(self.next_update, mac2):
                self.set_wanted_vms.add(mac2)
                if len(self.set_wanted_vms) == 2:
                    return True
            else:
                continue
        return False

    def test_notify_message(self):
        """Booting then deleting a VM must yield online/offline events and
        eventually remove the VM's logical port."""
        network = self.store(objects.NetworkTestObj(self.neutron, self.nb_api))
        network_id = network.create(network={'name': 'private'})
        subnet = self.store(objects.SubnetTestObj(self.neutron, self.nb_api,
                                                  network_id))
        subnet_body = {'network_id': network_id,
                       'cidr': '10.10.0.0/24',
                       'gateway_ip': '10.10.0.1',
                       'ip_version': 4,
                       'name': 'private',
                       'enable_dhcp': True}
        subnet.create(subnet=subnet_body)
        self.assertTrue(network.exists())
        self.assertTrue(subnet.exists())
        vm = self.store(objects.VMTestObj(self, self.neutron))
        vm.create(network=network)
        self.assertIsNotNone(vm.server.addresses['private'])
        mac = vm.server.addresses['private'][0]['OS-EXT-IPS-MAC:mac_addr']
        self.assertIsNotNone(mac)
        # Wait until we get the online event for the new VM.
        wait_until_true(
            lambda: self._get_wanted_vm_online(mac), timeout=30, sleep=1,
            exception=Exception('Could not get wanted online vm')
        )
        # Wait until we get the offline event after deleting the VM.
        vm.close()
        wait_until_true(
            lambda: self._get_wanted_vm_offline(mac), timeout=30, sleep=1,
            exception=Exception('Could not get wanted offline vm')
        )
        utils.wait_until_none(
            lambda: self._get_vm_port_by_mac_address(mac), timeout=30, sleep=1,
            exception=Exception('Port was not deleted')
        )

    def test_reply_message(self):
        """Two VMs booted on the same network must both be reported online,
        and both logical ports must disappear after deletion."""
        network = self.store(objects.NetworkTestObj(self.neutron, self.nb_api))
        network_id = network.create(network={'name': 'private'})
        subnet = self.store(objects.SubnetTestObj(self.neutron, self.nb_api,
                                                  network_id))
        subnet_body = {'network_id': network_id,
                       'cidr': '10.20.0.0/24',
                       'gateway_ip': '10.20.0.1',
                       'ip_version': 4,
                       'name': 'private',
                       'enable_dhcp': True}
        subnet.create(subnet=subnet_body)
        self.assertTrue(network.exists())
        self.assertTrue(subnet.exists())
        vm1 = self.store(objects.VMTestObj(self, self.neutron))
        vm1.create(network=network)
        self.assertIsNotNone(vm1.server.addresses['private'])
        mac1 = vm1.server.addresses['private'][0]['OS-EXT-IPS-MAC:mac_addr']
        self.assertIsNotNone(mac1)
        vm2 = self.store(objects.VMTestObj(self, self.neutron))
        vm2.create(network=network)
        self.assertIsNotNone(vm2.server.addresses['private'])
        mac2 = vm2.server.addresses['private'][0]['OS-EXT-IPS-MAC:mac_addr']
        self.assertIsNotNone(mac2)
        # Wait until online events for both VMs have been observed.
        self.set_wanted_vms.clear()
        wait_until_true(
            lambda: self._get_all_wanted_vms_online(mac1, mac2),
            timeout=30, sleep=1,
            exception=Exception('Could not get wanted online vm')
        )
        vm1.close()
        vm2.close()
        utils.wait_until_none(
            lambda: self._get_vm_port_by_mac_address(mac1),
            timeout=30, sleep=1,
            exception=Exception('Port was not deleted')
        )
        utils.wait_until_none(
            lambda: self._get_vm_port_by_mac_address(mac2),
            timeout=30, sleep=1,
            exception=Exception('Port was not deleted')
        )
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent.linux.utils import wait_until_true
from dragonflow.db.drivers import ovsdb_vswitch_impl
from dragonflow.tests.common import utils
from dragonflow.tests.fullstack import test_base
from dragonflow.tests.fullstack import test_objects as objects
class TestOvsdbMonitor(test_base.DFTestBase):
    """Fullstack tests: OVSDB monitor events for VM interfaces must reach
    the NB API update queue when VMs are created and deleted.
    """

    def setUp(self):
        super(TestOvsdbMonitor, self).setUp()
        # MACs already observed online while waiting for several VMs at once
        # (accumulated across calls to _get_all_wanted_vms_online).
        self.set_wanted_vms = set()
        self.vswitch_api = ovsdb_vswitch_impl.OvsdbSwitchApi(
            self.local_ip, self.nb_api)
        self.vswitch_api.initialize()

    def tearDown(self):
        super(TestOvsdbMonitor, self).tearDown()

    def _check_wanted_vm_online(self, update, mac):
        """Return True if *update* reports the VM with *mac* fully online.

        Online means: an "ovsinterface" create/set event whose interface
        value is of type "vm", carries the expected attached MAC, has a
        non-None iface id, a valid (> 0) ofport and admin state "up".
        """
        if update.table != "ovsinterface":
            return False
        if update.action != "create" and update.action != "set":
            return False
        _interface = update.value
        if _interface is None:
            return False
        elif _interface.get_attached_mac() != mac:
            return False
        elif _interface.get_type() != "vm":
            return False
        elif _interface.get_iface_id() is None:
            return False
        elif _interface.get_ofport() <= 0:
            return False
        elif _interface.get_admin_state() != "up":
            return False
        else:
            return True

    def _check_wanted_vm_offline(self, update, mac):
        """Return True if *update* is a delete event for the VM with *mac*."""
        if update.table != "ovsinterface":
            return False
        if update.action != "delete":
            return False
        _interface = update.value
        if _interface is None:
            return False
        elif _interface.get_attached_mac() != mac:
            return False
        elif _interface.get_type() != "vm":
            return False
        elif _interface.get_iface_id() is None:
            return False
        else:
            return True

    def _get_vm_port_by_mac_address(self, mac):
        """Return the logical port whose MAC equals *mac*, or None."""
        lports = self.nb_api.get_all_logical_ports()
        for lport in lports:
            if lport.get_mac() == mac:
                return lport
        return None

    def _get_wanted_vm_online(self, mac):
        """Drain the NB API queue; True if an online event for *mac* appears.

        NOTE(review): reads the private self.nb_api._queue directly —
        presumably there is no public accessor; confirm against nb_api.
        """
        while self.nb_api._queue.qsize() > 0:
            self.next_update = self.nb_api._queue.get()
            if self._check_wanted_vm_online(self.next_update, mac):
                return True
        return False

    def _get_wanted_vm_offline(self, mac):
        """Drain the NB API queue; True if an offline event for *mac* appears."""
        while self.nb_api._queue.qsize() > 0:
            self.next_update = self.nb_api._queue.get()
            if self._check_wanted_vm_offline(self.next_update, mac):
                return True
        return False

    def _get_all_wanted_vms_online(self, mac1, mac2):
        """Drain the queue; True once online events for both MACs were seen.

        Seen MACs accumulate in self.set_wanted_vms between calls, so
        repeated invocation (e.g. under wait_until_true) makes progress
        even when the two events arrive in different drain rounds.
        """
        while self.nb_api._queue.qsize() > 0:
            self.next_update = self.nb_api._queue.get()
            if self._check_wanted_vm_online(self.next_update, mac1):
                self.set_wanted_vms.add(mac1)
                if len(self.set_wanted_vms) == 2:
                    return True
            elif self._check_wanted_vm_online(self.next_update, mac2):
                self.set_wanted_vms.add(mac2)
                if len(self.set_wanted_vms) == 2:
                    return True
            else:
                continue
        return False

    def test_notify_message(self):
        """Booting then deleting a VM must yield online/offline events and
        eventually remove the VM's logical port."""
        network = self.store(objects.NetworkTestObj(self.neutron, self.nb_api))
        network_id = network.create(network={'name': 'private'})
        subnet = self.store(objects.SubnetTestObj(self.neutron, self.nb_api,
                                                  network_id))
        subnet_body = {'network_id': network_id,
                       'cidr': '10.10.0.0/24',
                       'gateway_ip': '10.10.0.1',
                       'ip_version': 4,
                       'name': 'private',
                       'enable_dhcp': True}
        subnet.create(subnet=subnet_body)
        self.assertTrue(network.exists())
        self.assertTrue(subnet.exists())
        vm = self.store(objects.VMTestObj(self, self.neutron))
        vm.create(network=network)
        self.assertIsNotNone(vm.server.addresses['private'])
        mac = vm.server.addresses['private'][0]['OS-EXT-IPS-MAC:mac_addr']
        self.assertIsNotNone(mac)
        # Wait until we get the online event for the new VM.
        wait_until_true(
            lambda: self._get_wanted_vm_online(mac), timeout=30, sleep=1,
            exception=Exception('Could not get wanted online vm')
        )
        # Wait until we get the offline event after deleting the VM.
        vm.close()
        wait_until_true(
            lambda: self._get_wanted_vm_offline(mac), timeout=30, sleep=1,
            exception=Exception('Could not get wanted offline vm')
        )
        utils.wait_until_none(
            lambda: self._get_vm_port_by_mac_address(mac), timeout=30, sleep=1,
            exception=Exception('Port was not deleted')
        )

    def test_reply_message(self):
        """Two VMs booted on the same network must both be reported online,
        and both logical ports must disappear after deletion."""
        network = self.store(objects.NetworkTestObj(self.neutron, self.nb_api))
        network_id = network.create(network={'name': 'private'})
        subnet = self.store(objects.SubnetTestObj(self.neutron, self.nb_api,
                                                  network_id))
        subnet_body = {'network_id': network_id,
                       'cidr': '10.20.0.0/24',
                       'gateway_ip': '10.20.0.1',
                       'ip_version': 4,
                       'name': 'private',
                       'enable_dhcp': True}
        subnet.create(subnet=subnet_body)
        self.assertTrue(network.exists())
        self.assertTrue(subnet.exists())
        vm1 = self.store(objects.VMTestObj(self, self.neutron))
        vm1.create(network=network)
        self.assertIsNotNone(vm1.server.addresses['private'])
        mac1 = vm1.server.addresses['private'][0]['OS-EXT-IPS-MAC:mac_addr']
        self.assertIsNotNone(mac1)
        vm2 = self.store(objects.VMTestObj(self, self.neutron))
        vm2.create(network=network)
        self.assertIsNotNone(vm2.server.addresses['private'])
        mac2 = vm2.server.addresses['private'][0]['OS-EXT-IPS-MAC:mac_addr']
        self.assertIsNotNone(mac2)
        # Wait until online events for both VMs have been observed.
        self.set_wanted_vms.clear()
        wait_until_true(
            lambda: self._get_all_wanted_vms_online(mac1, mac2),
            timeout=30, sleep=1,
            exception=Exception('Could not get wanted online vm')
        )
        vm1.close()
        vm2.close()
        utils.wait_until_none(
            lambda: self._get_vm_port_by_mac_address(mac1),
            timeout=30, sleep=1,
            exception=Exception('Port was not deleted')
        )
        utils.wait_until_none(
            lambda: self._get_vm_port_by_mac_address(mac2),
            timeout=30, sleep=1,
            exception=Exception('Port was not deleted')
        )

View File

@@ -1,227 +1,227 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from dragonflow.controller.common import constants as const
from dragonflow.tests.common import utils
from dragonflow.tests.fullstack import test_base
from dragonflow.tests.fullstack import test_objects as objects
class TestOVSFlowsForPortSecurity(test_base.DFTestBase):
    """Fullstack tests for the anti-spoofing flows installed in the
    egress port-security table.
    """

    def _is_expected_flow(self, flow, expected_list):
        """Return True if *flow* (a parsed ovs-ofctl dump entry) is in the
        egress port-security table and matches the expected priority,
        every expected match fragment, and the exact actions string.
        """
        if flow['table'] != str(const.EGRESS_PORT_SECURITY_TABLE):
            return False
        priority = expected_list['priority']
        if flow['priority'] != priority:
            return False
        match_list = expected_list['match_list']
        # Substring match: each expected fragment must appear in the
        # flow's match field.
        for expected_match in match_list:
            if expected_match not in flow['match']:
                return False
        actions = expected_list['actions']
        if flow['actions'] != actions:
            return False
        return True

    def _get_vm_port(self, ip, mac):
        """Return the compute port with the given fixed IP and MAC, or None."""
        ports = self.nb_api.get_all_logical_ports()
        for port in ports:
            if port.get_device_owner() == 'compute:None':
                if port.get_ip() == ip and port.get_mac() == mac:
                    return port
        return None

    def _get_anti_spoof_expected_flows(self, ip, mac, of_port):
        """Build the list of anti-spoof flow specs expected for a VM with
        fixed IP *ip*, MAC *mac* attached at OpenFlow port *of_port*.
        """
        expected_flow_list = []
        in_port_match = "in_port=" + of_port
        dl_src_match = "dl_src=" + mac
        goto_conntrack_table_action = \
            "goto_table:" + str(const.EGRESS_CONNTRACK_TABLE)
        goto_classification_table_action = \
            "goto_table:" + str(const.SERVICES_CLASSIFICATION_TABLE)
        # priority: High, match: in_port=of_port, dl_src=$vm_mac,
        # dl_dst=ff:ff:ff:ff:ff:ff, udp, tp_src=68, tp_dst=67,
        # actions: goto const.EGRESS_CONNTRACK_TABLE
        # (allows DHCP discover/request broadcasts from the VM)
        dl_dst_match = "dl_dst=ff:ff:ff:ff:ff:ff"
        expected_flow_list.append({
            "priority": str(const.PRIORITY_HIGH),
            "match_list": [in_port_match, dl_src_match, dl_dst_match,
                           "udp", "tp_src=68", "tp_dst=67"],
            "actions": goto_conntrack_table_action
        })
        # priority: High, match: ip, in_port=of_port, dl_src=$vm_mac,
        # nw_src=$fixed_ip,
        # actions: goto const.EGRESS_CONNTRACK_TABLE
        nw_src_match = "nw_src=" + ip
        expected_flow_list.append({
            "priority": str(const.PRIORITY_HIGH),
            "match_list": ["ip", in_port_match, dl_src_match, nw_src_match],
            "actions": goto_conntrack_table_action
        })
        # priority: High, match: arp, in_port=of_port, dl_src=$vm_mac,
        # arp_spa=$fixed_ip, arp_sha=$vm_mac
        # actions: goto const.SERVICES_CLASSIFICATION_TABLE
        arp_spa_match = "arp_spa=" + ip
        arp_sha_match = "arp_sha=" + mac
        expected_flow_list.append({
            "priority": str(const.PRIORITY_HIGH),
            "match_list": ["arp", in_port_match, dl_src_match, arp_spa_match,
                           arp_sha_match],
            "actions": goto_classification_table_action
        })
        # priority: High, match: arp, in_port=of_port, dl_src=$vm_mac,
        # arp_op=1, arp_spa=0, arp_sha=$vm_mac
        # actions: goto const.SERVICES_CLASSIFICATION_TABLE
        # (ARP probe with all-zero sender address)
        arp_sha_match = "arp_sha=" + mac
        expected_flow_list.append({
            "priority": str(const.PRIORITY_HIGH),
            "match_list": ["arp", in_port_match, dl_src_match,
                           "arp_spa=0.0.0.0", "arp_op=1", arp_sha_match],
            "actions": goto_classification_table_action
        })
        # priority: Low, match: in_port=of_port, dl_src=$vm_mac
        # actions: goto const.SERVICES_CLASSIFICATION_TABLE
        # NOTE(review): the comment above says "Low" but the code builds
        # PRIORITY_HIGH — confirm which one is intended.
        expected_flow_list.append({
            "priority": str(const.PRIORITY_HIGH),
            "match_list": [in_port_match, dl_src_match],
            "actions": goto_classification_table_action
        })
        return expected_flow_list

    def _check_all_flows_existed(self, expected_flow_list):
        """Assert every spec in *expected_flow_list* matches some dumped flow.

        NOTE: "aleady_found" is a (misspelled but consistent) marker key
        set on specs as they are matched; it is a runtime dict key, so the
        spelling must stay as-is here.
        """
        ovs = utils.OvsFlowsParser()
        flows = ovs.dump(self.integration_bridge)
        for flow in flows:
            for expected_flow in expected_flow_list:
                if expected_flow.get("aleady_found"):
                    continue
                if self._is_expected_flow(flow, expected_flow):
                    expected_flow["aleady_found"] = True
        for expected_flow in expected_flow_list:
            if not expected_flow.get("aleady_found"):
                # for knowing which flow didn't be installed when the test
                # case failed, asserting expected_flow equals to None to print
                # expected_flow
                self.assertEqual(expected_flow, None)
    def _check_not_flow_existed(self, flow_list):
        """Assert no spec in *flow_list* matches any dumped flow."""
        ovs = utils.OvsFlowsParser()
        flows = ovs.dump(self.integration_bridge)
        for flow in flows:
            for expected_flow in flow_list:
                if self._is_expected_flow(flow, expected_flow):
                    # for knowing which flow didn't be removed when the
                    # test case failed, asserting expected_flow equals to
                    # None to print expected_flow
                    self.assertEqual(expected_flow, None)

    def test_default_flows(self):
        """The static default flows of the port-security table must exist."""
        expected_flow_list = []
        # priority: medium, match: ip, actions: drop
        expected_flow_list.append({
            "priority": str(const.PRIORITY_MEDIUM),
            "match_list": ["ip"],
            "actions": "drop"
        })
        # priority: medium, match: arp, actions: drop
        expected_flow_list.append({
            "priority": str(const.PRIORITY_MEDIUM),
            "match_list": ["arp"],
            "actions": "drop"
        })
        # priority: very low, actions: drop
        expected_flow_list.append({
            "priority": str(const.PRIORITY_VERY_LOW),
            "match_list": [],
            "actions": "drop"
        })
        # priority: default, goto const.EGRESS_CONNTRACK_TABLE
        expected_flow_list.append({
            "priority": str(const.PRIORITY_DEFAULT),
            "match_list": [],
            "actions": "goto_table:" + str(const.EGRESS_CONNTRACK_TABLE)
        })
        self._check_all_flows_existed(expected_flow_list)

    def test_anti_spoof_flows(self):
        """Anti-spoof flows must appear when a VM boots and vanish when it
        is deleted."""
        network = self.store(objects.NetworkTestObj(self.neutron, self.nb_api))
        network_id = network.create(network={'name': 'test_network1'})
        self.assertTrue(network.exists())
        subnet_info = {'network_id': network_id,
                       'cidr': '192.168.130.0/24',
                       'gateway_ip': '192.168.130.1',
                       'ip_version': 4,
                       'name': 'test_subnet1',
                       'enable_dhcp': True}
        subnet = self.store(objects.SubnetTestObj(self.neutron,
                                                  self.nb_api,
                                                  network_id=network_id))
        subnet.create(subnet_info)
        self.assertTrue(subnet.exists())
        vm = self.store(objects.VMTestObj(self, self.neutron))
        vm.create(network=network)
        addresses = vm.server.addresses['test_network1']
        self.assertIsNotNone(addresses)
        ip = addresses[0]['addr']
        self.assertIsNotNone(ip)
        mac = addresses[0]['OS-EXT-IPS-MAC:mac_addr']
        self.assertIsNotNone(mac)
        port = utils.wait_until_is_and_return(
            lambda: self._get_vm_port(ip, mac),
            exception=Exception('No port assigned to VM')
        )
        ovsdb = utils.OvsDBParser()
        of_port = ovsdb.get_ofport(port.get_id())
        self.assertIsNotNone(of_port)
        # Check if the associating flows were installed.
        expected_flow_list = self._get_anti_spoof_expected_flows(
            ip, mac, of_port
        )
        self._check_all_flows_existed(expected_flow_list)
        vm.close()
        time.sleep(utils.DEFAULT_CMD_TIMEOUT)
        # Check if the associating flows were removed.
        expected_flow_list = self._get_anti_spoof_expected_flows(
            ip, mac, of_port
        )
        self._check_not_flow_existed(expected_flow_list)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from dragonflow.controller.common import constants as const
from dragonflow.tests.common import utils
from dragonflow.tests.fullstack import test_base
from dragonflow.tests.fullstack import test_objects as objects
class TestOVSFlowsForPortSecurity(test_base.DFTestBase):
    """Fullstack tests for the anti-spoofing flows installed in the
    egress port-security table.
    """

    def _is_expected_flow(self, flow, expected_list):
        """Return True if *flow* (a parsed ovs-ofctl dump entry) is in the
        egress port-security table and matches the expected priority,
        every expected match fragment, and the exact actions string.
        """
        if flow['table'] != str(const.EGRESS_PORT_SECURITY_TABLE):
            return False
        priority = expected_list['priority']
        if flow['priority'] != priority:
            return False
        match_list = expected_list['match_list']
        # Substring match: each expected fragment must appear in the
        # flow's match field.
        for expected_match in match_list:
            if expected_match not in flow['match']:
                return False
        actions = expected_list['actions']
        if flow['actions'] != actions:
            return False
        return True

    def _get_vm_port(self, ip, mac):
        """Return the compute port with the given fixed IP and MAC, or None."""
        ports = self.nb_api.get_all_logical_ports()
        for port in ports:
            if port.get_device_owner() == 'compute:None':
                if port.get_ip() == ip and port.get_mac() == mac:
                    return port
        return None

    def _get_anti_spoof_expected_flows(self, ip, mac, of_port):
        """Build the list of anti-spoof flow specs expected for a VM with
        fixed IP *ip*, MAC *mac* attached at OpenFlow port *of_port*.
        """
        expected_flow_list = []
        in_port_match = "in_port=" + of_port
        dl_src_match = "dl_src=" + mac
        goto_conntrack_table_action = \
            "goto_table:" + str(const.EGRESS_CONNTRACK_TABLE)
        goto_classification_table_action = \
            "goto_table:" + str(const.SERVICES_CLASSIFICATION_TABLE)
        # priority: High, match: in_port=of_port, dl_src=$vm_mac,
        # dl_dst=ff:ff:ff:ff:ff:ff, udp, tp_src=68, tp_dst=67,
        # actions: goto const.EGRESS_CONNTRACK_TABLE
        # (allows DHCP discover/request broadcasts from the VM)
        dl_dst_match = "dl_dst=ff:ff:ff:ff:ff:ff"
        expected_flow_list.append({
            "priority": str(const.PRIORITY_HIGH),
            "match_list": [in_port_match, dl_src_match, dl_dst_match,
                           "udp", "tp_src=68", "tp_dst=67"],
            "actions": goto_conntrack_table_action
        })
        # priority: High, match: ip, in_port=of_port, dl_src=$vm_mac,
        # nw_src=$fixed_ip,
        # actions: goto const.EGRESS_CONNTRACK_TABLE
        nw_src_match = "nw_src=" + ip
        expected_flow_list.append({
            "priority": str(const.PRIORITY_HIGH),
            "match_list": ["ip", in_port_match, dl_src_match, nw_src_match],
            "actions": goto_conntrack_table_action
        })
        # priority: High, match: arp, in_port=of_port, dl_src=$vm_mac,
        # arp_spa=$fixed_ip, arp_sha=$vm_mac
        # actions: goto const.SERVICES_CLASSIFICATION_TABLE
        arp_spa_match = "arp_spa=" + ip
        arp_sha_match = "arp_sha=" + mac
        expected_flow_list.append({
            "priority": str(const.PRIORITY_HIGH),
            "match_list": ["arp", in_port_match, dl_src_match, arp_spa_match,
                           arp_sha_match],
            "actions": goto_classification_table_action
        })
        # priority: High, match: arp, in_port=of_port, dl_src=$vm_mac,
        # arp_op=1, arp_spa=0, arp_sha=$vm_mac
        # actions: goto const.SERVICES_CLASSIFICATION_TABLE
        # (ARP probe with all-zero sender address)
        arp_sha_match = "arp_sha=" + mac
        expected_flow_list.append({
            "priority": str(const.PRIORITY_HIGH),
            "match_list": ["arp", in_port_match, dl_src_match,
                           "arp_spa=0.0.0.0", "arp_op=1", arp_sha_match],
            "actions": goto_classification_table_action
        })
        # priority: Low, match: in_port=of_port, dl_src=$vm_mac
        # actions: goto const.SERVICES_CLASSIFICATION_TABLE
        # NOTE(review): the comment above says "Low" but the code builds
        # PRIORITY_HIGH — confirm which one is intended.
        expected_flow_list.append({
            "priority": str(const.PRIORITY_HIGH),
            "match_list": [in_port_match, dl_src_match],
            "actions": goto_classification_table_action
        })
        return expected_flow_list

    def _check_all_flows_existed(self, expected_flow_list):
        """Assert every spec in *expected_flow_list* matches some dumped flow.

        NOTE: "aleady_found" is a (misspelled but consistent) marker key
        set on specs as they are matched; it is a runtime dict key, so the
        spelling must stay as-is here.
        """
        ovs = utils.OvsFlowsParser()
        flows = ovs.dump(self.integration_bridge)
        for flow in flows:
            for expected_flow in expected_flow_list:
                if expected_flow.get("aleady_found"):
                    continue
                if self._is_expected_flow(flow, expected_flow):
                    expected_flow["aleady_found"] = True
        for expected_flow in expected_flow_list:
            if not expected_flow.get("aleady_found"):
                # for knowing which flow didn't be installed when the test
                # case failed, asserting expected_flow equals to None to print
                # expected_flow
                self.assertEqual(expected_flow, None)
    def _check_not_flow_existed(self, flow_list):
        """Assert no spec in *flow_list* matches any dumped flow."""
        ovs = utils.OvsFlowsParser()
        flows = ovs.dump(self.integration_bridge)
        for flow in flows:
            for expected_flow in flow_list:
                if self._is_expected_flow(flow, expected_flow):
                    # for knowing which flow didn't be removed when the
                    # test case failed, asserting expected_flow equals to
                    # None to print expected_flow
                    self.assertEqual(expected_flow, None)

    def test_default_flows(self):
        """The static default flows of the port-security table must exist."""
        expected_flow_list = []
        # priority: medium, match: ip, actions: drop
        expected_flow_list.append({
            "priority": str(const.PRIORITY_MEDIUM),
            "match_list": ["ip"],
            "actions": "drop"
        })
        # priority: medium, match: arp, actions: drop
        expected_flow_list.append({
            "priority": str(const.PRIORITY_MEDIUM),
            "match_list": ["arp"],
            "actions": "drop"
        })
        # priority: very low, actions: drop
        expected_flow_list.append({
            "priority": str(const.PRIORITY_VERY_LOW),
            "match_list": [],
            "actions": "drop"
        })
        # priority: default, goto const.EGRESS_CONNTRACK_TABLE
        expected_flow_list.append({
            "priority": str(const.PRIORITY_DEFAULT),
            "match_list": [],
            "actions": "goto_table:" + str(const.EGRESS_CONNTRACK_TABLE)
        })
        self._check_all_flows_existed(expected_flow_list)

    def test_anti_spoof_flows(self):
        """Anti-spoof flows must appear when a VM boots and vanish when it
        is deleted."""
        network = self.store(objects.NetworkTestObj(self.neutron, self.nb_api))
        network_id = network.create(network={'name': 'test_network1'})
        self.assertTrue(network.exists())
        subnet_info = {'network_id': network_id,
                       'cidr': '192.168.130.0/24',
                       'gateway_ip': '192.168.130.1',
                       'ip_version': 4,
                       'name': 'test_subnet1',
                       'enable_dhcp': True}
        subnet = self.store(objects.SubnetTestObj(self.neutron,
                                                  self.nb_api,
                                                  network_id=network_id))
        subnet.create(subnet_info)
        self.assertTrue(subnet.exists())
        vm = self.store(objects.VMTestObj(self, self.neutron))
        vm.create(network=network)
        addresses = vm.server.addresses['test_network1']
        self.assertIsNotNone(addresses)
        ip = addresses[0]['addr']
        self.assertIsNotNone(ip)
        mac = addresses[0]['OS-EXT-IPS-MAC:mac_addr']
        self.assertIsNotNone(mac)
        port = utils.wait_until_is_and_return(
            lambda: self._get_vm_port(ip, mac),
            exception=Exception('No port assigned to VM')
        )
        ovsdb = utils.OvsDBParser()
        of_port = ovsdb.get_ofport(port.get_id())
        self.assertIsNotNone(of_port)
        # Check if the associating flows were installed.
        expected_flow_list = self._get_anti_spoof_expected_flows(
            ip, mac, of_port
        )
        self._check_all_flows_existed(expected_flow_list)
        vm.close()
        time.sleep(utils.DEFAULT_CMD_TIMEOUT)
        # Check if the associating flows were removed.
        expected_flow_list = self._get_anti_spoof_expected_flows(
            ip, mac, of_port
        )
        self._check_not_flow_existed(expected_flow_list)

View File

@@ -1,397 +1,397 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log
from dragonflow._i18n import _LI
from dragonflow.controller.common import constants as const
from dragonflow.tests.common import utils
from dragonflow.tests.fullstack import test_base
from dragonflow.tests.fullstack import test_objects as objects
LOG = log.getLogger(__name__)
class TestOVSFlowsForSecurityGroup(test_base.DFTestBase):
def _is_skip_flow(self, flow, direction):
if direction == 'ingress':
table = const.INGRESS_CONNTRACK_TABLE
goto_table = const.INGRESS_DISPATCH_TABLE
else:
table = const.EGRESS_CONNTRACK_TABLE
goto_table = const.SERVICES_CLASSIFICATION_TABLE
if (flow['table'] == str(table)) and \
(flow['priority'] == str(const.PRIORITY_DEFAULT)) and \
(flow['actions'] == ('goto_table:' + str(goto_table))):
return True
return False
def _is_default_drop_flow(self, flow, direction):
if direction == 'ingress':
table = const.INGRESS_SECURITY_GROUP_TABLE
else:
table = const.EGRESS_SECURITY_GROUP_TABLE
if (flow['table'] == str(table)) and \
(flow['priority'] == str(const.PRIORITY_DEFAULT)) and \
(flow['actions'] == 'drop'):
return True
return False
def _is_conntrack_pass_flow(self, flow, direction, ct_state_match):
if direction == 'ingress':
table = const.INGRESS_SECURITY_GROUP_TABLE
goto_table = const.INGRESS_DISPATCH_TABLE
else:
table = const.EGRESS_SECURITY_GROUP_TABLE
goto_table = const.SERVICES_CLASSIFICATION_TABLE
if (flow['table'] == str(table)) and \
(flow['priority'] == str(const.PRIORITY_CT_STATE)) and \
(ct_state_match in flow['match']) and \
(flow['actions'] == ('goto_table:' + str(goto_table))):
return True
return False
def _is_conntrack_established_pass_flow(self, flow, direction):
return self._is_conntrack_pass_flow(
flow=flow, direction=direction,
ct_state_match='-new+est-rel-inv+trk')
def _is_conntrack_relative_not_new_pass_flow(self, flow, direction):
return self._is_conntrack_pass_flow(
flow=flow, direction=direction,
ct_state_match='-new+rel-inv+trk')
def _is_conntrack_relative_new_pass_flow(self, flow, direction):
if direction == 'ingress':
table = const.INGRESS_SECURITY_GROUP_TABLE
else:
table = const.EGRESS_SECURITY_GROUP_TABLE
if (flow['table'] == str(table)) and \
(flow['priority'] == str(const.PRIORITY_CT_STATE)) and \
('+new+rel-inv+trk' in flow['match']) and \
('ct(commit,table' in flow['actions']):
return True
return False
def _is_conntrack_invalid_drop_flow(self, flow, direction):
if direction == 'ingress':
table = const.INGRESS_SECURITY_GROUP_TABLE
else:
table = const.EGRESS_SECURITY_GROUP_TABLE
if (flow['table'] == str(table)) and \
(flow['priority'] == str(const.PRIORITY_CT_STATE)) and \
('ct_state=+inv+trk' in flow['match']) and \
(flow['actions'] == 'drop'):
return True
return False
def _is_associating_flow(self, flow, direction, of_port, reg7):
if direction == 'ingress':
match = 'reg7=' + reg7
table = const.INGRESS_SECURITY_GROUP_TABLE
else:
match = 'in_port=' + of_port
table = const.EGRESS_SECURITY_GROUP_TABLE
if (flow['table'] == str(table)) and \
('ct_state=+new-est-rel-inv+trk' in flow['match']) and \
(match in flow['match']) and \
('conjunction(' in flow['actions']) and \
(',1/2)' in flow['actions']):
return True
return False
def _find_associating_flows(self, flows, of_port, reg7):
ingress_associating_flow = None
egress_associating_flow = None
for flow in flows:
if self._is_associating_flow(flow=flow, direction='ingress',
of_port=of_port, reg7=reg7):
ingress_associating_flow = flow
elif self._is_associating_flow(flow=flow, direction='egress',
of_port=of_port, reg7=reg7):
egress_associating_flow = flow
return ingress_associating_flow, egress_associating_flow
def _is_rule_flow(self, flow, direction):
if direction == 'ingress':
table = const.INGRESS_SECURITY_GROUP_TABLE
else:
table = const.EGRESS_SECURITY_GROUP_TABLE
if (flow['table'] == str(table)) and \
('conjunction(' in flow['actions']) and \
(',2/2' in flow['actions']):
return True
return False
def _is_permit_flow(self, flow, direction):
if direction == 'ingress':
table = const.INGRESS_SECURITY_GROUP_TABLE
else:
table = const.EGRESS_SECURITY_GROUP_TABLE
if (flow['table'] == str(table)) and \
('conj_id=' in flow['match']) and \
('ct(commit,table' in flow['actions']):
return True
return False
def _check_rule_flows(self, flows, expect):
ingress_rule_flow_check = not expect
egress_rule_flow_check = not expect
ingress_permit_flow_check = not expect
egress_permit_flow_check = not expect
for flow in flows:
if self._is_rule_flow(flow, 'ingress'):
ingress_rule_flow_check = expect
elif self._is_rule_flow(flow, 'egress'):
egress_rule_flow_check = expect
elif self._is_permit_flow(flow, 'ingress'):
ingress_permit_flow_check = expect
elif self._is_permit_flow(flow, 'egress'):
egress_permit_flow_check = expect
self.assertTrue(ingress_rule_flow_check)
self.assertTrue(egress_rule_flow_check)
self.assertTrue(ingress_permit_flow_check)
self.assertTrue(egress_permit_flow_check)
def _get_vm_port(self, ip, mac):
ports = self.nb_api.get_all_logical_ports()
for port in ports:
if port.get_device_owner() == 'compute:None':
if port.get_ip() == ip and port.get_mac() == mac:
return port
return None
def _get_of_port(self, port_id):
ovsdb = utils.OvsDBParser()
return ovsdb.get_ofport(port_id)
    def test_default_flows(self):
        """Verify all default security-group flows are installed.

        Dumps the integration bridge and checks that the skip,
        default-drop, conntrack-pass and conntrack-invalid-drop flows
        exist for both the ingress and the egress direction.
        """
        # NOTE(review): "invalied" below is a pre-existing misspelling of
        # "invalid" in these local flag names.
        found_ingress_skip_flow = False
        found_egress_skip_flow = False
        found_ingress_default_drop_flow = False
        found_egress_default_drop_flow = False
        found_ingress_conntrack_established_pass_flow = False
        found_egress_conntrack_established_pass_flow = False
        found_ingress_conntrack_relative_not_new_pass_flow = False
        found_egress_conntrack_relative_not_new_pass_flow = False
        found_ingress_conntrack_relative_new_pass_flow = False
        found_egress_conntrack_relative_new_pass_flow = False
        found_ingress_conntrack_invalied_drop_flow = False
        found_egress_conntrack_invalied_drop_flow = False
        ovs = utils.OvsFlowsParser()
        flows = ovs.dump(self.integration_bridge)
        # First matching predicate wins for each dumped flow.
        for flow in flows:
            if self._is_skip_flow(flow=flow, direction='ingress'):
                found_ingress_skip_flow = True
            elif self._is_skip_flow(flow=flow, direction='egress'):
                found_egress_skip_flow = True
            elif self._is_default_drop_flow(flow=flow, direction='ingress'):
                found_ingress_default_drop_flow = True
            elif self._is_default_drop_flow(flow=flow, direction='egress'):
                found_egress_default_drop_flow = True
            elif self._is_conntrack_established_pass_flow(flow=flow,
                                                          direction='ingress'):
                found_ingress_conntrack_established_pass_flow = True
            elif self._is_conntrack_established_pass_flow(flow=flow,
                                                          direction='egress'):
                found_egress_conntrack_established_pass_flow = True
            elif self._is_conntrack_relative_not_new_pass_flow(
                    flow=flow, direction='ingress'):
                found_ingress_conntrack_relative_not_new_pass_flow = True
            elif self._is_conntrack_relative_not_new_pass_flow(
                    flow=flow, direction='egress'):
                found_egress_conntrack_relative_not_new_pass_flow = True
            elif self._is_conntrack_relative_new_pass_flow(
                    flow=flow, direction='ingress'):
                found_ingress_conntrack_relative_new_pass_flow = True
            elif self._is_conntrack_relative_new_pass_flow(
                    flow=flow, direction='egress'):
                found_egress_conntrack_relative_new_pass_flow = True
            elif self._is_conntrack_invalid_drop_flow(flow=flow,
                                                      direction='ingress'):
                found_ingress_conntrack_invalied_drop_flow = True
            elif self._is_conntrack_invalid_drop_flow(flow=flow,
                                                      direction='egress'):
                found_egress_conntrack_invalied_drop_flow = True
        # Log the full flow dump to help diagnose a failing assertion.
        LOG.info(_LI("default flows are: %s"),
                 ovs.get_ovs_flows(self.integration_bridge))
        self.assertTrue(found_ingress_skip_flow)
        self.assertTrue(found_egress_skip_flow)
        self.assertTrue(found_ingress_default_drop_flow)
        self.assertTrue(found_egress_default_drop_flow)
        self.assertTrue(found_ingress_conntrack_established_pass_flow)
        self.assertTrue(found_egress_conntrack_established_pass_flow)
        self.assertTrue(found_ingress_conntrack_relative_not_new_pass_flow)
        self.assertTrue(found_egress_conntrack_relative_not_new_pass_flow)
        self.assertTrue(found_ingress_conntrack_relative_new_pass_flow)
        self.assertTrue(found_egress_conntrack_relative_new_pass_flow)
        self.assertTrue(found_ingress_conntrack_invalied_drop_flow)
        self.assertTrue(found_egress_conntrack_invalied_drop_flow)
    def test_associating_flows(self):
        """Boot a VM bound to a security group and verify the association
        (conjunction) flows appear, then disappear after VM deletion.
        """
        network = self.store(objects.NetworkTestObj(self.neutron, self.nb_api))
        network_id = network.create(network={'name': 'test_network1'})
        self.assertTrue(network.exists())
        subnet_info = {'network_id': network_id,
                       'cidr': '192.168.123.0/24',
                       'gateway_ip': '192.168.123.1',
                       'ip_version': 4,
                       'name': 'test_subnet1',
                       'enable_dhcp': True}
        subnet = self.store(objects.SubnetTestObj(self.neutron,
                                                  self.nb_api,
                                                  network_id=network_id))
        subnet.create(subnet_info)
        self.assertTrue(subnet.exists())
        security_group = self.store(objects.SecGroupTestObj(
            self.neutron,
            self.nb_api))
        security_group_id = security_group.create()
        self.assertTrue(security_group.exists())
        vm = self.store(objects.VMTestObj(self, self.neutron))
        vm.create(network=network, security_groups=[security_group_id])
        addresses = vm.server.addresses['test_network1']
        self.assertIsNotNone(addresses)
        ip = addresses[0]['addr']
        self.assertIsNotNone(ip)
        mac = addresses[0]['OS-EXT-IPS-MAC:mac_addr']
        self.assertIsNotNone(mac)
        # The logical port may show up asynchronously; poll until it does.
        port = utils.wait_until_is_and_return(
            lambda: self._get_vm_port(ip, mac),
            exception=Exception('No port assigned to VM')
        )
        # Ingress association flows match on reg7 (tunnel key, hex string);
        # egress ones match on the OVS ofport.
        tunnel_key = port.get_tunnel_key()
        tunnel_key_hex = hex(tunnel_key)
        of_port = self._get_of_port(port.get_id())
        self.assertIsNotNone(of_port)
        ovs = utils.OvsFlowsParser()
        flows_after_change = ovs.dump(self.integration_bridge)
        # Check if the associating flows were installed.
        ingress_associating_flow, egress_associating_flow = \
            self._find_associating_flows(flows_after_change, of_port,
                                         tunnel_key_hex)
        LOG.info(_LI("flows after associating a port and a security group"
                     " are: %s"),
                 ovs.get_ovs_flows(self.integration_bridge))
        self.assertIsNotNone(ingress_associating_flow)
        self.assertIsNotNone(egress_associating_flow)
        vm.close()
        # Give the controller time to remove the flows after VM deletion.
        time.sleep(utils.DEFAULT_CMD_TIMEOUT)
        flows_after_update = ovs.dump(self.integration_bridge)
        # Check if the associating flows were removed.
        ingress_associating_flow, egress_associating_flow = \
            self._find_associating_flows(flows_after_update, of_port,
                                         tunnel_key_hex)
        self.assertIsNone(ingress_associating_flow)
        self.assertIsNone(egress_associating_flow)
    def test_rule_flows(self):
        """Add ingress/egress rules to a security group, boot a VM with it
        and verify the rule and permit flows are installed.
        """
        network = self.store(objects.NetworkTestObj(self.neutron, self.nb_api))
        network_id = network.create(network={'name': 'test_network2'})
        self.assertTrue(network.exists())
        subnet_info = {'network_id': network_id,
                       'cidr': '192.168.124.0/24',
                       'gateway_ip': '192.168.124.1',
                       'ip_version': 4,
                       'name': 'test_subnet4',
                       'enable_dhcp': True}
        subnet = self.store(objects.SubnetTestObj(self.neutron,
                                                  self.nb_api,
                                                  network_id=network_id))
        subnet.create(subnet_info)
        security_group = self.store(objects.SecGroupTestObj(
            self.neutron,
            self.nb_api))
        security_group_id = security_group.create()
        self.assertTrue(security_group.exists())
        # Ingress: TCP 8000-8100 from the subnet's own CIDR.
        ingress_rule_info = {'ethertype': 'IPv4',
                             'direction': 'ingress',
                             'protocol': 'tcp',
                             'port_range_min': '8000',
                             'port_range_max': '8100',
                             'remote_ip_prefix': '192.168.124.0/24'}
        ingress_rule_id = security_group.rule_create(secrule=ingress_rule_info)
        self.assertTrue(security_group.rule_exists(ingress_rule_id))
        # Egress: UDP (protocol 17) port 53 to members of the same group.
        egress_rule_info = {'ethertype': 'IPv4',
                            'direction': 'egress',
                            'protocol': '17',
                            'port_range_min': '53',
                            'port_range_max': '53',
                            'remote_group_id': security_group_id}
        egress_rule_id = security_group.rule_create(secrule=egress_rule_info)
        self.assertTrue(security_group.rule_exists(egress_rule_id))
        vm = self.store(objects.VMTestObj(self, self.neutron))
        vm.create(network=network, security_groups=[security_group_id])
        # Give the controller time to install the flows.
        time.sleep(utils.DEFAULT_CMD_TIMEOUT)
        ovs = utils.OvsFlowsParser()
        flows_after_change = ovs.dump(self.integration_bridge)
        LOG.info(_LI("flows after adding rules are: %s"),
                 ovs.get_ovs_flows(self.integration_bridge))
        # Check if the rule flows were installed.
        self._check_rule_flows(flows_after_change, True)
        vm.close()
        # We can't guarantee that all rule flows have been deleted because
        # those rule flows may be installed by other test cases, since all
        # test cases are running synchronously.
        # time.sleep(utils.DEFAULT_CMD_TIMEOUT)
        # flows_after_update = ovs.dump(self.integration_bridge)
        # self._check_rule_flows(flows_after_update, False)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log
from dragonflow._i18n import _LI
from dragonflow.controller.common import constants as const
from dragonflow.tests.common import utils
from dragonflow.tests.fullstack import test_base
from dragonflow.tests.fullstack import test_objects as objects
LOG = log.getLogger(__name__)
class TestOVSFlowsForSecurityGroup(test_base.DFTestBase):
def _is_skip_flow(self, flow, direction):
if direction == 'ingress':
table = const.INGRESS_CONNTRACK_TABLE
goto_table = const.INGRESS_DISPATCH_TABLE
else:
table = const.EGRESS_CONNTRACK_TABLE
goto_table = const.SERVICES_CLASSIFICATION_TABLE
if (flow['table'] == str(table)) and \
(flow['priority'] == str(const.PRIORITY_DEFAULT)) and \
(flow['actions'] == ('goto_table:' + str(goto_table))):
return True
return False
def _is_default_drop_flow(self, flow, direction):
if direction == 'ingress':
table = const.INGRESS_SECURITY_GROUP_TABLE
else:
table = const.EGRESS_SECURITY_GROUP_TABLE
if (flow['table'] == str(table)) and \
(flow['priority'] == str(const.PRIORITY_DEFAULT)) and \
(flow['actions'] == 'drop'):
return True
return False
def _is_conntrack_pass_flow(self, flow, direction, ct_state_match):
if direction == 'ingress':
table = const.INGRESS_SECURITY_GROUP_TABLE
goto_table = const.INGRESS_DISPATCH_TABLE
else:
table = const.EGRESS_SECURITY_GROUP_TABLE
goto_table = const.SERVICES_CLASSIFICATION_TABLE
if (flow['table'] == str(table)) and \
(flow['priority'] == str(const.PRIORITY_CT_STATE)) and \
(ct_state_match in flow['match']) and \
(flow['actions'] == ('goto_table:' + str(goto_table))):
return True
return False
def _is_conntrack_established_pass_flow(self, flow, direction):
return self._is_conntrack_pass_flow(
flow=flow, direction=direction,
ct_state_match='-new+est-rel-inv+trk')
def _is_conntrack_relative_not_new_pass_flow(self, flow, direction):
return self._is_conntrack_pass_flow(
flow=flow, direction=direction,
ct_state_match='-new+rel-inv+trk')
def _is_conntrack_relative_new_pass_flow(self, flow, direction):
if direction == 'ingress':
table = const.INGRESS_SECURITY_GROUP_TABLE
else:
table = const.EGRESS_SECURITY_GROUP_TABLE
if (flow['table'] == str(table)) and \
(flow['priority'] == str(const.PRIORITY_CT_STATE)) and \
('+new+rel-inv+trk' in flow['match']) and \
('ct(commit,table' in flow['actions']):
return True
return False
def _is_conntrack_invalid_drop_flow(self, flow, direction):
if direction == 'ingress':
table = const.INGRESS_SECURITY_GROUP_TABLE
else:
table = const.EGRESS_SECURITY_GROUP_TABLE
if (flow['table'] == str(table)) and \
(flow['priority'] == str(const.PRIORITY_CT_STATE)) and \
('ct_state=+inv+trk' in flow['match']) and \
(flow['actions'] == 'drop'):
return True
return False
def _is_associating_flow(self, flow, direction, of_port, reg7):
if direction == 'ingress':
match = 'reg7=' + reg7
table = const.INGRESS_SECURITY_GROUP_TABLE
else:
match = 'in_port=' + of_port
table = const.EGRESS_SECURITY_GROUP_TABLE
if (flow['table'] == str(table)) and \
('ct_state=+new-est-rel-inv+trk' in flow['match']) and \
(match in flow['match']) and \
('conjunction(' in flow['actions']) and \
(',1/2)' in flow['actions']):
return True
return False
def _find_associating_flows(self, flows, of_port, reg7):
ingress_associating_flow = None
egress_associating_flow = None
for flow in flows:
if self._is_associating_flow(flow=flow, direction='ingress',
of_port=of_port, reg7=reg7):
ingress_associating_flow = flow
elif self._is_associating_flow(flow=flow, direction='egress',
of_port=of_port, reg7=reg7):
egress_associating_flow = flow
return ingress_associating_flow, egress_associating_flow
def _is_rule_flow(self, flow, direction):
if direction == 'ingress':
table = const.INGRESS_SECURITY_GROUP_TABLE
else:
table = const.EGRESS_SECURITY_GROUP_TABLE
if (flow['table'] == str(table)) and \
('conjunction(' in flow['actions']) and \
(',2/2' in flow['actions']):
return True
return False
def _is_permit_flow(self, flow, direction):
if direction == 'ingress':
table = const.INGRESS_SECURITY_GROUP_TABLE
else:
table = const.EGRESS_SECURITY_GROUP_TABLE
if (flow['table'] == str(table)) and \
('conj_id=' in flow['match']) and \
('ct(commit,table' in flow['actions']):
return True
return False
def _check_rule_flows(self, flows, expect):
ingress_rule_flow_check = not expect
egress_rule_flow_check = not expect
ingress_permit_flow_check = not expect
egress_permit_flow_check = not expect
for flow in flows:
if self._is_rule_flow(flow, 'ingress'):
ingress_rule_flow_check = expect
elif self._is_rule_flow(flow, 'egress'):
egress_rule_flow_check = expect
elif self._is_permit_flow(flow, 'ingress'):
ingress_permit_flow_check = expect
elif self._is_permit_flow(flow, 'egress'):
egress_permit_flow_check = expect
self.assertTrue(ingress_rule_flow_check)
self.assertTrue(egress_rule_flow_check)
self.assertTrue(ingress_permit_flow_check)
self.assertTrue(egress_permit_flow_check)
def _get_vm_port(self, ip, mac):
ports = self.nb_api.get_all_logical_ports()
for port in ports:
if port.get_device_owner() == 'compute:None':
if port.get_ip() == ip and port.get_mac() == mac:
return port
return None
def _get_of_port(self, port_id):
ovsdb = utils.OvsDBParser()
return ovsdb.get_ofport(port_id)
    def test_default_flows(self):
        """Verify all default security-group flows are installed.

        Dumps the integration bridge and checks that the skip,
        default-drop, conntrack-pass and conntrack-invalid-drop flows
        exist for both the ingress and the egress direction.
        """
        # NOTE(review): "invalied" below is a pre-existing misspelling of
        # "invalid" in these local flag names.
        found_ingress_skip_flow = False
        found_egress_skip_flow = False
        found_ingress_default_drop_flow = False
        found_egress_default_drop_flow = False
        found_ingress_conntrack_established_pass_flow = False
        found_egress_conntrack_established_pass_flow = False
        found_ingress_conntrack_relative_not_new_pass_flow = False
        found_egress_conntrack_relative_not_new_pass_flow = False
        found_ingress_conntrack_relative_new_pass_flow = False
        found_egress_conntrack_relative_new_pass_flow = False
        found_ingress_conntrack_invalied_drop_flow = False
        found_egress_conntrack_invalied_drop_flow = False
        ovs = utils.OvsFlowsParser()
        flows = ovs.dump(self.integration_bridge)
        # First matching predicate wins for each dumped flow.
        for flow in flows:
            if self._is_skip_flow(flow=flow, direction='ingress'):
                found_ingress_skip_flow = True
            elif self._is_skip_flow(flow=flow, direction='egress'):
                found_egress_skip_flow = True
            elif self._is_default_drop_flow(flow=flow, direction='ingress'):
                found_ingress_default_drop_flow = True
            elif self._is_default_drop_flow(flow=flow, direction='egress'):
                found_egress_default_drop_flow = True
            elif self._is_conntrack_established_pass_flow(flow=flow,
                                                          direction='ingress'):
                found_ingress_conntrack_established_pass_flow = True
            elif self._is_conntrack_established_pass_flow(flow=flow,
                                                          direction='egress'):
                found_egress_conntrack_established_pass_flow = True
            elif self._is_conntrack_relative_not_new_pass_flow(
                    flow=flow, direction='ingress'):
                found_ingress_conntrack_relative_not_new_pass_flow = True
            elif self._is_conntrack_relative_not_new_pass_flow(
                    flow=flow, direction='egress'):
                found_egress_conntrack_relative_not_new_pass_flow = True
            elif self._is_conntrack_relative_new_pass_flow(
                    flow=flow, direction='ingress'):
                found_ingress_conntrack_relative_new_pass_flow = True
            elif self._is_conntrack_relative_new_pass_flow(
                    flow=flow, direction='egress'):
                found_egress_conntrack_relative_new_pass_flow = True
            elif self._is_conntrack_invalid_drop_flow(flow=flow,
                                                      direction='ingress'):
                found_ingress_conntrack_invalied_drop_flow = True
            elif self._is_conntrack_invalid_drop_flow(flow=flow,
                                                      direction='egress'):
                found_egress_conntrack_invalied_drop_flow = True
        # Log the full flow dump to help diagnose a failing assertion.
        LOG.info(_LI("default flows are: %s"),
                 ovs.get_ovs_flows(self.integration_bridge))
        self.assertTrue(found_ingress_skip_flow)
        self.assertTrue(found_egress_skip_flow)
        self.assertTrue(found_ingress_default_drop_flow)
        self.assertTrue(found_egress_default_drop_flow)
        self.assertTrue(found_ingress_conntrack_established_pass_flow)
        self.assertTrue(found_egress_conntrack_established_pass_flow)
        self.assertTrue(found_ingress_conntrack_relative_not_new_pass_flow)
        self.assertTrue(found_egress_conntrack_relative_not_new_pass_flow)
        self.assertTrue(found_ingress_conntrack_relative_new_pass_flow)
        self.assertTrue(found_egress_conntrack_relative_new_pass_flow)
        self.assertTrue(found_ingress_conntrack_invalied_drop_flow)
        self.assertTrue(found_egress_conntrack_invalied_drop_flow)
    def test_associating_flows(self):
        """Boot a VM bound to a security group and verify the association
        (conjunction) flows appear, then disappear after VM deletion.
        """
        network = self.store(objects.NetworkTestObj(self.neutron, self.nb_api))
        network_id = network.create(network={'name': 'test_network1'})
        self.assertTrue(network.exists())
        subnet_info = {'network_id': network_id,
                       'cidr': '192.168.123.0/24',
                       'gateway_ip': '192.168.123.1',
                       'ip_version': 4,
                       'name': 'test_subnet1',
                       'enable_dhcp': True}
        subnet = self.store(objects.SubnetTestObj(self.neutron,
                                                  self.nb_api,
                                                  network_id=network_id))
        subnet.create(subnet_info)
        self.assertTrue(subnet.exists())
        security_group = self.store(objects.SecGroupTestObj(
            self.neutron,
            self.nb_api))
        security_group_id = security_group.create()
        self.assertTrue(security_group.exists())
        vm = self.store(objects.VMTestObj(self, self.neutron))
        vm.create(network=network, security_groups=[security_group_id])
        addresses = vm.server.addresses['test_network1']
        self.assertIsNotNone(addresses)
        ip = addresses[0]['addr']
        self.assertIsNotNone(ip)
        mac = addresses[0]['OS-EXT-IPS-MAC:mac_addr']
        self.assertIsNotNone(mac)
        # The logical port may show up asynchronously; poll until it does.
        port = utils.wait_until_is_and_return(
            lambda: self._get_vm_port(ip, mac),
            exception=Exception('No port assigned to VM')
        )
        # Ingress association flows match on reg7 (tunnel key, hex string);
        # egress ones match on the OVS ofport.
        tunnel_key = port.get_tunnel_key()
        tunnel_key_hex = hex(tunnel_key)
        of_port = self._get_of_port(port.get_id())
        self.assertIsNotNone(of_port)
        ovs = utils.OvsFlowsParser()
        flows_after_change = ovs.dump(self.integration_bridge)
        # Check if the associating flows were installed.
        ingress_associating_flow, egress_associating_flow = \
            self._find_associating_flows(flows_after_change, of_port,
                                         tunnel_key_hex)
        LOG.info(_LI("flows after associating a port and a security group"
                     " are: %s"),
                 ovs.get_ovs_flows(self.integration_bridge))
        self.assertIsNotNone(ingress_associating_flow)
        self.assertIsNotNone(egress_associating_flow)
        vm.close()
        # Give the controller time to remove the flows after VM deletion.
        time.sleep(utils.DEFAULT_CMD_TIMEOUT)
        flows_after_update = ovs.dump(self.integration_bridge)
        # Check if the associating flows were removed.
        ingress_associating_flow, egress_associating_flow = \
            self._find_associating_flows(flows_after_update, of_port,
                                         tunnel_key_hex)
        self.assertIsNone(ingress_associating_flow)
        self.assertIsNone(egress_associating_flow)
def test_rule_flows(self):
network = self.store(objects.NetworkTestObj(self.neutron, self.nb_api))
network_id = network.create(network={'name': 'test_network2'})
self.assertTrue(network.exists())
subnet_info = {'network_id': network_id,
'cidr': '192.168.124.0/24',
'gateway_ip': '192.168.124.1',
'ip_version': 4,
'name': 'test_subnet4',
'enable_dhcp': True}
subnet = self.store(objects.SubnetTestObj(self.neutron,
self.nb_api,
network_id=network_id))
subnet.create(subnet_info)
security_group = self.store(objects.SecGroupTestObj(
self.neutron,
self.nb_api))
security_group_id = security_group.create()
self.assertTrue(security_group.exists())
ingress_rule_info = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': '8000',
'port_range_max': '8100',
'remote_ip_prefix': '192.168.124.0/24'}
ingress_rule_id = security_group.rule_create(secrule=ingress_rule_info)
self.assertTrue(security_group.rule_exists(ingress_rule_id))
egress_rule_info = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': '17',
'port_range_min': '53',
'port_range_max': '53',
'remote_group_id': security_group_id}
egress_rule_id = security_group.rule_create(secrule=egress_rule_info)
self.assertTrue(security_group.rule_exists(egress_rule_id))
vm = self.store(objects.VMTestObj(self, self.neutron))
vm.create(network=network, security_groups=[security_group_id])
time.sleep(utils.DEFAULT_CMD_TIMEOUT)
ovs = utils.OvsFlowsParser()
flows_after_change = ovs.dump(self.integration_bridge)
LOG.info(_LI("flows after adding rules are: %s"),
ovs.get_ovs_flows(self.integration_bridge))
# Check if the rule flows were installed.
self._check_rule_flows(flows_after_change, True)
vm.close()
# We can't guarantee that all rule flows have been deleted because
# those rule flows may be installed in other test cases for all
# test cases are running synchronously.
# time.sleep(utils.DEFAULT_CMD_TIMEOUT)
# flows_after_update = ovs.dump(self.integration_bridge)
# self._check_rule_flows(flows_after_update, False)