Merge "Fix spelling mistakes"

Jenkins authored 2014-08-02 14:09:11 +00:00, committed by Gerrit Code Review
commit 7a56aa6cd7
28 changed files with 43 additions and 43 deletions

View File

@@ -602,7 +602,7 @@ class IptablesManager(object):
 return True
 # We filter duplicates. Go through the chains and rules, letting
-# the *last* occurrence take precendence since it could have a
+# the *last* occurrence take precedence since it could have a
 # non-zero [packet:byte] count we want to preserve. We also filter
 # out anything in the "remove" list.
 new_filter.reverse()

View File

@@ -53,7 +53,7 @@ def build_resource_info(plural_mappings, resource_map, which_service,
 are being created. This name will be used to pass
 the appropriate plugin to the WSGI resource.
 It can be set to None or "CORE" to create WSGI
-resources for the the core plugin
+resources for the core plugin
 :param action_map: custom resource actions
 :param register_quota: it can be set to True to register quotas for the
 resource(s) being created

View File

@@ -184,7 +184,7 @@ def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
 def parse_mappings(mapping_list, unique_values=True):
-"""Parse a list of of mapping strings into a dictionary.
+"""Parse a list of mapping strings into a dictionary.
 :param mapping_list: a list of strings of the form '<key>:<value>'
 :param unique_values: values must be unique if True

View File

@@ -59,7 +59,7 @@ class L3_NAT_db_mixin(l3_db.L3_NAT_db_mixin):
 super(L3_NAT_db_mixin, self)._update_router_gw_info(
 context, router_id, info, router=router)
 # Returning the router might come back useful if this
-# method is overriden in child classes
+# method is overridden in child classes
 return router
 def _build_routers_list(self, context, routers, gw_ports):

View File

@@ -144,7 +144,7 @@ class PortSecurityDbMixin(object):
 def _determine_port_security_and_has_ip(self, context, port):
 """Returns a tuple of booleans (port_security_enabled, has_ip).
-Port_security is the value assocated with the port if one is present
+Port_security is the value associated with the port if one is present
 otherwise the value associated with the network is returned. has_ip is
 if the port is associated with an ip or not.
 """

View File

@@ -405,7 +405,7 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
 # remote_group_id. Therefore it is not possible to do this
 # query unless the behavior of _get_collection()
 # is changed which cannot be because other methods are already
-# relying on this behavor. Therefore, we do the filtering
+# relying on this behavior. Therefore, we do the filtering
 # below to check for these corner cases.
 for db_rule in db_rules:
 # need to remove id from db_rule for matching

View File

@@ -30,7 +30,7 @@ LOG = logging.getLogger(__name__)
 class ProviderResourceAssociation(model_base.BASEV2):
 provider_name = sa.Column(sa.String(255),
 nullable=False, primary_key=True)
-# should be manualy deleted on resource deletion
+# should be manually deleted on resource deletion
 resource_id = sa.Column(sa.String(36), nullable=False, primary_key=True,
 unique=True)

View File

@@ -110,7 +110,7 @@ class Notifier(object):
 @property
 def _plugin(self):
 # NOTE(arosen): this cannot be set in __init__ currently since
-# this class is initalized at the same time as NeutronManager()
+# this class is initialized at the same time as NeutronManager()
 # which is decorated with synchronized()
 if not hasattr(self, '_plugin_ref'):
 self._plugin_ref = manager.NeutronManager.get_plugin()

View File

@@ -150,7 +150,7 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None):
 :param external: The external keyword argument denotes whether this lock
 should work across multiple processes. This means that if two different
-workers both run a a method decorated with @synchronized('mylock',
+workers both run a method decorated with @synchronized('mylock',
 external=True), only one of them will execute at a time.
 :param lock_path: The lock_path keyword argument is used to specify a

View File

@@ -627,13 +627,13 @@ class NeutronRestProxyV2(NeutronRestProxyV2Base,
 @put_context_in_serverpool
 def create_port(self, context, port):
 """Create a port, which is a connection point of a device
-(e.g., a VM NIC) to attach to a L2 Neutron network.
+(e.g., a VM NIC) to attach an L2 Neutron network.
 :param context: neutron api request context
 :param port: dictionary describing the port
 :returns:
 {
-"id": uuid represeting the port.
+"id": uuid representing the port.
 "network_id": uuid of network.
 "tenant_id": tenant_id
 "mac_address": mac address to use on this port.
@@ -641,7 +641,7 @@ class NeutronRestProxyV2(NeutronRestProxyV2Base,
 does not forward packets.
 "status": dicates whether port is currently operational
 (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
-"fixed_ips": list of subnet ID"s and IP addresses to be used on
+"fixed_ips": list of subnet IDs and IP addresses to be used on
 this port
 "device_id": identifies the device (e.g., virtual server) using
 this port.
@@ -725,7 +725,7 @@ class NeutronRestProxyV2(NeutronRestProxyV2Base,
 :returns: a mapping sequence with the following signature:
 {
-"id": uuid represeting the port.
+"id": uuid representing the port.
 "network_id": uuid of network.
 "tenant_id": tenant_id
 "mac_address": mac address to use on this port.
@@ -733,7 +733,7 @@ class NeutronRestProxyV2(NeutronRestProxyV2Base,
 does not forward packets.
 "status": dicates whether port is currently operational
 (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
-"fixed_ips": list of subnet ID's and IP addresses to be used on
+"fixed_ips": list of subnet IDs and IP addresses to be used on
 this port
 "device_id": identifies the device (e.g., virtual server) using
 this port.

View File

@@ -126,7 +126,7 @@ class RequestedStateInvalid(webob.exc.HTTPClientError):
 subclass of :class:`~HTTPClientError`
-This indicates that the server could not update the port state to
+This indicates that the server could not update the port state
 to the request value
 code: 431, title: Requested State Invalid

View File

@@ -140,7 +140,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
 """
 Setup Cisco Nexus 1000V related parameters and pull policy profiles.
-Retrieve all the policy profiles from the VSM when the plugin is
+Retrieve all the policy profiles from the VSM when the plugin
 is instantiated for the first time and then continue to poll for
 policy profile updates.
 """

View File

@@ -554,13 +554,13 @@ class SyncService(object):
 try:
 self._rpc.delete_this_region()
 msg = _('No Tenants configured in Neutron DB. But %d '
-'tenants disovered in EOS during synchronization.'
-'Enitre EOS region is cleared') % len(eos_tenants)
+'tenants discovered in EOS during synchronization.'
+'Entire EOS region is cleared') % len(eos_tenants)
 LOG.info(msg)
 # Re-register with EOS so that the timestamp is updated.
 self._rpc.register_with_eos()
 # Region has been completely cleaned. So there is nothing to
-# syncronize
+# synchronize
 self._force_sync = False
 except arista_exc.AristaRpcError:
 LOG.warning(EOS_UNREACHABLE_MSG)
@@ -655,7 +655,7 @@ class SyncService(object):
 class AristaDriver(driver_api.MechanismDriver):
 """Ml2 Mechanism driver for Arista networking hardware.
-Remebers all networks and VMs that are provisioned on Arista Hardware.
+Remembers all networks and VMs that are provisioned on Arista Hardware.
 Does not send network provisioning request if the network has already been
 provisioned before for the given port.
 """

View File

@@ -57,7 +57,7 @@ class APICManager(object):
 apic_conf = cfg.CONF.ml2_cisco_apic
 self.switch_dict = config.create_switch_dictionary()
-# Connect to the the APIC
+# Connect to the APIC
 self.apic = apic_client.RestClient(
 apic_conf.apic_host,
 apic_conf.apic_port,

View File

@@ -278,7 +278,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
 # possible (but unlikely) that the port's state could change
 # concurrently while these calls are being made. If another
 # thread or process succeeds in binding the port before this
-# thread commits its results, the already commited results are
+# thread commits its results, the already committed results are
 # used. If attributes such as binding:host_id,
 # binding:profile, or binding:vnic_type are updated
 # concurrently, this loop retries binding using the new
@@ -825,7 +825,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
 self.delete_port(context, result['id'])
 # REVISIT(rkukura): Is there any point in calling this before
-# a binding has been succesfully established?
+# a binding has been successfully established?
 self.notify_security_groups_member_updated(context, result)
 try:

View File

@@ -399,13 +399,13 @@ class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin):
 subnet_uuid = ips['subnet_id']
 ldm = self.local_dvr_map[subnet_uuid]
 if not ldm.is_dvr_owned():
-# well this is csnat stuff, let dvr come in
+# well this is CSNAT stuff, let dvr come in
 # and do plumbing for this vm later
 continue
 # This confirms that this compute port belongs
 # to a dvr hosted subnet.
-# Accomodate this VM Port into the existing rule in
+# Accommodate this VM Port into the existing rule in
 # the integration bridge
 LOG.debug("DVR: Plumbing compute port %s", port.vif_id)
 subnet_info = ldm.get_subnet_info()
@@ -598,7 +598,7 @@ class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin):
 ovsport = self.local_ports[port.vif_id]
 # This confirms that this compute port being removed belonged
 # to a dvr hosted subnet.
-# Accomodate this VM Port into the existing rule in
+# Accommodate this VM Port into the existing rule in
 # the integration bridge
 LOG.debug("DVR: Removing plumbing for compute port %s", port)
 subnet_ids = ovsport.get_subnets()
@@ -654,9 +654,9 @@ class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin):
 def _unbind_centralized_snat_port_on_dvr_subnet(self, port, local_vlan):
 ovsport = self.local_ports[port.vif_id]
-# This comfirms that this compute port being removed belonged
+# This confirms that this compute port being removed belonged
 # to a dvr hosted subnet.
-# Accomodate this VM Port into the existing rule in
+# Accommodate this VM Port into the existing rule in
 # the integration bridge
 LOG.debug("DVR: Removing plumbing for csnat port %s", port)
 sub_uuid = list(ovsport.get_subnets())[0]

View File

@@ -751,7 +751,7 @@ class NeutronPluginPLUMgridV2(db_base_plugin_v2.NeutronDbPluginV2,
 raise plum_excep.PLUMgridException(err_msg=err_message)
 """
-Internal PLUMgrid Fuctions
+Internal PLUMgrid Functions
 """
 def _get_plugin_version(self):

View File

@@ -146,7 +146,7 @@ class ApiRequest(object):
 if cookie is None and self._url != "/ws.v1/login":
 # The connection still has no valid cookie despite
-# attemps to authenticate and the request has failed
+# attempts to authenticate and the request has failed
 # with unauthorized status code. If this isn't a
 # a request to authenticate, we should abort the
 # request since there is no point in retrying.

View File

@@ -228,12 +228,12 @@ class QoSDbMixin(qos.QueuePluginBase):
 port['device_owner'].startswith('network:')):
 return
-# Check if there is a queue assocated with the network
+# Check if there is a queue associated with the network
 filters = {'network_id': [port['network_id']]}
 network_queue_id = self._get_network_queue_bindings(
 context, filters, ['queue_id'])
 if network_queue_id:
-# get networks that queue is assocated with
+# get networks that queue is associated with
 filters = {'queue_id': [network_queue_id[0]['queue_id']]}
 networks_with_same_queue = self._get_network_queue_bindings(
 context, filters)

View File

@@ -2258,7 +2258,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 nsx_device_id = self._get_nsx_device_id(context, device_id)
 super(NsxPluginV2, self).delete_gateway_device(
 context, device_id)
-# DB operation was successful, peform NSX operation
+# DB operation was successful, perform NSX operation
 # TODO(salv-orlando): State consistency with neutron DB
 # should be ensured even in case of backend failures
 try:

View File

@@ -555,7 +555,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
 return service_name
 def _get_available_service(self, service_name):
-"""Check if service exsists and return its name if it does."""
+"""Check if service exists and return its name if it does."""
 resource = '/api/service/' + service_name
 try:
 _rest_wrapper(self.rest_client.call('GET',

View File

@@ -39,7 +39,7 @@ class VPNAgent(l3_agent.L3NATAgentWithStateReport):
 :param host: hostname. This is needed for rpc
 Each devices will stays as processes.
-They will communiate with
+They will communicate with
 server side service plugin using rpc with
 device specific rpc topic.
 :returns: None

View File

@@ -179,7 +179,7 @@ class CiscoCsrIPsecDriver(device_drivers.DeviceDriver):
 This class is designed for use with L3-agent now.
 However this driver will be used with another agent in future.
 so the use of "Router" is kept minimul now.
-Insted of router_id, we are using process_id in this code.
+Instead of router_id, we are using process_id in this code.
 """
 # history

View File

@@ -477,7 +477,7 @@ class IPsecDriver(device_drivers.DeviceDriver):
 This class is designed for use with L3-agent now.
 However this driver will be used with another agent in future.
 so the use of "Router" is kept minimul now.
-Insted of router_id, we are using process_id in this code.
+Instead of router_id, we are using process_id in this code.
 """
 # history

View File

@@ -111,7 +111,7 @@ class FakePortContext(api.PortContext):
 class AgentMechanismBaseTestCase(base.BaseTestCase):
-# These following must be overriden for the specific mechanism
+# The following must be overridden for the specific mechanism
 # driver being tested:
 VIF_TYPE = None
 CAP_PORT_FILTER = None

View File

@@ -875,7 +875,7 @@ class TestCiscoCsrIPsecDeviceDriverSyncStatuses(base.BaseTestCase):
 # Build notification message
 (service1_data,
 service2_data) = self.notification_for_two_services_with_two_conns()
-# Simulate plugin returning notifcation, when requested
+# Simulate plugin returning notification, when requested
 self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
 service1_data, service2_data]
 vpn_services = self.driver.update_all_services_and_connections(

View File

@@ -1129,7 +1129,7 @@ class TestBasicRouterOperations(base.BaseTestCase):
 l3_agent.L3NATAgent,
 'internal_network_added') as internal_network_added:
 # raise RuntimeError to simulate that an unexpected exception
-# occurrs
+# occurs
 internal_network_added.side_effect = RuntimeError
 self.assertRaises(RuntimeError, agent.process_router, ri)
 self.assertNotIn(
@@ -1158,7 +1158,7 @@ class TestBasicRouterOperations(base.BaseTestCase):
 l3_agent.L3NATAgent,
 'internal_network_removed') as internal_net_removed:
 # raise RuntimeError to simulate that an unexpected exception
-# occurrs
+# occurs
 internal_net_removed.side_effect = RuntimeError
 ri.internal_ports[0]['admin_state_up'] = False
 # The above port is set to down state, remove it.

View File

@@ -619,7 +619,7 @@ class TestL3NatTestCase(L3NatTest,
 res.status_int)
 def test_router_add_gateway_invalid_network_returns_404(self):
-# NOTE(salv-orlando): This unit test has been overriden
+# NOTE(salv-orlando): This unit test has been overridden
 # as the nsx plugin support the ext_gw_mode extension
 # which mandates a uuid for the external network identifier
 with self.router() as r: