From fb027ec92410ee4f6055e6652c87e061a24a49d3 Mon Sep 17 00:00:00 2001 From: Mathieu Mitchell Date: Tue, 13 Sep 2016 07:52:14 -0400 Subject: [PATCH] [install-guide] Import "integration with other OpenStack components" The following sections of the legacy install guide were imported: * Configure Compute to use the Bare Metal service * Configure Networking to communicate with the bare metal server * Configure Tenant Networks * Configure the Bare Metal service for cleaning Change-Id: I5cf832401aba9499e9eec9bd50adcbedd356cb00 Partial-bug: #1612278 --- doc/source/deploy/install-guide.rst | 218 +----------------- install-guide/source/configure-cleaning.rst | 37 +++ .../source/configure-integration.rst | 2 + .../source/configure-tenant-networks.rst | 8 + .../include/configure-neutron-networks.rst | 113 +++++++++ .../source/include/configure-nova-compute.rst | 111 ++++++++- install-guide/source/index.rst | 2 + 7 files changed, 279 insertions(+), 212 deletions(-) create mode 100644 install-guide/source/configure-cleaning.rst create mode 100644 install-guide/source/configure-tenant-networks.rst create mode 100644 install-guide/source/include/configure-neutron-networks.rst diff --git a/doc/source/deploy/install-guide.rst b/doc/source/deploy/install-guide.rst index b73c5a3538..08b82262f1 100644 --- a/doc/source/deploy/install-guide.rst +++ b/doc/source/deploy/install-guide.rst @@ -47,201 +47,21 @@ Metal service Install Guide. Configure Compute to use the Bare Metal service =============================================== -The Compute service needs to be configured to use the Bare Metal service's -driver. The configuration file for the Compute service is typically located at -``/etc/nova/nova.conf``. *This configuration file must be modified on the -Compute service's controller nodes and compute nodes.* +The `Configure Compute to use the Bare Metal service`_ section has been moved +to the Bare Metal service Install Guide. -1. Change these configuration options in the ``default`` section, as follows:: - - [default] - - # Driver to use for controlling virtualization. Options - # include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, - # fake.FakeDriver, baremetal.BareMetalDriver, - # vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver (string - # value) - #compute_driver= - compute_driver=ironic.IronicDriver - - # Firewall driver (defaults to hypervisor specific iptables - # driver) (string value) - #firewall_driver= - firewall_driver=nova.virt.firewall.NoopFirewallDriver - - # The scheduler host manager class to use (string value) - #scheduler_host_manager=host_manager - scheduler_host_manager=ironic_host_manager - - # Virtual ram to physical ram allocation ratio which affects - # all ram filters. This configuration specifies a global ratio - # for RamFilter. For AggregateRamFilter, it will fall back to - # this configuration value if no per-aggregate setting found. - # (floating point value) - #ram_allocation_ratio=1.5 - ram_allocation_ratio=1.0 - - # Amount of disk in MB to reserve for the host (integer value) - #reserved_host_disk_mb=0 - reserved_host_memory_mb=0 - - # Flag to decide whether to use baremetal_scheduler_default_filters or not. 
- # (boolean value) - #scheduler_use_baremetal_filters=False - scheduler_use_baremetal_filters=True - - # Determines if the Scheduler tracks changes to instances to help with - # its filtering decisions (boolean value) - #scheduler_tracks_instance_changes=True - scheduler_tracks_instance_changes=False - - # New instances will be scheduled on a host chosen randomly from a subset - # of the N best hosts, where N is the value set by this option. Valid - # values are 1 or greater. Any value less than one will be treated as 1. - # For ironic, this should be set to a number >= the number of ironic nodes - # to more evenly distribute instances across the nodes. - #scheduler_host_subset_size=1 - scheduler_host_subset_size=9999999 - -2. Change these configuration options in the ``ironic`` section. - Replace: - - - IRONIC_PASSWORD with the password you chose for the ``ironic`` - user in the Identity Service - - IRONIC_NODE with the hostname or IP address of the ironic-api node - - IDENTITY_IP with the IP of the Identity server - - :: - - [ironic] - - # Ironic keystone admin name - admin_username=ironic - - #Ironic keystone admin password. - admin_password=IRONIC_PASSWORD - - # keystone API endpoint - admin_url=http://IDENTITY_IP:35357/v2.0 - - # Ironic keystone tenant name. - admin_tenant_name=service - - # URL for Ironic API endpoint. - api_endpoint=http://IRONIC_NODE:6385/v1 - -3. On the Compute service's controller nodes, restart the ``nova-scheduler`` process:: - - Fedora/RHEL7/CentOS7: - sudo systemctl restart openstack-nova-scheduler - - Ubuntu: - sudo service nova-scheduler restart - -4. On the Compute service's compute nodes, restart the ``nova-compute`` process:: - - Fedora/RHEL7/CentOS7: - sudo systemctl restart openstack-nova-compute - - Ubuntu: - sudo service nova-compute restart +.. _`Configure Compute to use the Bare Metal service`: http://docs.openstack.org/project-install-guide/baremetal/draft/configure-integration.html#configure-compute-to-use-the-bare-metal-service .. _NeutronFlatNetworking: Configure Networking to communicate with the bare metal server ============================================================== -You need to configure Networking so that the bare metal server can communicate -with the Networking service for DHCP, PXE boot and other requirements. -This section covers configuring Networking for a single flat -network for bare metal provisioning. +The `Configure Networking to communicate with the bare metal server`_ section +has been moved to the Bare Metal service Install Guide. -You will also need to provide Bare Metal service with the MAC address(es) of -each node that it is provisioning; Bare Metal service in turn will pass this -information to Networking service for DHCP and PXE boot configuration. -An example of this is shown in the `Enrollment`_ section. +.. _`Configure Networking to communicate with the bare metal server`: http://docs.openstack.org/project-install-guide/baremetal/draft/configure-integration.html#configure-networking-to-communicate-with-the-bare-metal-server -#. 
Edit ``/etc/neutron/plugins/ml2/ml2_conf.ini`` and modify these:: - - [ml2] - type_drivers = flat - tenant_network_types = flat - mechanism_drivers = openvswitch - - [ml2_type_flat] - flat_networks = physnet1 - - [securitygroup] - firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver - enable_security_group = True - - [ovs] - bridge_mappings = physnet1:br-eth2 - # Replace eth2 with the interface on the neutron node which you - # are using to connect to the bare metal server - -#. If neutron-openvswitch-agent runs with ``ovs_neutron_plugin.ini`` as the input - config-file, edit ``ovs_neutron_plugin.ini`` to configure the bridge mappings - by adding the [ovs] section described in the previous step, and restart the - neutron-openvswitch-agent. - -#. Add the integration bridge to Open vSwitch:: - - ovs-vsctl add-br br-int - -#. Create the br-eth2 network bridge to handle communication between the - OpenStack services (and the Bare Metal services) and the bare metal nodes - using eth2. - Replace eth2 with the interface on the network node which you are - using to connect to the Bare Metal service:: - - ovs-vsctl add-br br-eth2 - ovs-vsctl add-port br-eth2 eth2 - -#. Restart the Open vSwitch agent:: - - service neutron-plugin-openvswitch-agent restart - -#. On restarting the Networking service Open vSwitch agent, the veth pair - between the bridges br-int and br-eth2 is automatically created. - - Your Open vSwitch bridges should look something like this after - following the above steps:: - - ovs-vsctl show - - Bridge br-int - fail_mode: secure - Port "int-br-eth2" - Interface "int-br-eth2" - type: patch - options: {peer="phy-br-eth2"} - Port br-int - Interface br-int - type: internal - Bridge "br-eth2" - Port "phy-br-eth2" - Interface "phy-br-eth2" - type: patch - options: {peer="int-br-eth2"} - Port "eth2" - Interface "eth2" - Port "br-eth2" - Interface "br-eth2" - type: internal - ovs_version: "2.3.0" - -#. Create the flat network on which you are going to launch the - instances:: - - neutron net-create --tenant-id $TENANT_ID sharednet1 --shared \ - --provider:network_type flat --provider:physical_network physnet1 - -#. Create the subnet on the newly created network:: - - neutron subnet-create sharednet1 $NETWORK_CIDR --name $SUBNET_NAME \ - --ip-version=4 --gateway=$GATEWAY_IP --allocation-pool \ - start=$START_IP,end=$END_IP --enable-dhcp Configuring Tenant Networks =========================== @@ -253,30 +73,10 @@ See :ref:`multitenancy` Configure the Bare Metal service for cleaning ============================================= -#. If you configure Bare Metal service to use :ref:`cleaning` (which is enabled by - default), you will need to set the ``cleaning_network_uuid`` configuration - option. Note the network UUID (the `id` field) of the network you created in - :ref:`NeutronFlatNetworking` or another network you created for cleaning:: +The `Configure the Bare Metal service for cleaning`_ section +has been moved to the Bare Metal service Install Guide. - neutron net-list - -#. Configure the cleaning network UUID via the ``cleaning_network_uuid`` - option in the Bare Metal service configuration file (/etc/ironic/ironic.conf). - In the following, replace NETWORK_UUID with the UUID you noted in the - previous step:: - - [neutron] - ... - - cleaning_network_uuid = NETWORK_UUID - -#. 
Restart the Bare Metal service's ironic-conductor:: - - Fedora/RHEL7/CentOS7: - sudo systemctl restart openstack-ironic-conductor - - Ubuntu: - sudo service ironic-conductor restart +.. _`Configure the Bare Metal service for cleaning`: http://docs.openstack.org/project-install-guide/baremetal/draft/configure-cleaning.html .. _ImageRequirement: diff --git a/install-guide/source/configure-cleaning.rst b/install-guide/source/configure-cleaning.rst new file mode 100644 index 0000000000..d77d44adc5 --- /dev/null +++ b/install-guide/source/configure-cleaning.rst @@ -0,0 +1,37 @@ +.. _configure-cleaning: + +Configure the Bare Metal service for cleaning +============================================= + +.. note:: If you configured the Bare Metal service to use `Node cleaning`_ + (which is enabled by default), you will need to set the + ``cleaning_network_uuid`` configuration option. + +.. _`Node cleaning`: http://docs.openstack.org/developer/ironic/deploy/cleaning.html#node-cleaning + +#. Note the network UUID (the `id` field) of the network you created in + :ref:`configure-networking` or another network you created for cleaning: + + .. code-block:: console + + $ neutron net-list + +#. Configure the cleaning network UUID via the ``cleaning_network_uuid`` + option in the Bare Metal service configuration file + (``/etc/ironic/ironic.conf``). In the following, replace ``NETWORK_UUID`` + with the UUID you noted in the previous step: + + .. code-block:: ini + + [neutron] + cleaning_network_uuid = NETWORK_UUID + +#. Restart the Bare Metal service's ironic-conductor: + + .. code-block:: console + + Fedora/RHEL7/CentOS7: + sudo systemctl restart openstack-ironic-conductor + + Ubuntu: + sudo service ironic-conductor restart diff --git a/install-guide/source/configure-integration.rst b/install-guide/source/configure-integration.rst index d9dfe0f245..ef9f9cbe98 100644 --- a/install-guide/source/configure-integration.rst +++ b/install-guide/source/configure-integration.rst @@ -5,3 +5,5 @@ Integration with other OpenStack services .. include:: include/configure-identity.rst .. include:: include/configure-nova-compute.rst + +.. include:: include/configure-neutron-networks.rst diff --git a/install-guide/source/configure-tenant-networks.rst b/install-guide/source/configure-tenant-networks.rst new file mode 100644 index 0000000000..ab583a7f5d --- /dev/null +++ b/install-guide/source/configure-tenant-networks.rst @@ -0,0 +1,8 @@ +.. _configure-tenant-networks: + +Configure tenant networks +========================= + +See `Multitenancy in Bare Metal service`_. + +.. _`Multitenancy in Bare Metal service`: http://docs.openstack.org/developer/ironic/deploy/multitenancy.html#multitenancy diff --git a/install-guide/source/include/configure-neutron-networks.rst b/install-guide/source/include/configure-neutron-networks.rst new file mode 100644 index 0000000000..861580d451 --- /dev/null +++ b/install-guide/source/include/configure-neutron-networks.rst @@ -0,0 +1,113 @@ +.. _configure-networking: + +Configure Networking to communicate with the bare metal server +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You need to configure Networking so that the bare metal server can communicate +with the Networking service for DHCP, PXE boot and other requirements. +This section covers configuring Networking for a single flat network for bare +metal provisioning. 
+ +You will also need to provide Bare Metal service with the MAC address(es) of +each node that it is provisioning; Bare Metal service in turn will pass this +information to Networking service for DHCP and PXE boot configuration. +An example of this is shown in the `Enrollment`_ section. + +.. _`Enrollment`: http://docs.openstack.org/developer/ironic/deploy/install-guide.html#enrollment + +#. Edit ``/etc/neutron/plugins/ml2/ml2_conf.ini`` and modify these: + + .. code-block:: ini + + [ml2] + type_drivers = flat + tenant_network_types = flat + mechanism_drivers = openvswitch + + [ml2_type_flat] + flat_networks = physnet1 + + [securitygroup] + firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + enable_security_group = True + + [ovs] + bridge_mappings = physnet1:br-eth2 + # Replace eth2 with the interface on the neutron node which you + # are using to connect to the bare metal server + +#. If neutron-openvswitch-agent runs with ``ovs_neutron_plugin.ini`` as the input + config-file, edit ``ovs_neutron_plugin.ini`` to configure the bridge mappings + by adding the [ovs] section described in the previous step, and restart the + neutron-openvswitch-agent. + +#. Add the integration bridge to Open vSwitch: + + .. code-block:: console + + $ ovs-vsctl add-br br-int + +#. Create the br-eth2 network bridge to handle communication between the + OpenStack services (and the Bare Metal services) and the bare metal nodes + using eth2. + Replace eth2 with the interface on the network node which you are using to + connect to the Bare Metal service: + + .. code-block:: console + + $ ovs-vsctl add-br br-eth2 + $ ovs-vsctl add-port br-eth2 eth2 + +#. Restart the Open vSwitch agent: + + .. code-block:: console + + # service neutron-plugin-openvswitch-agent restart + +#. On restarting the Networking service Open vSwitch agent, the veth pair + between the bridges br-int and br-eth2 is automatically created. + + Your Open vSwitch bridges should look something like this after + following the above steps: + + .. code-block:: console + + $ ovs-vsctl show + + Bridge br-int + fail_mode: secure + Port "int-br-eth2" + Interface "int-br-eth2" + type: patch + options: {peer="phy-br-eth2"} + Port br-int + Interface br-int + type: internal + Bridge "br-eth2" + Port "phy-br-eth2" + Interface "phy-br-eth2" + type: patch + options: {peer="int-br-eth2"} + Port "eth2" + Interface "eth2" + Port "br-eth2" + Interface "br-eth2" + type: internal + ovs_version: "2.3.0" + +#. Create the flat network on which you are going to launch the + instances: + + .. code-block:: console + + $ neutron net-create --tenant-id $TENANT_ID sharednet1 --shared \ + --provider:network_type flat --provider:physical_network physnet1 + +#. Create the subnet on the newly created network: + + .. code-block:: console + + $ neutron subnet-create sharednet1 $NETWORK_CIDR --name $SUBNET_NAME \ + --ip-version=4 --gateway=$GATEWAY_IP --allocation-pool \ + start=$START_IP,end=$END_IP --enable-dhcp + diff --git a/install-guide/source/include/configure-nova-compute.rst b/install-guide/source/include/configure-nova-compute.rst index 1f2219c154..6198bd0384 100644 --- a/install-guide/source/include/configure-nova-compute.rst +++ b/install-guide/source/include/configure-nova-compute.rst @@ -1,7 +1,112 @@ Configure Compute to use the Bare Metal service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Please read the `Configure Compute to use the Bare Metal service`_ section from -the legacy installation guide. 
+The Compute service needs to be configured to use the Bare Metal service's
+driver. The configuration file for the Compute service is typically located at
+``/etc/nova/nova.conf``.
 
-.. _`Configure Compute to use the Bare Metal service`: http://docs.openstack.org/developer/ironic/deploy/install-guide.html#configure-compute-to-use-the-bare-metal-service
+.. note::
+    This configuration file must be modified on the Compute service's
+    controller nodes and compute nodes.
+
+#. Change these configuration options in the ``DEFAULT`` section, as follows:
+
+   .. code-block:: ini
+
+      [DEFAULT]
+
+      # Driver to use for controlling virtualization. Options
+      # include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
+      # fake.FakeDriver, baremetal.BareMetalDriver,
+      # vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver (string
+      # value)
+      #compute_driver=
+      compute_driver=ironic.IronicDriver
+
+      # Firewall driver (defaults to hypervisor specific iptables
+      # driver) (string value)
+      #firewall_driver=
+      firewall_driver=nova.virt.firewall.NoopFirewallDriver
+
+      # The scheduler host manager class to use (string value)
+      #scheduler_host_manager=host_manager
+      scheduler_host_manager=ironic_host_manager
+
+      # Virtual ram to physical ram allocation ratio which affects
+      # all ram filters. This configuration specifies a global ratio
+      # for RamFilter. For AggregateRamFilter, it will fall back to
+      # this configuration value if no per-aggregate setting found.
+      # (floating point value)
+      #ram_allocation_ratio=1.5
+      ram_allocation_ratio=1.0
+
+      # Amount of memory in MB to reserve for the host (integer value)
+      #reserved_host_memory_mb=512
+      reserved_host_memory_mb=0
+
+      # Flag to decide whether to use baremetal_scheduler_default_filters or not.
+      # (boolean value)
+      #scheduler_use_baremetal_filters=False
+      scheduler_use_baremetal_filters=True
+
+      # Determines if the Scheduler tracks changes to instances to help with
+      # its filtering decisions (boolean value)
+      #scheduler_tracks_instance_changes=True
+      scheduler_tracks_instance_changes=False
+
+      # New instances will be scheduled on a host chosen randomly from a subset
+      # of the N best hosts, where N is the value set by this option. Valid
+      # values are 1 or greater. Any value less than one will be treated as 1.
+      # For ironic, this should be set to a number >= the number of ironic nodes
+      # to more evenly distribute instances across the nodes.
+      #scheduler_host_subset_size=1
+      scheduler_host_subset_size=9999999
+
+#. Change these configuration options in the ``ironic`` section.
+   Replace:
+
+   - ``IRONIC_PASSWORD`` with the password you chose for the ``ironic``
+     user in the Identity Service
+   - ``IRONIC_NODE`` with the hostname or IP address of the ironic-api node
+   - ``IDENTITY_IP`` with the IP of the Identity server
+
+   .. code-block:: ini
+
+      [ironic]
+
+      # Ironic keystone admin name
+      admin_username=ironic
+
+      # Ironic keystone admin password.
+      admin_password=IRONIC_PASSWORD
+
+      # keystone API endpoint
+      admin_url=http://IDENTITY_IP:35357/v2.0
+
+      # Ironic keystone tenant name.
+      admin_tenant_name=service
+
+      # URL for Ironic API endpoint.
+      api_endpoint=http://IRONIC_NODE:6385/v1
+
+#. On the Compute service's controller nodes, restart the ``nova-scheduler``
+   process:
+
+   .. code-block:: console
+
+      Fedora/RHEL7/CentOS7:
+        sudo systemctl restart openstack-nova-scheduler
+
+      Ubuntu:
+        sudo service nova-scheduler restart
+
+#. On the Compute service's compute nodes, restart the ``nova-compute``
+   process:
+
+   .. 
code-block:: console + + Fedora/RHEL7/CentOS7: + sudo systemctl restart openstack-nova-compute + + Ubuntu: + sudo service nova-compute restart diff --git a/install-guide/source/index.rst b/install-guide/source/index.rst index b917975d58..3b01bc467c 100644 --- a/install-guide/source/index.rst +++ b/install-guide/source/index.rst @@ -8,6 +8,8 @@ Bare Metal service get_started.rst install.rst configure-integration.rst + configure-cleaning.rst + configure-tenant-networks.rst verify.rst next-steps.rst