From 9876fe99afc023b341130080c6114572573a5d83 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Wed, 16 Oct 2013 08:56:40 +0200 Subject: [PATCH] Use traditional init script commands for openSUSE These changes allow sharing the code of Fedora with openSUSE and also SLES. Also, tag some openSUSE changes with sles as well, more to come. Change-Id: I6b7fb9fb271e4ddbf267a09b2f10c4c968ef7092 --- doc/install-guide/ap_configuration_files.xml | 2 +- doc/install-guide/ch_basics.xml | 33 ++++++++--------- ...ject-storage-install-config-proxy-node.xml | 12 +++---- ...t-storage-install-config-storage-nodes.xml | 8 ++--- .../section_object-storage-install.xml | 4 +-- .../section_ceilometer-cinder.xml | 6 ++-- .../section_ceilometer-glance.xml | 6 ++-- .../section_ceilometer-install.xml | 22 +++++------- doc/install-guide/section_ceilometer-nova.xml | 10 +++--- .../section_ceilometer-swift.xml | 5 ++- .../section_compute-cloud-controller.xml | 2 +- .../section_dashboard-install.xml | 26 +++++++------- doc/install-guide/section_glance-install.xml | 17 ++++----- doc/install-guide/section_heat-install.xml | 13 +++---- .../section_keystone-install.xml | 20 +++++------ doc/install-guide/section_nova-compute.xml | 21 ++++++----- doc/install-guide/section_nova-controller.xml | 35 +++++++------------ doc/install-guide/section_nova-network.xml | 11 +++--- 18 files changed, 107 insertions(+), 146 deletions(-) diff --git a/doc/install-guide/ap_configuration_files.xml b/doc/install-guide/ap_configuration_files.xml index fa03dc6650..065bfb0750 100644 --- a/doc/install-guide/ap_configuration_files.xml +++ b/doc/install-guide/ap_configuration_files.xml @@ -95,7 +95,7 @@ - +
diff --git a/doc/install-guide/ch_basics.xml b/doc/install-guide/ch_basics.xml index 2e010824a2..bf42cc0a7a 100644 --- a/doc/install-guide/ch_basics.xml +++ b/doc/install-guide/ch_basics.xml @@ -111,7 +111,7 @@ DEFROUTE=yes ONBOOT=yes - + To set up the two network interfaces, start the YaST network module, as follows: # yast2 network @@ -161,8 +161,7 @@ iface eth1 inet static Once you've configured the network, restart the daemon for changes to take effect: # service networking restart - # service network restart - # systemctl restart network.service + # service network restart Set the host name of each machine. Name the controller node controller and the first compute node @@ -171,7 +170,7 @@ iface eth1 inet static Use the hostname command to set the host name: # hostname controller - Use yast network to set the + Use yast network to set the host name with YaST. To have the host name change persist when the system reboots, you need to specify it in the proper @@ -216,16 +215,14 @@ iface eth1 inet static # apt-get install ntp # yum install ntp - # zypper install ntp + # zypper install ntp - Set up the NTP server on your + Set up the NTP server on your controller node so that it receives data by modifying the ntp.conf file and restarting the service. - # service ntpd start + # service ntpd start # chkconfig ntpd on - # systemctl start ntp.service -# systemctl enable ntp.service Set up all additional nodes to synchronize their time from the controller node. The simplest way to do this is to add a daily cron job. @@ -286,12 +283,10 @@ hwclock -w MariaDB database server and set it to start automatically when the system boots. - # service mysqld start + # service mysqld start # chkconfig mysqld on - # systemctl enable mysql.service -# systemctl start mysql.service - Finally, you should set a root password for your + Finally, you should set a root password for your MySQL MariaDB database. The OpenStack programs that set up databases and tables will @@ -318,13 +313,13 @@ hwclock -w
Messaging Server On the controller node, install the messaging queue server. Typically this is RabbitMQ, but Qpid and ZeroMQ (0MQ) are also available. # apt-get install rabbitmq-server - # zypper install rabbitmq-server + # zypper install rabbitmq-server # yum install qpid-cpp-server memcached @@ -345,9 +340,9 @@ hwclock -w # service qpidd start # chkconfig qpidd on - Start the messaging service and set it to start automatically when the system boots: - # systemctl start rabbitmq-server.service -# systemctl enable rabbitmq-server.service + Start the messaging service and set it to start automatically when the system boots: + # service rabbitmq-server start +# chkconfig rabbitmq-server on
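If you want to confirm that the broker came up, a quick optional check (not part of the upstream steps; it assumes the rabbitmq-server init script installed above, so substitute qpidd on Qpid-based systems) is:
# service rabbitmq-server status
# chkconfig --list rabbitmq-server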
diff --git a/doc/install-guide/object-storage/section_object-storage-install-config-proxy-node.xml b/doc/install-guide/object-storage/section_object-storage-install-config-proxy-node.xml index 1d7d6e464f..1f496e17ee 100644 --- a/doc/install-guide/object-storage/section_object-storage-install-config-proxy-node.xml +++ b/doc/install-guide/object-storage/section_object-storage-install-config-proxy-node.xml @@ -13,11 +13,11 @@ Swift processes run under a separate user and group, set by configuration options, and referred to as swift:swiftopenstack-swift:openstack-swift. The + os="opensuse;sles">openstack-swift:openstack-swift. The default user is swift, which may not exist on your system.openstack-swift. + os="opensuse;sles">openstack-swift. @@ -25,7 +25,7 @@ Install swift-proxy service: # apt-get install swift-proxy memcached python-keystoneclient python-swiftclient python-webob # yum install openstack-swift-proxy memcached openstack-utils python-swiftclient python-keystone-auth-token - # zypper install openstack-swift-proxy memcached openstack-utils python-swiftclient python-keystoneclient + # zypper install openstack-swift-proxy memcached openstack-utils python-swiftclient python-keystoneclient @@ -62,7 +62,7 @@ to Create /etc/swift/proxy-server.conf: - + If you run multiple memcache servers, put the multiple @@ -77,7 +77,7 @@ to >signing_dir and set its permissions accordingly.# mkdir -p /home/swift/keystone-signing # chown -R swift:swift /home/swift/keystone-signing - # mkdir -p /home/swift/keystone-signing + # mkdir -p /home/swift/keystone-signing # chown -R openstack-swift:openstack-swift /home/swift/keystone-signing @@ -158,7 +158,7 @@ to user: # chown -R swift:swift /etc/swift - # chown -R openstack-swift:openstack-swift /etc/swift + # chown -R openstack-swift:openstack-swift /etc/swift diff --git a/doc/install-guide/object-storage/section_object-storage-install-config-storage-nodes.xml b/doc/install-guide/object-storage/section_object-storage-install-config-storage-nodes.xml index b084737542..3f8bc4cee7 100644 --- a/doc/install-guide/object-storage/section_object-storage-install-config-storage-nodes.xml +++ b/doc/install-guide/object-storage/section_object-storage-install-config-storage-nodes.xml @@ -19,7 +19,7 @@ # apt-get install swift-account swift-container swift-object xfsprogs # yum install openstack-swift-account openstack-swift-container openstack-swift-object xfsprogs - # zypper install openstack-swift-account openstack-swift-container openstack-swift-object xfsprogs + # zypper install openstack-swift-account openstack-swift-container openstack-swift-object xfsprogs @@ -36,7 +36,7 @@ # mkdir -p /srv/node/sdb1 # mount /srv/node/sdb1 # chown -R swift:swift /srv/node - # fdisk /dev/sdb + # fdisk /dev/sdb # mkfs.xfs /dev/sdb1 # echo "/dev/sdb1 /srv/node/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab # mkdir -p /srv/node/sdb1 @@ -70,7 +70,7 @@ max connections = 2 path = /srv/node/ read only = false lock file = /var/lock/object.lock - uid = openstack-swift + uid = openstack-swift gid = openstack-swift log file = /var/log/rsyncd.log pid file = /var/run/rsyncd.pid @@ -125,7 +125,7 @@ address = <STORAGE_REPLICATION_NET_IP> Create the swift recon cache directory and set its permissions.# mkdir -p /var/swift/recon # chown -R swift:swift /var/swift/recon - # mkdir -p /var/swift/recon + # mkdir -p /var/swift/recon # chown -R openstack-swift:openstack-swift /var/swift/recon diff --git a/doc/install-guide/object-storage/section_object-storage-install.xml 
b/doc/install-guide/object-storage/section_object-storage-install.xml index 5fd302d6d8..cd366f71ff 100644 --- a/doc/install-guide/object-storage/section_object-storage-install.xml +++ b/doc/install-guide/object-storage/section_object-storage-install.xml @@ -47,14 +47,14 @@ Install core Swift files and OpenSSH. # apt-get install swift openssh-server rsync memcached python-netifaces python-xattr python-memcache # yum install openstack-swift openstack-swift-proxy openstack-swift-account openstack-swift-container openstack-swift-object memcached - # zypper install openstack-swift openstack-swift-proxy openstack-swift-account openstack-swift-container openstack-swift-object memcached + # zypper install openstack-swift openstack-swift-proxy openstack-swift-account openstack-swift-container openstack-swift-object memcached Create and populate configuration directories on all nodes: # mkdir -p /etc/swift # chown -R swift:swift /etc/swift/ - # mkdir -p /etc/swift + # mkdir -p /etc/swift # chown -R openstack-swift:openstack-swift /etc/swift/ diff --git a/doc/install-guide/section_ceilometer-cinder.xml b/doc/install-guide/section_ceilometer-cinder.xml index 6b8a5b6b8d..d03bc3376f 100644 --- a/doc/install-guide/section_ceilometer-cinder.xml +++ b/doc/install-guide/section_ceilometer-cinder.xml @@ -20,12 +20,10 @@ # service cinder-volume restart # service cinder-api restart - We now restart the Block Storage service with its new + We now restart the Block Storage service with its new settings. - # service openstack-cinder-api restart + # service openstack-cinder-api restart # service openstack-cinder-volume restart - # systemctl restart openstack-cinder-api.service -# systemctl restart openstack-cinder-volume.service diff --git a/doc/install-guide/section_ceilometer-glance.xml b/doc/install-guide/section_ceilometer-glance.xml index c828568ae7..63621072a0 100644 --- a/doc/install-guide/section_ceilometer-glance.xml +++ b/doc/install-guide/section_ceilometer-glance.xml @@ -21,13 +21,11 @@ # service glance-api restart - Restart the Restart the glance-api and glance-registry services. - # service openstack-glance-api restart + # service openstack-glance-api restart # service openstack-glance-registry restart - # systemctl restart openstack-glance-api.service -# systemctl restart openstack-glance-registry.service
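As an optional sanity check after restarting, the init scripts should report the services as running; the service names below assume the openSUSE/SLES packages used in this guide.
# service openstack-glance-api status
# service openstack-glance-registry status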
diff --git a/doc/install-guide/section_ceilometer-install.xml b/doc/install-guide/section_ceilometer-install.xml index 27e98b7cce..eff73bbc26 100644 --- a/doc/install-guide/section_ceilometer-install.xml +++ b/doc/install-guide/section_ceilometer-install.xml @@ -14,7 +14,7 @@ Install the Metering Service on the controller node: # apt-get install ceilometer-api ceilometer-collector ceilometer-agent-central python-ceilometerclient # yum install openstack-ceilometer-api openstack-ceilometer-collector openstack-ceilometer-agent-central python-ceilometerclient FIXME - # zypper install openstack-ceilometer-api openstack-ceilometer-collector openstack-ceilometer-agent-central python-ceilometerclient + # zypper install openstack-ceilometer-api openstack-ceilometer-collector openstack-ceilometer-agent-central python-ceilometerclient @@ -22,7 +22,7 @@ Specify the location of the database in the configuration file. In this guide, we use a MongoDB database on the controller node.
# FIXME - # zypper install mongodb + # zypper install mongodb # apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 @@ -35,7 +35,7 @@ # apt-get update # apt-get install mongodb-10gen
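Before starting the server, you can optionally confirm which MongoDB build was installed; this quick check assumes the mongod binary is on the default PATH provided by the packages above.
# mongod --version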
- + Start the MongoDB server and configure it to start when the system boots: # systemctl start mongodb.service # systemctl enable mongodb.service @@ -53,7 +53,7 @@ Tell the Metering Service to use the created database. - # openstack-config --set /etc/ceilometer/ceilometer.conf \ + # openstack-config --set /etc/ceilometer/ceilometer.conf \ database connection mongodb://ceilometer:CEILOMETER_DBPASS@controller:27017/ceilometer Edit /etc/ceilometer/ceilometer.conf and change the [database] section. @@ -74,7 +74,7 @@ connection = mongodb://ceilometer:CEILOMETER_DBPASS@< shared secret between the Metering Service nodes. Use openssl to generate a random token, then store it in the configuration file.
- # ADMIN_TOKEN=$(openssl rand -hex 10) + # ADMIN_TOKEN=$(openssl rand -hex 10) # echo $ADMIN_TOKEN # openstack-config --set /etc/ceilometer/ceilometer.conf publisher_rpc metering_secret $ADMIN_TOKEN # openssl rand -hex 10 @@ -102,7 +102,7 @@ metering_secret = ADMIN_TOKEN Add the credentials to the Metering Service's configuration files. - # openstack-config --set /etc/ceilometer/ceilometer.conf keystone_authtoken auth_host controller + # openstack-config --set /etc/ceilometer/ceilometer.conf keystone_authtoken auth_host controller # openstack-config --set /etc/ceilometer/ceilometer.conf keystone_authtoken admin_user ceilometer # openstack-config --set /etc/ceilometer/ceilometer.conf keystone_authtoken admin_tenant_name service # openstack-config --set /etc/ceilometer/ceilometer.conf keystone_authtoken auth_protocol http @@ -144,24 +144,18 @@ admin_password = CEILOMETER_PASS # service ceilometer-collector restart - Start the Start the ceilometer-api, ceilometer-agent-central and ceilometer-collector services and configure them to start when the system boots. - # service openstack-ceilometer-api start + # service openstack-ceilometer-api start # service openstack-ceilometer-agent-central start # service openstack-ceilometer-collector start # chkconfig openstack-ceilometer-api on # chkconfig openstack-ceilometer-agent-central on # chkconfig openstack-ceilometer-collector on - # systemctl start openstack-ceilometer-api.service -# systemctl start openstack-ceilometer-agent-central.service -# systemctl start openstack-ceilometer-collector.service -# systemctl enable openstack-ceilometer-api.service -# systemctl enable openstack-ceilometer-agent-central.service -# systemctl enable openstack-ceilometer-collector.service diff --git a/doc/install-guide/section_ceilometer-nova.xml b/doc/install-guide/section_ceilometer-nova.xml index 185166ff27..d095c6dfcc 100644 --- a/doc/install-guide/section_ceilometer-nova.xml +++ b/doc/install-guide/section_ceilometer-nova.xml @@ -13,12 +13,12 @@ Install the Metering service on the compute node: # apt-get install ceilometer-agent-compute # yum install openstack-ceilometer-agent-compute FIXME - # zypper install openstack-ceilometer-agent-compute + # zypper install openstack-ceilometer-agent-compute Set the following options in /etc/nova/nova.conf. - + # openstack-config --set /etc/nova/nova.conf DEFAULT instance_usage_audit True # openstack-config --set /etc/nova/nova.conf DEFAULT instance_usage_audit_period hour # openstack-config --set /etc/nova/nova.conf DEFAULT notify_on_state_change vm_and_task_state @@ -61,12 +61,10 @@ metering_secret = ADMIN_TOKEN - Start the ceilometer-agent-compute service and configure + Start the ceilometer-agent-compute service and configure it to start when the system boots. - # service openstack-ceilometer-agent-compute start + # service openstack-ceilometer-agent-compute start # chkconfig openstack-ceilometer-agent-compute on - # systemctl start openstack-ceilometer-agent-compute.service -# systemctl enable openstack-ceilometer-agent-compute.service diff --git a/doc/install-guide/section_ceilometer-swift.xml b/doc/install-guide/section_ceilometer-swift.xml index 22d1994112..2ec65403fa 100644 --- a/doc/install-guide/section_ceilometer-swift.xml +++ b/doc/install-guide/section_ceilometer-swift.xml @@ -39,9 +39,8 @@ use = egg:ceilometer#swift Next, We now restart the service with its new settings. # service swift-proxy-server restart - We now restart the service with its new settings.
- # service openstack-swift-proxy-server restart - # systemctl restart openstack-swift-proxy-server.service + We now restart the service with its new settings. + # service openstack-swift-proxy-server restart
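If you want to double-check that the ceilometer middleware was added before restarting the proxy, an optional step is to search for it in the proxy configuration; the path below assumes the default /etc/swift/proxy-server.conf used earlier in this guide.
# grep -n ceilometer /etc/swift/proxy-server.conf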
diff --git a/doc/install-guide/section_compute-cloud-controller.xml b/doc/install-guide/section_compute-cloud-controller.xml index 8a6e887f86..dae2d666ff 100644 --- a/doc/install-guide/section_compute-cloud-controller.xml +++ b/doc/install-guide/section_compute-cloud-controller.xml @@ -21,5 +21,5 @@ $ sudo apt-get install ubuntu-cloud-keyring $ sudo yum install openstack-nova - $ sudo zypper install openstack-nova + $ sudo zypper install openstack-nova diff --git a/doc/install-guide/section_dashboard-install.xml b/doc/install-guide/section_dashboard-install.xml index 70baf5bb28..c41fcb0c70 100644 --- a/doc/install-guide/section_dashboard-install.xml +++ b/doc/install-guide/section_dashboard-install.xml @@ -29,7 +29,7 @@ the Identity Service as root: # apt-get install memcached libapache2-mod-wsgi openstack-dashboard # yum install memcached python-memcached mod_wsgi openstack-dashboard - # zypper install memcached python-python-memcached apache2-mod_wsgi openstack-dashboard + # zypper install memcached python-python-memcached apache2-mod_wsgi openstack-dashboard Remove the @@ -47,11 +47,11 @@ >/etc/openstack-dashboard/local_settings.py/etc/openstack-dashboard/local_settings/usr/share/openstack-dashboard/openstack_dashboard/local/local_settings.py to match the ones set in /etc/memcached.conf/etc/sysconfig/memcached.conf. Open /etc/openstack-dashboard/local_settings.py @@ -72,7 +72,7 @@ The address and port must match the ones set in /etc/memcached.conf/etc/sysconfig/memcached. If you change the memcached settings, you must restart the Apache web server for @@ -92,7 +92,7 @@ >/etc/openstack-dashboard/local_settings/etc/openstack-dashboard/local_settings.py/usr/share/openstack-dashboard/openstack_dashboard/local/local_settings.py file. Change the following parameter: @@ -111,20 +111,20 @@ >/etc/openstack-dashboard/local_settings/etc/openstack-dashboard/local_settings.py/usr/share/openstack-dashboard/openstack_dashboard/local/local_settings.py and change OPENSTACK_HOST to the hostname of your Identity Service. OPENSTACK_HOST = "controller" - + Set up the Apache configuration: # cp /etc/apache2/conf.d/openstack-dashboard.conf.sample \ /etc/apache2/conf.d/openstack-dashboard.conf # a2enmod rewrite;a2enmod ssl;a2enmod wsgi - + By default, the openstack-dashboard package enables a database as session store. Before you continue, either @@ -134,17 +134,19 @@ linkend="dashboard-session-database"/>. - + Start the Apache web server and memcached: - # systemctl start apache2.service memcached.service -# systemctl enable apache2.service memcached.service + # service apache2 start +# service memcached start +# chkconfig apache2 on +# chkconfig memcached on You can now access the dashboard at http://controller/horizon - http://controller. + http://controller. Log in with credentials for any user that you created with the OpenStack Identity Service. diff --git a/doc/install-guide/section_glance-install.xml b/doc/install-guide/section_glance-install.xml index aa24c4425e..5773185f6f 100644 --- a/doc/install-guide/section_glance-install.xml +++ b/doc/install-guide/section_glance-install.xml @@ -22,9 +22,9 @@ Install the Image Service on the controller node. # apt-get install glance # yum install openstack-glance - # zypper install openstack-glance python-glanceclient + # zypper install openstack-glance python-glanceclient - The Image Service stores information about images in a database.
This guide uses the MySQL database used by other OpenStack services. Use the openstack-db command to create the database and tables for the Image Service, as well as a database user @@ -54,7 +54,7 @@ IDENTIFIED BY 'GLANCE_DBPASS'; services: glance-api and glance-registry. They each have separate configuration files, so you must configure both files throughout this section. - # openstack-config --set /etc/glance/glance-api.conf \ + # openstack-config --set /etc/glance/glance-api.conf \ DEFAULT sql_connection mysql://glance:GLANCE_DBPASS@controller/glance # openstack-config --set /etc/glance/glance-registry.conf \ DEFAULT sql_connection mysql://glance:GLANCE_DBPASS@controller/glance @@ -87,7 +87,7 @@ sql_connection = mysql://glance:GLANCE_DBPASS@localhost/glance # keystone user-role-add --user=glance --tenant=service --role=admin Add the credentials to the Image Service's configuration files. - # openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_host controller + # openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_host controller # openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_user glance # openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_tenant_name service # openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_password GLANCE_DBPASS @@ -146,16 +146,13 @@ admin_password=GLANCE_DBPASS --> - Start the glance-api and + Start the glance-api and glance-registry services and configure them to start when the system boots. - # service openstack-glance-api start + # service openstack-glance-api start # service openstack-glance-registry start # chkconfig openstack-glance-api on # chkconfig openstack-glance-registry on - # systemctl start openstack-glance-api.service -# systemctl start openstack-glance-registry.service -# systemctl enable openstack-glance-api.service -# systemctl enable openstack-glance-registry.service + diff --git a/doc/install-guide/section_heat-install.xml b/doc/install-guide/section_heat-install.xml index 0cd80d479e..a4a34987f3 100644 --- a/doc/install-guide/section_heat-install.xml +++ b/doc/install-guide/section_heat-install.xml @@ -9,7 +9,7 @@ Install the Orchestration Service on the controller node: # apt-get install heat-api heat-api-cfn # yum install openstack-heat-api FIXME - # zypper install openstack-heat-api openstack-heat-api-cfn + # zypper install openstack-heat-api openstack-heat-api-cfn @@ -19,7 +19,7 @@ with the username heat. Replace HEAT_DBPASS with a suitable password for the database user. - # openstack-config --set /etc/heat/heat.conf \ + # openstack-config --set /etc/heat/heat.conf \ database connection mysql://heat:HEAT_DBPASS@controller/heat Edit /etc/heat/heat.conf and change the [DEFAULT] section. @@ -103,18 +103,13 @@ admin_password = HEAT_PASS - Start the heat-api and + Start the heat-api and heat-api-cfn services and configure them to start when the system boots. 
- # service openstack-heat-api start + # service openstack-heat-api start # service openstack-heat-api-cfn start # chkconfig openstack-heat-api on # chkconfig openstack-heat-api-cfn on - # systemctl start openstack-heat-api.service -# systemctl start openstack-heat-api-cfn.service -# systemctl enable openstack-heat-api.service -# systemctl enable openstack-heat-api-cfn.service - diff --git a/doc/install-guide/section_keystone-install.xml b/doc/install-guide/section_keystone-install.xml index 6168e96b46..c4fd594c97 100644 --- a/doc/install-guide/section_keystone-install.xml +++ b/doc/install-guide/section_keystone-install.xml @@ -9,7 +9,7 @@ Install the Identity Service on the controller node: # apt-get install keystone python-keystone python-keystoneclient # yum install openstack-keystone python-keystoneclient - # zypper install openstack-keystone python-keystoneclient openstack-utils + # zypper install openstack-keystone python-keystoneclient openstack-utils @@ -19,7 +19,7 @@ with the username keystone. Replace KEYSTONE_DBPASS with a suitable password for the database user. - # openstack-config --set /etc/keystone/keystone.conf \ + # openstack-config --set /etc/keystone/keystone.conf \ sql connection mysql://keystone:KEYSTONE_DBPASS@controller/keystone Edit /etc/keystone/keystone.conf and change the [sql] section. @@ -31,7 +31,7 @@ connection = mysql://keystone:KEYSTONE_DBPASS@controller/keystone - + Use the openstack-db command to create the database and tables, as well as a database user called keystone to connect to the database. Replace @@ -61,7 +61,7 @@ IDENTIFIED BY 'KEYSTONE_DBPASS'; shared secret between the Identity Service and other OpenStack services. Use openssl to generate a random token, then store it in the configuration file. - # ADMIN_TOKEN=$(openssl rand -hex 10) + # ADMIN_TOKEN=$(openssl rand -hex 10) # echo $ADMIN_TOKEN # openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token $ADMIN_TOKEN # openssl rand -hex 10 @@ -77,16 +77,16 @@ admin_token = ADMIN_TOKEN - + By default Keystone will use PKI tokens. Create the signing keys and certificates. # keystone-manage pki_setup --keystone-user keystone --keystone-group keystone # chown -R keystone:keystone /etc/keystone/* /var/log/keystone/keystone.log - # keystone-manage pki_setup --keystone-user openstack-keystone --keystone-group openstack-keystone + # keystone-manage pki_setup --keystone-user openstack-keystone --keystone-group openstack-keystone # chown -R openstack-keystone:openstack-keystone /etc/keystone/* /var/log/keystone/keystone.log - + Setup the /etc/keystone/default_catalog.templates file: # KEYSTONE_CATALOG=/etc/keystone/default_catalog.templates @@ -98,13 +98,11 @@ admin_token = ADMIN_TOKEN # service keystone restart - + Start the Identity Service and enable it so it start when the system boots. - # service openstack-keystone start + # service openstack-keystone start # chkconfig openstack-keystone on - # systemctl start openstack-keystone.service -# systemctl enable openstack-keystone.service diff --git a/doc/install-guide/section_nova-compute.xml b/doc/install-guide/section_nova-compute.xml index 23f404d42a..2c4373c9e0 100644 --- a/doc/install-guide/section_nova-compute.xml +++ b/doc/install-guide/section_nova-compute.xml @@ -48,18 +48,18 @@ packages for the compute service. 
# apt-get install nova-compute-kvm python-novaclient python-guestfs # yum install openstack-nova-compute - # zypper install openstack-nova-compute kvm openstack-utils + # zypper install openstack-nova-compute kvm openstack-utils - Due to a bug + Due to a bug that Canonical "Won't Fix", guestfs is broken out of the box. Run the following command to fix it: - # chmod 0644 /boot/vmlinuz* + # chmod 0644 /boot/vmlinuz* Either copy the file /etc/nova/nova.conf from the controller node, or run the same configuration commands. - # openstack-config --set /etc/nova/nova.conf \ + # openstack-config --set /etc/nova/nova.conf \ database connection mysql://nova:NOVA_DBPASS@controller/nova # openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone # openstack-config --set /etc/nova/nova.conf DEFAULT auth_host controller @@ -105,7 +105,7 @@ rabbit_host = controller vncserver_listen, and vncserver_proxyclient_address to the IP address of the compute node on the internal network. - # openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.0.11 + # openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.0.11 # openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_listen 192.168.0.11 # openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_proxyclient_address 192.168.0.11 Edit /etc/nova/nova.conf and add to the [DEFAULT] section. @@ -117,7 +117,7 @@ vncserver_proxyclient_address=192.168.0.11 Specify the host running the Image Service. Edit /etc/nova/nova.conf and add to the [DEFAULT] section. - # openstack-config --set /etc/nova/nova.conf DEFAULT glance_host controller + # openstack-config --set /etc/nova/nova.conf DEFAULT glance_host controller [DEFAULT] ... glance_host=controller @@ -133,16 +133,15 @@ admin_user=nova admin_tenant_name=service admin_password=NOVA_DBPASS - Ensure that api_paste_config=/etc/nova/api-paste.ini is set in + Ensure that api_paste_config=/etc/nova/api-paste.ini is set in /etc/nova/nova.conf. - Start the Compute service and configure it to start when the system boots. + Start the Compute service and configure it to start when the system boots. Restart the Compute service. # service nova-compute restart - # service openstack-nova-compute start + # service openstack-nova-compute start # chkconfig openstack-nova-compute on - # systemctl start openstack-nova-compute.service -# systemctl enable openstack-nova-compute.service + diff --git a/doc/install-guide/section_nova-controller.xml b/doc/install-guide/section_nova-controller.xml index 9f536789ee..bc118b5a27 100644 --- a/doc/install-guide/section_nova-controller.xml +++ b/doc/install-guide/section_nova-controller.xml @@ -20,13 +20,14 @@ # yum install openstack-nova python-novaclient - Install the following Nova packages. These packages provide + Install the following Nova packages. These packages provide the OpenStack Compute services that will be run on the controller node in this guide. # apt-get install nova-novncproxy novnc nova-api nova-ajax-console-proxy nova-cert \ nova-conductor nova-consoleauth nova-doc nova-scheduler python-novaclient - # zypper install openstack-nova-api openstack-nova-scheduler \ + # zypper install openstack-nova-api openstack-nova-scheduler \ openstack-nova-cert openstack-nova-conductor openstack-nova-console \ openstack-nova-consoleauth openstack-nova-doc \ openstack-nova-novncproxy python-novaclient @@ -35,13 +36,13 @@ The Compute Service stores information in a database.
This guide uses the MySQL database used by other OpenStack services. - Use the + Use the openstack-db command to create the database and tables for the Compute Service, as well as a database user called nova to connect to the database. Replace NOVA_DBPASS with a password of your choosing. - # openstack-db --init --service nova --password NOVA_DBPASS + # openstack-db --init --service nova --password NOVA_DBPASS Edit /etc/nova/nova.conf and add the [database] section. ... @@ -65,7 +66,7 @@ IDENTIFIED BY 'NOVA_DBPASS'; # nova-manage db sync - + Tell the Compute Service to use the created database. # openstack-config --set /etc/nova/nova.conf \ @@ -78,7 +79,7 @@ IDENTIFIED BY 'NOVA_DBPASS'; vncserver_proxyclient_address to the internal IP address of the controller node. - # openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.0.10 + # openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.0.10 # openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_listen 192.168.0.10 # openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_proxyclient_address 192.168.0.10 Edit /etc/nova/nova.conf and add to the [DEFAULT] section. @@ -105,7 +106,7 @@ vncserver_proxyclient_address=192.168.0.10 For the Compute Service to use these credentials, you must alter the nova.conf configuration file. - # openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone + # openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone # openstack-config --set /etc/nova/nova.conf DEFAULT auth_host controller # openstack-config --set /etc/nova/nova.conf DEFAULT admin_user nova # openstack-config --set /etc/nova/nova.conf DEFAULT admin_tenant_name service @@ -135,7 +136,7 @@ admin_tenant_name=service admin_user=nova admin_password=NOVA_DBPASS - Ensure that api_paste_config=/etc/nova/api-paste.ini + Ensure that api_paste_config=/etc/nova/api-paste.ini is set in /etc/nova/nova.conf. @@ -174,7 +175,7 @@ admin_password=NOVA_DBPASS rpc_backend = nova.rpc.impl_kombu rabbit_host = controller - + Configure the Compute Service to use the RabbitMQ message broker by setting the following configuration keys. # openstack-config --set /etc/nova/nova.conf \ @@ -182,7 +183,7 @@ rabbit_host = controller # openstack-config --set /etc/nova/nova.conf DEFAULT rabbit_host controller - Finally, start the various Nova services and configure them + Finally, start the various Nova services and configure them to start when the system boots. Finally, restart the various Nova services. 
# service nova-api restart @@ -191,7 +192,7 @@ rabbit_host = controller # service nova-scheduler restart # service nova-conductor restart # service nova-novncproxy restart - # service openstack-nova-api start + # service openstack-nova-api start # service openstack-nova-cert start # service openstack-nova-consoleauth start # service openstack-nova-scheduler start @@ -203,18 +204,6 @@ rabbit_host = controller # chkconfig openstack-nova-scheduler on # chkconfig openstack-nova-conductor on # chkconfig openstack-nova-novncproxy on - # systemctl start openstack-nova-api.service -# systemctl start openstack-nova-cert.service -# systemctl start openstack-nova-consoleauth.service -# systemctl start openstack-nova-scheduler.service -# systemctl start openstack-nova-conductor.service -# systemctl start openstack-nova-novncproxy.service -# systemctl enable openstack-nova-api.service -# systemctl enable openstack-nova-cert.service -# systemctl enable openstack-nova-consoleauth.service -# systemctl enable openstack-nova-scheduler.service -# systemctl enable openstack-nova-conductor.service -# systemctl enable openstack-nova-novncproxy.service To verify that everything is configured correctly, use the diff --git a/doc/install-guide/section_nova-network.xml b/doc/install-guide/section_nova-network.xml index 0b04cf9a2e..d7bcd5758c 100644 --- a/doc/install-guide/section_nova-network.xml +++ b/doc/install-guide/section_nova-network.xml @@ -17,12 +17,12 @@ install the appropriate packages for compute networking. # apt-get install nova-network # yum install openstack-nova-network - # zypper install openstack-nova-network + # zypper install openstack-nova-network First, set the configuration options needed in nova.conf for the chosen networking mode. - + # openstack-config --set /etc/nova/nova.conf DEFAULT network_manager nova.network.manager.FlatDHCPManager # openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.libvirt.firewall.IptablesFirewallDriver # openstack-config --set /etc/nova/nova.conf DEFAULT network_size 254 @@ -60,15 +60,14 @@ public_interface = br100 # service nova-network restart - + # service openstack-nova-network restart - -# systemctl restart openstack-nova-network.service - You must run the command that creates the network that the virtual machines use. + You must run the command that creates the network that the virtual machines use. You only need to run this command once, from a place where your admin user credentials are loaded. source keystonerc + # nova network-create vmnet --fixed-range-v4=10.0.0.0/24 --bridge-interface=br100 --multi-host=T
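As an optional final check, with the same admin credentials sourced above you can list the network you just created and the registered Compute services; the exact output varies by deployment.
# nova network-list
# nova-manage service list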