From 2de4495018298c9e4a1a823e5bc31d201b731dc8 Mon Sep 17 00:00:00 2001 From: annegentle Date: Tue, 4 Oct 2011 14:21:51 -0500 Subject: [PATCH] Revises RHEL installation and scripted installation in Compute Admin manual Change-Id: Ie05e8b9f0699e8c2af46b3bccf77a098c99086be --- .../computeinstall.xml | 211 +-- doc/target/docbkx/images/cloud/cover.svg | 2 +- .../os-compute-adminguide.fo | 1332 +++++++------- .../os-identity-starter-guide-trunk.pdf | Bin 86257 -> 0 bytes .../os-identity-starter-guide.fo | 150 -- .../os-image-adminguide.fo | 366 ++-- .../os-objectstorage-adminguide.fo | 389 ++-- .../admin/content/about-the-dashboard.html | 10 +- .../allocating-associating-ip-addresses.html | 10 +- .../admin/content/associating-public-ip.html | 12 +- ...ild-and-configure-openstack-dashboard.html | 37 +- .../content/certificates-and-revocation.html | 10 +- .../ch_configuring-openstack-compute.html | 14 +- .../ch_getting-started-with-openstack.html | 10 +- .../admin/content/ch_hypervisors.html | 14 +- .../ch_installing-openstack-compute.html | 10 +- .../ch_introduction-to-openstack-compute.html | 10 +- .../admin/content/ch_networking.html | 10 +- ...stack-compute-automated-installations.html | 10 +- .../ch_openstack-compute-tutorials.html | 10 +- .../content/ch_openstack-interfaces.html | 10 +- .../ch_support-and-troubleshooting.html | 10 +- ...-administration-for-openstack-compute.html | 16 +- ...loud-provider-conceptual-architecture.html | 12 +- .../content/cloudpipe-per-project-vpns.html | 12 +- ...rrors-and-fixes-for-openstack-compute.html | 10 +- .../admin/content/community-support.html | 18 +- .../content/components-of-openstack.html | 10 +- .../content/compute-system-requirements.html | 22 +- ...figuring-authentication-authorization.html | 84 +- .../configuring-compute-messaging.html | 18 +- ...iguring-compute-to-use-ipv6-addresses.html | 16 +- ...ring-compute-to-use-the-image-service.html | 17 +- .../configuring-database-connections.html | 12 +- .../configuring-flat-dhcp-networking.html | 24 +- .../content/configuring-flat-networking.html | 24 +- .../content/configuring-hypervisors.html | 12 +- .../content/configuring-live-migrations.html | 12 +- .../admin/content/configuring-logging.html | 12 +- .../configuring-multiple-compute-nodes.html | 24 +- ...guring-networking-on-the-compute-node.html | 13 +- .../configuring-openstack-compute-basics.html | 10 +- .../content/configuring-the-vnc-proxy.html | 8 +- .../content/configuring-vlan-networking.html | 10 +- .../content/creating-a-cloudpipe-image.html | 10 +- .../admin/content/creating-a-linux-image.html | 18 +- .../content/creating-a-windows-image.html | 10 +- .../content/creating-certifications.html | 17 +- .../admin/content/creating-custom-images.html | 16 +- .../dashboard-system-requirements.html | 15 +- .../admin/content/deleting-instances.html | 10 +- .../content/detecting-failed-drives.html | 10 +- .../determining-version-of-compute.html | 14 +- ...ing-access-to-vms-on-the-compute-node.html | 12 +- .../content/enabling-ping-and-ssh-on-vms.html | 10 +- .../enabling-vnc-consoles-in-nova.html | 8 +- .../example-installation-architecture.html | 10 +- ...eneral-compute-configuration-overview.html | 14 +- .../getting-an-instances-vnc-console.html | 8 +- .../getting-started-with-the-vnc-proxy.html | 8 +- .../admin/content/handling-drive-failure.html | 10 +- .../content/handling-server-failure.html | 10 +- .../hypervisor-configuration-basics.html | 16 +- .../admin/content/hypervisors.html | 18 +- .../admin/content/images-and-instances.html | 10 
+- .../admin/content/index.html | 19 +- ...talling-needed-software-for-web-scale.html | 18 +- ...installing-openstack-compute-on-rhel6.html | 42 +- ...nstalling-openstack-compute-on-ubuntu.html | 10 +- .../installing-openstack-dashboard.html | 13 +- .../installing-the-cloud-controller.html | 26 +- .../content/installing-the-compute-node.html | 47 +- .../content/iso-ubuntu-installation.html | 10 +- .../admin/content/live-migration-usage.html | 10 +- .../log-files-for-openstack-compute.html | 10 +- .../admin/content/managing-compute-users.html | 14 +- .../admin/content/managing-the-cloud.html | 12 +- .../admin/content/managing-volumes.html | 16 +- .../content/manual-ubuntu-installation.html | 10 +- .../migrating-from-bexar-to-cactus.html | 51 - .../admin/content/networking-options.html | 10 +- .../content/nova-conceptual-mapping.html | 13 +- .../openstack-architecture-overview.html | 10 +- ...k-compute-deployment-tool-with-puppet.html | 24 +- ...ion-using-virtualbox-vagrant-and-chef.html | 20 +- .../openstack-nova-logical-architecture.html | 16 +- ...traffic-flow-with-any-flat-networking.html | 10 +- ...art-i-setting-up-cloud-infrastructure.html | 21 +- .../part-ii-getting-virtual-machines.html | 10 +- .../reference-for-flags-in-nova-conf.html | 25 +- .../removing-network-from-project.html | 14 +- .../admin/content/restart-nova-services.html | 12 +- ...arting-and-logging-into-cloudpipe-vpn.html | 10 +- .../admin/content/run-the-server.html | 14 +- .../content/running-a-blog-in-the-cloud.html | 10 +- .../running-elastic-web-app-cloud.html | 10 +- .../sample-nova-configuration-files.html | 16 +- .../content/scripted-ubuntu-installation.html | 33 +- .../admin/content/search/htmlFileInfoList.js | 117 +- .../admin/content/search/htmlFileList.js | 113 +- .../admin/content/search/index-1.js | 1514 ++++++++-------- .../admin/content/search/index-2.js | 1602 +++++++++-------- .../admin/content/search/index-3.js | 1557 ++++++++-------- .../admin/content/selecting-a-hypervisor.html | 17 +- .../admin/content/service-architecture.html | 10 +- .../setting-flags-in-nova-conf-file.html | 12 +- ...mpute-environment-on-the-compute-node.html | 16 +- .../setting-up-sql-database-mysql.html | 12 +- .../admin/content/starting-images.html | 10 +- .../storage-and-openstack-compute.html | 10 +- .../admin/content/system-architecture.html | 10 +- .../troubleshooting-openstack-compute.html | 10 +- ...ubleshooting-openstack-object-storage.html | 10 +- ...ding-the-compute-service-architecture.html | 14 +- .../admin/content/users-and-projects.html | 12 +- .../admin/content/vpn-access.html | 10 +- .../admin/content/what-is-openstack.html | 10 +- .../admin/content/why-cloud.html | 10 +- .../admin/os-compute-adminguide-trunk.pdf | Bin 1520551 -> 1694489 bytes .../admin/common/comments.js | 8 - .../admin/common/css/ie.css | 13 - .../common/css/positioning-openstack.css | 267 --- .../common/css/positioning-rackspace.css | 336 ---- .../admin/common/css/positioning.css | 267 --- .../openstack-identity/admin/common/ga.js | 18 - .../admin/common/images/admon/caution.png | Bin 2958 -> 0 bytes .../admin/common/images/admon/important.png | Bin 2960 -> 0 bytes .../admin/common/images/admon/note.png | Bin 2414 -> 0 bytes .../admin/common/images/admon/tip.png | Bin 3062 -> 0 bytes .../admin/common/images/admon/warning.png | Bin 1926 -> 0 bytes .../admin/common/images/book_open.png | Bin 1211 -> 0 bytes .../common/images/breadcrumb-arrow-white.png | Bin 246 -> 0 bytes .../admin/common/images/breadcrumb-arrow.png | Bin 172 -> 0 bytes 
.../admin/common/images/callouts/1.png | Bin 396 -> 0 bytes .../admin/common/images/callouts/10.png | Bin 524 -> 0 bytes .../admin/common/images/callouts/11.png | Bin 397 -> 0 bytes .../admin/common/images/callouts/12.png | Bin 517 -> 0 bytes .../admin/common/images/callouts/13.png | Bin 520 -> 0 bytes .../admin/common/images/callouts/14.png | Bin 474 -> 0 bytes .../admin/common/images/callouts/15.png | Bin 522 -> 0 bytes .../admin/common/images/callouts/16.png | Bin 539 -> 0 bytes .../admin/common/images/callouts/17.png | Bin 475 -> 0 bytes .../admin/common/images/callouts/18.png | Bin 542 -> 0 bytes .../admin/common/images/callouts/19.png | Bin 539 -> 0 bytes .../admin/common/images/callouts/2.png | Bin 475 -> 0 bytes .../admin/common/images/callouts/20.png | Bin 588 -> 0 bytes .../admin/common/images/callouts/21.png | Bin 517 -> 0 bytes .../admin/common/images/callouts/22.png | Bin 503 -> 0 bytes .../admin/common/images/callouts/23.png | Bin 589 -> 0 bytes .../admin/common/images/callouts/24.png | Bin 542 -> 0 bytes .../admin/common/images/callouts/25.png | Bin 564 -> 0 bytes .../admin/common/images/callouts/26.png | Bin 596 -> 0 bytes .../admin/common/images/callouts/27.png | Bin 547 -> 0 bytes .../admin/common/images/callouts/28.png | Bin 594 -> 0 bytes .../admin/common/images/callouts/29.png | Bin 594 -> 0 bytes .../admin/common/images/callouts/3.png | Bin 506 -> 0 bytes .../admin/common/images/callouts/30.png | Bin 597 -> 0 bytes .../admin/common/images/callouts/4.png | Bin 425 -> 0 bytes .../admin/common/images/callouts/5.png | Bin 490 -> 0 bytes .../admin/common/images/callouts/6.png | Bin 526 -> 0 bytes .../admin/common/images/callouts/7.png | Bin 437 -> 0 bytes .../admin/common/images/callouts/8.png | Bin 509 -> 0 bytes .../admin/common/images/callouts/9.png | Bin 537 -> 0 bytes .../admin/common/images/cc/by-nc-nd.png | Bin 7424 -> 0 bytes .../admin/common/images/cc/by-nc-sa.png | Bin 7636 -> 0 bytes .../admin/common/images/cc/by-nc.png | Bin 6520 -> 0 bytes .../admin/common/images/cc/by-nd.png | Bin 6203 -> 0 bytes .../admin/common/images/cc/by-sa.png | Bin 6519 -> 0 bytes .../admin/common/images/cc/by.png | Bin 5088 -> 0 bytes .../admin/common/images/favicon-openstack.ico | Bin 1150 -> 0 bytes .../admin/common/images/favicon-rackspace.ico | Bin 806 -> 0 bytes .../admin/common/images/header-bg.gif | Bin 619 -> 0 bytes .../admin/common/images/highlight-blue.gif | Bin 471 -> 0 bytes .../admin/common/images/highlight-yellow.gif | Bin 331 -> 0 bytes .../admin/common/images/highlighter.png | Bin 598 -> 0 bytes .../admin/common/images/loading.gif | Bin 729 -> 0 bytes .../admin/common/images/logo.png | Bin 35061 -> 0 bytes .../admin/common/images/main_bg_fade.png | Bin 28710 -> 0 bytes .../admin/common/images/next-arrow.png | Bin 199 -> 0 bytes .../admin/common/images/openstack-logo.png | Bin 3666 -> 0 bytes .../admin/common/images/page_white_text.png | Bin 406 -> 0 bytes .../admin/common/images/pdf.png | Bin 3271 -> 0 bytes .../admin/common/images/previous-arrow.png | Bin 198 -> 0 bytes .../admin/common/images/rackspace-logo.png | Bin 2846 -> 0 bytes .../admin/common/images/search-icon.png | Bin 278 -> 0 bytes .../admin/common/images/showHideTreeIcons.png | Bin 726 -> 0 bytes .../admin/common/images/sidebar.png | Bin 177 -> 0 bytes .../admin/common/images/toc-icon.png | Bin 169 -> 0 bytes .../admin/common/jquery/jquery-1.4.2.min.js | 154 -- .../jquery/jquery-ui-1.8.2.custom.min.js | 321 ---- .../admin/common/jquery/jquery.cookie.js | 93 - .../images/ui-anim_basic_16x16.gif | Bin 1553 
-> 0 bytes .../images/ui-bg_flat_0_aaaaaa_40x100.png | Bin 180 -> 0 bytes .../images/ui-bg_flat_55_fbec88_40x100.png | Bin 182 -> 0 bytes .../images/ui-bg_glass_75_d0e5f5_1x400.png | Bin 162 -> 0 bytes .../images/ui-bg_glass_85_dfeffc_1x400.png | Bin 123 -> 0 bytes .../images/ui-bg_glass_95_fef1ec_1x400.png | Bin 119 -> 0 bytes .../ui-bg_gloss-wave_55_5c9ccc_500x100.png | Bin 3457 -> 0 bytes .../ui-bg_inset-hard_100_f5f8f9_1x100.png | Bin 104 -> 0 bytes .../ui-bg_inset-hard_100_fcfdfd_1x100.png | Bin 88 -> 0 bytes .../images/ui-icons_217bc0_256x240.png | Bin 4369 -> 0 bytes .../images/ui-icons_2e83ff_256x240.png | Bin 4369 -> 0 bytes .../images/ui-icons_469bdd_256x240.png | Bin 4369 -> 0 bytes .../images/ui-icons_6da8d5_256x240.png | Bin 5355 -> 0 bytes .../images/ui-icons_cd0a0a_256x240.png | Bin 4369 -> 0 bytes .../images/ui-icons_d8e7f3_256x240.png | Bin 4369 -> 0 bytes .../images/ui-icons_f9bd01_256x240.png | Bin 5355 -> 0 bytes .../theme-redmond/jquery-ui-1.8.2.custom.css | 430 ----- .../common/jquery/treeview/images/file.gif | Bin 1008 -> 0 bytes .../jquery/treeview/images/folder-closed.gif | Bin 631 -> 0 bytes .../jquery/treeview/images/folder-closed2.gif | Bin 105 -> 0 bytes .../common/jquery/treeview/images/folder.gif | Bin 631 -> 0 bytes .../common/jquery/treeview/images/folder2.gif | Bin 106 -> 0 bytes .../common/jquery/treeview/images/minus.gif | Bin 837 -> 0 bytes .../treeview/images/page_white_text.png | Bin 406 -> 0 bytes .../common/jquery/treeview/images/plus.gif | Bin 841 -> 0 bytes .../treeview/images/treeview-black-line.gif | Bin 1877 -> 0 bytes .../jquery/treeview/images/treeview-black.gif | Bin 1216 -> 0 bytes .../treeview/images/treeview-default-line.gif | Bin 1993 -> 0 bytes .../treeview/images/treeview-default.gif | Bin 1222 -> 0 bytes .../images/treeview-famfamfam-line.gif | Bin 807 -> 0 bytes .../treeview/images/treeview-famfamfam.gif | Bin 1280 -> 0 bytes .../treeview/images/treeview-gray-line.gif | Bin 1877 -> 0 bytes .../jquery/treeview/images/treeview-gray.gif | Bin 1230 -> 0 bytes .../treeview/images/treeview-red-line.gif | Bin 1877 -> 0 bytes .../jquery/treeview/images/treeview-red.gif | Bin 1230 -> 0 bytes .../jquery/treeview/jquery.treeview.async.js | 72 - .../jquery/treeview/jquery.treeview.css | 90 - .../common/jquery/treeview/jquery.treeview.js | 255 --- .../jquery/treeview/jquery.treeview.min.js | 16 - .../jquery/treeview/jquery.treeview.pack.js | 16 - .../admin/common/main-openstack.js | 198 -- .../admin/common/main-rackspace.js | 200 -- .../openstack-identity/admin/common/main.js | 198 -- .../Identity-Service-Concepts-e1362.html | 87 - .../creating-your-first-global-admin.html | 58 - .../admin/content/curl-examples.html | 46 - .../admin/content/debianubuntu-1.html | 71 - .../admin/content/debianubuntu.html | 56 - .../admin/content/dependencies.html | 45 - .../admin/content/index.html | 54 - .../installing-from-a-github-branch.html | 46 - .../content/installing-from-packages.html | 46 - .../installing-from-source-tarballs.html | 62 - .../admin/content/installing-keystone.html | 41 - ...uide-to-getting-started-with-keystone.html | 43 - .../admin/content/search/default.props | 1 - .../admin/content/search/en-us.props | 45 - .../admin/content/search/es-es.props | 179 -- .../admin/content/search/htmlFileInfoList.js | 12 - .../admin/content/search/htmlFileList.js | 13 - .../admin/content/search/index-1.js | 120 -- .../admin/content/search/index-2.js | 119 -- .../admin/content/search/index-3.js | 115 -- .../admin/content/search/ja-jp.props | 1 - 
.../admin/content/search/nwSearchFnt.js | 517 ------ .../admin/content/search/punctuation.props | 31 - .../content/search/stemmers/de_stemmer.js | 247 --- .../content/search/stemmers/en_stemmer.js | 187 -- .../content/search/stemmers/fr_stemmer.js | 299 --- .../openstack-identity/admin/favicon.ico | Bin 1150 -> 0 bytes .../trunk/openstack-identity/admin/index.html | 14 - .../admin/os-identity-starter-guide-trunk.pdf | Bin 86257 -> 0 bytes .../content/adding-a-member-to-an-image.html | 4 +- .../adding-a-new-virtual-machine-image.html | 4 +- .../admin/content/api-in-summary.html | 4 +- ...ommon-configuration-options-in-glance.html | 4 +- .../configuring-glance-storage-backends.html | 4 +- .../admin/content/configuring-glance.html | 4 +- .../configuring-logging-in-glance.html | 4 +- .../content/configuring-notifications.html | 4 +- ...guring-the-filesystem-storage-backend.html | 4 +- .../configuring-the-glance-registry.html | 4 +- .../configuring-the-s3-storage-backend.html | 4 +- ...configuring-the-swift-storage-backend.html | 4 +- .../admin/content/container-format.html | 4 +- .../content/controlling-glance-servers.html | 4 +- .../admin/content/debianubuntu-1.html | 10 +- .../admin/content/debianubuntu.html | 8 +- .../content/disk-and-container-formats.html | 4 +- .../admin/content/disk-format.html | 4 +- .../admin/content/examples.html | 4 +- ...d-via-get-images-and-get-imagesdetail.html | 4 +- ...ia-get_images-and-get_images_detailed.html | 4 +- .../admin/content/glance-api-server.html | 4 +- .../glance-authentication-with-keystone.html | 4 +- .../admin/content/glance-registry-api.html | 4 +- .../content/glance-registry-servers.html | 4 +- .../admin/content/image-identifiers.html | 4 +- .../admin/content/image-registries.html | 4 +- .../admin/content/image-statuses.html | 4 +- ...nt-information-about-uploading-images.html | 4 +- .../admin/content/index.html | 10 +- .../installing-from-a-bazaar-branch.html | 4 +- .../content/installing-from-packages.html | 4 +- .../installing-from-source-tarballs.html | 12 +- .../admin/content/installing-glance.html | 4 +- ...available-only-in-configuration-files.html | 4 +- .../content/manually-starting-the-server.html | 4 +- .../overview-of-glance-architecture.html | 4 +- .../admin/content/post-images.html | 4 +- ...-guide-to-getting-started-with-glance.html | 6 +- ...ual-machine-image-in-another-location.html | 4 +- .../removing-a-member-from-an-image.html | 4 +- ...lacing-a-membership-list-for-an-image.html | 4 +- ...requesting-a-list-of-public-vm-images.html | 4 +- ...detailed-metadata-on-a-specific-image.html | 4 +- ...detailed-metadata-on-public-vm-images.html | 4 +- .../content/requesting-image-memberships.html | 4 +- .../content/requesting-member-images.html | 4 +- .../admin/content/restarting-a-server.html | 4 +- .../retrieving-a-virtual-machine-image.html | 4 +- .../content/sharing-images-with-others.html | 4 +- ...ia-get_images-and-get_images_detailed.html | 4 +- .../admin/content/starting-a-server.html | 4 +- .../admin/content/stopping-a-server.html | 4 +- ...rtual-machine-image-data-and-metadata.html | 4 +- .../admin/content/the-add-command.html | 4 +- .../admin/content/the-clear-command.html | 4 +- .../admin/content/the-delete-command.html | 4 +- .../admin/content/the-details-command.html | 4 +- .../admin/content/the-help-command.html | 4 +- .../content/the-image-members-command.html | 4 +- .../admin/content/the-index-command.html | 4 +- .../admin/content/the-member-add-command.html | 4 +- .../content/the-member-delete-command.html | 4 +- 
.../content/the-member-images-command.html | 4 +- .../content/the-members-replace-command.html | 4 +- .../admin/content/the-show-command.html | 4 +- .../admin/content/the-update-command.html | 4 +- ...-programmatically-with-glances-client.html | 4 +- .../content/using-the-glance-cli-tool.html | 4 +- ...e-control-program-to-start-the-server.html | 4 +- .../admin/os-image-adminguide-trunk.pdf | Bin 227420 -> 227404 bytes .../about-the-ring-data-structure.html | 8 +- ...account-reaper-background-and-history.html | 8 +- .../content/account-server-configuration.html | 18 +- .../content/accounts-and-account-servers.html | 8 +- .../admin/content/adding-a-proxy-server.html | 8 +- .../additional-notes-on-large-objects.html | 8 +- .../content/analyzing-log-files-with-st.html | 162 -- ...authentication-and-access-permissions.html | 10 +- .../admin/content/before-you-begin.html | 53 - .../admin/content/building-the-ring.html | 8 +- .../ch_getting-started-with-openstack.html | 8 +- ...-configuring-openstack-object-storage.html | 8 +- ...roduction-to-openstack-object-storage.html | 8 +- ...ch_openstack-object-storage-tutorials.html | 12 +- .../ch_running-openstack-object-storage.html | 8 +- .../ch_support-and-troubleshooting.html | 8 +- ...loud-provider-conceptual-architecture.html | 10 +- ...rrors-and-fixes-for-openstack-compute.html | 8 +- .../admin/content/community-support.html | 16 +- .../content/components-of-openstack.html | 8 +- .../configuration-for-rate-limiting.html | 12 +- ...g-and-tuning-openstack-object-storage.html | 8 +- ...-openstack-object-storage-with-s3_api.html | 10 +- .../configuring-openstack-object-storage.html | 8 +- .../content/considerations-and-tuning.html | 8 +- .../container-server-configuration.html | 18 +- .../admin/content/containers-and-objects.html | 8 +- .../admin/content/database-replication.html | 8 +- .../content/detecting-failed-drives.html | 8 +- ...irect-api-management-of-large-objects.html | 12 +- ...ample-installation-architecture-swift.html | 12 +- .../content/filesystem-considerations.html | 8 +- .../general-installation-steps-swift.html | 8 +- .../admin/content/general-service-tuning.html | 8 +- .../admin/content/general-system-tuning.html | 8 +- .../admin/content/handling-drive-failure.html | 8 +- .../content/handling-server-failure.html | 8 +- .../content/history-of-the-ring-design.html | 8 +- .../admin/content/index.html | 14 +- .../installing-and-configuring-auth-node.html | 8 +- ...talling-and-configuring-storage-nodes.html | 8 +- ...alling-and-configuring-the-proxy-node.html | 8 +- ...ng-openstack-object-storage-on-ubuntu.html | 10 +- .../language-specific-api-bindings.html | 8 +- ...object-storage-history-and-background.html | 8 +- .../content/list-of-devices-in-the-ring.html | 10 +- .../log-files-for-openstack-compute.html | 8 +- .../admin/content/logging-considerations.html | 8 +- .../admin/content/managing-large-objects.html | 10 +- ...ging-openstack-object-storage-with-st.html | 50 - .../managing-rings-with-the-ring-builder.html | 8 +- .../content/memcached-considerations.html | 8 +- .../admin/content/network-setup-notes.html | 8 +- .../content/nova-conceptual-mapping.html | 11 +- .../admin/content/object-replication.html | 8 +- .../content/object-server-configuration.html | 18 +- .../object-storage-system-requirements.html | 8 +- .../openstack-architecture-overview.html | 8 +- .../openstack-nova-logical-architecture.html | 14 +- .../admin/content/operations.html | 8 +- .../part-i-setting-up-secure-access.html | 8 +- 
.../part-ii-configuring-cyberduck.html | 10 +- .../admin/content/part-iii-copying-files.html | 10 +- .../content/partition-assignment-list.html | 8 +- .../admin/content/partition-shift-value.html | 8 +- .../admin/content/preparing-the-ring.html | 8 +- .../content/proxy-server-configuration.html | 14 +- .../admin/content/replication.html | 8 +- .../admin/content/search/htmlFileInfoList.js | 12 +- .../admin/content/search/htmlFileList.js | 12 +- .../admin/content/search/index-1.js | 153 +- .../admin/content/search/index-2.js | 127 +- .../admin/content/search/index-3.js | 117 +- .../server-configuration-reference.html | 8 +- .../admin/content/st-basics.html | 63 - ...g-large-photos-or-videos-on-the-cloud.html | 8 +- .../admin/content/system-time.html | 8 +- .../admin/content/the-account-reaper.html | 8 +- ...ling-resources-by-setting-rate-limits.html | 8 +- .../admin/content/troubleshooting-notes.html | 8 +- .../troubleshooting-openstack-compute.html | 8 +- ...ubleshooting-openstack-object-storage.html | 8 +- ...nderstanding-how-object-storage-works.html | 18 +- .../using-st-to-manage-segmented-objects.html | 58 - .../content/verify-swift-installation.html | 8 +- .../admin/content/what-is-openstack.html | 8 +- .../admin/content/why-cloud.html | 8 +- .../admin/content/working-with-rings.html | 8 +- .../os-objectstorage-adminguide-trunk.pdf | Bin 838545 -> 845596 bytes 423 files changed, 5186 insertions(+), 11108 deletions(-) delete mode 100644 doc/target/docbkx/pdf/openstack-identity-service-starter/os-identity-starter-guide-trunk.pdf delete mode 100644 doc/target/docbkx/pdf/openstack-identity-service-starter/os-identity-starter-guide.fo delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/migrating-from-bexar-to-cactus.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/comments.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/css/ie.css delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/css/positioning-openstack.css delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/css/positioning-rackspace.css delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/css/positioning.css delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/ga.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/admon/caution.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/admon/important.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/admon/note.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/admon/tip.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/admon/warning.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/book_open.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/breadcrumb-arrow-white.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/breadcrumb-arrow.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/1.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/10.png delete mode 100644 
doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/11.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/12.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/13.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/14.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/15.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/16.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/17.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/18.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/19.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/2.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/20.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/21.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/22.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/23.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/24.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/25.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/26.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/27.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/28.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/29.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/3.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/30.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/4.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/5.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/6.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/7.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/8.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/callouts/9.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/cc/by-nc-nd.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/cc/by-nc-sa.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/cc/by-nc.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/cc/by-nd.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/cc/by-sa.png delete mode 100644 
doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/cc/by.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/favicon-openstack.ico delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/favicon-rackspace.ico delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/header-bg.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/highlight-blue.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/highlight-yellow.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/highlighter.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/loading.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/logo.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/main_bg_fade.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/next-arrow.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/openstack-logo.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/page_white_text.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/pdf.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/previous-arrow.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/rackspace-logo.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/search-icon.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/showHideTreeIcons.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/sidebar.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/images/toc-icon.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/jquery-1.4.2.min.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/jquery-ui-1.8.2.custom.min.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/jquery.cookie.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-anim_basic_16x16.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-bg_flat_0_aaaaaa_40x100.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-bg_flat_55_fbec88_40x100.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-bg_glass_75_d0e5f5_1x400.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-bg_glass_85_dfeffc_1x400.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-bg_glass_95_fef1ec_1x400.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-bg_gloss-wave_55_5c9ccc_500x100.png delete mode 100644 
doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-bg_inset-hard_100_f5f8f9_1x100.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-bg_inset-hard_100_fcfdfd_1x100.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-icons_217bc0_256x240.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-icons_2e83ff_256x240.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-icons_469bdd_256x240.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-icons_6da8d5_256x240.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-icons_cd0a0a_256x240.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-icons_d8e7f3_256x240.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/images/ui-icons_f9bd01_256x240.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/theme-redmond/jquery-ui-1.8.2.custom.css delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/file.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/folder-closed.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/folder-closed2.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/folder.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/folder2.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/minus.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/page_white_text.png delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/plus.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/treeview-black-line.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/treeview-black.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/treeview-default-line.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/treeview-default.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/treeview-famfamfam-line.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/treeview-famfamfam.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/treeview-gray-line.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/treeview-gray.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/treeview-red-line.gif delete mode 100644 
doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/images/treeview-red.gif delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/jquery.treeview.async.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/jquery.treeview.css delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/jquery.treeview.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/jquery.treeview.min.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/jquery/treeview/jquery.treeview.pack.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/main-openstack.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/main-rackspace.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/common/main.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/Identity-Service-Concepts-e1362.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/creating-your-first-global-admin.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/curl-examples.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/debianubuntu-1.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/debianubuntu.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/dependencies.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/index.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/installing-from-a-github-branch.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/installing-from-packages.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/installing-from-source-tarballs.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/installing-keystone.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/quick-guide-to-getting-started-with-keystone.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/default.props delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/en-us.props delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/es-es.props delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/htmlFileInfoList.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/htmlFileList.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/index-1.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/index-2.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/index-3.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/ja-jp.props delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/nwSearchFnt.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/punctuation.props delete mode 100644 
doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/stemmers/de_stemmer.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/stemmers/en_stemmer.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/content/search/stemmers/fr_stemmer.js delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/favicon.ico delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/index.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-identity/admin/os-identity-starter-guide-trunk.pdf delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-object-storage/admin/content/analyzing-log-files-with-st.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-object-storage/admin/content/before-you-begin.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-object-storage/admin/content/managing-openstack-object-storage-with-st.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-object-storage/admin/content/st-basics.html delete mode 100644 doc/target/docbkx/webhelp/trunk/openstack-object-storage/admin/content/using-st-to-manage-segmented-objects.html diff --git a/doc/src/docbkx/openstack-compute-admin/computeinstall.xml b/doc/src/docbkx/openstack-compute-admin/computeinstall.xml index f17b8659a4..c9c52a6585 100644 --- a/doc/src/docbkx/openstack-compute-admin/computeinstall.xml +++ b/doc/src/docbkx/openstack-compute-admin/computeinstall.xml @@ -170,63 +170,32 @@ http://getsatisfaction.com/stackops.
Scripted Installation
- You can download a script from GitHub at https://github.com/elasticdog/OpenStack-NOVA-Installer-Script/raw/master/nova-install.
- Copy the file to the servers where you want to install OpenStack Compute services -
- with multiple servers, you could install a cloud controller node and multiple
- compute nodes. The compute nodes manage the virtual machines through the
- nova-compute service. The cloud controller node contains all other nova-
- services.
- Ensure you can execute the script by modifying the permissions on the script
- file.
- wget --no-check-certificate https://github.com/elasticdog/OpenStack-NOVA-Installer-Script/raw/master/nova-install
-sudo chmod 755 nova-install
- You must run the script with root permissions.
- sudo bash nova-install -t cloud
- The way this script is designed, you can have multiple servers for the cloud
- controller, the messaging service, and the database server, or run it all on one
- server. The -t or -type parameter has two options: nova-install -t cloud installs the cloud controller and nova-install -t compute installs a compute node for an existing cloud controller.
- These are the parameters you enter using the script:
- Enter the Cloud Controller Host IP address.
- Enter the S3 IP, or use the default address as the current server's IP address.
- Enter the RabbitMQ Host IP. Again, you can use the default to install it to the local server. RabbitMQ will be installed.
- Enter the MySQL host IP address.
- Enter the MySQL root password and verify it.
- Enter a network range for all projects in CIDR format.
- The script uses all these values entered for the configuration information to
- create the nova.conf configuration file. The script also walks you through creating
- a user and project. Enter a user name and project name when prompted. After the script is finished, you also need to create the project zip file. Credentials are generated after you create the project zip file with nova-manage project zipfile projname username
- After configuring OpenStack Compute and creating a project zip file using the nova-manage project create command, be sure to unzip the project zip file and then source the novarc credential file that you extracted.
- source /root/creds/novarc
- Now all the necessary nova services are started up and you can begin to issue
- nova-manage commands. If you configured it to all run from one server, you're done.
- If you have a second server that you intend to use as a compute node (a node that
- does not contain the database), install the nova services on the second node using
- the -t compute parameter with the same nova-install script.
- To run from two or more servers, copy the nova.conf from the cloud controller node to the compute node.
+ You can download a script for a standalone proof-of-concept, learning, or development install of OpenStack Compute on Ubuntu 11.04 at https://devstack.org.
+ Install Ubuntu 11.04 (Natty): in order to install all the dependencies correctly, we assume this specific version of Ubuntu to
+ make the process as easy as possible. OpenStack works on other flavors of Linux (and
+ some folks even run it on Windows!). We recommend using a minimal install of
+ Ubuntu server in a VM if this is your first time. 
+
+ Download DevStack:
+ git clone git://github.com/cloudbuilders/devstack.git
+ The devstack repo contains a script that installs OpenStack Compute, the Image
+ Service, and the Identity Service, and offers templates for configuration
+ files plus data scripts.
+
+ Start the install:
+ cd devstack; ./stack.sh
+ The install takes a few minutes; we recommend reading the well-documented script while it is building to learn
+ more about what is going on. (The steps above are consolidated in the sketch that follows.)
+
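For reference, here is a minimal shell sketch of the DevStack steps above on a fresh Ubuntu 11.04 (Natty) host. The apt-get line that installs git is an assumption; git may already be present on your image.

# Sketch only: standalone DevStack install on Ubuntu 11.04 (Natty)
sudo apt-get install -y git-core          # assumption: git not yet installed
git clone git://github.com/cloudbuilders/devstack.git
cd devstack
./stack.sh                                # review the script before running it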
Manual Installation @@ -365,43 +334,8 @@ sudo service mysql restart
Your installation can run any nova- services anywhere, so long as the service can access nova.conf so it knows where the rabbitmq server is installed.
The Compute Node is where you configure the Compute network, the networking
- between your instances. There are three options: flat, flatDHCP, and
- VLAN.
- If you use FlatManager as your network manager, there are some additional
- networking changes to ensure connectivity between your nodes and VMs. If you
- chose VlanManager or FlatDHCP, you may skip this section because they are set up
- for you automatically.
- Compute defaults to a bridge device named ‘br100’. This needs to be created
- and somehow integrated into your network. To keep things as simple as possible,
- have all the VM guests on the same network as the VM hosts (the compute nodes).
- To do so, set the compute node’s external IP address to be on the bridge and add
- eth0 to that bridge. To do this, edit your network interfaces configuration to
- look like the following example:
-
-< begin /etc/network/interfaces >
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Networking for OpenStack Compute
-auto br100
-
-iface br100 inet dhcp
-bridge_ports eth0
-bridge_stp off
-bridge_maxwait 0
-bridge_fd 0
-< end /etc/network/interfaces >
-
- Next, restart networking to apply the changes:
- sudo /etc/init.d/networking restart
- If you use flat networking, you must manually insert the IP address into the
- 'fixed_ips' table in the nova database. Also ensure that the database lists the
- bridge name correctly that matches the network configuration you are working
- within. Flat networking should insert this automatically but you may need to
- check it.
+ between your instances. There are three options: flat, flatDHCP, and VLAN. Read
+ more about specific configurations in the Networking chapter.
Because you may need to query the database from the Compute node and learn more information about instances, euca2ools and mysql-client packages should be installed on any additional Compute nodes. @@ -414,19 +348,37 @@ bridge_fd 0
restart libvirt-bin; restart nova-network; restart nova-compute; restart nova-api; restart nova-objectstore; restart nova-scheduler
+ All nova services are now installed; the remaining steps are configuration. Please refer to Configuring Compute for additional information.
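For convenience, the restart commands above can also be run in a loop. This is only a sketch and assumes the Ubuntu upstart service names listed in this section:

# Sketch: restart the services listed above in one pass (upstart service names assumed)
for svc in libvirt-bin nova-network nova-compute nova-api nova-objectstore nova-scheduler; do
    sudo restart "$svc"
done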
Installing OpenStack Compute on Red Hat Enterprise Linux 6
This section documents a multi-node installation using RHEL 6. RPM repos for the Bexar
- release, the Cactus release, and also per-commit trunk builds for OpenStack Nova are
- available at http://yum.griddynamics.net.
+ release, the Cactus release, milestone releases of Diablo, and also per-commit trunk
+ builds for OpenStack Nova are available at http://yum.griddynamics.net. The
+ final release of Diablo is available at http://yum.griddynamics.net/yum/diablo/, but is not yet tested completely
+ (as of Oct 4, 2011).
Check this page for updates: http://wiki.openstack.org/NovaInstall/RHEL6Notes.
- Known limitations for RHEL version 6 installations:
+ Known considerations for RHEL version 6 installations:
-iSCSI LUN not supported due to tgtadm vs ietadm differences
+ iSCSI LUN not supported due to tgtadm versus ietadm differences
+ GuestFS is used for file injection
+ File injection works with libvirt
+ Static network configuration can detect OS type for RHEL and Ubuntu
+ Only the KVM hypervisor has been tested with this installation
To install Nova on RHEL v.6, you need access to two repositories: one available on the yum.griddynamics.net website, and the RHEL DVD image mounted as a repository. @@ -443,17 +395,17 @@ baseurl=file:///mnt/cdrom/Server enabled=1 gpgcheck=0
Download and install the repo config and key.
- wget http://yum.griddynamics.net/openstack-repo-2011.1-2.noarch.rpm
-sudo rpm -i openstack-repo-2011.1-2.noarch.rpm
+ wget http://yum.griddynamics.net/yum/diablo/openstack-repo-2011.3-0.3.noarch.rpm
+sudo rpm -i openstack-repo-2011.3-0.3.noarch.rpm
Install the libvirt package (these instructions are tested only on KVM). sudo yum install libvirt sudo chkconfig libvirtd on sudo service libvirtd start
Repeat the basic installation steps to put the prerequisites on all cloud controller and compute nodes. Nova has many different possible configurations. You can install Nova services on separate servers as needed, but these are the basic prerequisites.
These are the basic packages to install for a cloud controller node:
- sudo yum install euca2ools openstack-nova-{api,compute,network,objectstore,scheduler,volume} openstack-nova-cc-config openstack-glance
+ sudo yum install euca2ools openstack-nova-node-full
These are the basic packages to install on compute nodes. Repeat for each compute node (the node that runs the VMs) that you want to install.
- sudo yum install openstack-nova-compute openstack-nova-compute-config
+ sudo yum install openstack-nova-compute
On the cloud controller node, create a MySQL database named nova. sudo service mysqld start sudo chkconfig mysqld on @@ -481,13 +433,29 @@ echo "GRANT ALL PRIVILEGES ON $DB_NAME.* TO $DB_USER IDENTIFIED BY '$DB_PASS';" echo "GRANT ALL PRIVILEGES ON $DB_NAME.* TO root IDENTIFIED BY '$DB_PASS';" | mysql -uroot -p$DB_PASS mysql
Now, ensure the database version matches the version of nova that you are installing: nova-manage db sync
- 
- On each node, set up the configuration file in /etc/nova/nova.conf.
+ For iptables configuration, update your firewall configuration to allow incoming
+ requests on ports 5672 (RabbitMQ), 3306 (MySQL DB), 9292 (Glance), 6080 (noVNC web
+ console), the API ports (8773, 8774), and DHCP traffic from instances. For non-production
+ environments, the easiest way to fix any firewall problems is to remove the final REJECT rule in the
+ INPUT chain of the filter table (a sample command is sketched at the end of this section). 
+ $ sudo iptables -I INPUT 1 -p tcp --dport 5672 -j ACCEPT
+ $ sudo iptables -I INPUT 1 -p tcp --dport 3306 -j ACCEPT
+ $ sudo iptables -I INPUT 1 -p tcp --dport 9292 -j ACCEPT
+ $ sudo iptables -I INPUT 1 -p tcp --dport 6080 -j ACCEPT
+ $ sudo iptables -I INPUT 1 -p tcp --dport 8773 -j ACCEPT
+ $ sudo iptables -I INPUT 1 -p tcp --dport 8774 -j ACCEPT
+ $ sudo iptables -I INPUT 1 -p udp --dport 67 -j ACCEPT
+
+ On every node where nova-compute is running, ensure that unencrypted VNC access is allowed only from the cloud controller node:
+
+ $ sudo iptables -I INPUT 1 -p tcp -s <CLOUD_CONTROLLER_IP_ADDRESS> --dport 5900:6400 -j ACCEPT
+ On each node, set up the configuration file in /etc/nova/nova.conf.
Start the Nova services after configuring, and you are then running an OpenStack cloud!
+ $ for n in api compute network objectstore scheduler vncproxy; do sudo service openstack-nova-$n start; done
+$ sudo service openstack-glance-api start
+$ sudo service openstack-glance-registry start
+$ for n in node1 node2 node3; do ssh $n sudo service openstack-nova-compute start; done
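As noted above, in non-production environments you can simply remove the final REJECT rule from the INPUT chain. The commands below are only a sketch that assumes the stock RHEL 6 catch-all rule; list your rules first and adjust to match what you actually see:

# Sketch only: inspect the chain, then delete the default RHEL 6 catch-all REJECT rule
$ sudo iptables -L INPUT --line-numbers
$ sudo iptables -D INPUT -j REJECT --reject-with icmp-host-prohibited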
Post-Installation Configuration for OpenStack Compute @@ -578,6 +546,10 @@ for n in node1 node2 node3; do ssh $n sudo service openstack-nova-compute start; --network_size Number value; Number of addresses in each private subnet. + + --glance_api_servers + IP and port; Address for Image Service. + @@ -595,6 +567,7 @@ for n in node1 node2 node3; do ssh $n sudo service openstack-nova-compute start; --ec2_url=http://184.106.239.134:8773/services/Cloud --fixed_range=192.168.0.0/16 --network_size=8 +--glance_api_servers=184.106.239.134:9292 --routing_source_ip=184.106.239.134 --sql_connection=mysql://nova:notnova@184.106.239.134/nova Create a “nova” group, so you can set permissions on the configuration file: @@ -606,17 +579,18 @@ chmod 640 /etc/nova/nova.conf
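A minimal sketch of the group and permission step follows. The addgroup and chown commands are assumptions (the exact commands on your distribution may differ); the group name "nova" and the chmod 640 on /etc/nova/nova.conf come from the section above.

sudo addgroup nova                  # assumption: Debian/Ubuntu addgroup; use groupadd on RHEL
sudo chown -R root:nova /etc/nova   # assumption: give the nova group read access to the config directory
sudo chmod 640 /etc/nova/nova.conf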
Setting Up OpenStack Compute Environment on the Compute Node
These are the commands you run to ensure the database schema is current, and
- then set up a user and project:
+ then set up a user and project, if you are using built-in auth with the
+ --use_deprecated_auth flag rather than the Identity Service:
-/usr/bin/nova-manage db sync
-/usr/bin/nova-manage user admin <user_name>
-/usr/bin/nova-manage project create <project_name> <user_name>
-/usr/bin/nova-manage network create <network-label> <project-network> <number-of-networks-in-project> <addresses-in-each-network>
+nova-manage db sync
+nova-manage user admin <user_name>
+nova-manage project create <project_name> <user_name>
+nova-manage network create <network-label> <project-network> <number-of-networks-in-project> <addresses-in-each-network>
Here is an example of what this looks like with real values entered:
- /usr/bin/nova-manage db sync
-/usr/bin/nova-manage user admin dub
-/usr/bin/nova-manage project create dubproject dub
-/usr/bin/nova-manage network create novanet 192.168.0.0/24 1 256
+ nova-manage db sync
+nova-manage user admin dub
+nova-manage project create dubproject dub
+nova-manage network create novanet 192.168.0.0/24 1 256
For this example, the number of IPs is /24 since that falls inside the /16 range that was set in ‘fixed-range’ in nova.conf. Currently, there can only be one network, and this setup would use the maximum IPs available in a /24. You can @@ -772,8 +746,9 @@ chmod g+rwx /dev/kvm apt-get upgrade
Next, update the database schema. nova-manage db sync Restart all the nova- services.
- A separate command is available to migrate users from the deprecated auth system to the Identity Service.
- nova-manage shell export
+ A separate command is available to migrate users from the deprecated auth system
+ to the Identity Service.
+ nova-manage shell export textfilename.txt
Within the Keystone project there is a keystone-import script that you can run to import these users. Make sure that you can launch images. You can convert images that were previously stored in the nova object store using this command:
diff --git a/doc/target/docbkx/images/cloud/cover.svg b/doc/target/docbkx/images/cloud/cover.svg index 79dac10e11..37bb79e88c 100644 --- a/doc/target/docbkx/images/cloud/cover.svg +++ b/doc/target/docbkx/images/cloud/cover.svg @@ -1560,7 +1560,7 @@ OpenStack Compute - trunk (Aug 19, 2011) + Diablo (Sep 22, 2011) diff --git a/doc/target/docbkx/pdf/openstack-compute-admin/os-compute-adminguide.fo b/doc/target/docbkx/pdf/openstack-compute-admin/os-compute-adminguide.fo index 716c21f599..70c3efcd57 100644 --- a/doc/target/docbkx/pdf/openstack-compute-admin/os-compute-adminguide.fo +++ b/doc/target/docbkx/pdf/openstack-compute-admin/os-compute-adminguide.fo @@ -1,6 +1,6 @@ -TrueCopyright © 2010, 2011 OpenStack LLC Some rights reserved.OpenStack Compute Administration ManualCloud API Docs PluginOpenStack Compute Administration ManualTable of Contents1. Getting Started with OpenStackWhat is OpenStack?Components of OpenStackOpenStack Project Architecture OverviewCloud Provider Conceptual ArchitectureOpenStack Compute Logical ArchitectureNova Conceptual MappingWhy Cloud?2. Introduction to OpenStack ComputeHypervisorsUsers and ProjectsImages and InstancesSystem ArchitectureStorage and OpenStack Compute3. 
Installing OpenStack ComputeSystem RequirementsExample Installation ArchitecturesService ArchitectureInstalling OpenStack Compute on UbuntuISO Distribution InstallationScripted InstallationManual InstallationInstalling the Cloud ControllerSetting up the SQL Database (MySQL) on the Cloud ControllerInstalling the Compute NodeRestart All Relevant Services on the Compute NodeInstalling OpenStack Compute on Red Hat Enterprise Linux 6Post-Installation Configuration for OpenStack ComputeSetting Flags in the nova.conf FileSetting Up OpenStack Compute Environment on the Compute NodeCreating CertificationsEnabling Access to VMs on the Compute NodeConfiguring Multiple Compute NodesDetermining the Version of ComputeMigrating from Bexar to Cactus4. Configuring OpenStack ComputeGeneral Compute Configuration OverviewExample nova.conf Configuration FilesConfiguring LoggingConfiguring HypervisorsConfiguring Compute to use IPv6 AddressesConfiguring Image Service and Storage for ComputeConfiguring Live MigrationsConfiguring Database ConnectionsConfiguring the Compute Messaging SystemConfiguring Authentication and Authorization5. HypervisorsSelecting a HypervisorHypervisor Configuration Basics6. OpenStack Compute Automated InstallationsDeployment Tool for OpenStack using PuppetOpenStack Compute Installation Using VirtualBox, Vagrant, And Chef7. NetworkingNetworking OptionsCloudpipe — Per Project VpnsCreating a Cloudpipe ImageVPN AccessCertificates and RevocationRestarting and Logging into the Cloudpipe VPNConfiguring Networking on the Compute NodeConfiguring Flat NetworkingConfiguring Flat DHCP NetworkingOutbound Traffic Flow with Any Flat NetworkingConfiguring VLAN NetworkingEnabling Ping and SSH on VMsAllocating and Associating IP Addresses with InstancesAssociating a Public IP AddressRemoving a Network from a Project8. System AdministrationStarting ImagesDeleting InstancesImage managementCreating a Linux Image – Ubuntu & FedoraCreating a Windows ImageUnderstanding the Compute Service ArchitectureManaging the CloudManaging Compute UsersManaging VolumesUsing Live MigrationReference for Flags in nova.conf9. OpenStack InterfacesAbout the DashboardSystem Requirements for the DashboardInstalling the OpenStack DashboardBuild and Configure Openstack-DashboardRun the ServerGetting Started with the VNC ProxyConfiguring the VNC ProxyEnabling VNC Consoles in NovaGetting an Instance's VNC Console10. OpenStack Compute TutorialsRunning Your First Elastic Web Application on the CloudPart I: Setting Up the Cloud InfrastructurePart II: Getting Virtual Machines to Run the Virtual ServersPart III: Installing the Needed Software for the Web-Scale ScenarioRunning a Blog in the Cloud11. Support and TroubleshootingCommunity SupportTroubleshooting OpenStack Object StorageHandling Drive FailureHandling Server FailureDetecting Failed DrivesTroubleshooting OpenStack ComputeLog files for OpenStack ComputeCommon Errors and Fixes for OpenStack Compute OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration Manual trunk (2011-08-19)Copyright © 2010, 2011 OpenStack LLC Some rights reserved.OpenStack™ Compute offers open source software for cloud administration and +TrueCopyright © 2010, 2011 OpenStack LLC Some rights reserved.OpenStack Compute Administration ManualCloud API Docs PluginOpenStack Compute Administration ManualTable of Contents1. 
Getting Started with OpenStackWhat is OpenStack?Components of OpenStackOpenStack Project Architecture OverviewCloud Provider Conceptual ArchitectureOpenStack Compute Logical ArchitectureNova Conceptual MappingWhy Cloud?2. Introduction to OpenStack ComputeHypervisorsUsers and ProjectsImages and InstancesSystem ArchitectureStorage and OpenStack Compute3. Installing OpenStack ComputeSystem RequirementsExample Installation ArchitecturesService ArchitectureInstalling OpenStack Compute on UbuntuISO Distribution InstallationScripted InstallationManual InstallationInstalling the Cloud ControllerSetting up the SQL Database (MySQL) on the Cloud ControllerSetting Up PostgreSQL as the DatabaseInstalling the Compute NodeRestart All Relevant Services on the Compute NodeInstalling OpenStack Compute on Red Hat Enterprise Linux 6Post-Installation Configuration for OpenStack ComputeSetting Flags in the nova.conf FileSetting Up OpenStack Compute Environment on the Compute NodeCreating CredentialsEnabling Access to VMs on the Compute NodeConfiguring Multiple Compute NodesDetermining the Version of ComputeMigrating from Cactus to Diablo4. Configuring OpenStack ComputeGeneral Compute Configuration OverviewExample nova.conf Configuration FilesConfiguring LoggingConfiguring HypervisorsConfiguring Authentication and AuthorizationConfiguring Compute to use IPv6 AddressesConfiguring Image Service and Storage for ComputeConfiguring Live MigrationsConfiguring Database ConnectionsConfiguring the Compute Messaging System5. HypervisorsSelecting a HypervisorHypervisor Configuration Basics6. OpenStack Compute Automated InstallationsDeployment Tool for OpenStack using PuppetOpenStack Compute Installation Using VirtualBox, Vagrant, And Chef7. NetworkingNetworking OptionsCloudpipe — Per Project VpnsCreating a Cloudpipe ImageVPN AccessCertificates and RevocationRestarting and Logging into the Cloudpipe VPNConfiguring Networking on the Compute NodeConfiguring Flat NetworkingConfiguring Flat DHCP NetworkingOutbound Traffic Flow with Any Flat NetworkingConfiguring VLAN NetworkingEnabling Ping and SSH on VMsAllocating and Associating IP Addresses with InstancesAssociating a Public IP AddressRemoving a Network from a ProjectExisting High Availability Options for Networking8. System AdministrationStarting ImagesDeleting InstancesImage managementCreating a Linux Image – Ubuntu & FedoraCreating a Windows ImageUnderstanding the Compute Service ArchitectureManaging the CloudManaging Compute UsersManaging VolumesUsing Live MigrationReference for Flags in nova.conf9. OpenStack InterfacesAbout the DashboardSystem Requirements for the DashboardInstalling the OpenStack DashboardConfigure Openstack-DashboardInstall the DashboardRun the ServerGetting Started with the VNC ProxyConfiguring the VNC ProxyEnabling VNC Consoles in NovaGetting an Instance's VNC Console10. OpenStack Compute TutorialsRunning Your First Elastic Web Application on the CloudPart I: Setting Up the Cloud InfrastructurePart II: Getting Virtual Machines to Run the Virtual ServersPart III: Installing the Needed Software for the Web-Scale ScenarioRunning a Blog in the Cloud11. 
Support and TroubleshootingCommunity SupportTroubleshooting OpenStack Object StorageHandling Drive FailureHandling Server FailureDetecting Failed DrivesTroubleshooting OpenStack ComputeLog files for OpenStack ComputeCommon Errors and Fixes for OpenStack Compute OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration Manual Diablo (2011-09-22)Copyright © 2010, 2011 OpenStack LLC Some rights reserved.OpenStack™ Compute offers open source software for cloud administration and management for any organization. This manual provides guidance for installing, - managing, and understanding the software that runs OpenStack Compute. + managing, and understanding the software that runs OpenStack Compute. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -10,15 +10,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - Except where otherwise noted, this document is licensed under Creative Commons Attribution ShareAlike 3.0 License.http://creativecommons.org/licenses/by-sa/3.0/legalcode OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk Table of Contents1. Getting Started with OpenStack What is OpenStack? Components of OpenStack OpenStack Project Architecture Overview Cloud Provider Conceptual Architecture OpenStack Compute Logical Architecture Nova Conceptual Mapping Why Cloud? 2. Introduction to OpenStack Compute Hypervisors Users and Projects Images and Instances System Architecture Storage and OpenStack Compute 3. Installing OpenStack Compute System Requirements Example Installation Architectures Service Architecture Installing OpenStack Compute on Ubuntu ISO Distribution Installation Scripted Installation Manual Installation Installing OpenStack Compute on Red Hat Enterprise Linux 6 Post-Installation Configuration for OpenStack Compute Setting Flags in the nova.conf File Setting Up OpenStack Compute Environment on the Compute Node Creating Certifications Enabling Access to VMs on the Compute Node Configuring Multiple Compute Nodes Determining the Version of Compute Migrating from Bexar to Cactus 4. Configuring OpenStack Compute General Compute Configuration Overview Example nova.conf Configuration Files Configuring Logging Configuring Hypervisors Configuring Compute to use IPv6 Addresses Configuring Image Service and Storage for Compute Configuring Live Migrations Configuring Database Connections Configuring the Compute Messaging System Configuring Authentication and Authorization 5. Hypervisors Selecting a Hypervisor Hypervisor Configuration Basics 6. OpenStack Compute Automated Installations Deployment Tool for OpenStack using Puppet OpenStack Compute Installation Using VirtualBox, Vagrant, And Chef 7. 
Networking Networking Options Cloudpipe — Per Project Vpns Creating a Cloudpipe Image VPN Access Certificates and Revocation Restarting and Logging into the Cloudpipe VPN Configuring Networking on the Compute Node Configuring Flat Networking Configuring Flat DHCP Networking Outbound Traffic Flow with Any Flat Networking Configuring VLAN Networking Enabling Ping and SSH on VMs Allocating and Associating IP Addresses with Instances Associating a Public IP Address Removing a Network from a Project 8. System Administration Starting Images Deleting Instances Image management Creating a Linux Image – Ubuntu & Fedora Creating a Windows Image Understanding the Compute Service Architecture Managing the Cloud Managing Compute Users Managing Volumes Using Live Migration Reference for Flags in nova.conf 9. OpenStack Interfaces About the Dashboard System Requirements for the Dashboard Installing the OpenStack Dashboard Getting Started with the VNC Proxy Configuring the VNC Proxy Enabling VNC Consoles in Nova Getting an Instance's VNC Console 10. OpenStack Compute Tutorials Running Your First Elastic Web Application on the Cloud Part I: Setting Up the Cloud Infrastructure Part II: Getting Virtual Machines to Run the Virtual Servers Part III: Installing the Needed Software for the Web-Scale Scenario Running a Blog in the Cloud 11. Support and Troubleshooting Community Support Troubleshooting OpenStack Object Storage Handling Drive Failure Handling Server Failure Detecting Failed Drives Troubleshooting OpenStack Compute Log files for OpenStack Compute Common Errors and Fixes for OpenStack Compute OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk List of Figures7.1. Flat network, all-in-one server installation 7.2. Flat network, single interface, multiple servers 7.3. Flat network, multiple interfaces, multiple servers 7.4. Flat DHCP network, multiple interfaces, multiple servers 7.5. Single adaptor hosts, first route 7.6. Single adaptor hosts, second route OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk List of Tables3.1. Hardware Recommendations 3.2. Description of nova.conf flags (not comprehensive) 4.1. Description of general purpose nova.conf flags 4.2. Description of nova.conf flags for all services 4.3. Description of nova.conf flags for logging 4.4. Description of nova.conf flags for customized log formats 4.5. Description of nova.conf flags for configuring IPv6 4.6. Description of nova.conf flags for the Glance image service and - storage 4.7. Description of nova.conf flags for local image storage 4.8. Description of nova.conf flags for live migration 4.9. Description of nova.conf flags for database access 4.10. Description of nova.conf flags for Remote Procedure Calls and RabbitMQ Messaging 4.11. Description of nova.conf flags for Tuning RabbitMQ Messaging 4.12. Description of nova.conf flags for Customizing Exchange or Topic Names 4.13. Description of nova.conf flag for Authentication 4.14. Description of nova.conf flags for customizing roles 4.15. Description of nova.conf flags for credentials 4.16. Description of nova.conf flags for CA (Certificate Authority) 5.1. Description of nova.conf flags for the compute node 8.1. Description of common nova.conf flags (nova-api, nova-compute) 8.2. 
Description of nova.conf flags specific to nova-volume OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk 1. Getting Started with OpenStackOpenStack is a collection of open source technology that provides massively scalable open + Except where otherwise noted, this document is licensed under Creative Commons Attribution ShareAlike 3.0 License.http://creativecommons.org/licenses/by-sa/3.0/legalcode OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo Table of Contents1. Getting Started with OpenStack What is OpenStack? Components of OpenStack OpenStack Project Architecture Overview Cloud Provider Conceptual Architecture OpenStack Compute Logical Architecture Nova Conceptual Mapping Why Cloud? 2. Introduction to OpenStack Compute Hypervisors Users and Projects Images and Instances System Architecture Storage and OpenStack Compute 3. Installing OpenStack Compute System Requirements Example Installation Architectures Service Architecture Installing OpenStack Compute on Ubuntu ISO Distribution Installation Scripted Installation Manual Installation Installing OpenStack Compute on Red Hat Enterprise Linux 6 Post-Installation Configuration for OpenStack Compute Setting Flags in the nova.conf File Setting Up OpenStack Compute Environment on the Compute Node Creating Credentials Enabling Access to VMs on the Compute Node Configuring Multiple Compute Nodes Determining the Version of Compute Migrating from Cactus to Diablo 4. Configuring OpenStack Compute General Compute Configuration Overview Example nova.conf Configuration Files Configuring Logging Configuring Hypervisors Configuring Authentication and Authorization Configuring Compute to use IPv6 Addresses Configuring Image Service and Storage for Compute Configuring Live Migrations Configuring Database Connections Configuring the Compute Messaging System 5. Hypervisors Selecting a Hypervisor Hypervisor Configuration Basics 6. OpenStack Compute Automated Installations Deployment Tool for OpenStack using Puppet OpenStack Compute Installation Using VirtualBox, Vagrant, And Chef 7. Networking Networking Options Cloudpipe — Per Project Vpns Creating a Cloudpipe Image VPN Access Certificates and Revocation Restarting and Logging into the Cloudpipe VPN Configuring Networking on the Compute Node Configuring Flat Networking Configuring Flat DHCP Networking Outbound Traffic Flow with Any Flat Networking Configuring VLAN Networking Enabling Ping and SSH on VMs Allocating and Associating IP Addresses with Instances Associating a Public IP Address Removing a Network from a Project Existing High Availability Options for Networking 8. System Administration Starting Images Deleting Instances Image management Creating a Linux Image – Ubuntu & Fedora Creating a Windows Image Understanding the Compute Service Architecture Managing the Cloud Managing Compute Users Managing Volumes Using Live Migration Reference for Flags in nova.conf 9. OpenStack Interfaces About the Dashboard System Requirements for the Dashboard Installing the OpenStack Dashboard Getting Started with the VNC Proxy Configuring the VNC Proxy Enabling VNC Consoles in Nova Getting an Instance's VNC Console 10. 
OpenStack Compute Tutorials Running Your First Elastic Web Application on the Cloud Part I: Setting Up the Cloud Infrastructure Part II: Getting Virtual Machines to Run the Virtual Servers Part III: Installing the Needed Software for the Web-Scale Scenario Running a Blog in the Cloud 11. Support and Troubleshooting Community Support Troubleshooting OpenStack Object Storage Handling Drive Failure Handling Server Failure Detecting Failed Drives Troubleshooting OpenStack Compute Log files for OpenStack Compute Common Errors and Fixes for OpenStack Compute OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo List of Figures7.1. Flat network, all-in-one server installation 7.2. Flat network, single interface, multiple servers 7.3. Flat network, multiple interfaces, multiple servers 7.4. Flat DHCP network, multiple interfaces, multiple servers 7.5. Single adaptor hosts, first route 7.6. Single adaptor hosts, second route 7.7. High Availability Networking Option OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo List of Tables3.1. Hardware Recommendations 3.2. Description of nova.conf flags (not comprehensive) 4.1. Description of general purpose nova.conf flags 4.2. Description of nova.conf flags for all services 4.3. Description of nova.conf flags for logging 4.4. Description of nova.conf flags for customized log formats 4.5. Description of nova.conf flags for Authentication 4.6. Description of nova.conf flags for customizing roles in deprecated + auth 4.7. Description of nova.conf flags for credentials in deprecated auth 4.8. Description of nova.conf flags for CA (Certificate Authority) 4.9. Description of nova.conf flags for configuring IPv6 4.10. Description of nova.conf flags for the Glance image service and + storage 4.11. Description of nova.conf flags for local image storage 4.12. Description of nova.conf flags for live migration 4.13. Description of nova.conf flags for database access 4.14. Description of nova.conf flags for Remote Procedure Calls and RabbitMQ Messaging 4.15. Description of nova.conf flags for Tuning RabbitMQ Messaging 4.16. Description of nova.conf flags for Customizing Exchange or Topic Names 5.1. Description of nova.conf flags for the compute node 8.1. Description of common nova.conf flags (nova-api, nova-compute) 8.2. Description of nova.conf flags specific to nova-volume OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo 1. Getting Started with OpenStackOpenStack is a collection of open source technology that provides massively scalable open source cloud computing software. Currently OpenStack develops two related projects: OpenStack Compute, which offers computing power through virtual machine and network management, and OpenStack Object Storage which is software for redundant, scalable object storage capacity. Closely related to the OpenStack Compute project is the Image Service project, named Glance. OpenStack can be used by corporations, service providers, VARS, SMBs, researchers, and global data centers looking to deploy large-scale cloud deployments for - private or public clouds. What is OpenStack?What is OpenStack?OpenStack offers open source software to build public and private clouds. 
OpenStack is + private or public clouds. What is OpenStack?What is OpenStack?OpenStack offers open source software to build public and private clouds. OpenStack is a community and a project as well as open source software to help organizations run clouds for virtual computing or storage. OpenStack contains a collection of open source projects that are community-maintained including OpenStack Compute (code-named Nova), @@ -33,7 +34,7 @@ during the on-ramp process. Because the project is so new and constantly changing, be aware of the revision time for all information. If you are reading a document that is a few months old and you feel that it isn't entirely accurate, then please let us know - through the mailing list at https://launchpad.net/~openstack so it can be updated or removed. Components of OpenStackComponents of OpenStackThere are currently three main components of OpenStack: Compute, Object Storage, and + through the mailing list at https://launchpad.net/~openstack so it can be updated or removed. Components of OpenStackComponents of OpenStackThere are currently three main components of OpenStack: Compute, Object Storage, and Image Service. Let's look at each in turn.OpenStack Compute is a cloud fabric controller, used to start up virtual instances for either a user or a group. It's also used to configure networking for each instance or project that contains multiple instances for a particular project. OpenStack Object Storage is a system to store objects in a massively scalable large @@ -46,33 +47,33 @@ It can be configured in three ways: using OpenStack Object Store to store images; using Amazon's Simple Storage Solution (S3) storage directly; or using S3 storage with Object Store as the intermediate for S3 access.The following diagram shows the basic relationships between the projects, how they - relate to each other, and how they can fulfill the goals of open source cloud computing. OpenStack Project Architecture OverviewOpenStack Project Architecture Overviewby Ken PeppleBefore we dive into the conceptual and logic architecture, let’s take a second to explain the OpenStack project: OpenStack is a collection of open source technologies delivering a massively scalable cloud operating system.You can think of it as software to power your own Infrastructure as a Service (IaaS) offering like Amazon Web Services. It currently encompasses three main projects:Swift which provides object/blob storage. This is roughly analogous to Rackspace Cloud Files (from which it is derived) or Amazon S3.Glance which provides discovery, storage and retrieval of virtual machine images for OpenStack Nova.Nova which provides virtual servers upon + relate to each other, and how they can fulfill the goals of open source cloud computing. OpenStack Project Architecture OverviewOpenStack Project Architecture Overviewby Ken PeppleBefore we dive into the conceptual and logic architecture, let’s take a second to explain the OpenStack project: OpenStack is a collection of open source technologies delivering a massively scalable cloud operating system.You can think of it as software to power your own Infrastructure as a Service (IaaS) offering like Amazon Web Services. It currently encompasses three main projects:Swift which provides object/blob storage. This is roughly analogous to Rackspace Cloud Files (from which it is derived) or Amazon S3.Glance which provides discovery, storage and retrieval of virtual machine images for OpenStack Nova.Nova which provides virtual servers upon demand. 
This is similar to Rackspace Cloud Servers or Amazon EC2.While these three projects provide the core of the cloud infrastructure, OpenStack is open and evolving — there will be more projects (there are already related projects for web interfaces and a queue service). With that brief introduction, let’s delve into a conceptual architecture and then - examine how OpenStack Compute could map to it. Cloud Provider Conceptual ArchitectureCloud Provider Conceptual ArchitectureKen, PeppleImagine that we are going to build our own IaaS cloud and offer it to customers. To achieve this, we would need to provide several high level features:1.Allow application owners to register for our cloud services, view their usage and see their bill (basic customer relations management functionality)2.Allow Developers/DevOps folks to create and store custom images for their applications (basic build-time functionality)3.Allow DevOps/Developers to launch, monitor and terminate instances (basic run-time functionality)4.Allow the Cloud Operator to configure and operate the cloud infrastructureWhile there are certainly many, many other features that we would need to offer (especially if we were to follow are more complete industry framework like eTOM), these four get to the very heart of providing IaaS. Now assuming that you agree with these four top level features, you might put together a conceptual architecture that looks something like this:In this model, I’ve imagined four sets of users (developers, devops, owners and operators) + examine how OpenStack Compute could map to it. Cloud Provider Conceptual ArchitectureCloud Provider Conceptual ArchitectureKen, PeppleImagine that we are going to build our own IaaS cloud and offer it to customers. To achieve this, we would need to provide several high level features:1.Allow application owners to register for our cloud services, view their usage and see their bill (basic customer relations management functionality)2.Allow Developers/DevOps folks to create and store custom images for their applications (basic build-time functionality)3.Allow DevOps/Developers to launch, monitor and terminate instances (basic run-time functionality)4.Allow the Cloud Operator to configure and operate the cloud infrastructureWhile there are certainly many, many other features that we would need to offer (especially if we were to follow are more complete industry framework like eTOM), these four get to the very heart of providing IaaS. Now assuming that you agree with these four top level features, you might put together a conceptual architecture that looks something like this:In this model, I’ve imagined four sets of users (developers, devops, owners and operators) that need to interact with the cloud and then separated out the functionality needed for each. From there, I’ve followed a pretty common tiered approach to the architecture (presentation, logic and resources) with two orthogonal areas - (integration and management). Let’s explore each a little further: As with presentation layers in more typical application architectures, components here interact with users to accept and present information. In this layer, you will find web portals to provide graphical interfaces for non-developers and API endpoints for developers. For more advanced architectures, you might find load balancing, console proxies, security and naming services present here also.The logic tier would provide the intelligence and control functionality for our cloud. 
This tier would house orchestration (workflow for complex tasks), scheduling (determining mapping of jobs to resources), policy (quotas and such) , image registry (metadata about instance images), logging (events and metering). There will need to integration functions within the architecture. It is assumed that most service providers will already have a customer identity and billing systems. Any cloud architecture would need to integrate with these systems.As with any complex environment, we will need a management tier to operate the environment. This should include an API to access the cloud administration features as well as some forms of monitoring. It is likely that the monitoring functionality will take the form of integration into an existing tool. While I’ve highlighted monitoring and an admin API for our fictional provider, in a more complete architecture you would see a vast array of operational support functions like provisioning and configuration management.Finally, since this is a compute cloud, we will need actual compute, network and storage resources to provide to our customers. This tier provides these services, whether they be servers, network switches, network attached storage or other resources.With this model in place, let’s shift gears and look at OpenStack Compute’s logical - architecture.OpenStack Compute Logical ArchitectureOpenStack Compute Logical ArchitectureNow that we’ve looked at a proposed conceptual architecture, let’s see how OpenStack Compute - is logically architected. Since Cactus is the newest release, I will concentrate - there (which means if you are viewing this after around July 2011, this will be out - of date). There are several logical components of OpenStack Compute architecture but + (integration and management). Let’s explore each a little further: As with presentation layers in more typical application architectures, components here interact with users to accept and present information. In this layer, you will find web portals to provide graphical interfaces for non-developers and API endpoints for developers. For more advanced architectures, you might find load balancing, console proxies, security and naming services present here also.The logic tier would provide the intelligence and control functionality for our cloud. This tier would house orchestration (workflow for complex tasks), scheduling (determining mapping of jobs to resources), policy (quotas and such) , image registry (metadata about instance images), logging (events and metering). There will need to integration functions within the architecture. It is assumed that most service providers will already have a customer identity and billing systems. Any cloud architecture would need to integrate with these systems.As with any complex environment, we will need a management tier to operate the environment. This should include an API to access the cloud administration features as well as some forms of monitoring. It is likely that the monitoring functionality will take the form of integration into an existing tool. While I’ve highlighted monitoring and an admin API for our fictional provider, in a more complete architecture you would see a vast array of operational support functions like provisioning and configuration management.Finally, since this is a compute cloud, we will need actual compute, network and storage resources to provide to our customers. 
This tier provides these services, whether they be servers, network switches, network attached storage or other resources.With this model in place, let’s shift gears and look at OpenStack Compute’s logical + architecture.OpenStack Compute Logical ArchitectureOpenStack Compute Logical ArchitectureNow that we’ve looked at a proposed conceptual architecture, let’s see how OpenStack Compute + is logically architected. At the time of this writing, Cactus was the newest release + (which means if you are viewing this after around July 2011, this may be out of + date). There are several logical components of OpenStack Compute architecture but the majority of these components are custom written python daemons of two - varieties:WSGI applications to receive and mediate API calls (nova-api, glance-api, etc.)Worker daemons to carry out orchestration tasks (nova-compute, nova-network, nova-schedule, etc.)However, there are two essential pieces of the logical architecture are neither custom written nor Python based: the messaging queue and the database. These two components facilitate the asynchronous orchestration of complex tasks through message passing and information sharing. Putting this all together we get a picture like this:This complicated, but not overly informative, diagram as it can be summed up in three sentences:End users (DevOps, Developers and even other OpenStack components) talk to - nova-api to interface with OpenStack ComputeOpenStack Compute daemons exchange info through the queue (actions) and database (information) - to carry out API requestsOpenStack Glance is basically a completely separate infrastructure which OpenStack Compute - interfaces through the Glance APINow that we see the overview of the processes and their interactions, let’s take a closer look at each component.The nova-api daemon is the heart of the OpenStack Compute. You may see it + varieties:WSGI applications to receive and mediate API calls (nova-api, glance-api, etc.)Worker daemons to carry out orchestration tasks (nova-compute, nova-network, nova-schedule, etc.)However, there are two essential pieces of the logical architecture are neither custom written nor Python based: the messaging queue and the database. These two components facilitate the asynchronous orchestration of complex tasks through message passing and information sharing. Putting this all together we get a picture like this:This complicated, but not overly informative, diagram as it can be summed up in three sentences:End users (DevOps, Developers and even other OpenStack components) talk to + nova-api to interface with OpenStack ComputeOpenStack Compute daemons exchange info through the queue (actions) and database (information) + to carry out API requestsOpenStack Glance is basically a completely separate infrastructure which OpenStack Compute + interfaces through the Glance APINow that we see the overview of the processes and their interactions, let’s take a closer look at each component.The nova-api daemon is the heart of the OpenStack Compute. You may see it illustrated on many pictures of OpenStack Compute as API and “Cloud Controller”. While this is partly true, cloud controller is really just a class (specifically the CloudController in trunk/nova/api/ec2/cloud.py) within the nova-api daemon. 
It provides an endpoint for all API queries (either OpenStack API or EC2 API), initiates most of the orchestration activities (such as running an instance) and also enforces some policy (mostly quota - checks).The nova-schedule process is conceptually the simplest piece of code in OpenStack + checks).The nova-schedule process is conceptually the simplest piece of code in OpenStack Compute: take a virtual machine instance request from the queue and determines where it should run (specifically, which compute server host it should run on). In practice however, I am sure this will grow to be the most @@ -82,13 +83,13 @@ that let’s you choose (or write) your own algorithm for scheduling. Currently, there are several to choose from (simple, chance, etc) and it is a area of hot development for the future releases of OpenStack - Compute.The nova-compute process is primarily a worker daemon that creates and terminates virtual machine instances. The process by which it does so is fairly complex (see this blog post by Laurence Luce for the gritty details) but the basics are simple: accept actions from the queue and then perform a series of system commands (like launching a KVM instance) to carry them out while updating state in the database.As you can gather by the name, nova-volume manages the creation, attaching and detaching of persistent volumes to compute instances (similar functionality to Amazon’s Elastic Block Storage). It can use volumes from a variety of providers such as iSCSI or AoE.The nova-network worker daemon is very similar to nova-compute and nova-volume. It accepts networking tasks from the queue and then performs tasks to manipulate the network (such as setting up bridging interfaces or changing iptables rules).The queue provides a central hub for passing messages between daemons. This is currently implemented with RabbitMQ today, but theoretically could be any AMPQ message queue supported by the python ampqlib.The SQL database stores most of the + Compute.The nova-compute process is primarily a worker daemon that creates and terminates virtual machine instances. The process by which it does so is fairly complex (see this blog post by Laurence Luce for the gritty details) but the basics are simple: accept actions from the queue and then perform a series of system commands (like launching a KVM instance) to carry them out while updating state in the database.As you can gather by the name, nova-volume manages the creation, attaching and detaching of persistent volumes to compute instances (similar functionality to Amazon’s Elastic Block Storage). It can use volumes from a variety of providers such as iSCSI or AoE.The nova-network worker daemon is very similar to nova-compute and nova-volume. It accepts networking tasks from the queue and then performs tasks to manipulate the network (such as setting up bridging interfaces or changing iptables rules).The queue provides a central hub for passing messages between daemons. This is currently implemented with RabbitMQ today, but theoretically could be any AMPQ message queue supported by the python ampqlib.The SQL database stores most of the build-time and run-time state for a cloud infrastructure. This includes the instance types that are available for use, instances in use, networks available and projects. 
Theoretically, OpenStack Compute can support any database supported by SQL-Alchemy but the only databases currently being widely used are sqlite3 (only - appropriate for test and development work), MySQL and PostgreSQL.OpenStack Glance is a separate project from OpenStack Compute, but as shown above, + appropriate for test and development work), MySQL and PostgreSQL.OpenStack Glance is a separate project from OpenStack Compute, but as shown above, complimentary. While it is an optional part of the overall compute architecture, I can’t imagine that most OpenStack Compute installations will not be using it (or a complimentary product). There are three pieces to @@ -97,7 +98,7 @@ much like nova-api, and the actual image blobs are placed in the image store. The glance-registry stores and retrieves metadata about images. The image store can be a number of different object - stores, include OpenStack Object Storage (Swift).Finally, another optional project that we will need for our fictional service provider is an + stores, include OpenStack Object Storage (Swift).Finally, another optional project that we will need for our fictional service provider is an user dashboard. I have picked the OpenStack Dashboard here, but there are also several other web front ends available for OpenStack Compute. The OpenStack Dashboard provides a web interface into OpenStack Compute to give @@ -106,34 +107,35 @@ application.This logical architecture represents just one way to architect OpenStack Compute. With its pluggable architecture, we could easily swap out OpenStack Glance with another image service or use another dashboard. In the coming releases of OpenStack, expect to see - more modularization of the code especially in the network and volume areas.Nova Conceptual MappingNova Conceptual MappingNow that we’ve seen a conceptual architecture for a fictional cloud provider and examined the logical architecture of OpenStack Nova, it is fairly easy to map the OpenStack components to the conceptual areas to see what we are lacking:As you can see from the illustration, I’ve overlaid logical components of OpenStack Nova, Glance and Dashboard to denote functional coverage. For each of the overlays, I’ve added the name of the logical component within the project that provides the functionality. While all of these judgements are highly subjective, you can see that we have a majority coverage of the functional areas with a few notable exceptions:The largest gap in our functional coverage is logging and billing. At the moment, OpenStack Nova doesn’t have a billing component that can mediate logging events, rate the logs and create/present bills. That being said, most service providers will already have one (or many) of these so the focus is really on the logging and integration with billing. This could be remedied in a variety of ways: augmentations of the code (which should happen in the next release “Diablo”), integration with commercial products or services (perhaps Zuora) or custom log parsing. Identity is also a point which will likely need to be augmented. 
Unless we are running a stock + more modularization of the code especially in the network and volume areas.Nova Conceptual MappingNova Conceptual MappingNow that we’ve seen a conceptual architecture for a fictional cloud provider and examined the logical architecture of OpenStack Nova, it is fairly easy to map the OpenStack components to the conceptual areas to see what we are lacking:As you can see from the illustration, I’ve overlaid logical components of OpenStack Nova, Glance and Dashboard to denote functional coverage. For each of the overlays, I’ve added the name of the logical component within the project that provides the functionality. While all of these judgements are highly subjective, you can see that we have a majority coverage of the functional areas with a few notable exceptions:The largest gap in our functional coverage is logging and billing. At the moment, OpenStack Nova doesn’t have a billing component that can mediate logging events, rate the logs and create/present bills. That being said, most service providers will already have one (or many) of these so the focus is really on the logging and integration with billing. This could be remedied in a variety of ways: augmentations of the code (which should happen in the next release “Diablo”), integration with commercial products or services (perhaps Zuora) or custom log parsing. Identity is also a point which will likely need to be augmented. Unless we are running a stock LDAP for our identity system, we will need to integrate our solution with OpenStack Compute. Having said that, this is true of almost all cloud - solutions.The customer portal will also be an integration point. While OpenStack Compute provides a user + solutions.The customer portal will also be an integration point. While OpenStack Compute provides a user dashboard (to see running instance, launch new instances, etc.), it doesn’t provide an interface to allow application owners to signup for service, track their bills and lodge trouble tickets. Again, this is probably - something that it is already in place at our imaginary service provider. Ideally, the Admin API would replicate all functionality that we’d be able to do via the + something that it is already in place at our imaginary service provider. Ideally, the Admin API would replicate all functionality that we’d be able to do via the command line interface (which in this case is mostly exposed through the nova-manage command). This will get better in the Diablo release with the Admin - API work.Cloud monitoring and operations will be an important area of focus for our service provider. A + API work.Cloud monitoring and operations will be an important area of focus for our service provider. A key to any good operations approach is good tooling. While OpenStack Compute provides nova-instancemonitor, which tracks compute node utilization, we’re - really going to need a number of third party tools for monitoring. Policy is an extremely important area but very provider specific. Everything from quotas + really going to need a number of third party tools for monitoring. Policy is an extremely important area but very provider specific. Everything from quotas (which are supported) to quality of service (QoS) to privacy controls can fall under this. I’ve given OpenStack Nova partial coverage here, but that might vary depending on the intricacies of the providers needs. 
For the record, the Catus release of OpenStack Compute provides quotas for instances (number and cores used, volumes (size and number), floating IP addresses and - metadata.Scheduling within OpenStack Compute is fairly rudimentary for larger installations today. The + metadata.Scheduling within OpenStack Compute is fairly rudimentary for larger installations today. The pluggable scheduler supports chance (random host assignment), simple (least loaded) and zone (random nodes within an availability zone). As within most areas on this list, this will be greatly augmented in Diablo. In development are distributed schedulers and schedulers that understand heterogeneous hosts (for support of GPUs and differing CPU architectures).As you can see, OpenStack Compute provides a fair basis for our mythical service provider, as long as the mythical service providers are willing to do some integration here and - there. Why Cloud?Why Cloud?In data centers today, many computers suffer the same underutilization in computing + there. Note that since the time of this writing, OpenStack Identity Service has been + added.Why Cloud?Why Cloud?In data centers today, many computers suffer the same underutilization in computing power and networking bandwidth. For example, projects may need a large amount of computing capacity to complete a computation, but no longer need the computing power after completing the computation. You want cloud computing when you want a service @@ -144,17 +146,17 @@ diagrams contains the services that afford computing power harnessed to get work done. Much like the electrical power we receive each day, cloud computing provides subscribers or users with access to a shared collection of computing resources: networks for - transfer, servers for storage, and applications or services for completing tasks. These are the compelling features of a cloud:On-demand self-service: Users can provision servers and networks with little - human intervention. Network access: Any computing capabilities are available over the network. - Many different devices are allowed access through standardized mechanisms. Resource pooling: Multiple users can access clouds that serve other consumers - according to demand. Elasticity: Provisioning is rapid and scales out or in based on need. Metered or measured service: Just like utilities that are paid for by the + transfer, servers for storage, and applications or services for completing tasks. These are the compelling features of a cloud:On-demand self-service: Users can provision servers and networks with little + human intervention. Network access: Any computing capabilities are available over the network. + Many different devices are allowed access through standardized mechanisms. Resource pooling: Multiple users can access clouds that serve other consumers + according to demand. Elasticity: Provisioning is rapid and scales out or in based on need. Metered or measured service: Just like utilities that are paid for by the hour, clouds should optimize resource use and control it for the level of service or type of servers such as storage or processing.Cloud computing offers different service models depending on the capabilities a - consumer may require. SaaS: Software as a Service. Provides the consumer the ability to use the software - in a cloud environment, such as web-based email for example. PaaS: Platform as a Service. Provides the consumer the ability to deploy + consumer may require. SaaS: Software as a Service. 
Provides the consumer the ability to use the software + in a cloud environment, such as web-based email for example. PaaS: Platform as a Service. Provides the consumer the ability to deploy applications through a programming language or tools supported by the cloud platform provider. An example of platform as a service is an Eclipse/Java programming - platform provided with no downloads required. IaaS: Infrastructure as a Service. Provides infrastructure such as computer + platform provided with no downloads required. IaaS: Infrastructure as a Service. Provides infrastructure such as computer instances, network connections, and storage so that people can run any software or operating system. When you hear terms such as public cloud or private cloud, these refer to the deployment model for the cloud. A private cloud operates for a single organization, but @@ -173,44 +175,46 @@ drives on each users's desktop and enabling access to huge data storage capacity online in the cloud. For a more detailed discussion of cloud computing's essential characteristics and its models of service and deployment, see http://www.nist.gov/itl/cloud/, published by the US - National Institute of Standards and Technology. OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk 2. Introduction to OpenStack ComputeOpenStack Compute gives you a tool to orchestrate a cloud, including running instances, + National Institute of Standards and Technology. OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo 2. Introduction to OpenStack ComputeOpenStack Compute gives you a tool to orchestrate a cloud, including running instances, managing networks, and controlling access to the cloud through users and projects. The underlying open source project's name is Nova, and it provides the software that can control an Infrastructure as a Service (IaaS) cloud computing platform. It is similar in scope to Amazon EC2 and Rackspace Cloud Servers. OpenStack Compute does not include any virtualization software; rather it defines drivers that interact with underlying virtualization mechanisms that run on your host operating system, and exposes functionality - over a web-based API.HypervisorsHypervisorsOpenStack Compute requires a hypervisor and Compute controls the hypervisors through an + over a web-based API.HypervisorsHypervisorsOpenStack Compute requires a hypervisor and Compute controls the hypervisors through an API server. The process for selecting a hypervisor usually means prioritizing and making decisions based on budget and resource constraints as well as the inevitable list of - supported features and required technical specifications. With OpenStack Compute, you - can orchestrate clouds using multiple hypervisors in different zones. The types of - virtualization standards that may be used with Compute include:Hyper-V 2008 - KVM - - Kernel-based Virtual MachineLXC - Linux Containers - (through libvirt)QEMU - Quick - EMUlatorUML - User - Mode LinuxVMWare ESX/ESXi 4.1 update 1Xen - - XenServer 5.5, Xen Cloud Platform (XCP)Users and ProjectsUsers and ProjectsThe OpenStack Compute system is designed to be used by many different cloud computing - consumers or customers, using role-based access assignments. Roles control the + supported features and required technical specifications. 
The majority of development is + done with the KVM and Xen-based hypervisors. Refer to http://wiki.openstack.org/HypervisorSupportMatrix for a detailed list of + features and support across the hypervisors. With OpenStack Compute, you can orchestrate clouds using multiple hypervisors in + different zones. The types of virtualization standards that may be used with Compute + include:Hyper-V 2008 + KVM - + Kernel-based Virtual MachineLXC - Linux Containers + (through libvirt)QEMU - Quick + EMUlatorUML - User + Mode LinuxVMWare ESX/ESXi 4.1 update 1Xen - + XenServer 5.5, Xen Cloud Platform (XCP)Users and ProjectsUsers and ProjectsThe OpenStack Compute system is designed to be used by many different cloud computing + consumers or customers, using role-based access assignments. With the use of the Identity Service, the issuing of a token also issues the roles assigned to the user. Roles control the actions that a user is allowed to perform. For example, a user cannot allocate a public IP without the netadmin or admin role. There are both global roles and per-project role assignments. A user's access to particular images is limited by project, but the access key and secret key are assigned per user. Key pairs granting access to an instance are enabled per user, but quotas to control resource consumption across available hardware resources are per project. OpenStack Compute uses a rights management system that employs a Role-Based Access - Control (RBAC) model and supports the following five roles:Cloud Administrator (admin): Global role. Users of this class enjoy complete system access.IT Security (itsec): Global role. This role is limited to IT security personnel. It permits role holders to - quarantine instances on any project.Project Manager (projectmanager): Project role. The default for project owners, this role affords users the + Control (RBAC) model and supports the following five roles:Cloud Administrator (admin): Global role. Users of this class enjoy complete system access.IT Security (itsec): Global role. This role is limited to IT security personnel. It permits role holders to + quarantine instances on any project.Project Manager (projectmanager): Project role. The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and - launch and terminate instances.Network Administrator (netadmin): Project role. Users with this role are permitted to allocate and assign + launch and terminate instances.Network Administrator (netadmin): Project role. Users with this role are permitted to allocate and assign publicly accessible IP addresses as well as create and modify firewall - rules.Developer (developer): Project role. This is a general purpose role that is assigned to users by + rules.Developer (developer): Project role. This is a general purpose role that is assigned to users by default.While the original EC2 API supports users, OpenStack Compute adds the concept of projects. Projects are isolated resource containers forming the principal organizational structure within Nova. They consist of a separate VLAN, volumes, instances, images, keys, and users. A user can specify which project he or she wishes to use by appending :project_id to his or her access key. If no project is specified in the API request, Compute - attempts to use a project with the same id as the user. 
For projects, quota controls are available to limit the: Number of volumes which may be createdTotal size of all volumes within a project as measured in GBNumber of instances which may be launchedNumber of processor cores which may be allocatedPublicly accessible IP addressesImages and InstancesImages and InstancesAn image is a file containing information about a virtual disk that completely + attempts to use a project with the same id as the user. For projects, quota controls are available to limit the: Number of volumes which may be createdTotal size of all volumes within a project as measured in GBNumber of instances which may be launchedNumber of processor cores which may be allocatedPublicly accessible IP addressesImages and InstancesImages and InstancesAn image is a file containing information about a virtual disk that completely replicates all information about a working computer at a point in time including operating system information and file system information. Compute can use certificate management for decrypting bundled images. For now, Compute relies on using the euca2ools @@ -225,7 +229,7 @@ Image Service is installed.An instance is a running virtual machine within the cloud. An instance has a life cycle that is controlled by OpenStack Compute. Compute creates the instances and it is responsible for building a disk image, launching it, reporting the state, attaching - persistent storage, and terminating it. System ArchitectureSystem ArchitectureOpenStack Compute consists of several main components. A "cloud controller" contains many of these components, + persistent storage, and terminating it. System ArchitectureSystem ArchitectureOpenStack Compute consists of several main components. A "cloud controller" contains many of these components, and it represents the global state and interacts with all other components. An API Server acts as the web services front end for the cloud controller. The compute controller provides compute server resources and typically contains the compute service, The Object Store component optionally provides storage @@ -240,16 +244,16 @@ with a scheduler, network controller, and volume controller via AMQP (Advanced Message Queue Protocol). To avoid blocking each component while waiting for a response, OpenStack Compute uses asynchronous calls, with a call-back that gets triggered when a - response is received.To achieve the shared-nothing property with multiple copies of the same component, OpenStack Compute keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)Storage and OpenStack ComputeStorage and OpenStack ComputeA ‘volume’ is a detachable block storage device. You can think of it as a USB hard drive. It + response is received.To achieve the shared-nothing property with multiple copies of the same component, OpenStack Compute keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. 
In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)Storage and OpenStack ComputeStorage and OpenStack ComputeA ‘volume’ is a detachable block storage device. You can think of it as a USB hard drive. It can only be attached to one instance at a time, so it does not work like a SAN. If you wish to expose the same volume to multiple instances, you will have to use an NFS or SAMBA share from an existing instance. Every instance larger than m1.tiny starts with some local storage (up to 160GB for m1.xlarge). - This storage is currently the second partition on the root drive. OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk 3. Installing OpenStack ComputeThe OpenStack system has several key projects that are separate installations but can + This storage is currently the second partition on the root drive. OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo 3. Installing OpenStack ComputeThe OpenStack system has several key projects that are separate installations but can work together depending on your cloud needs: OpenStack Compute, OpenStack Object Storage, and OpenStack Image Service. You can install any of these projects separately and then - configure them either as standalone or connected entities.System RequirementsSystem RequirementsHardware: OpenStack components are intended to + configure them either as standalone or connected entities.System RequirementsSystem RequirementsHardware: OpenStack components are intended to run on standard hardware. Recommended hardware configurations for a minimum production - deployment are as follows for the cloud controller nodes and compute nodes.Table 3.1. Hardware Recommendations + deployment are as follows for the cloud controller nodes and compute nodes.Table 3.1. Hardware Recommendations Server Recommended Hardware Notes @@ -295,10 +299,9 @@ Technology List. For LXC, the VT extensions are not required. - - Operating System: OpenStack currently has - packages for the following distributions: Ubuntu, RHEL, SUSE, Debian, and Fedora. These - packages are maintained by community members, refer to http://wiki.openstack.org/Packaging for additional links. Networking: 1000 Mbps are suggested. For + Operating System: OpenStack currently has packages + for the following distributions: Ubuntu, RHEL, SUSE, Debian, and Fedora. These packages + are maintained by community members, refer to http://wiki.openstack.org/Packaging for additional links. Networking: 1000 Mbps are suggested. For OpenStack Compute, networking is configured on multi-node installations between the physical machines on a single subnet. For networking between virtual machine instances, three network options are available: flat, DHCP, and VLAN. Two NICs (Network Interface @@ -306,59 +309,39 @@ to either a PostgreSQL or MySQL database, or you can install it as part of the OpenStack Compute installation process.Permissions: You can install OpenStack Compute either as root or as a user with sudo permissions if you configure the sudoers file to - enable all the permissions. Example Installation ArchitecturesExample Installation ArchitecturesOpenStack Compute uses a shared-nothing, messaging-based architecture. While very + enable all the permissions. 
Network Time Protocol: You must install a time synchronization program such as NTP to keep your cloud + controller and compute nodes talking to the same time server to avoid problems scheduling VM launches on compute nodes.Example Installation ArchitecturesExample Installation ArchitecturesOpenStack Compute uses a shared-nothing, messaging-based architecture. While very flexible, the fact that you can install each nova- service on an independent server means there are many possible methods for installing OpenStack Compute. The only co-dependency between possible multi-node installations is that the Dashboard must be - installed nova-api server. Here are the types of installation architectures:Single node: Only one server + installed nova-api server. Here are the types of installation architectures:Single node: Only one server runs all nova- services and also drives all the virtual instances. Use this configuration only for trying out OpenStack Compute, or for development - purposes.Two nodes: A cloud controller node runs the nova- services except for nova-compute, and a + purposes.Two nodes: A cloud controller node runs the nova- services except for nova-compute, and a compute node runs nova-compute. A client computer is likely needed to bundle images and interfacing to the servers, but a client is not required. Use this - configuration for proof of concepts or development environments. Multiple nodes: You can add more compute nodes to the + configuration for proof of concepts or development environments. Multiple nodes: You can add more compute nodes to the two node installation by simply installing nova-compute on an additional server and copying a nova.conf file to the added node. This would result in a multiple node installation. You can also add a volume controller and a network controller as additional nodes in a more complex multiple node installation. A minimum of 4 nodes is best for running multiple virtual instances that require a lot of processing power.This is an illustration of one possible multiple server installation of OpenStack - Compute; virtual server networking in the cluster may vary.An alternative architecture would be to add more messaging servers if you notice a lot + Compute; virtual server networking in the cluster may vary.An alternative architecture would be to add more messaging servers if you notice a lot of back up in the messaging queue causing performance problems. In that case you would add an additional RabbitMQ server in addition to or instead of scaling up the database server. Your installation can run any nova- service on any server as long as the nova.conf is configured to point to the RabbitMQ server and the server can send messages to the server.Multiple installation architectures are possible, here is another example - illustration. Service ArchitectureService ArchitectureBecause Compute has multiple services and many configurations are possible, here is a diagram showing the overall service architecture and communication systems between the services.Installing OpenStack Compute on Ubuntu Installing OpenStack Compute on Ubuntu How you go about installing OpenStack Compute depends on your goals for the + illustration. 
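Whichever layout you choose, each additional nova- service locates the shared infrastructure through its nova.conf. As a minimal sketch (the address and password below are placeholders, not values from this guide), the flags that point a node at the shared message queue and database are:
# Point this node at the shared RabbitMQ server and nova database (placeholder values)
--rabbit_host=192.168.0.10
--sql_connection=mysql://nova:yourpassword@192.168.0.10/nova
Copy the same nova.conf to each node you add so that every service reports to the same queue and database.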
Service ArchitectureService ArchitectureBecause Compute has multiple services and many configurations are possible, here is a diagram showing the overall service architecture and communication systems between the services.Installing OpenStack Compute on Ubuntu Installing OpenStack Compute on Ubuntu How you go about installing OpenStack Compute depends on your goals for the installation. You can use an ISO image, you can use a scripted installation, and you can - manually install with a step-by-step installation.ISO Distribution InstallationISO Distribution InstallationYou can download and use an ISO image that is based on a Ubuntu Linux Server 10.04 + manually install with a step-by-step installation.ISO Distribution InstallationISO Distribution InstallationYou can download and use an ISO image that is based on a Ubuntu Linux Server 10.04 LTS distribution containing only the components needed to run OpenStack Compute. See http://sourceforge.net/projects/stackops/files/ for download files and information, license information, and a README file. For documentation on the StackOps distro, see http://docs.stackops.org. For free support, go to - http://getsatisfaction.com/stackops.Scripted InstallationScripted InstallationYou can download a script from GitHub at https://github.com/elasticdog/OpenStack-NOVA-Installer-Script/raw/master/nova-install.Copy the file to the servers where you want to install OpenStack Compute services - - with multiple servers, you could install a cloud controller node and multiple - compute nodes. The compute nodes manage the virtual machines through the - nova-compute service. The cloud controller node contains all other nova- - services.Ensure you can execute the script by modifying the permissions on the script - file.wget --no-check-certificate https://github.com/elasticdog/OpenStack-NOVA-Installer-Script/raw/master/nova-install -sudo chmod 755 nova-installYou - must run the script with root permissions. sudo bash nova-install -t cloudThe way this script is designed, you can have multiple servers for the cloud - controller, the messaging service, and the database server, or run it all on one - server. The -t or -type parameter has two options: nova-install -t - cloud installs the cloud controller and nova-install -t - compute installs a compute node for an existing cloud controller.These are the parameters you enter using the script: - Enter the Cloud Controller Host IP address.Enter the S3 IP, or use the default address as the current server's IP - address.Enter the RabbitMQ Host IP. Again, you can use the default to install - it to the local server. RabbitMQ will be installed. Enter the MySQL host IP address.Enter the MySQL root password and verify it.Enter a network range for all projects in CIDR format. - The script uses all these values entered for the configuration information to - create the nova.conf configuration file. The script also walks you through creating - a user and project. Enter a user name and project name when prompted. After the script is finished, you also need to create the project zip file. Credentials are generated after you create the project zip file with nova-manage project zipfile projname usernameAfter configuring OpenStack Compute and creating a project zip file using the nova-manage project create command, be sure to unizp the project zip file and then source the novarc - credential file that you extracted. source /root/creds/novarc Now all the necessary nova services are started up and you can begin to issue - nova-manage commands. 
If you configured it to all run from one server, you're done. - If you have a second server that you intend to use as a compute node (a node that - does not contain the database), install the nova services on the second node using - the -t compute parameters using the same nova-install script.To run from two or more servers, copy the nova.conf from the cloud controller node to the compute node. Manual InstallationManual InstallationThe manual installation involves installing from packages on Ubuntu 10.04 or 10.10 + http://getsatisfaction.com/stackops.Scripted InstallationScripted InstallationYou can download a script for a standalone install for proof-of-concept, learning, or for development purposes for Ubuntu 11.04 at https://devstack.org.1.Install Ubuntu 11.04 (Natty):In order to correctly install all the dependencies, we assume a specific version of ubuntu to make it as easy as possible. OpenStack works on other flavors of linux (and some folks even run it on windows!) We recommend using a minimal install of ubuntu server in a VM if this is your first time.2.Download DevStack:git clone git://github.com/cloudbuilders/devstack.gitThe devstack repo contains a script that installs openstack and templates for configuration files + 3.Start the installcd devstack; ./stack.shIt takes a few minutes, we recommend reading the script while it is building.Manual InstallationManual InstallationThe manual installation involves installing from packages on Ubuntu 10.10 or 11.04 as a user with root permission. Depending on your environment, you may need to prefix these commands with sudo.This installation process walks through installing a cloud controller node and a compute node. The cloud controller node contains all the nova- services including @@ -366,16 +349,20 @@ sudo chmod 755 nova-installInstalling the Cloud ControllerFirst, set up pre-requisites to use the Nova PPA (Personal Packages Archive) - provided through https://launchpad.net/~nova-core/+archive/trunk. The - ‘python-software-properties’ package is a pre-requisite for setting up the nova - package repository. You can also use the release package by adding the - ppa:nova-core/release repository.sudo apt-get install python-software-propertiessudo add-apt-repository ppa:nova-core/trunkRun update.sudo apt-get updateInstall the messaging queue server, RabbitMQ.sudo apt-get install -y rabbitmq-serverNow, install the Python dependencies. sudo apt-get install -y python-greenlet python-mysqldb Install the required nova- packages, and dependencies should be automatically - installed.sudo apt-get install -y nova-common nova-doc python-nova nova-api - nova-network nova-objectstore nova-scheduler nova-computeInstall the supplemental tools such as euca2ools and unzip.sudo apt-get install -y euca2ools unzipSetting up the SQL Database (MySQL) on the Cloud ControllerYou must use a SQLAlchemy-compatible database, such as MySQL or + images).Installing the Cloud ControllerFirst, set up pre-requisites to use the Nova PPA (Personal Packages Archive) + provided through https://launchpad.net/. The ‘python-software-properties’ + package is a pre-requisite for setting up the nova package repository. 
You can + also use the trunk package built daily by adding the ppa:nova-core/trunk + repository, but trunk changes rapidly and may not run any given day.sudo apt-get install python-software-propertiessudo add-apt-repository ppa:openstack-release/2011.3Run update.sudo apt-get updateInstall the messaging queue server, RabbitMQ.sudo apt-get install -y rabbitmq-serverNow, install the Python dependencies. sudo apt-get install -y python-greenlet python-mysqldb NoteYou can use either MySQL or PostgreSQL.Install the required nova- packages, and dependencies are automatically + installed. +sudo apt-get install nova-volume nova-vncproxy nova-api nova-ajax-console-proxy +sudo apt-get install nova-doc nova-scheduler nova-objectstore +sudo apt-get install nova-network nova-compute +sudo apt-get install glance +Install the supplemental tools such as euca2ools and unzip.sudo apt-get install -y euca2ools unzip Next set up the database, either MySQL or PostgreSQL.Setting up the SQL Database (MySQL) on the Cloud ControllerYou must use a SQLAlchemy-compatible database, such as MySQL or PostgreSQL. This example shows MySQL. First you can set environments with a "pre-seed" line to bypass all the installation prompts, running this as root: - bash + bash MYSQL_PASS=nova NOVA_PASS=notnova cat <<MYSQL_PRESEED | debconf-set-selections @@ -387,72 +374,61 @@ MYSQL_PRESEED mysql-server Edit /etc/mysql/my.cnf to change ‘bind-address’ from localhost (127.0.0.1) to any (0.0.0.0) and restart the mysql service: - sudo sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf -sudo service mysql restartTo configure the MySQL database, create the nova database: sudo mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'Update the DB to give user ‘nova’@’%’ full control of the nova + sudo sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf +sudo service mysql restartTo configure the MySQL database, create the nova database: sudo mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'Update the DB to give user ‘nova’@’%’ full control of the nova database: - sudo mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO + sudo mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'nova'@'%' WITH GRANT OPTION;" Set MySQL password for 'nova'@'%': - sudo mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'nova'@'%' = + sudo mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'nova'@'%' = PASSWORD('$NOVA_PASS');" - Installing the Compute NodeThere are many different ways to perform a multinode install of Compute. In + Setting Up PostgreSQL as the DatabaseOpenStack can use PostgreSQL as an alternative database. This is a matter of substituting the MySQL steps with PostgreSQL equivalents, as outlined here.First, install PostgreSQL on the controller node.$ apt-fast install postgresql postgresql-server-dev-8.4 python-dev python-psycopg2Edit /etc/postgresql/8.4/main/postgresql.conf and change the listen_address to listen to all appropriate addesses, PostgreSQL listen only to localhost by default. For example:To listen on a specific IP address:# - Connection Settings - + listen_address = '10.1.1.200,192.168.100.2'To listen on all addresses:# - Connection Settings - + listen_address = '*'Add appropriate addresses and networks to /etc/postgresql/8.4/main/pg_hba.conf to allow remote access to PostgreSQL, this should include all servers hosting OpenStack (but not neccessarily those hosted by Openstack). 
As an example, append the following lines:host all all 192.168.0.0/16 + host all all 10.1.0.0/16 + Change the default PostgreSQL user's password:$ sudo -u postgres psql template1 + template1=#\password + Enter Password: + Enter again: + template1=#\qRestart PostgreSQL:$ service postgresql restartCreate nova databases:$ sudo -u postgres createdb nova + $ sudo -u postgres createdb glanceCreate nova database user which will be used for all OpenStack services, note the adduser and createuser steps will prompt for the user's password ($PG_PASS):$ adduser nova + $ sudo -u postgres createuser -PSDR nova + $ sudo -u postgres psql template1 + template1=#GRANT ALL PRIVILEGES ON DATABASE nova TO nova + template1=#GRANT ALL PRIVILEGES ON DATABASE glance TO nova + template1=#\qFor the Cactus version of Nova, the following fix is required for the PostgreSQL database schema. You don't need to do this for Diablo:$ sudo -u postgres psql template1 + template1=#alter table instances alter instance_type_id type integer using cast(instance_type_id as integer); + template1=#\qFor Nova components that require access to this database the required configuration in /etc/nova/nova.conf should be (replace $PG_PASS with password):--sql_connection=postgresql://nova:$PG_PASS@control.example.com/novaAt this stage the databases are empty and contain no content. These will be initialised when you do the nova-manage db sync command. Installing the Compute NodeThere are many different ways to perform a multinode install of Compute. In this case, you can install all the nova- packages and dependencies as you did for the Cloud Controller node, or just install nova-network and nova-compute. Your installation can run any nova- services anywhere, so long as the service can access nova.conf so it knows where the rabbitmq server is installed.The Compute Node is where you configure the Compute network, the networking - between your instances. There are three options: flat, flatDHCP, and - VLAN.If you use FlatManager as your network manager, there are some additional - networking changes to ensure connectivity between your nodes and VMs. If you - chose VlanManager or FlatDHCP, you may skip this section because they are set up - for you automatically. Compute defaults to a bridge device named ‘br100’. This needs to be created - and somehow integrated into your network. To keep things as simple as possible, - have all the VM guests on the same network as the VM hosts (the compute nodes). - To do so, set the compute node’s external IP address to be on the bridge and add - eth0 to that bridge. To do this, edit your network interfaces configuration to - look like the following example: - -< begin /etc/network/interfaces > -# The loopback network interface -auto lo -iface lo inet loopback - -# Networking for OpenStack Compute -auto br100 - -iface br100 inet dhcp -bridge_ports eth0 -bridge_stp off -bridge_maxwait 0 -bridge_fd 0 -< end /etc/network/interfaces > - - Next, restart networking to apply the changes: sudo /etc/init.d/networking restartIf you use flat networking, you must manually insert the IP address into the - 'fixed_ips' table in the nova database. Also ensure that the database lists the - bridge name correctly that matches the network configuration you are working - within. Flat networking should insert this automatically but you may need to - check it.Because you may need to query the database from the Compute node and learn + between your instances. There are three options: flat, flatDHCP, and VLAN. 
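The choice is recorded in nova.conf with the --network_manager flag; as a brief sketch, the three managers named in this guide are selected like this (set exactly one):
# Flat networking
--network_manager=nova.network.manager.FlatManager
# Flat networking with DHCP
--network_manager=nova.network.manager.FlatDHCPManager
# VLAN networking with DHCP (the default when no manager is set)
--network_manager=nova.network.manager.VlanManager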
Read more about specific configurations in Chapter 7, Networking. Because you may need to query the database from the Compute node and learn more information about instances, the euca2ools and mysql-client packages should be installed on any additional Compute nodes.Restart All Relevant Services on the Compute NodeOn both nodes, restart all six services in total, just to cover the entire spectrum:
restart libvirt-bin; restart nova-network; restart nova-compute; 
restart nova-api; restart nova-objectstore; restart nova-scheduler
All nova services are now installed; the rest of your steps involve specific configuration. Please refer to the section called “Post-Installation Configuration for OpenStack Compute” for additional information. Installing OpenStack Compute on Red Hat Enterprise Linux 6 Installing OpenStack Compute on Red Hat Enterprise Linux 6 This section documents a multi-node installation using RHEL 6. RPM repos for the Bexar release, the Cactus release, milestone releases of Diablo, and also per-commit trunk builds for OpenStack Nova are available at http://yum.griddynamics.net. The final release of Diablo is not yet tested and released (as of Oct 4, 2011), but check this page for updates: http://wiki.openstack.org/NovaInstall/RHEL6Notes.Known limitations for RHEL version 6 installations: iSCSI LUN not supported due to tgtadm versus ietadm differencesOnly the KVM hypervisor has been tested with this installationTo install Nova on RHEL 6 you need access to two repositories, one available on the yum.griddynamics.net website and the RHEL DVD image connected as a repository. First, install RHEL 6.0, preferably with a minimal set of packages.Disable SELinux in /etc/sysconfig/selinux and then reboot. Connect the RHEL 6.0 x86_64 DVD as a repository in YUM. sudo mount /dev/cdrom /mnt/cdrom
cat /etc/yum.repos.d/rhel.repo
[rhel]
name=RHEL 6.0
baseurl=file:///mnt/cdrom/Server
enabled=1
gpgcheck=0Download and install the repo config and key.wget http://yum.griddynamics.net/yum/diablo-3/openstack/openstack-repo-2011.3-0.3.noarch.rpm
sudo rpm -i openstack-repo-2011.3-0.3.noarch.rpmInstall the libvirt package (these instructions are tested only on KVM). 
sudo yum install libvirt
sudo chkconfig libvirtd on
sudo service libvirtd start
Repeat the basic installation steps to put the pre-requisites on all cloud controller and compute nodes. Nova has many different possible configurations. You can install Nova services on separate servers as needed, but these are the basic pre-requisites.These are the basic packages to install for a cloud controller node:sudo yum install euca2ools openstack-nova-node-fullThese are the basic packages to install on compute nodes. Repeat for each compute node (the node that runs the VMs) that you want to install.sudo yum install openstack-nova-compute On the cloud controller node, create a MySQL database named nova. sudo service mysqld start
sudo chkconfig mysqld on
sudo service rabbitmq-server start
sudo chkconfig rabbitmq-server on
mysqladmin -uroot password nova
You can use this script to create the database. 
#!/bin/bash DB_NAME=nova DB_USER=nova @@ -469,25 +445,37 @@ for h in $HOSTS localhost; do echo "GRANT ALL PRIVILEGES ON $DB_NAME.* TO '$DB_USER'@'$h' IDENTIFIED BY '$DB_PASS';" | mysql -uroot -p$DB_PASS mysql done echo "GRANT ALL PRIVILEGES ON $DB_NAME.* TO $DB_USER IDENTIFIED BY '$DB_PASS';" | mysql -uroot -p$DB_PASS mysql -echo "GRANT ALL PRIVILEGES ON $DB_NAME.* TO root IDENTIFIED BY '$DB_PASS';" | mysql -uroot -p$DB_PASS mysql Now, ensure the database version matches the version of nova that you are installing:nova-manage db syncOn each node, set up the configuration file in /etc/nova/nova.conf.Start the Nova services after configuring and you then are running an OpenStack - cloud!for n in api compute network objectstore scheduler volume; do sudo service openstack-nova-$n start; done -sudo service openstack-glance start -for n in node1 node2 node3; do ssh $n sudo service openstack-nova-compute start; donePost-Installation Configuration for OpenStack ComputePost-Installation Configuration for OpenStack ComputeConfiguring your Compute installation involves nova-manage commands plus editing the +echo "GRANT ALL PRIVILEGES ON $DB_NAME.* TO root IDENTIFIED BY '$DB_PASS';" | mysql -uroot -p$DB_PASS mysql Now, ensure the database version matches the version of nova that you are installing:nova-manage db syncFor iptables configuration, update your firewall configuration to allow incoming + requests on ports 5672 (RabbitMQ), 3306 (MySQL DB), 9292 (Glance), 6080 (noVNC web + console), API (8773, 8774) and DHCP traffic from instances. For non-production + environments the easiest way to fix any firewall problems is removing final REJECT in + INPUT chain of filter table. $ sudo iptables -I INPUT 1 -p tcp --dport 5672 -j ACCEPT + $ sudo iptables -I INPUT 1 -p tcp --dport 3306 -j ACCEPT + $ sudo iptables -I INPUT 1 -p tcp --dport 9292 -j ACCEPT + $ sudo iptables -I INPUT 1 -p tcp --dport 6080 -j ACCEPT + $ sudo iptables -I INPUT 1 -p tcp --dport 8773 -j ACCEPT + $ sudo iptables -I INPUT 1 -p tcp --dport 8774 -j ACCEPT + $ sudo iptables -I INPUT 1 -p udp --dport 67 -j ACCEPTOn every node when you have nova-compute running ensure that unencrypted VNC access is allowed only from Cloud Controller node:$ sudo iptables -I INPUT 1 -p tcp -s <CLOUD_CONTROLLER_IP_ADDRESS> --dport 5900:6400 -j ACCEPT + On each node, set up the configuration file in /etc/nova/nova.conf.Start the Nova services after configuring and you then are running an OpenStack + cloud!$ for n in api compute network objectstore scheduler vncproxy; do sudo service openstack-nova-$n start; done +$ sudo service openstack-glance-api start +$ sudo service openstack-glance-registry start +$ for n in node1 node2 node3; do ssh $n sudo service openstack-nova-compute start; donePost-Installation Configuration for OpenStack ComputePost-Installation Configuration for OpenStack ComputeConfiguring your Compute installation involves nova-manage commands plus editing the nova.conf file to ensure the correct flags are set. This section contains the basics for a simple multi-node installation, but Compute can be configured many ways. You can find networking options and hypervisor options described in separate chapters, and you will - read about additional configuration information in a separate chapter as well.Setting Flags in the nova.conf FileSetting Flags in the nova.conf FileThe configuration file nova.conf is installed in /etc/nova by default. 
You only + read about additional configuration information in a separate chapter as well.Setting Flags in the nova.conf FileSetting Flags in the nova.conf FileThe configuration file nova.conf is installed in /etc/nova by default. You only need to do these steps when installing manually, the scripted installation above does this configuration during the installation. A default set of options are already configured in nova.conf when you install manually. The defaults are as - follows:--daemonize=1 + follows:--daemonize=1 --dhcpbridge_flagfile=/etc/nova/nova.conf --dhcpbridge=/usr/bin/nova-dhcpbridge --logdir=/var/log/nova --state_path=/var/lib/nova Starting with the default file, you must define the following required items in /etc/nova/nova.conf. The flag variables are described below. You can place comments in the nova.conf file by entering a new line with a # sign at the beginning of the line. To see a listing of all possible flag settings, see - the output of running /bin/nova-api --help.Table 3.2. Description of nova.conf flags (not comprehensive) + the output of running /bin/nova-api --help.Table 3.2. Description of nova.conf flags (not comprehensive) Flag Description @@ -515,7 +503,7 @@ for n in node1 node2 node3; do ssh $n sudo service openstack-nova-compute start; Configures how your controller will communicate with additional OpenStack Compute nodes and virtual machines. Options: - nova.network.manager.FlatManagerSimple, non-VLAN networkingnova.network.manager.FlatDHCPManagerFlat networking with DHCPnova.network.manager.VlanManagerVLAN networking with DHCP; This is the Default if no + nova.network.manager.FlatManagerSimple, non-VLAN networkingnova.network.manager.FlatDHCPManagerFlat networking with DHCPnova.network.manager.VlanManagerVLAN networking with DHCP; This is the Default if no network manager is defined here in nova.conf. @@ -527,7 +515,7 @@ for n in node1 node2 node3; do ssh $n sudo service openstack-nova-compute start; Number value; Number of addresses in each private subnet. Here is a simple example nova.conf file for a small private cloud, with all the cloud controller services, database server, and messaging server on the same - server.--dhcpbridge_flagfile=/etc/nova/nova.conf + server.--dhcpbridge_flagfile=/etc/nova/nova.conf --dhcpbridge=/usr/bin/nova-dhcpbridge --logdir=/var/log/nova --state_path=/var/lib/nova @@ -539,17 +527,17 @@ for n in node1 node2 node3; do ssh $n sudo service openstack-nova-compute start; --fixed_range=192.168.0.0/16 --network_size=8 --routing_source_ip=184.106.239.134 ---sql_connection=mysql://nova:notnova@184.106.239.134/nova Create a “nova” group, so you can set permissions on the configuration file: sudo addgroup novaThe nova.config file should have its owner set to root:nova, and mode set to 0640, - since the file contains your MySQL server’s username and password. chown -R root:nova /etc/nova -chmod 640 /etc/nova/nova.confSetting Up OpenStack Compute Environment on the Compute NodeSetting Up OpenStack Compute Environment on the Compute NodeThese are the commands you run to ensure the database schema is current, and +--sql_connection=mysql://nova:notnova@184.106.239.134/nova Create a “nova” group, so you can set permissions on the configuration file: sudo addgroup novaThe nova.config file should have its owner set to root:nova, and mode set to 0640, + since the file contains your MySQL server’s username and password. 
chown -R root:nova /etc/nova +chmod 640 /etc/nova/nova.confSetting Up OpenStack Compute Environment on the Compute NodeSetting Up OpenStack Compute Environment on the Compute NodeThese are the commands you run to ensure the database schema is current, and then set up a user and project: -/usr/bin/nova-manage db sync +/usr/bin/nova-manage db sync /usr/bin/nova-manage user admin <user_name> /usr/bin/nova-manage project create <project_name> <user_name> -/usr/bin/nova-manage network create <project-network> <number-of-networks-in-project> <addresses-in-each-network>Here is an example of what this looks like with real values entered: /usr/bin/nova-manage db sync +/usr/bin/nova-manage network create <network-label> <project-network> <number-of-networks-in-project> <addresses-in-each-network>Here is an example of what this looks like with real values entered: /usr/bin/nova-manage db sync /usr/bin/nova-manage user admin dub /usr/bin/nova-manage project create dubproject dub -/usr/bin/nova-manage network create 192.168.0.0/24 1 256 For this example, the number of IPs is /24 since that falls inside the /16 +/usr/bin/nova-manage network create novanet 192.168.0.0/24 1 256 For this example, the number of IPs is /24 since that falls inside the /16 range that was set in ‘fixed-range’ in nova.conf. Currently, there can only be one network, and this set up would use the max IPs available in a /24. You can choose values that let you use any valid amount that you would like. The nova-manage service assumes that the first IP address is your network @@ -559,32 +547,36 @@ chmod 640 /etc/nova/nova.confCreating CertificationsCreating CertificationsGenerate the certifications as a zip file. These are the certs you will use to - launch instances, bundle images, and all the other assorted API functions. - mkdir –p /root/creds + automatically for Flat Manager.Creating CredentialsCreating CredentialsGenerate the credentials as a zip file. These are the certs you will use to + launch instances, bundle images, and all the other assorted API functions. + mkdir –p /root/creds /usr/bin/python /usr/bin/nova-manage project zipfile $NOVA_PROJECT $NOVA_PROJECT_USER /root/creds/novacreds.zip If you are using one of the Flat modes for networking, you may see a Warning message "No vpn data for project <project_name>" which you can safely - ignore.Unzip them in your home directory, and add them to your environment. unzip /root/creds/novacreds.zip -d /root/creds/ + ignore.Unzip them in your home directory, and add them to your environment. unzip /root/creds/novacreds.zip -d /root/creds/ cat /root/creds/novarc >> ~/.bashrc -source ~/.bashrc Enabling Access to VMs on the Compute NodeEnabling Access to VMs on the Compute NodeOne of the most commonly missed configuration areas is not allowing the proper +source ~/.bashrc + If you already have Nova credentials present in your environment, you can use a script included with Glance the Image Service, tools/nova_to_os_env.sh, to create Glance-style credentials. This script adds OS_AUTH credentials to the environment which are used by the Image Service to enable private images when the Identity Service is configured as the authentication system for Compute and the Image Service.Enabling Access to VMs on the Compute NodeEnabling Access to VMs on the Compute NodeOne of the most commonly missed configuration areas is not allowing the proper access to VMs. Use the ‘euca-authorize’ command to enable access. 
Below, you - will find the commands to allow ‘ping’ and ‘ssh’ to your VMs: euca-authorize -P icmp -t -1:-1 default + will find the commands to allow ‘ping’ and ‘ssh’ to your VMs: euca-authorize -P icmp -t -1:-1 default euca-authorize -P tcp -p 22 defaultAnother common issue is you cannot ping or SSH your instances after issuing the ‘euca-authorize’ commands. Something to look at is the amount of ‘dnsmasq’ processes that are running. If you have a running instance, check to see that - TWO ‘dnsmasq’ processes are running. If not, perform the following:killall dnsmasq -service nova-network restartConfiguring Multiple Compute NodesConfiguring Multiple Compute NodesIf your goal is to split your VM load across more than one server, you can connect an + TWO ‘dnsmasq’ processes are running. If not, perform the following:killall dnsmasq +service nova-network restartConfiguring Multiple Compute NodesConfiguring Multiple Compute NodesIf your goal is to split your VM load across more than one server, you can connect an additional nova-compute node to a cloud controller node. This configuring can be reproduced on multiple compute servers to start building a true multi-node OpenStack Compute cluster. To build out and scale the Compute platform, you spread out services amongst many servers. While there are additional ways to accomplish the build-out, this section describes adding compute nodes, and the service we are scaling out is called - 'nova-compute.'With the Bexar release we have two configuration files: nova-api.conf and nova.conf. For a multi-node install you only make changes to nova.conf and copy it to additional compute nodes. Ensure each nova.conf file points to the correct IP addresses for the respective services. Customize the nova.config example below to match your environment. The CC_ADDR is the Cloud Controller IP Address. - + 'nova-compute.'For a multi-node install you only make changes to nova.conf and copy it to + additional compute nodes. Ensure each nova.conf file points to the correct IP + addresses for the respective services. Customize the nova.conf example below to + match your environment. The CC_ADDR is the Cloud Controller IP Address. --dhcpbridge_flagfile=/etc/nova/nova.conf --dhcpbridge=/usr/bin/nova-dhcpbridge + --flat_network_bridge=br100 --logdir=/var/log/nova --state_path=/var/lib/nova --verbose @@ -595,7 +587,9 @@ service nova-network restartBy default, Nova sets 'br100' as the bridge device, and this is what needs to be done next. Edit /etc/network/interfaces with the following template, updated with your IP information. + --network_size=number of addressesBy default, Nova sets the bridge device based on the setting in --flat_network_bridge. Now you + can edit /etc/network/interfaces with the following template, updated with your IP + information. # The loopback network interface auto lo iface lo inet loopback @@ -613,8 +607,9 @@ service nova-network restartRestart networking:/etc/init.d/networking restartWith nova.conf updated and networking set, configuration is nearly complete. First, lets bounce the relevant services to take the latest updates:restart libvirt-bin; service nova-compute restartTo avoid issues with KVM and permissions with Nova, run the following commands to ensure we have VM's that are running optimally:chgrp kvm /dev/kvm -chmod g+rwx /dev/kvmIf you want to use the 10.04 Ubuntu Enterprise Cloud images that are readily available at http://uec-images.ubuntu.com/releases/10.04/release/, you may run into delays with booting. 
Any server that does not have nova-api running on it needs this iptables entry so that UEC images can get metadata info. On compute nodes, configure the iptables with this next step: # iptables -t nat -A PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $NOVA_API_IP:8773Lastly, confirm that your compute node is talking to your cloud controller. From the cloud controller, run this database query:mysql -u$MYSQL_USER -p$MYSQL_PASS nova -e 'select * from services;'In return, you should see something similar to this: +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+ + dns-nameservers xxx.xxx.xxx.xxxRestart networking:/etc/init.d/networking restartWith nova.conf updated and networking set, configuration is nearly complete. + First, bounce the relevant services to take the latest updates:restart libvirt-bin; service nova-compute restartTo avoid issues with KVM and permissions with Nova, run the following commands to ensure we have VM's that are running optimally:chgrp kvm /dev/kvm +chmod g+rwx /dev/kvmIf you want to use the 10.04 Ubuntu Enterprise Cloud images that are readily available at http://uec-images.ubuntu.com/releases/10.04/release/, you may run into delays with booting. Any server that does not have nova-api running on it needs this iptables entry so that UEC images can get metadata info. On compute nodes, configure the iptables with this next step: # iptables -t nat -A PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $NOVA_API_IP:8773Lastly, confirm that your compute node is talking to your cloud controller. From the cloud controller, run this database query:mysql -u$MYSQL_USER -p$MYSQL_PASS nova -e 'select * from services;'In return, you should see something similar to this: +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+ | created_at | updated_at | deleted_at | deleted | id | host | binary | topic | report_count | disabled | availability_zone | +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+ | 2011-01-28 22:52:46 | 2011-02-03 06:55:48 | NULL | 0 | 1 | osdemo02 | nova-network | network | 46064 | 0 | nova | @@ -623,21 +618,21 @@ chmod g+rwx /dev/kvmYou can see that 'osdemo0{1,2,4,5} are all running 'nova-compute.' When you start spinning up instances, they will allocate on any node that is running nova-compute from this list.Determining the Version of ComputeDetermining the Version of ComputeIn the Diablo release, you can find the version of the installation by using the - nova-manage command:nova-manage version listMigrating from Bexar to CactusMigrating from Bexar to CactusIf you have an installation already installed and running, to migrate to - Cactus you must update the installation first, then your database, then perhaps - your images if you were already running images on Bexar in the nova-objectstore. - If you were running images through Glance, your images should work automatically - after an upgrade. Here are the overall steps. If your installation already pointed to ppa:nova-core/release, the release - package has been updated from Bexar to Cactus so you can simply run: apt-get update -apt-get upgradeNext, update the database schema. 
nova-manage db syncRestart all the nova- services. Make sure that you can launch images. You can convert images that were previously stored in the nova object store using this command: nova-manage image convert /var/lib/nova/images OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk 4. Configuring OpenStack ComputeThe OpenStack system has several key projects that are separate installations but can + +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+You can see that 'osdemo0{1,2,4,5} are all running 'nova-compute.' When you start spinning up instances, they will allocate on any node that is running nova-compute from this list.Determining the Version of ComputeDetermining the Version of ComputeYou can find the version of the installation by using the nova-manage + command:nova-manage version listMigrating from Cactus to DiabloMigrating from Cactus to DiabloIf you have an installation already installed and running, to migrate to Diablo + you must update the installation first, then your database, then perhaps your images + if you were already running images in the nova-objectstore. You can also export your + users for importing into the OpenStack Identity Service (Keystone). Here are the overall steps for upgrading the Image Service.Download and install the Diablo Glance packages.Migrate the registry database schema by running: glance-manage db_sync Update configuration files, including the glance-api.conf and glance-registry.conf configuration files by using the examples in the examples/paste directory for the Diablo release.Here are the overall steps for upgrading Compute. If your installation already pointed to ppa:nova-core/release, the release + package has been updated from Cactus to Diablo so you can simply run: apt-get update +apt-get upgradeNext, update the database schema. nova-manage db syncRestart all the nova- services. A separate command is available to migrate users from the deprecated auth system to the Identity Service. nova-manage shell exportWithin the Keystone project there is a keystone-import script that you can run to + import these users.Make sure that you can launch images. You can convert images that were previously stored in the nova object store using this command: nova-manage image convert /var/lib/nova/images OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo 4. Configuring OpenStack ComputeThe OpenStack system has several key projects that are separate installations but can work together depending on your cloud needs: OpenStack Compute, OpenStack Object Storage, and OpenStack Image Store. You can install any of these projects separately and - then configure them either as standalone or connected entities.General Compute Configuration OverviewGeneral Compute Configuration OverviewMost configuration information is available in the nova.conf flag file. Here are + then configure them either as standalone or connected entities.General Compute Configuration OverviewGeneral Compute Configuration OverviewMost configuration information is available in the nova.conf flag file. Here are some general purpose flags that you can use to learn more about the flag file and the node. 
The configuration file nova.conf is typically stored in /etc/nova/nova.conf.You can use a particular flag file by using the --flagfile (nova.conf) parameter when - running one of the nova- services. This inserts flag definitions from the given configuration file name, which may be useful for debugging or performance tuning. Here are some general purpose flags. Table 4.1. Description of general purpose nova.conf flags + running one of the nova- services. This inserts flag definitions from the given configuration file name, which may be useful for debugging or performance tuning. Here are some general purpose flags. Table 4.1. Description of general purpose nova.conf flags Flag Default Description @@ -661,7 +656,7 @@ apt-get upgrade--[no]helpxml None Show this help, but with XML output instead of text - If you want to maintain the state of all the services, you can use the --state_path flag to indicate a top-level directory for storing data related to the state of Compute including images if you are using the Compute object store. Here are additional flags that apply to all nova- services.Table 4.2. Description of nova.conf flags for all services + If you want to maintain the state of all the services, you can use the --state_path flag to indicate a top-level directory for storing data related to the state of Compute including images if you are using the Compute object store. Here are additional flags that apply to all nova- services.Table 4.2. Description of nova.conf flags for all services Flag Default Description @@ -677,7 +672,9 @@ apt-get upgrade--report_interval default: '10' Integer value; Seconds between nodes reporting state to the data store. - Example nova.conf Configuration FilesExample nova.conf Configuration FilesThe following sections describe many of the flag settings that can go into the nova.conf files. These need to be copied to each compute node. Here are some sample nova.conf files that offer examples of specific configurationsConfiguration using KVM, FlatDHCP, MySQL, Glance, LDAP, and optionally sheepdog, API is EC2Configuration using KVM, FlatDHCP, MySQL, Glance, LDAP, and optionally sheepdog, API is EC2From wikimedia.org, used with permission. Where you see parameters passed in, it's likely an IP address you need. + Example nova.conf Configuration FilesExample nova.conf Configuration FilesThe following sections describe many of the flag settings that can go into the + nova.conf files. These need to be copied to each compute node. Here are some sample + nova.conf files that offer examples of specific configurations.Configuration using KVM, FlatDHCP, MySQL, Glance, LDAP, and optionally sheepdog, API is EC2Configuration using KVM, FlatDHCP, MySQL, Glance, LDAP, and optionally sheepdog, API is EC2From wikimedia.org, used with permission. Where you see parameters passed in, it's likely an IP address you need. # configured using KVM, FlatDHCP, MySQL, Glance, LDAP, and optionally sheepdog, API is EC2 --verbose --daemonize=1 @@ -693,6 +690,7 @@ apt-get upgradeKVM, Flat, MySQL, and Glance, OpenStack or EC2 APIKVM, Flat, MySQL, and Glance, OpenStack or EC2 APIThis example nova.conf file is from an internal Rackspace test system used for demonstrations. + KVM, Flat, MySQL, and Glance, OpenStack or EC2 APIKVM, Flat, MySQL, and Glance, OpenStack or EC2 APIThis example nova.conf file is from an internal Rackspace test system used for demonstrations. 
# configured using KVM, Flat, MySQL, and Glance, API is OpenStack (or EC2) --daemonize=1 --dhcpbridge_flagfile=/etc/nova/nova.conf --dhcpbridge=/usr/bin/nova-dhcpbridge +--flat_network_bridge=br100 --lock_path=/var/lock/nova --logdir=/var/log/nova --state_path=/var/lib/nova @@ -730,7 +729,7 @@ apt-get upgradeConfiguring LoggingConfiguring LoggingYou can use nova.conf flags to indicate where Compute will log events, the level of logging, and customize log formats.Table 4.3. Description of nova.conf flags for logging + Configuring LoggingConfiguring LoggingYou can use nova.conf flags to indicate where Compute will log events, the level of logging, and customize log formats.Table 4.3. Description of nova.conf flags for logging Flag Default Description @@ -755,7 +754,7 @@ apt-get upgrade--verbose default: 'false' Set to 1 or true to turn on; Shows debug output - optional but helpful during initial setup. - To customize log formats for OpenStack Compute, use these flag settings.Table 4.4. Description of nova.conf flags for customized log formats + To customize log formats for OpenStack Compute, use these flag settings.Table 4.4. Description of nova.conf flags for customized log formats Flag Default Description @@ -777,7 +776,133 @@ apt-get upgrade--logging_exception_prefix default: '(%(name)s): TRACE: ' String value; Prefix each line of exception output with this format. - Configuring Hypervisors Configuring Hypervisors OpenStack Compute requires a hypervisor and supports several hypervisors and virtualization standards. Configuring and running OpenStack Compute to use a particular hypervisor takes several installation and configuration steps. Configuring Compute to use IPv6 Addresses Configuring Compute to use IPv6 Addresses You can configure Compute to use both IPv4 and IPv6 addresses for communication by + Configuring Hypervisors Configuring Hypervisors OpenStack Compute requires a hypervisor and supports several hypervisors and virtualization standards. Configuring and running OpenStack Compute to use a particular hypervisor takes several installation and configuration steps. Configuring Authentication and Authorization Configuring Authentication and Authorization There are different methods of authentication for the OpenStack Compute project. The + default setting is to use the novarc file that contains credentials. To do so, set the + --use_deprecated-auth flag in your nova.conf, which is True by default. For no auth, + modify the paste.ini that is included in the etc/nova directory. With additional + configuration, you can use the OpenStack Identity Service, code-named Keystone. In + Compute, the settings for using Keystone are commented lines in etc/nova/api-paste.ini, + and Keystone also provides an example file in + keystone/examples/paste/nova-api-paste.ini. Restart the nova-api service for these + settings to be configured. Refer to the Identity Service Starter Guide for additional + information.OpenStack Compute uses an implementation of an authentication system structured with + an Active Directory or other federated LDAP user store that backends to an identity + manager or other SAML Policy Controller that then maps to groups. Credentials for API + calls are stored in the project zip file when using this auth system. Certificate + authority is also customized in nova.conf for the this built-in auth system. If you see errors such as "EC2ResponseError: 403 Forbidden" it is likely you are + trying to use euca commands without the auth system properly configured. 
Either install + and use the default auth setting, or change out the default paste.ini file to use no + auth, or configure the Identity Service.Table 4.5. Description of nova.conf flags for Authentication + Flag + Default + Description + + --auth_driver + default:'nova.auth.dbdriver.DbDriver' + + String value; Name of the driver for authentication + nova.auth.dbdriver.DbDriver - Default setting, uses + credentials stored in zip file, one per project.nova.auth.ldapdriver.FakeLdapDriver - create a replacement for + this driver supporting other backends by creating another class + that exposes the same public methods. + + + --use_deprecated_auth + default:'True' + + True or false; Sets the auth system to use the zip file provided with + the project files to store all credentials + + Table 4.6. Description of nova.conf flags for customizing roles in deprecated + auth + Flag + Default + Description + + --allowed_roles + default: 'cloudadmin,itsec,sysadmin,netadmin,developer') + Comma separated list; Allowed roles for project + + --global_roles + default: 'cloudadmin,itsec') + Comma separated list; Roles that apply to all projects + + --superuser_roles + default: 'cloudadmin') + Comma separated list; Roles that ignore authorization checking + completely + Table 4.7. Description of nova.conf flags for credentials in deprecated auth + Flag + Default + Description + + --credentials_template + default: '') + Directory; Template for creating users' RC file + + --credential_rc_file + default: '%src') + File name; File name of rc in credentials zip + + --credential_cert_file + default: 'cert.pem') + File name; File name of certificate in credentials zip + + --credential_key_file + default: 'pk.pem') + File name; File name of rc in credentials zip + + --vpn_client_template + default: 'nova/cloudpipe/client/ovpn.template') + Directory; Refers to where the template lives for creating users vpn + file + + --credential_vpn_file + default: 'nova-vpn.conf') + File name; Filename of certificate in credentials.zip + Table 4.8. Description of nova.conf flags for CA (Certificate Authority) + Flag + Default + Description + + --keys_path + default: '$state_path/keys') + Directory; Where Nova keeps the keys + + --ca_file + default: 'cacert.pem') + File name; File name of root CA + + --crl_file + default: 'crl.pem') + File name; File name of Certificate Revocation List + + --key_file + default: 'private/cakey.pem') + File name; File name of private key + + --use_project_ca + default: 'false') + True or false; Indicates whether to use a CA for each project; false means + CA is not used for each project + + --project_cert_subject + default: '/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=proje + ct-ca-%s-%s') + String; Subject for certificate for projects, %s for project, timestamp + + + --user_cert_subject + default: + '/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=%s-%s-%s') + String; Subject for certificate for users, %s for project, users, timestamp + + + --vpn_cert_subject + default: + '/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=project-vpn-%s-%s') + String; Subject for certificate for vpns, %s for project, timestamp + Configuring Compute to use IPv6 Addresses Configuring Compute to use IPv6 Addresses You can configure Compute to use both IPv4 and IPv6 addresses for communication by putting it into a IPv4/IPv6 dual stack mode. 
In IPv4/IPv6 dual stack mode, instances can acquire their IPv6 global unicast address by stateless address autoconfiguration mechanism [RFC 4862/2462]. IPv4/IPv6 dual stack mode works with VlanManager and @@ -790,23 +915,23 @@ apt-get upgradeOn all nova-nodes, install python-netaddr: - sudo apt-get install -y python-netaddr - On all nova-network nodes install radvd and configure IPv6 networking: sudo apt-get install -y radvd + sudo apt-get install -y python-netaddr + On all nova-network nodes install radvd and configure IPv6 networking: sudo apt-get install -y radvd sudo bash -c "echo 1 > /proc/sys/net/ipv6/conf/all/forwarding" sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra"Edit the nova.conf file on all nodes to set the --use_ipv6 flag to True. Restart all nova- services. When using the command 'nova-manage network create' you can add a fixed range for IPv6 addresses. You must specify public or private after the create parameter. - nova-manage network create public fixed_range num_networks network_size [vlan_start] [vpn_start] [fixed_range_v6] + nova-manage network create public fixed_range num_networks network_size [vlan_start] [vpn_start] [fixed_range_v6] You can set IPv6 global routing prefix by using the fixed_range_v6 parameter. The default is: fd00::/48. When you use FlatDHCPManager, the command uses the original value of fixed_range_v6. When you use VlanManager, the command creates prefixes of subnet by incrementing subnet id. Guest VMs uses this prefix for generating their IPv6 global unicast address. Here is a usage example for VlanManager: - nova-manage network create public 10.0.1.0/24 3 32 100 1000 fd00:1::/48 + nova-manage network create public 10.0.1.0/24 3 32 100 1000 fd00:1::/48 Here is a usage example for FlatDHCPManager: - nova-manage network create public 10.0.2.0/24 3 32 0 0 fd00:1::/48 + nova-manage network create public 10.0.2.0/24 3 32 0 0 fd00:1::/48 Note that [vlan_start] and [vpn_start] parameters are not used by - FlatDHCPManager.Table 4.5. Description of nova.conf flags for configuring IPv6 + FlatDHCPManager.Table 4.9. Description of nova.conf flags for configuring IPv6 Flag Default Description @@ -817,10 +942,10 @@ sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra"--flat_injected default: 'false' Cactus only:Indicates whether Compute (Nova) should use attempt to inject IPv6 network configuration information into the guest. It attempts to modify /etc/network/interfaces and currently only works on Debian-based systems. - Configuring Image Service and Storage for ComputeConfiguring Image Service and Storage for ComputeYou can either use a local image storage system or install Glance for storing and retrieving images. After you have installed a Glance server, you can configure nova-compute to + Configuring Image Service and Storage for ComputeConfiguring Image Service and Storage for ComputeYou can either use a local image storage system or install Glance for storing and retrieving images. After you have installed a Glance server, you can configure nova-compute to use Glance for image storage and retrieval. You must change the --image_service flag to 'nova.image.glance.GlanceImageService' in order to use Glance to store and retrieve - images for OpenStack Compute.Table 4.6. Description of nova.conf flags for the Glance image service and + images for OpenStack Compute.Table 4.10. 
Description of nova.conf flags for the Glance image service and storage Flag Default @@ -829,7 +954,7 @@ sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra"--image_service default: 'nova.image.local.LocalImageService' The service to use for retrieving and searching for images. Images must be registered using - euca2ools. Options: nova.image.s3.S3ImageServiceS3 backend for the Image Service.nova.image.local.LocalImageServiceImage service storing images to local disk. It assumes that image_ids are integers. This is the default setting if no image manager is defined here.nova.image.glance.GlanceImageServiceGlance back end for storing and retrieving images; See http://glance.openstack.org for more info. + euca2ools. Options: nova.image.s3.S3ImageServiceS3 backend for the Image Service.nova.image.local.LocalImageServiceImage service storing images to local disk. It assumes that image_ids are integers. This is the default setting if no image manager is defined here.nova.image.glance.GlanceImageServiceGlance back end for storing and retrieving images; See http://glance.openstack.org for more info. --glance_api_servers default: '$my_ip:9292' @@ -856,7 +981,7 @@ sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra"Set to 1 or true to turn on; Determines whether to get images from s3 or use a local copy If you choose not to use Glance for the image service, you can use the object store that maintains images in a particular location, namely the state path on the server local to the nova.conf file. You can also use a set of S3 buckets to store - images.Table 4.7. Description of nova.conf flags for local image storage + images.Table 4.11. Description of nova.conf flags for local image storage Flag Default Description @@ -864,13 +989,14 @@ sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra"--image_service default: 'nova.image.local.LocalImageService' The service to use for retrieving and searching for images. Images must be registered using - euca2ools. Options: nova.image.s3.S3ImageServiceS3 backend for the Image Service; In Cactus, the S3 + euca2ools. Options: nova.image.s3.S3ImageServiceS3 backend for the Image Service; In Cactus, the S3 image service wraps the other image services for use by the EC2 API. The EC2 api will always use the S3 image service by default - so setting the flag is not necessary. + so setting the flag is not necessary. nova.image.local.LocalImageService Image service storing images to local disk. It assumes that - image_ids are integers.nova.image.glance.GlanceImageServiceGlance back end for storing and retrieving images; See http://glance.openstack.org for more info. + image_ids are integers.nova.image.glance.GlanceImageServiceOpenStack Image Service (Glance) back end for storing and + retrieving images; See http://glance.openstack.org for more info. --state_path '/Users/username/p/nova/nova/../' @@ -883,46 +1009,46 @@ sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra"--images_path '$state_path/images' Directory path; Directory that stores images when using object store. - Configuring Live Migrations Configuring Live Migrations The live migration feature is useful when you need to upgrade or installing patches to hypervisors/BIOS and you need the machines to keep running. For example, when one of HDD volumes RAID or one of bonded NICs is out of order. Also for regular periodic maintenance, you may need to migrate VM instances. When many VM instances are running on a specific physical machine, you can redistribute the high load. 
Sometimes when VM instances are scattered, you can move VM instances to a physical machine to arrange them more logically. + Configuring Live Migrations Configuring Live Migrations The live migration feature is useful when you need to upgrade or installing patches to hypervisors/BIOS and you need the machines to keep running. For example, when one of HDD volumes RAID or one of bonded NICs is out of order. Also for regular periodic maintenance, you may need to migrate VM instances. When many VM instances are running on a specific physical machine, you can redistribute the high load. Sometimes when VM instances are scattered, you can move VM instances to a physical machine to arrange them more logically. Environments - OS: Ubuntu 10.04/10.10 - for both instances and host.Shared storage: + OS: Ubuntu 10.04/10.10 + for both instances and host.Shared storage: NOVA-INST-DIR/instances/ has to be mounted by shared storage - (tested using NFS).Instances: Instance can - be migrated with ISCSI/AoE based volumesHypervisor: - KVM with libvirt(NOTE1) + (tested using NFS).Instances: Instance can + be migrated with ISCSI/AoE based volumesHypervisor: + KVM with libvirt(NOTE1) "NOVA-INST-DIR/instance" is expected that vm image is put on to. see "flags.instances_path" in nova.compute.manager for the default - value(NOTE2) This feature is + value(NOTE2) This feature is admin only, since nova-manage is necessary. Sample Nova Installation before starting - Prepare 3 servers at least, lets say, HostA, HostB - and HostCnova-api/nova-network/nova-volume/nova-objectstore/ - nova-scheduler(and other daemon) are running on HostA.nova-compute is running on both HostB and HostC.HostA export NOVA-INST-DIR/instances, HostB and HostC - mount it.To avoid any confusion, NOVA-INST-DIR is same at - HostA/HostB/HostC("NOVA-INST-DIR" shows top of install dir). HostA export NOVA-INST-DIR/instances, HostB and HostC mount it. + Prepare 3 servers at least, lets say, HostA, HostB + and HostCnova-api/nova-network/nova-volume/nova-objectstore/ + nova-scheduler(and other daemon) are running on HostA.nova-compute is running on both HostB and HostC.HostA export NOVA-INST-DIR/instances, HostB and HostC + mount it.To avoid any confusion, NOVA-INST-DIR is same at + HostA/HostB/HostC("NOVA-INST-DIR" shows top of install dir). HostA export NOVA-INST-DIR/instances, HostB and HostC mount it. Pre-requisite configurations - 1.Configure /etc/hosts, Make sure 3 Hosts can do name-resolution - with each other. Ping with each other is better way to test. + 1.Configure /etc/hosts, Make sure 3 Hosts can do name-resolution + with each other. Ping with each other is better way to test. # ping HostA # ping HostB # ping HostC - 2.Configure NFS at HostA by adding below to /etc/exportsNOVA-INST-DIR/instances HostA/255.255.0.0(rw,sync,fsid=0,no_root_squash Change "255.255.0.0" appropriate netmask, which should include - HostB/HostC. Then restart nfs server. + 2.Configure NFS at HostA by adding below to /etc/exportsNOVA-INST-DIR/instances HostA/255.255.0.0(rw,sync,fsid=0,no_root_squash Change "255.255.0.0" appropriate netmask, which should include + HostB/HostC. Then restart nfs server. # /​etc/​init.​d/​nfs-kernel-server restart # /​etc/​init.​d/​idmapd restart - 3.Configure NFS at HostB and HostC by adding below to - /etc/fstabHostA:/ DIR nfs4 defaults 0 0Then mount, check exported directory can be mounted.# mount -a -vIf fail, try this at any hosts.# iptables -FAlso, check file/daemon permissions. We expect any nova daemons - are running as root. 
+ 3.Configure NFS at HostB and HostC by adding below to + /etc/fstabHostA:/ DIR nfs4 defaults 0 0Then mount, check exported directory can be mounted.# mount -a -vIf fail, try this at any hosts.# iptables -FAlso, check file/daemon permissions. We expect any nova daemons + are running as root. # ps -ef | grep nova root 5948 5904 9 11:29 pts/​4 00:00:00 python /​opt/​nova-2010.​4/​/​bin/​nova-api root 5952 5908 6 11:29 pts/​5 00:00:00 python /​opt/​nova-2010.​4/​/​bin/​nova-objectstore .​.​.​ (snip) - "NOVA-INST-DIR/instances/" directory can be seen at HostA + "NOVA-INST-DIR/instances/" directory can be seen at HostA # ls -ld NOVA-INST-DIR/​instances/​ drwxr-xr-x 2 root root 4096 2010-12-07 14:34 nova-install-dir/​instances/​ - Same check at HostB and HostC + Same check at HostB and HostC # ls -ld NOVA-INST-DIR/​instances/​ drwxr-xr-x 2 root root 4096 2010-12-07 14:34 nova-install-dir/​instances/​ @@ -935,7 +1061,7 @@ none 16502856 368 16502488 1% /​var/​run none 16502856 0 16502856 0% /​var/​lock none 16502856 0 16502856 0% /​lib/​init/​rw HostA: 921515008 101921792 772783104 12% /​opt ( <--- this line is important.​) - 4.Libvirt configurations. Modify /etc/libvirt/libvirt.conf: + 4.Libvirt configurations. Modify /etc/libvirt/libvirt.conf: before : #listen_tls =​ 0 after : listen_tls =​ 0 @@ -943,19 +1069,19 @@ before : #listen_tcp =​ 1 after : listen_tcp =​ 1 add: auth_tcp =​ "none" - Modify /etc/init/libvirt-bin.conf + Modify /etc/init/libvirt-bin.conf before : exec /​usr/​sbin/​libvirtd -d after : exec /​usr/​sbin/​libvirtd -d -l - Modify /etc/default/libvirt-bin + Modify /etc/default/libvirt-bin before :libvirtd_opts=​" -d" after :libvirtd_opts=​" -d -l" - then, restart libvirt. Make sure libvirt is restarted. + then, restart libvirt. Make sure libvirt is restarted. # stop libvirt-bin &​&​ start libvirt-bin # ps -ef | grep libvirt root 1145 1 0 Nov27 ?​ 00:00:03 /​usr/​sbin/​libvirtd -d -l - 5.Flag configuration. usually, you do not have to configure + 5.Flag configuration. usually, you do not have to configure any flags. Below chart is only for advanced usage. - Table 4.8. Description of nova.conf flags for live migration + Table 4.12. Description of nova.conf flags for live migration Flag Default Description @@ -974,7 +1100,7 @@ root 1145 1 0 Nov27 ?​ 00:00:03 /​usr/​sbin/​libvirtd -d -l --live_migration_flag default: 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER' Define libvirt flag for live migration. - Configuring Database Connections Configuring Database Connections You can configure OpenStack Compute to use any SQLAlchemy-compatible database. The database name is 'nova' and entries to it are mostly written by the nova-scheduler service, although all the services need to be able to update entries in the database. Use these settings to configure the connection string for the nova database.Table 4.9. Description of nova.conf flags for database access + Configuring Database Connections Configuring Database Connections You can configure OpenStack Compute to use any SQLAlchemy-compatible database. The database name is 'nova' and entries to it are mostly written by the nova-scheduler service, although all the services need to be able to update entries in the database. Use these settings to configure the connection string for the nova database.Table 4.13. 
Description of nova.conf flags for database access Flag Default Description @@ -985,7 +1111,7 @@ root 1145 1 0 Nov27 ?​ 00:00:03 /​usr/​sbin/​libvirtd -d -l --sql_idle_timeout default: '3600' - + Integer value; Number of seconds to wait for a database connection --sql_max_retries default: '12' @@ -1002,10 +1128,10 @@ root 1145 1 0 Nov27 ?​ 00:00:03 /​usr/​sbin/​libvirtd -d -l --db_driver default: 'nova.db.api' The drive to use for database access - Configuring the Compute Messaging SystemConfiguring the Compute Messaging SystemOpenStack Compute uses an open standard for messaging middleware known as AMQP. + Configuring the Compute Messaging SystemConfiguring the Compute Messaging SystemOpenStack Compute uses an open standard for messaging middleware known as AMQP. RabbitMQ enables this messaging system so that nova- services can talk to each other. You can configure the messaging communication for different installation scenarios as - well as tune RabbitMQ's retries and the size of the RPC thread pool. Table 4.10. Description of nova.conf flags for Remote Procedure Calls and RabbitMQ Messaging + well as tune RabbitMQ's retries and the size of the RPC thread pool. Table 4.14. Description of nova.conf flags for Remote Procedure Calls and RabbitMQ Messaging Flag Default Description @@ -1029,7 +1155,7 @@ root 1145 1 0 Nov27 ?​ 00:00:03 /​usr/​sbin/​libvirtd -d -l --rabbit_virtual_host default: '/' Location of a virtual RabbitMQ installation. - Table 4.11. Description of nova.conf flags for Tuning RabbitMQ Messaging + Table 4.15. Description of nova.conf flags for Tuning RabbitMQ Messaging Flag Default Description @@ -1045,7 +1171,7 @@ root 1145 1 0 Nov27 ?​ 00:00:03 /​usr/​sbin/​libvirtd -d -l --rpc_thread_pool_size default: '1024' Integer value: Size of Remote Procedure Call thread pool. - Table 4.12. Description of nova.conf flags for Customizing Exchange or Topic Names + Table 4.16. Description of nova.conf flags for Customizing Exchange or Topic Names Flag Default Description @@ -1073,125 +1199,32 @@ root 1145 1 0 Nov27 ?​ 00:00:03 /​usr/​sbin/​libvirtd -d -l --volume_topic default: 'volume' String value; Name of the topic that volume nodes listen on - Configuring Authentication and Authorization Configuring Authentication and Authorization OpenStack Compute uses an implementation of an authentication system structured like - having an Active Directory or other federated LDAP user store that backends to an - identity manager or other SAML Policy Controller that then maps to groups. You can also - customize roles for projects. Credentials for API calls are stored in the project zip - file. Certificate authority is also customzed in nova.conf. Table 4.13. Description of nova.conf flag for Authentication - Flag - Default - Description - - --auth_driver - default:'nova.auth.dbdriver.DbDriver' - String value; Name of the driver for authenticationnova.auth.dbdriver.DbDriver - Default setting.nova.auth.ldapdriver.FakeLdapDriver - create a replacement - for this driver supporting other backends by creating another - class that exposes the same public methods. - Table 4.14. 
Description of nova.conf flags for customizing roles - Flag - Default - Description - - --allowed_roles - default: 'cloudadmin,itsec,sysadmin,netadmin,developer') - Comma separated list; Allowed roles for project - - --global_roles - default: 'cloudadmin,itsec') - Comma separated list; Roles that apply to all projects - - --superuser_roles - default: 'cloudadmin') - Comma separated list; Roles that ignore authorization checking completely - Table 4.15. Description of nova.conf flags for credentials - Flag - Default - Description - - --credentials_template - default: '/Users/termie/p/nova/nova/auth/novarc.template') - Directory; Template for creating users' RC file - - --credential_rc_file - default: '%src') - File name; File name of rc in credentials zip - - --credential_cert_file - default: 'cert.pem') - File name; File name of certificate in credentials zip - - --credential_key_file - default: 'pk.pem') - File name; File name of rc in credentials zip - - --vpn_client_template - default: 'nova/cloudpipe/client/ovpn.template') - Directory; Refers to where the template lives for creating users vpn file - - --credential_vpn_file - default: 'nova-vpn.conf') - File name; Filename of certificate in credentials.zip - Table 4.16. Description of nova.conf flags for CA (Certificate Authority) - Flag - Default - Description - - --keys_path - default: '$state_path/keys') - Directory; Where Nova keeps the keys - - --ca_file - default: 'cacert.pem') - File name; File name of root CA - - --crl_file - default: 'crl.pem') - File name; File name of Certificate Revocation List - - --key_file - default: 'private/cakey.pem') - File name; File name of private key - - --use_project_ca - default: 'false') - True or false; Indicates whether to use a CA for each project; false means CA is not used for each project - - --project_cert_subject - default: '/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=proje ct-ca-%s-%s') - String; Subject for certificate for projects, %s for project, timestamp - - --user_cert_subject - default: '/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=%s-%s-%s') - String; Subject for certificate for users, %s for project, users, timestamp - - --vpn_cert_subject - default: '/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=project-vpn-%s-%s') - String; Subject for certificate for vpns, %s for project, timestamp - OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk 5. HypervisorsThis section assumes you have a working installation of OpenStack Compute and want to + OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo 5. HypervisorsThis section assumes you have a working installation of OpenStack Compute and want to select a particular hypervisor or run with multiple hypervisors. 
Before you try to get a VM running within OpenStack Compute, be sure you have installed a hypervisor and used the - hypervisor's documentation to run a test VM and get it working.Selecting a HypervisorSelecting a HypervisorOpenStack Compute supports many hypervisors, an array of which must provide a bit of + hypervisor's documentation to run a test VM and get it working.Selecting a HypervisorSelecting a HypervisorOpenStack Compute supports many hypervisors, an array of which must provide a bit of difficulty in selecting a hypervisor unless you are already familiar with one. You - cannot configure more than one virtualization type on the compute nodes in the Cactus - release, so the hypervisor selection is for the entire installation. These links provide - additional information for choosing a hypervisor. Here is a list of the supported hypervisors with links to a relevant web site for - configuration and use:Hyper-V + cannot configure more than one virtualization type on the compute nodes, so the + hypervisor selection is for the entire installation. These links provide additional + information for choosing a hypervisor. Refer to http://wiki.openstack.org/HypervisorSupportMatrix for a detailed list of + features and support across the hypervisors. Here is a list of the supported hypervisors with links to a relevant web site for + configuration and use:Hyper-V 2008 - Use to run Windows-based virtual machines, specifically Windows 2008 R2 Datacenter or Enterprise Edition. You must install and run nova-compute on Windows servers that run the Windows-based virtual - machines.KVM - Kernel-based Virtual + machines.KVM - Kernel-based Virtual Machine. The virtual disk formats that it supports it inherits from QEMU since it uses a modified QEMU program to launch the virtual machine. The supported - formats include raw images, the qcow2, and VMware formats. LXC - Linux Containers (through - libvirt), use to run Linux-based virtual machines.QEMU - Quick EMUlator, generally only - used for development purposes.UML - User Mode Linux, - generally only used for development purposes. VMWare + formats include raw images, the qcow2, and VMware formats. LXC - Linux Containers (through + libvirt), use to run Linux-based virtual machines.QEMU - Quick EMUlator, generally only + used for development purposes.UML - User Mode Linux, + generally only used for development purposes. VMWare ESX/ESXi 4.1 update 1, runs VMWare-based Linux and Windows images - through a connection with the ESX server.Xen - XenServer 5.5, + through a connection with the ESX server.Xen - XenServer 5.5, Xen Cloud Platform (XCP), use to run Linux or Windows virtual machines. You must - install the nova-compute service on DomU. Hypervisor Configuration BasicsHypervisor Configuration BasicsThe node where the nova-compute service is installed and running is the machine that + install the nova-compute service on DomU. Hypervisor Configuration BasicsHypervisor Configuration BasicsThe node where the nova-compute service is installed and running is the machine that runs all the virtual machines, referred to as the compute node in this guide. By default, the selected hypervisor is KVM. To change to another hypervisor, change - the --libvirt_type flag in nova.conf and restart the nova-compute service. Here are the nova.conf flags that are used to configure the compute node.Table 5.1. Description of nova.conf flags for the compute node + the --libvirt_type flag in nova.conf and restart the nova-compute service. 
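For example, a minimal sketch of switching a compute node from KVM to QEMU on a packaged Ubuntu install (the qemu value and the nova-compute upstart job name are assumptions here; adjust them for your distribution): edit /etc/nova/nova.conf so that it contains
--libvirt_type=qemu
and then restart the service so the change takes effect:
sudo restart nova-compute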
Here are the nova.conf flags that are used to configure the compute node.Table 5.1. Description of nova.conf flags for the compute node Flag Default Description @@ -1230,7 +1263,7 @@ root 1145 1 0 Nov27 ?​ 00:00:03 /​usr/​sbin/​libvirtd -d -l String; Firewall driver for instances, defaults to iptables --injected_network_template - default: '/Users/termie/p/nova/nova/virt/interfaces.template' + default: '' Directory and file name; Template file for injected network information @@ -1239,7 +1272,7 @@ root 1145 1 0 Nov27 ?​ 00:00:03 /​usr/​sbin/​libvirtd -d -l String; Override the default libvirt URI (which is dependent on libvirt_type) --libvirt_xml_template - default: '/Users/termie/p/nova/nova/virt/libvirt.xml.template' + default: '' Directory and file name; Libvirt XML template --use_cow_images @@ -1257,63 +1290,63 @@ root 1145 1 0 Nov27 ?​ 00:00:03 /​usr/​sbin/​libvirtd -d -l --rescue_ramdisk_id default: 'ari-rescue' String; ARI image to use for rescue - OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk 6. OpenStack Compute Automated InstallationsIn a large-scale cloud deployment, automated installations are a requirement for + OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo 6. OpenStack Compute Automated InstallationsIn a large-scale cloud deployment, automated installations are a requirement for successful, efficient, repeatable installations. Automation for installation also helps with continuous integration and testing. This chapter offers some tested methods for deploying OpenStack Compute with either Puppet (an infrastructure management platform) or Chef (an infrastructure management framework) paired with Vagrant (a tool for building and - distributing virtualized development environments).Deployment Tool for OpenStack using PuppetDeployment Tool for OpenStack using PuppetThanks to a new project available that couples Puppet automation with a configuration + distributing virtualized development environments).Deployment Tool for OpenStack using PuppetDeployment Tool for OpenStack using PuppetThanks to a new project available that couples Puppet automation with a configuration file and deployment tool, you can install many servers automatically by simply editing the configuration file (deploy.conf) and running the deployment tool (deploy.py in the - nova-deployment-tool project in Launchpad).PrerequisitesPrerequisitesNetworking: The servers must be connected to a subnet. Networking: Ensure that the puppet server can access nova component + nova-deployment-tool project in Launchpad).PrerequisitesPrerequisitesNetworking: The servers must be connected to a subnet. Networking: Ensure that the puppet server can access nova component servers by name. The command examples in this document identify the user as “nii”. You should change the name but you need to create the same users on all Nova, Glance and Swift component servers in ~/DeploymentTool/conf/deploy.conf - (ssh_user=’user’). Permissions: You must have root user permission for installation and - service provision. Software: You must configure the installation server to access the Puppet server by name. - (Puppet 0.25 or higher)Software: You must configure LVM if you do not change the default setting - of the VolumeManager in the nova-volume service. 
Software: Python 2.6 or higherSoftware: Because of the current Nova implementation architecture, the + (ssh_user=’user’). Permissions: You must have root user permission for installation and + service provision. Software: You must configure the installation server to access the Puppet server by name. + (Puppet 0.25 or higher)Software: You must configure LVM if you do not change the default setting + of the VolumeManager in the nova-volume service. Software: Python 2.6 or higherSoftware: Because of the current Nova implementation architecture, the binaries for nova-api, nova-objectstore, and euca2ools must have been loaded - in one server.Operating system: Ubuntu 10.04, 10.10 or 11.04The tool does not support system configurations other than those listed above. If you want + in one server.Operating system: Ubuntu 10.04, 10.10 or 11.04The tool does not support system configurations other than those listed above. If you want to use other configurations, you have to change the configuration after running the - deployment tool or modify the deployment tool. This deployment tool has been tested under the following configurations. Nova-compute components are installed on multiple servers. OS: Ubuntu10.04, Ubuntu10.10 or Ubuntu 11.04 Multiple network modes (VLAN Mode, Flat Mode)Although we conducted extensive tests, we were unable to test every configuration. + deployment tool or modify the deployment tool. This deployment tool has been tested under the following configurations. Nova-compute components are installed on multiple servers. OS: Ubuntu10.04, Ubuntu10.10 or Ubuntu 11.04 Multiple network modes (VLAN Mode, Flat Mode)Although we conducted extensive tests, we were unable to test every configuration. Please let us know any problems that occur in your environment by contacting us at https://answers.launchpad.net/nova-deployment-tool. We will try to resolve any problem you send us and make the tool better for Stackers. - NoteThe configurations, which are not described on this document, are Nova + NoteThe configurations, which are not described on this document, are Nova default settings. Note also that, although we have not done so ourselves, you should be able to change the network mode to flat DHCP mode and hypervisor to Xen if you follow the instructions in the Notes section below. - Overview of Deployment Tool StepsOverview of Deployment Tool StepsYou can install/test/uninstall Nova, Glance and Swift with the Nova deployment tool as follows, + Overview of Deployment Tool StepsOverview of Deployment Tool StepsYou can install/test/uninstall Nova, Glance and Swift with the Nova deployment tool as follows, which is simply an overview. The detailed steps are in the sections that follow.Deploy.py takes care of the details using puppet. Puppet is an automation tool with standardized scripts that manage a machine's configuration. 
See an - Introduction to Puppet on the PuppetLabs site.Install by typing the following command.python deploy.py installConfirm that the installation succeeded by typing the following - command.python deploy.py testUninstall by typing the following command.python deploy.py uninstall -python deploy.py all = python deploy.py uninstall; python deploy.py install; python deploy.py test Uninstall/install/test only Nova.python deploy.py all novaUninstall/install/test only Swift.python deploy.py all swiftUninstall/install/test only Glance.python deploy.py all glanceInstalling the Deployment ToolInstalling the Deployment ToolType or copy/paste the following command to use the OpenStack PPA on all component servers. + Introduction to Puppet on the PuppetLabs site.Install by typing the following command.python deploy.py installConfirm that the installation succeeded by typing the following + command.python deploy.py testUninstall by typing the following command.python deploy.py uninstall +python deploy.py all = python deploy.py uninstall; python deploy.py install; python deploy.py test Uninstall/install/test only Nova.python deploy.py all novaUninstall/install/test only Swift.python deploy.py all swiftUninstall/install/test only Glance.python deploy.py all glanceInstalling the Deployment ToolInstalling the Deployment ToolType or copy/paste the following command to use the OpenStack PPA on all component servers. sudo apt-get install python-software-properties -y sudo add-apt-repository ppa:openstack-release/2011.2 -sudo apt-get updateSet permissions to the deployment 'user'Set permissions to the deployment 'user'Edit sudoers file to give the correct permissions to the 'user' running all the components. +sudo apt-get updateSet permissions to the deployment 'user'Set permissions to the deployment 'user'Edit sudoers file to give the correct permissions to the 'user' running all the components. Type or copy/paste the visudo command to set ‘user’ (= nii in this document) as a sudouer on all nova component servers. - sudo visudoAppend the following lines to the visudo file, and then save the file.nii ALL=(ALL) NOPASSWD:ALL -nova ALL=(ALL) NOPASSWD:ALLConfigure SSHConfigure SSHNext, we'll configure the system so that SSH works by generating public and private key pairs that provide credentials without a password intervention. The Deployment tool needs to connect to all nova, glance and swift component servers without having the operator enter a password for any of the servers.Type or copy/paste the following command to generate public and private key pairs on the server running the Nova deployment tool.ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsaCopy this generated public key to all nova component servers.Next, type or copy/paste the following commands to register the public keys on all nova component servers.ssh-copy-id nii@(each nova component server name) Download the code for the deployment tool next, and extract the contents of the - compressed file. wget http://launchpad.net/nova-deployment-tool/cactus/cactus1.3/+download/nova-deployment-tool-cactus.tgz -tar xzvf nova-deployment-tool-cactus.tgzCreate Swift storage folder and mount deviceCreate Swift storage folder and mount deviceFirst, create a Swift-storage folder and mount device on each swift-storage server. The commands vary depending on which destination (Partition or Lookback device) is to be used. The steps are detailed in the sections that follow. 
“$storage_path” and “$storage_dev” are defined in “deploy.conf”.Partition + sudo visudoAppend the following lines to the visudo file, and then save the file.nii ALL=(ALL) NOPASSWD:ALL +nova ALL=(ALL) NOPASSWD:ALLConfigure SSHConfigure SSHNext, we'll configure the system so that SSH works by generating public and private key pairs that provide credentials without a password intervention. The Deployment tool needs to connect to all nova, glance and swift component servers without having the operator enter a password for any of the servers.Type or copy/paste the following command to generate public and private key pairs on the server running the Nova deployment tool.ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsaCopy this generated public key to all nova component servers.Next, type or copy/paste the following commands to register the public keys on all nova component servers.ssh-copy-id nii@(each nova component server name) Download the code for the deployment tool next, and extract the contents of the + compressed file. wget http://launchpad.net/nova-deployment-tool/cactus/cactus1.3/+download/nova-deployment-tool-cactus.tgz +tar xzvf nova-deployment-tool-cactus.tgzCreate Swift storage folder and mount deviceCreate Swift storage folder and mount deviceFirst, create a Swift-storage folder and mount device on each swift-storage server. The commands vary depending on which destination (Partition or Lookback device) is to be used. The steps are detailed in the sections that follow. “$storage_path” and “$storage_dev” are defined in “deploy.conf”.Partition sudo apt-get install xfsprogs -y sudo sh -c "echo '/dev/$storage_dev $storage_path/$storage_dev xfs noatime,nodiratime,nobarrier,logbufs=8 0 0' >> /etc/fstab" sudo mount $storage_path/$storage_dev -Loopback device +Loopback device sudo apt-get install xfsprogs -y sudo mkdir -p $storage_path/$storage_dev sudo dd if=/dev/zero of=/srv/swift-disk bs=1024 count=0 seek=1000000 sudo mkfs.xfs -i size=1024 /srv/swift-disk sudo sh -c "echo '/srv/swift-disk $storage_path/$storage_dev xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0' >> /etc/fstab" sudo mount $storage_path/$storage_dev -Configuring the Deployment ToolConfiguring the Deployment ToolYou must change the configuration file in order to execute the Nova deployment tool according to your environment and configuration design. In the unzipped files, edit conf/deploy.conf to change the settings according to your environment and desired installation (single or multiple servers, for example). 
Here are the definitions of the values which are used in deploy.conf.default sectionpuppet_server Name of server in which the puppet server is installedsh_user User name that is used to SSH into a nova componentnova sectionnova_api Name of server in which the nava-api component is installednova_objectstore Name of server in which the nova-objectstore component is installed*nova_compute Name of server in which the nova-compute component is installednova_scheduler Name of server in which the nova-scheduler component is installednova_network Name of server in which the nova-network component is installednova_volume Name of server in which the nova-volume component is installedeuca2ools Name of server that runs the test sequencemysql Name of server in which mysql is installedglance_host Glance server namelibvirt_type Virtualization typenetwork_manager Network management class nameimage_service Image management class namenetwork_interface Network interface that is used in the nova-compute componentnetwork_ip_range IP address range used by guest VMS. This value should be included in the values of fixed_range.volume_group LVM volume group name that is used in the nova volume componentfixed_range Range of IP addresses used in all projects. If you want to change the value, please also change the IP addresses X.X.X.X of the command "nova-manage network create X.X.X.X ..." in file setup-network.sh, and the IP addresses should include the new value.network_size Number of IP addresses used by Guest VM in all projectsglance sectionglance Name of server in which the glance is installeddefault_store Image store that is used in glance. Available value: file, swift, s3swift sectionswift_proxy Name of server in which the glance is installedswift_storage Name of server in which the swift=storage is installedaccount swift account nameusername swift user namepassword swift passwordstorage_path Folder for saving account, container and object information in swift storage serverstorage_dev Device holding account, container and object informationring_builder_replicas Number of account, container, and object copies. The value has to be equal or less than the number of swift-storage servers.super_admin_key A key for creating swift users If you install swift on Ubuntu 11.04, due to the bug https://bugs.launchpad.net/swift/+bug/796404 swift_proxy should be installed on the different machine from the machine where swift_storage will be installed.Because of the current implementation architecture, you must load nova-api, nova-objectstore and euca2ools on a single server.The following configuration information is an example. If you want to have multiple +Configuring the Deployment ToolConfiguring the Deployment ToolYou must change the configuration file in order to execute the Nova deployment tool according to your environment and configuration design. In the unzipped files, edit conf/deploy.conf to change the settings according to your environment and desired installation (single or multiple servers, for example). 
Here are the definitions of the values which are used in deploy.conf.default sectionpuppet_server Name of server in which the puppet server is installedsh_user User name that is used to SSH into a nova componentnova sectionnova_api Name of server in which the nava-api component is installednova_objectstore Name of server in which the nova-objectstore component is installed*nova_compute Name of server in which the nova-compute component is installednova_scheduler Name of server in which the nova-scheduler component is installednova_network Name of server in which the nova-network component is installednova_volume Name of server in which the nova-volume component is installedeuca2ools Name of server that runs the test sequencemysql Name of server in which mysql is installedglance_host Glance server namelibvirt_type Virtualization typenetwork_manager Network management class nameimage_service Image management class namenetwork_interface Network interface that is used in the nova-compute componentnetwork_ip_range IP address range used by guest VMS. This value should be included in the values of fixed_range.volume_group LVM volume group name that is used in the nova volume componentfixed_range Range of IP addresses used in all projects. If you want to change the value, please also change the IP addresses X.X.X.X of the command "nova-manage network create X.X.X.X ..." in file setup-network.sh, and the IP addresses should include the new value.network_size Number of IP addresses used by Guest VM in all projectsglance sectionglance Name of server in which the glance is installeddefault_store Image store that is used in glance. Available value: file, swift, s3swift sectionswift_proxy Name of server in which the glance is installedswift_storage Name of server in which the swift=storage is installedaccount swift account nameusername swift user namepassword swift passwordstorage_path Folder for saving account, container and object information in swift storage serverstorage_dev Device holding account, container and object informationring_builder_replicas Number of account, container, and object copies. The value has to be equal or less than the number of swift-storage servers.super_admin_key A key for creating swift users If you install swift on Ubuntu 11.04, due to the bug https://bugs.launchpad.net/swift/+bug/796404 swift_proxy should be installed on the different machine from the machine where swift_storage will be installed.Because of the current implementation architecture, you must load nova-api, nova-objectstore and euca2ools on a single server.The following configuration information is an example. If you want to have multiple nova-computes, you can do so by nova_compute=ubuntu3, ubuntu8, for example. And if you want to have multiple swift storage, you can do so by swift_storage=ubuntu3, - ubuntu8, for example. + ubuntu8, for example. <begin ~/DeploymentTool/conf/deploy.conf> [default] puppet_server=ubuntu7 @@ -1359,37 +1392,37 @@ storage_dev=sdb1 ring_builder_replicas=1 super_admin_key=swauth -<end ~/DeploymentTool/conf/deploy.conf>OpenStack Compute Installation Using VirtualBox, Vagrant, And ChefOpenStack Compute Installation Using VirtualBox, Vagrant, And ChefIntegration testing for distributed systems that have many dependencies can be a huge challenge. Ideally, you would have a cluster of machines that you could PXE boot to a base OS install and run a complete install of the system. Unfortunately not everyone has a bunch of extra hardware sitting around. 
For those of us that are a bit on the frugal side, a whole lot of testing can be done with Virtual Machines. Read on for a simple guide to installing OpenStack Compute (Nova) with VirtualBox and Vagrant.Installing VirtualBoxInstalling VirtualBoxVirtualBox is virtualization software by Oracle. It runs on Mac/Linux/Windows and can be controlled from the command line. Note that we will be using VirtualBox 4.0 and the vagrant prerelease.OSXcurl -O http://download.virtualbox.org/virtualbox/4.0.2/VirtualBox-4.0.2-69518-OSX.dmg -open VirtualBox-4.0.2-69518-OSX.dmgUbuntu Maverickwget -q http://download.virtualbox.org/virtualbox/debian/oracle_vbox.asc -O- | sudo apt-key add - +<end ~/DeploymentTool/conf/deploy.conf>OpenStack Compute Installation Using VirtualBox, Vagrant, And ChefOpenStack Compute Installation Using VirtualBox, Vagrant, And ChefIntegration testing for distributed systems that have many dependencies can be a huge challenge. Ideally, you would have a cluster of machines that you could PXE boot to a base OS install and run a complete install of the system. Unfortunately not everyone has a bunch of extra hardware sitting around. For those of us that are a bit on the frugal side, a whole lot of testing can be done with Virtual Machines. Read on for a simple guide to installing OpenStack Compute (Nova) with VirtualBox and Vagrant.Installing VirtualBoxInstalling VirtualBoxVirtualBox is virtualization software by Oracle. It runs on Mac/Linux/Windows and can be controlled from the command line. Note that we will be using VirtualBox 4.0 and the vagrant prerelease.OSXcurl -O http://download.virtualbox.org/virtualbox/4.0.2/VirtualBox-4.0.2-69518-OSX.dmg +open VirtualBox-4.0.2-69518-OSX.dmgUbuntu Maverickwget -q http://download.virtualbox.org/virtualbox/debian/oracle_vbox.asc -O- | sudo apt-key add - echo "deb http://download.virtualbox.org/virtualbox/debian maverick contrib" | sudo tee /etc/apt/sources.list.d/virtualbox.list sudo apt-get update -sudo apt-get install -y virtualbox-4.0Ubuntu Lucidwget -q http://download.virtualbox.org/virtualbox/debian/oracle_vbox.asc -O- | sudo apt-key add - +sudo apt-get install -y virtualbox-4.0Ubuntu Lucidwget -q http://download.virtualbox.org/virtualbox/debian/oracle_vbox.asc -O- | sudo apt-key add - echo "deb http://download.virtualbox.org/virtualbox/debian lucid contrib" | sudo tee /etc/apt/sources.list.d/virtualbox.list sudo apt-get update -sudo apt-get install -y virtualbox-4.0Install RubyGemsInstall RubyGemsThe following instructions for installing Vagrant use RubyGems for the installation commands. You can download RubyGems from http://rubygems.org/pages/download. Get the Vagrant Pre-releaseGet the Vagrant Pre-releaseOSXsudo gem update -- system -sudo gem install vagrant -- preUbuntu Mavericksudo gem install vagrant --pre -sudo ln -s /var/lib/gems/1.8/bin/vagrant /usr/local/bin/vagrantUbuntu Lucidwget http://production.cf.rubygems.org/rubygems/rubygems-1.3.6.zip +sudo apt-get install -y virtualbox-4.0Install RubyGemsInstall RubyGemsThe following instructions for installing Vagrant use RubyGems for the installation commands. You can download RubyGems from http://rubygems.org/pages/download. 
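Before fetching the Vagrant prerelease, it can be worth confirming which Ruby and RubyGems versions are already on the path, since an outdated RubyGems is a common reason the gem commands below fail. This check is an addition to the original walkthrough rather than a required step:
ruby --version
gem --version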
Get the Vagrant Pre-releaseGet the Vagrant Pre-releaseOSXsudo gem update -- system +sudo gem install vagrant -- preUbuntu Mavericksudo gem install vagrant --pre +sudo ln -s /var/lib/gems/1.8/bin/vagrant /usr/local/bin/vagrantUbuntu Lucidwget http://production.cf.rubygems.org/rubygems/rubygems-1.3.6.zip sudo apt-get install -y unzip unzip rubygems-1.3.6.zip cd rubygems-1.3.6 sudo ruby setup.rb -sudo gem1.8 install vagrant --preGet the Chef RecipesGet the Chef Recipescd ~ -git clone https://github.com/ansolabs/openstack-cookbooks/openstack-cookbooks.gitSet Up Some DirectoriesSet Up Some Directoriesmkdir aptcache +sudo gem1.8 install vagrant --preGet the Chef RecipesGet the Chef Recipescd ~ +git clone https://github.com/ansolabs/openstack-cookbooks/openstack-cookbooks.gitSet Up Some DirectoriesSet Up Some Directoriesmkdir aptcache mkdir chef -cd chefGet the chef-solo Vagrant fileGet the chef-solo Vagrant fileProvisioning for vagrant can use chef-solo, chef-server, or puppet. We’re going to use chef-solo for the installation of OpenStack Compute.curl -o Vagrantfile https://raw.github.com/gist/786945/solo.rbRunning OpenStack Compute within a Vagrant InstanceRunning OpenStack Compute within a Vagrant InstanceInstalling and running OpenStack Compute is as simple as typing "vagrant up"vagrant upIn 3-10 minutes, your vagrant instance should be running. NOTE: Some people report an +cd chefGet the chef-solo Vagrant fileGet the chef-solo Vagrant fileProvisioning for vagrant can use chef-solo, chef-server, or puppet. We’re going to use chef-solo for the installation of OpenStack Compute.curl -o Vagrantfile https://raw.github.com/gist/786945/solo.rbRunning OpenStack Compute within a Vagrant InstanceRunning OpenStack Compute within a Vagrant InstanceInstalling and running OpenStack Compute is as simple as typing "vagrant up"vagrant upIn 3-10 minutes, your vagrant instance should be running. NOTE: Some people report an error from vagrant complaining about MAC addresses the first time they vagrant up. - Doing vagrant up again seems to resolve the problem.vagrant sshNow you can run an instance and connect to it:. /vagrant/novarc + Doing vagrant up again seems to resolve the problem.vagrant sshNow you can run an instance and connect to it:. /vagrant/novarc euca-add-keypair test > test.pem chmod 600 test.pem euca-run-instances -t m1.tiny -k test ami-tty # wait for boot (euca-describe-instances should report running) -ssh -i test.pem root@10.0.0.3Yo, dawg, your VMs have VMs! That is, you are now running an instance inside of OpenStack Compute, which itself is running inside a VirtualBox VM.When the you are finished, you can destroy the entire system with vagrant destroy. You will also need to remove the .pem files and the novarc if you want to run the system again.vagrant destroy -rm *.pem novarcUsing the dashboard +ssh -i test.pem root@10.0.0.3Yo, dawg, your VMs have VMs! That is, you are now running an instance inside of OpenStack Compute, which itself is running inside a VirtualBox VM.When the you are finished, you can destroy the entire system with vagrant destroy. You will also need to remove the .pem files and the novarc if you want to run the system again.vagrant destroy +rm *.pem novarcUsing the dashboard Using the dashboard - The OpenStack Dashboard should be running on 192.168.86.100. You can login using username: admin, password: vagrant. 
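If you want to confirm that the Compute services actually came up inside the Vagrant VM, a quick check from within the vagrant ssh session is shown below; it assumes the cookbooks leave nova-manage on the path, which is an assumption rather than something the recipes guarantee:
sudo nova-manage service list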
OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk 7. NetworkingBy understanding the available networking configuration options you can design the best - configuration for your OpenStack Compute instances.Networking OptionsNetworking OptionsThis section offers a brief overview of each concept in networking for Compute. In Compute, users organize their cloud resources in projects. A Compute project + The OpenStack Dashboard should be running on 192.168.86.100. You can login using username: admin, password: vagrant. OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo 7. NetworkingBy understanding the available networking configuration options you can design the best + configuration for your OpenStack Compute instances.Networking OptionsNetworking OptionsThis section offers a brief overview of each concept in networking for Compute. In Compute, users organize their cloud resources in projects. A Compute project consists of a number of VM instances created by a user. For each VM instance, Compute assigns to it a private IP address. (Currently, Nova only supports Linux bridge networking that allows the virtual interfaces to connect to the outside network through @@ -1409,7 +1442,7 @@ rm *.pem novarc - NoteThe configuration injection currently only works on Linux-style systems that + NoteThe configuration injection currently only works on Linux-style systems that keep networking configuration in /etc/network/interfaces. In Flat DHCP Mode, you start a DHCP server to pass out IP addresses to VM instances from the specified subnet in addition to manually configuring the networking bridge. IP @@ -1433,12 +1466,12 @@ rm *.pem novarcCloudpipe — Per Project VpnsCloudpipe — Per Project Vpns + Compute creates the Linux networking bridges and VLANs when required.Cloudpipe — Per Project VpnsCloudpipe — Per Project Vpns Cloudpipe is a method for connecting end users to their project instances in VLAN networking mode. The support code for cloudpipe implements admin commands (via - nova-manage) to automatically create a vm for a project that + nova-manage) to automatically create a VM for a project that allows users to vpn into the private network of their project. Access to this vpn is provided through a public port on the network host for the project. This allows users to have free @@ -1456,46 +1489,46 @@ rm *.pem novarcCreating a Cloudpipe ImageCreating a Cloudpipe Image + Creating a Cloudpipe ImageCreating a Cloudpipe Image Making a cloudpipe image is relatively easy. - + # install openvpn on a base ubuntu image. - # set up a - server.conf.template in /etc/openvpn/ + # set up a + server.conf.template in /etc/openvpn/ # set up.sh in /etc/openvpn/ - + # set down.sh in /etc/openvpn/ - + # download and run the payload on boot from /etc/rc.local - + # setup /etc/network/interfaces - + # register the image and set the image id in your flagfile: - + --vpn_image_id=ami-xxxxxxxx - + # you should set a few other flags to make vpns work properly: - + --use_project_ca --cnt_vpn_clients=5 When you use nova-manage to launch a cloudpipe for a user, it goes through the following process: - 1. + 1. creates a keypair called <project_id>-vpn and saves it in the keys directory - 2. + 2. creates a security group <project_id>-vpn and opens up 1194 and icmp - 3. + 3. 
creates a cert and private key for the vpn instance and saves it in the CA/projects/<project_id>/ directory - 4. + 4. zips up the info and puts it b64 encoded as user data - 5. + 5. launches an m1.tiny instance with the above settings using the flag-specified vpn image - VPN AccessVPN Access + VPN AccessVPN Access In vlan networking mode, the second IP in each private network is reserved for the cloudpipe instance. This gives a consistent IP to the instance so that nova-network can create forwarding rules for @@ -1508,7 +1541,7 @@ rm *.pem novarcCertificates and RevocationCertificates and RevocationIf the use_project_ca flag is set (required to for cloudpipes to work securely), + Certificates and RevocationCertificates and RevocationIf the use_project_ca flag is set (required to for cloudpipes to work securely), then each project has its own ca. This ca is used to sign the certificate for the vpn, and is also passed to the user for bundling images. When a certificate is revoked using nova-manage, a new Certificate Revocation List (crl) is generated. As @@ -1517,23 +1550,24 @@ rm *.pem novarcRestarting and Logging into the Cloudpipe VPNRestarting and Logging into the Cloudpipe VPNYou can reboot a cloudpipe vpn through the api if something goes wrong (using + Restarting and Logging into the Cloudpipe VPNRestarting and Logging into the Cloudpipe VPNYou can reboot a cloudpipe vpn through the api if something goes wrong (using euca-reboot-instances for example), but if you generate a new crl, you will have to terminate it and start it again using nova-manage vpn run. The cloudpipe instance always gets the first ip in the subnet and it can take up to 10 minutes for the ip to be recovered. If you try to start the new vpn instance too soon, the instance will fail to start because of a NoMoreAddresses error. If you can’t wait 10 minutes, you can manually update the ip with something like the following (use the right ip - for the project): + for the project): euca-terminate-instances <instance_id> mysql nova -e "update fixed_ips set allocated=0, leased=0, instance_id=NULL where fixed_ip='10.0.0.2'" - You also will need to terminate the dnsmasq running for the user (make sure you use the right pid file):sudo kill `cat /var/lib/nova/br100.pid`Now you should be able to re-run the vpn:nova-manage vpn run <project_id>The keypair that was used to launch the cloudpipe instance should be in the keys/<project_id> folder. You can use this key to log into the cloudpipe instance for debugging purposes.Configuring Networking on the Compute NodeConfiguring Networking on the Compute NodeTo configure the Compute node's networking for the VM images, the overall steps are:1.Set the --network-manager flag in nova.conf.2.Use the nova-manage network create networkname CIDR n n - command to create the subnet that the VMs reside on.3.Integrate the bridge with your network. By default, Compute uses the VLAN Network Mode. You choose the networking mode for your - virtual instances in the nova.conf file. Here are the three possible options: --network_manager = nova.network.manager.FlatManagerSimple, non-VLAN networking--network_manager = nova.network.manager.FlatDHCPManagerFlat networking with DHCP--network_manager = nova.network.manager.VlanManagerVLAN networking with DHCP. 
This is the Default if no network manager is + You also will need to terminate the dnsmasq running for the user (make sure you use the right pid file):sudo kill `cat /var/lib/nova/br100.pid`Now you should be able to re-run the vpn:nova-manage vpn run <project_id>The keypair that was used to launch the cloudpipe instance should be in the keys/<project_id> folder. You can use this key to log into the cloudpipe instance for debugging purposes.Configuring Networking on the Compute NodeConfiguring Networking on the Compute NodeTo configure the Compute node's networking for the VM images, the overall steps are:1.Set the --network-manager flag in nova.conf.2.Use the nova-manage network create networkname CIDR n n + command to create the subnet that the VMs reside on.3.Integrate the bridge with your network. By default, Compute uses the VLAN Network Mode. You choose the networking mode for your + virtual instances in the nova.conf file. Here are the three possible options: --network_manager = nova.network.manager.FlatManagerSimple, non-VLAN networking--network_manager = nova.network.manager.FlatDHCPManagerFlat networking with DHCP, you must set a bridge using the + --flat_network_bridge flag--network_manager = nova.network.manager.VlanManagerVLAN networking with DHCP. This is the Default if no network manager is defined in nova.conf. Also, when you issue the nova-manage network create command, it uses the settings from the nova.conf flag file. Use the "nova-manage network create public 192.168.0.0/24 1 255" command to create the subnet that your VMs will run on. You specify public or - private after the create command.Configuring Flat NetworkingConfiguring Flat NetworkingFlatNetworking uses ethernet adapters configured as bridges to allow network + private after the create command.Configuring Flat NetworkingConfiguring Flat NetworkingFlatNetworking uses ethernet adapters configured as bridges to allow network traffic to transit between all the various nodes. This setup can be done with a single adapter on the physical host, or multiple. This option does not require a switch that does VLAN tagging as VLAN networking does, and is a common development @@ -1542,13 +1576,19 @@ rm *.pem novarcTo configure flat networking, ensure that your nova.conf file contains the line: - --network_manager = nova.network.manager.FlatManager + --network_manager = nova.network.manager.FlatManager Compute defaults to a bridge device named ‘br100’ which is stored in the Nova database, so you can change the name of the bridge device by modifying the entry in - the database. Consult the diagrams for additional configuration options.In any set up with FlatNetworking (either Flat or FlatDHCP), the host with nova-network on it is responsible for forwarding traffic from the private network configured with the --fixed_range= directive in nova.conf. This host needs to have br100 configured and talking to any other nodes that are hosting VMs. With either of the Flat Networking options, the default gateway for the virtual machines is set to the host which is running nova-network. Set the compute node's external IP address to be on the bridge and add eth0 to + the database. Consult the diagrams for additional configuration options.In any set up with FlatNetworking (either Flat or FlatDHCP), the host with + nova-network on it is responsible for forwarding traffic from the private network + configured with the --fixed_range= directive in nova.conf and the + --flat_network_bridge setting. 
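Pulling those flags together, a minimal flat-mode fragment of nova.conf might look like the following; the bridge name and address range are placeholders to adapt to your own network rather than required values:
--network_manager=nova.network.manager.FlatManager
--flat_network_bridge=br100
--fixed_range=192.168.0.0/24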
This host needs to have br100 configured and talking + to any other nodes that are hosting VMs. With either of the Flat Networking options, + the default gateway for the virtual machines is set to the host which is running + nova-network. Set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces configuration to look like the following example: - < begin /etc/network/interfaces > + < begin /etc/network/interfaces > # The loopback network interface auto lo iface lo inet loopback @@ -1564,23 +1604,24 @@ iface br100 inet dhcp < end /etc/network/interfaces > Next, restart networking to apply the changes: sudo /etc/init.d/networking restartFor an all-in-one development setup, this diagram represents the network - setup.Figure 7.1. Flat network, all-in-one server installation For multiple compute nodes with a single network adapter, which you can use for + setup.Figure 7.1. Flat network, all-in-one server installation For multiple compute nodes with a single network adapter, which you can use for smoke testing or a proof of concept, this diagram represents the network - setup.Figure 7.2. Flat network, single interface, multiple serversFor multiple compute nodes with multiple network adapters, this diagram + setup.Figure 7.2. Flat network, single interface, multiple serversFor multiple compute nodes with multiple network adapters, this diagram represents the network setup. You may want to use this setup for separate admin and - data traffic.Figure 7.3. Flat network, multiple interfaces, multiple serversConfiguring Flat DHCP NetworkingConfiguring Flat DHCP NetworkingWith Flat DHCP, the host running nova-network acts as the gateway to the virtual nodes. You + data traffic.Figure 7.3. Flat network, multiple interfaces, multiple serversConfiguring Flat DHCP NetworkingConfiguring Flat DHCP NetworkingWith Flat DHCP, the host running nova-network acts as the gateway to the virtual nodes. You can run one nova-network per cluster. Set the flag --network_host on the nova.conf stored on the nova-compute node to tell it which host the nova-network is running on - so it can communicate with nova-network. The nova-network service will track leases - and releases in the database so it knows if a VM instance has stopped properly - configuring via DHCP. Lastly, it sets up iptables rules to allow the VMs to - communicate with the outside world and contact a special metadata server to retrieve - information from the cloud.Compute hosts in the FlatDHCP model are responsible for bringing up a matching + so it can communicate with nova-network. You must also set the --flat_network_bridge + setting to the name of the bridge (no default is set for it). The nova-network + service will track leases and releases in the database so it knows if a VM instance + has stopped properly configuring via DHCP. Lastly, it sets up iptables rules to + allow the VMs to communicate with the outside world and contact a special metadata + server to retrieve information from the cloud.Compute hosts in the FlatDHCP model are responsible for bringing up a matching bridge and bridging the VM tap devices into the same ethernet device that the network host is on. The compute hosts do not need an IP address on the VM network, because the bridging puts the VMs and the network host on the same logical network. 
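To confirm that a compute host really brought up the bridge and attached the VM tap devices to it, a quick check is the following, assuming the bridge-utils package that provides brctl is installed:
sudo brctl show br100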
When a VM boots, the VM sends out DHCP packets, and the DHCP server on the network - host responds with their assigned IP address.Visually, the setup looks like the diagram below:Figure 7.4. Flat DHCP network, multiple interfaces, multiple serversFlatDHCP doesn't create VLANs, it creates a bridge. This bridge works just fine on + host responds with their assigned IP address.Visually, the setup looks like the diagram below:Figure 7.4. Flat DHCP network, multiple interfaces, multiple serversFlatDHCP doesn't create VLANs, it creates a bridge. This bridge works just fine on a single host, but when there are multiple hosts, traffic needs a way to get out of the bridge onto a physical interface. Be careful when setting up --flat_interface, if you specify an interface that already has an IP it will break and if this is the @@ -1595,33 +1636,34 @@ iface br100 inet dhcp automatically add the gateway ip to this bridge. You can also add the interface to br100 manually and not set flat_interface. If this is the case for you, edit your nova.conf file to contain the following lines: - --dhcpbridge_flagfile=/etc/nova/nova.conf + --dhcpbridge_flagfile=/etc/nova/nova.conf --dhcpbridge=/usr/bin/nova-dhcpbridge --network_manager=nova.network.manager.FlatDHCPManager --flat_network_dhcp_start=10.0.0.2 +--flat_network_bridge=br100 --flat_interface=eth2 --flat_injected=False --public_interface=eth0 - Integrate your network interfaces to match this configuration.Outbound Traffic Flow with Any Flat NetworkingOutbound Traffic Flow with Any Flat NetworkingIn any set up with FlatNetworking, the host with nova-network on it is responsible for forwarding traffic from the private network configured with the {{--fixed_range=...} directive in nova.conf. This host needs to have br100 configured and talking to any other nodes that are hosting VMs. With either of the Flat Networking options, the default gateway for the virtual machines is set to the host which is running nova-network.When a virtual machine sends traffic out to the public networks, it sends it first to its default gateway, which is where nova-network is configured. Figure 7.5. Single adaptor hosts, first routeNext, the host on which nova-network is configured acts as a router and forwards the traffic out to the Internet.Figure 7.6. Single adaptor hosts, second routeWarningIf you're using a single interface, then that interface (often eth0) needs to be set into promiscuous mode for the forwarding to happen correctly. This does not appear to be needed if you're running with physical hosts that have and use two interfaces.Configuring VLAN NetworkingConfiguring VLAN NetworkingIn some networking environments, you may have a large IP space which is cut up + Integrate your network interfaces to match this configuration.Outbound Traffic Flow with Any Flat NetworkingOutbound Traffic Flow with Any Flat NetworkingIn any set up with FlatNetworking, the host with nova-network on it is responsible for forwarding traffic from the private network configured with the {{--fixed_range=...} directive in nova.conf. This host needs to have br100 configured and talking to any other nodes that are hosting VMs. With either of the Flat Networking options, the default gateway for the virtual machines is set to the host which is running nova-network.When a virtual machine sends traffic out to the public networks, it sends it first to its default gateway, which is where nova-network is configured. Figure 7.5. 
Single adaptor hosts, first routeNext, the host on which nova-network is configured acts as a router and forwards the traffic out to the Internet.Figure 7.6. Single adaptor hosts, second routeWarningIf you're using a single interface, then that interface (often eth0) needs to be set into promiscuous mode for the forwarding to happen correctly. This does not appear to be needed if you're running with physical hosts that have and use two interfaces.Configuring VLAN NetworkingConfiguring VLAN NetworkingIn some networking environments, you may have a large IP space which is cut up into smaller subnets. The smaller subnets are then trunked together at the switch level (dividing layer 3 by layer 2) so that all machines in the larger IP space can communicate. The purpose of this is generally to control the size of broadcast domains.Using projects as a way to logically separate each VLAN, we can setup our cloud in this environment. Please note that you must have IP forwarding enabled for this - network mode to work.Obtain the parameters for each network. You may need to ask a network administrator for this information, including netmask, broadcast, gateway, ethernet device and VLAN ID.You need to have networking hardware that supports VLAN tagging.Please note that currently eth0 is hardcoded as the vlan_interface in the default flags. If you need to attach your bridges to a device other than eth0, you will need to add following flag to /etc/nova/nova.conf:--vlan_interface=eth1In VLAN mode, the setting for --network_size is the number of IPs per project as + network mode to work.Obtain the parameters for each network. You may need to ask a network administrator for this information, including netmask, broadcast, gateway, ethernet device and VLAN ID.You need to have networking hardware that supports VLAN tagging.Please note that currently eth0 is hardcoded as the vlan_interface in the default flags. If you need to attach your bridges to a device other than eth0, you will need to add following flag to /etc/nova/nova.conf:--vlan_interface=eth1In VLAN mode, the setting for --network_size is the number of IPs per project as opposed to the FlatDHCP mode where --network_size indicates number of IPs in the entire fixed range. For VLAN, the settings in nova.conf that affect networking are also --fixed_range, where the space is divided up into subnets of --network_size.VLAN is the default networking mode for Compute, so if you have no - --network_manager entry in your nova.conf file, you are set up for VLAN. To set your nova.conf file to VLAN, use this flag in /etc/nova/nova.conf:--network_manager=nova.network.manager.VlanManagerFor the purposes of this example walk-through, we will use the following settings. These are intentionally complex in an attempt to cover most situations:VLANs: 171, 172, 173 and - 174IP Blocks: 10.1.171.0/24, - 10.1.172.0/24, 10.1.173.0/24 and 10.1.174.0/24Each VLAN maps to its corresponding /24 (171 = 10.1.171.0/24, etc)Each VLAN will get its own - bridge device, which is in the format br_$VLANIDEach /24 has an upstream - default gateway on .1The first 6 IPs in each /24 - are reservedFirst, create the networks that Compute can pull from using nova-manage commands:nova-manage --flagfile=/etc/nova/nova.conf network create private 10.1.171.0/24 1 256 + --network_manager entry in your nova.conf file, you are set up for VLAN. 
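Before walking through the VLAN example below, it is worth double-checking the IP forwarding prerequisite mentioned above. The standard Linux sysctl check and toggle are shown here; to persist the setting across reboots, also set net.ipv4.ip_forward=1 in /etc/sysctl.conf.
# Check whether IP forwarding is currently enabled (1 = enabled, 0 = disabled)
cat /proc/sys/net/ipv4/ip_forward
# Enable it immediately on the running system
sudo sysctl -w net.ipv4.ip_forward=1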
To set your nova.conf file to VLAN, use this flag in /etc/nova/nova.conf:--network_manager=nova.network.manager.VlanManagerFor the purposes of this example walk-through, we will use the following settings. These are intentionally complex in an attempt to cover most situations:VLANs: 171, 172, 173 and + 174IP Blocks: 10.1.171.0/24, + 10.1.172.0/24, 10.1.173.0/24 and 10.1.174.0/24Each VLAN maps to its corresponding /24 (171 = 10.1.171.0/24, etc)Each VLAN will get its own + bridge device, which is in the format br_$VLANIDEach /24 has an upstream + default gateway on .1The first 6 IPs in each /24 + are reservedFirst, create the networks that Compute can pull from using nova-manage commands:nova-manage --flagfile=/etc/nova/nova.conf network create private 10.1.171.0/24 1 256 nova-manage --flagfile=/etc/nova/nova.conf network create private 10.1.172.0/24 1 256 nova-manage --flagfile=/etc/nova/nova.conf network create private 10.1.173.0/24 1 256 -nova-manage --flagfile=/etc/nova/nova.conf network create private 10.1.174.0/24 1 256Log in to the nova database to determine the network ID assigned to each VLAN:select id,cidr from networks;Update the DB to match your network settings. The following script will generate SQL based on the predetermined settings for this example. You will need to modify this database update to fit your environment. +nova-manage --flagfile=/etc/nova/nova.conf network create private 10.1.174.0/24 1 256Log in to the nova database to determine the network ID assigned to each VLAN:select id,cidr from networks;Update the DB to match your network settings. The following script will generate SQL based on the predetermined settings for this example. You will need to modify this database update to fit your environment. if [ -z $1 ]; then echo "You need to specify the vlan to modify" fi @@ -1639,8 +1681,8 @@ update networks set bridge = 'br_$VLAN' where id = $ID; update networks set gateway = '10.1.$VLAN.7' where id = $ID; update networks set dhcp_start = '10.1.$VLAN.8' where id = $ID; update fixed_ips set reserved = 1 where address in ('10.1.$VLAN.1','10.1.$VLAN.2','10.1.$VLAN.3','10.1.$VLAN.4','10.1.$VLAN.5','10.1.$VLAN.6','10.1.$VLAN.7'); -__EOF_After verifying that the above SQL will work for your environment, run it against the nova database, once for every VLAN you have in the environment.Next, create a project manager for the Compute project:nova-manage --flagfile=/etc/nova/nova.conf user admin $usernameThen create a project and assign that user as the admin user:nova-manage --flagfile=/etc/nova/nova.conf project create $projectname $usernameFinally, get the credentials for the user just created, which also assigns - one of the networks to this project:)nova-manage --flagfile=/etc/nova/nova.conf project zipfile $projectname $usernameWhen you start nova-network, the bridge devices and associated VLAN tags will be created. When you create a new VM you must determine (either manually or programatically) which VLAN it should be a part of, and start the VM in the corresponding project.In certain cases, the network manager may not properly tear down bridges and VLANs when it is stopped. If you attempt to restart the network manager and it does not start, check the logs for errors indicating that a bridge device already exists. 
If this is the case, you will likely need to tear down the bridge and VLAN devices manually.vconfig rem vlanNNN +__EOF_After verifying that the above SQL will work for your environment, run it against the nova database, once for every VLAN you have in the environment.Next, create a project manager for the Compute project:nova-manage --flagfile=/etc/nova/nova.conf user admin $usernameThen create a project and assign that user as the admin user:nova-manage --flagfile=/etc/nova/nova.conf project create $projectname $usernameFinally, get the credentials for the user just created, which also assigns + one of the networks to this project:)nova-manage --flagfile=/etc/nova/nova.conf project zipfile $projectname $usernameWhen you start nova-network, the bridge devices and associated VLAN tags will be created. When you create a new VM you must determine (either manually or programatically) which VLAN it should be a part of, and start the VM in the corresponding project.In certain cases, the network manager may not properly tear down bridges and VLANs when it is stopped. If you attempt to restart the network manager and it does not start, check the logs for errors indicating that a bridge device already exists. If this is the case, you will likely need to tear down the bridge and VLAN devices manually.vconfig rem vlanNNN ifconfig br_NNN down brctl delbr br_NNNAlso, if users need to access the instances in their project across a VPN, a special VPN instance (code named cloudpipe) needs to be created. You can create the cloudpipe instance. The @@ -1652,13 +1694,13 @@ brctl delbr br_NNNEnabling Ping and SSH on VMsEnabling Ping and SSH on VMsBe sure you enable access to your VMs by using the ‘euca-authorize’ command. Below, - you will find the commands to allow ‘ping’ and ‘ssh’ to your VMs: euca-authorize -P icmp -t -1:-1 default + securely so that each project has its own Certificate Authority (CA).Enabling Ping and SSH on VMsEnabling Ping and SSH on VMsBe sure you enable access to your VMs by using the ‘euca-authorize’ command. Below, + you will find the commands to allow ‘ping’ and ‘ssh’ to your VMs: euca-authorize -P icmp -t -1:-1 default euca-authorize -P tcp -p 22 defaultIf you still cannot ping or SSH your instances after issuing the ‘euca-authorize’ commands, look at the number of ‘dnsmasq’ processes that are running. If you have a running instance, check to see that TWO ‘dnsmasq’ processes are running. If not, perform the - following: killall dnsmasq; service nova-network restartAllocating and Associating IP Addresses with InstancesAllocating and Associating IP Addresses with InstancesYou can use Euca2ools commands to manage floating IP addresses used with Flat DHCP or VLAN + following: killall dnsmasq; service nova-network restartAllocating and Associating IP Addresses with InstancesAllocating and Associating IP Addresses with InstancesYou can use Euca2ools commands to manage floating IP addresses used with Flat DHCP or VLAN networking. To assign a reserved IP address to your project, removing it from the pool of available floating IP addresses, use euca-allocate-address. It'll return an IP address, assign it to the project you own, and remove it from the pool @@ -1669,31 +1711,32 @@ euca-authorize -P tcp -p 22 defaultIf pool of IPs for someone else to grab.There are nova-manage commands that also help you manage the floating IPs.nova-manage floating list - This command lists the floating IP addresses in the pool. 
nova-manage floating create [hostname] [cidr] - This command creates specific floating IPs for a specific network host and either a single address or a subnet. nova-manage floating destroy [hostname] [cidr] - This command removes floating IP - addresses using the same parameters as the create command.Associating a Public IP AddressAssociating a Public IP AddressOpenStack Compute uses NAT for public IPs. If you plan to use public IP + addresses using the same parameters as the create command.Associating a Public IP AddressAssociating a Public IP AddressOpenStack Compute uses NAT for public IPs. If you plan to use public IP addresses for your virtual instances, you must configure --public_interface=vlan100' in the nova.conf file so that Nova knows where to bind public IP addresses. Restart nova-network if you change nova.conf while the service is running. Also, ensure you - have opened port 22 for the nova network.You must add the IP address or block of public ip addresses to the floating IP + have opened port 22 for the nova network.You must add the IP address or block of public IP addresses to the floating IP list using the nova-manage floating create command. When you start a new virtual instance, associate one of the public addresses to the new instance - using the euca-associate-address command.These are the basic overall steps and checkpoints. First, set up the public address.nova-manage floating create my-hostname 68.99.26.170/31 + using the euca-associate-address command.These are the basic overall steps and checkpoints. First, set up the public address.nova-manage floating create my-hostname 68.99.26.170/31 euca-allocate-address 68.99.26.170 -euca-associate-address -i i-1 68.99.26.170Make sure the security groups are open.root@my-hostname:~# euca-describe-groups +euca-associate-address -i i-1 68.99.26.170Make sure the security groups are open.root@my-hostname:~# euca-describe-groups GROUP admin-project default default PERMISSION admin-project default ALLOWS icmp -1 -1 FROM CIDR 0.0.0.0/0 PERMISSION admin-project default ALLOWS tcp 22 22 -FROM CIDR 0.0.0.0/0Ensure the NAT rules have been added to iptables.-A nova-network-OUTPUT -d 68.99.26.170/32 -j DNAT --to-destination 10.0.0.3 +FROM CIDR 0.0.0.0/0Ensure the NAT rules have been added to iptables.-A nova-network-OUTPUT -d 68.99.26.170/32 -j DNAT --to-destination 10.0.0.3 -A nova-network-PREROUTING -d 68.99.26.170/32 -j DNAT --to-destination10.0.0.3 -A nova-network-floating-snat -s 10.0.0.3/32 -j SNAT --to-source 68.99.26.170Check that the public address, in this example 68.99.26.170, has been added to your public interface. You - should see the address in the listing when you enter "ip addr" at the command prompt.2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 + should see the address in the listing when you enter "ip addr" at the command prompt.2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether xx:xx:xx:17:4b:c2 brd ff:ff:ff:ff:ff:ff inet 13.22.194.80/24 brd 13.22.194.255 scope global eth0 inet 68.99.26.170/32 scope global eth0 inet6 fe80::82b:2bf:fe1:4b2/64 scope link valid_lft forever preferred_lft foreverNote that you cannot SSH to an instance with a public IP from within the same - server as the routing configuration won't allow it. Removing a Network from a ProjectRemoving a Network from a ProjectYou will find that you cannot remove a network that has already been associated to a project by simply deleting it. 
You can disassociate the project from the network with a scrub command and the project name as the final parameter: - nova-manage project scrub projectname OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk 8. System AdministrationBy understanding how the different installed nodes interact with each other you can + server as the routing configuration won't allow it. Removing a Network from a ProjectRemoving a Network from a ProjectYou will find that you cannot remove a network that has already been associated to a project by simply deleting it. You can disassociate the project from the network with a scrub command and the project name as the final parameter: + nova-manage project scrub projectnameExisting High Availability Options for NetworkingExisting High Availability Options for Networkingfrom Vish IshayaAs you can see from the Flat DHCP diagram titled "Flat DHCP network, multiple interfaces, multiple servers," traffic from the VM to the public internet has to go through the host running nova network. Dhcp is handled by nova-network as well, listening on the gateway address of the fixed_range network. The compute hosts can optionally have their own public IPs, or they can use the network host as their gateway. This mode is pretty simple and it works in the majority of situations, but it has one major drawback: the network host is a single point of failure! If the network host goes down for any reason, it is impossible to communicate with the VMs. Here are some options for avoiding the single point of failure.Option 1: FailoverOption 1: FailoverThe folks at NTT labs came up with a ha-linux configuration that allows for a 4 second failover to a hot backup of the network host. Details on their approach can be found in the following post to the openstack mailing list: https://lists.launchpad.net/openstack/msg02099.htmlThis solution is definitely an option, although it requires a second host that essentially does nothing unless there is a failure. Also four seconds can be too long for some real-time applications.Option 2: Multi-nicOption 2: Multi-nicRecently, nova gained support for multi-nic. This allows us to bridge a given VM into multiple networks. This gives us some more options for high availability. It is possible to set up two networks on separate vlans (or even separate ethernet devices on the host) and give the VMs a NIC and an IP on each network. Each of these networks could have its own network host acting as the gateway.In this case, the VM has two possible routes out. If one of them fails, it has the option of using the other one. The disadvantage of this approach is it offloads management of failure scenarios to the guest. The guest needs to be aware of multiple networks and have a strategy for switching between them. It also doesn't help with floating IPs. One would have to set up a floating IP associated with each of the IPs on private the private networks to achieve some type of redundancy.Option 3: HW GatewayOption 3: HW GatewayIt is possible to tell dnsmasq to use an external gateway instead of acting as the gateway for the VMs. You can pass dhcpoption=3,<ip of gateway> to make the VMs use an external gateway. This will require some manual setup. The metadata IP forwarding rules will need to be set on the hardware gateway instead of the nova-network host. 
You will have to make sure to set up routes properly so that the subnet that you use for VMs is routable.This offloads HA to standard switching hardware and it has some strong benefits. Unfortunately, nova-network is still responsible for floating IP natting and dhcp, so some failover strategy needs to be employed for those options.New HA OptionNew HA OptionEssentially, what the current options are lacking, is the ability to specify different gateways for different VMs. An agnostic approach to a better model might propose allowing multiple gateways per VM. Unfortunately this rapidly leads to some serious networking complications, especially when it comes to the natting for floating IPs. With a few assumptions about the problem domain, we can come up with a much simpler solution that is just as effective.The key realization is that there is no need to isolate the failure domain away from the host where the VM is running. If the host itself goes down, losing networking to the VM is a non-issue. The VM is already gone. So the simple solution involves allowing each compute host to do all of the networking jobs for its own VMs. This means each compute host does NAT, dhcp, and acts as a gateway for all of its own VMs. While we still have a single point of failure in this scenario, it is the same point of failure that applies to all virtualized systems, and so it is about the best we can do.So the next question is: how do we modify the Nova code to provide this option. One possibility would be to add code to the compute worker to do complicated networking setup. This turns out to be a bit painful, and leads to a lot of duplicated code between compute and network. Another option is to modify nova-network slightly so that it can run successfully on every compute node and change the message passing logic to pass the network commands to a local network worker.Surprisingly, the code is relatively simple. A couple fields needed to be added to the database in order to support these new types of "multihost" networks without breaking the functionality of the existing system. All-in-all it is a pretty small set of changes for a lot of added functionality: about 250 lines, including quite a bit of cleanup. You can see the branch here: https://code.launchpad.net/~vishvananda/nova/ha-net/+merge/67078The drawbacks here are relatively minor. It requires adding an IP on the VM network to each host in the system, and it implies a little more overhead on the compute hosts. It is also possible to combine this with option 3 above to remove the need for your compute hosts to gateway. In that hybrid version they would no longer gateway for the VMs and their responsibilities would only be dhcp and nat.The resulting layout for the new HA networking option looks the following diagram:Figure 7.7. High Availability Networking OptionIn contrast with the earlier diagram, all the hosts in the system are running both nova-compute and nova-network. Each host does DHCP and does NAT for public traffic for the VMs running on that particular host. In this model every compute host requires a connection to the public internet and each host is also assigned an address from the VM network where it listens for dhcp traffic.The requirements for configuring are the following: --multi_host flag must be in place for network creation and nova-network must be run on every compute host. These created multi hosts networks will send all network related commands to the host that the VM is on. 
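A minimal sketch of those requirements follows. The nova-manage syntax for multi-host networks is new and may differ between releases, so the --multi_host placement shown here is an assumption to verify against your installed version; the network range and size are placeholders.
# On every compute host, run nova-network alongside nova-compute
sudo service nova-network start
# Create the shared network with the multi_host flag set (illustrative syntax)
nova-manage --flagfile=/etc/nova/nova.conf network create private 10.0.0.0/24 1 256 --multi_host=T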
+ Future of NetworkingFuture of NetworkingWith the existing multi-nic code and the HA networking code, we have a pretty robust system with a lot of deployment options. This should be enough to provide deployers enough room to solve todays networking problems. Ultimately, we want to provide users the ability to create arbitrary networks and have real and virtual network appliances managed automatically. The efforts underway in the Quantum and Melange projects will help us reach this lofty goal, but with the current additions we should have enough flexibility to get us by until those projects can take over. OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo 8. System AdministrationBy understanding how the different installed nodes interact with each other you can administer the OpenStack Compute installation. OpenStack Compute offers many ways to install using multiple servers but the general idea is that you can have multiple compute nodes that control the virtual servers and a cloud controller node that contains the remaining Nova services. The OpenStack Compute cloud works via the interaction of a series of daemon processes @@ -1708,25 +1751,25 @@ valid_lft forever preferred_lft foreverFor example, other components that need to deal with volumes in some way, should do so by calling methods on the VolumeManager instead of directly changing fields in the database. - This allows us to keep all of the code relating to volumes in the same place. nova-api - The nova-api service receives xml requests and sends them to the rest + This allows us to keep all of the code relating to volumes in the same place. nova-api - The nova-api service receives xml requests and sends them to the rest of the system. It is a wsgi app that routes and authenticate requests. It supports - the ec2 and openstack apis. There is a nova-api.conf file created when you install - Compute.nova-objectstore - The nova-objectstore service is an ultra simple file-based + the EC2 and OpenStack APIs. There is a nova-api.conf file created when you install + Compute.nova-objectstore - The nova-objectstore service is an ultra simple file-based storage system for images that replicates most of the S3 API. It can be replaced with OpenStack Image Service and a simple image manager or use OpenStack Object Storage as the virtual machine image storage facility. It must reside on the same - node as nova-compute.nova-compute - The nova-compute service is responsible for managing virtual + node as nova-compute.nova-compute - The nova-compute service is responsible for managing virtual machines. It loads a Service object which exposes the public methods on - ComputeManager via Remote Procedure Call (RPC).nova-volume - The nova-volume service is responsible for managing attachable block + ComputeManager via Remote Procedure Call (RPC).nova-volume - The nova-volume service is responsible for managing attachable block storage devices. It loads a Service object which exposes the public methods on - VolumeManager via RPC.nova-network - The nova-network service is responsible for managing floating and + VolumeManager via RPC.nova-network - The nova-network service is responsible for managing floating and fixed IPs, DHCP, bridging and VLANs. It loads a Service object which exposes the public methods on one of the subclasses of NetworkManager. 
Different networking strategies are available to the service by changing the network_manager flag to FlatManager, FlatDHCPManager, or VlanManager (default is VLAN if no other is - specified):nova-network --network_manager=nova.network.manager.FlatManagerStarting ImagesStarting ImagesOnce you have an installation, you want to get images that you can use in your Compute cloud. We've created a basic Ubuntu image for testing your installation. First you'll download the image, then use uec-publish-tarball to publish it:image="ubuntu1010-UEC-localuser-image.tar.gz" + specified):nova-network --network_manager=nova.network.manager.FlatManagerStarting ImagesStarting ImagesOnce you have an installation, you want to get images that you can use in your Compute cloud. We've created a basic Ubuntu image for testing your installation. First you'll download the image, then use uec-publish-tarball to publish it:image="ubuntu1010-UEC-localuser-image.tar.gz" wget http://c0179148.cdn1.cloudfiles.rackspacecloud.com/ubuntu1010-UEC-localuser-image.tar.gz -uec-publish-tarball $image [bucket-name] [hardware-arch]Here's an example of what this command looks like with data: uec-publish-tarball ubuntu1010-UEC-localuser-image.tar.gz dub-bucket x86_64The command in return should output three references: emi, eri and eki. You need to use the emi value (for example, “ami-zqkyh9th″) for the euca-run-instances command.Now you can schedule, launch and connect to the instance, which you do with tools from the Euca2ools on the command line. Create the emi value from the uec-publish-tarball command, and then you can use the euca-run-instances command.One thing to note here, once you publish the tarball, it has to untar before you can launch an image from it. Using the 'euca-describe-images' command, wait until the state turns to "available" from "untarring.":euca-describe-imagesDepending on the image that you're using, you need a public key to connect to it. Some images have built-in accounts already created. Images can be shared by many users, so it is dangerous to put passwords into the images. Nova therefore supports injecting ssh keys into instances before they are +uec-publish-tarball $image [bucket-name] [hardware-arch]Here's an example of what this command looks like with data: uec-publish-tarball ubuntu1010-UEC-localuser-image.tar.gz dub-bucket x86_64The command in return should output three references: emi, eri and eki. You need to use the emi value (for example, “ami-zqkyh9th″) for the euca-run-instances command.Now you can schedule, launch and connect to the instance, which you do with tools from the Euca2ools on the command line. Create the emi value from the uec-publish-tarball command, and then you can use the euca-run-instances command.One thing to note here, once you publish the tarball, it has to untar before you can launch an image from it. Using the 'euca-describe-images' command, wait until the state turns to "available" from "untarring.":euca-describe-imagesDepending on the image that you're using, you need a public key to connect to it. Some images have built-in accounts already created. Images can be shared by many users, so it is dangerous to put passwords into the images. Nova therefore supports injecting ssh keys into instances before they are booted. This allows a user to login to the instances that he or she creates securely. Generally the first thing that a user does when using the system is create a keypair. Keypairs provide secure authentication to your instances. 
As part of the first boot of a @@ -1734,55 +1777,55 @@ uec-publish-tarball $image [bucket-name] [hardware-arch]Keypairs are created through the api and you use them as a parameter when launching an instance. They can be created on the command line using the euca2ools script - euca-add-keypair. Refer to the man page for the available options. Example usage:euca-add-keypair test > test.pem -chmod 600 test.pemNow, you can run the instances:euca-run-instances -k test -t m1.tiny ami-zqkyh9thHere's a description of the parameters used above:-t what type of image to create-k name of the key to inject in to the image at launch Optionally, you can use the -n parameter to indicate how many images of this type to - launch. The instance will go from “launching” to “running” in a short time, and you should be able to connect via SSH using the 'ubuntu' account, with the password 'ubuntu': (replace $ipaddress with the one you got from euca-describe-instances):ssh ubuntu@$ipaddressThe 'ubuntu' user is part of the sudoers group, so you can escalate to 'root' - via the following command:sudo -iDeleting InstancesDeleting InstancesWhen you are done playing with an instance, you can tear the instance down + euca-add-keypair. Refer to the man page for the available options. Example usage:euca-add-keypair test > test.pem +chmod 600 test.pemNow, you can run the instances:euca-run-instances -k test -t m1.tiny ami-zqkyh9thHere's a description of the parameters used above:-t what type of image to create-k name of the key to inject in to the image at launch Optionally, you can use the -n parameter to indicate how many images of this type to + launch. The instance will go from “launching” to “running” in a short time, and you should be able to connect via SSH using the 'ubuntu' account, with the password 'ubuntu': (replace $ipaddress with the one you got from euca-describe-instances):ssh ubuntu@$ipaddressThe 'ubuntu' user is part of the sudoers group, so you can escalate to 'root' + via the following command:sudo -iDeleting InstancesDeleting InstancesWhen you are done playing with an instance, you can tear the instance down using the following command (replace $instanceid with the instance IDs from above or - look it up with euca-describe-instances):euca-terminate-instances $instanceidImage managementImage managementCSS Corp- Open Source Servicesby CSS Corp Open Source Services There are several pre-built images for OpenStack available from various sources. You can download such images and use them to get familiar with OpenStack. You can refer to http://docs.openstack.org/cactus/openstack-compute/admin/content/starting-images.html for details on using such images.For any production deployment, you may like to have the ability to bundle custom images, with a custom set of applications or configuration. This chapter will guide you through the process of creating Linux images of Debian and Redhat based distributions from scratch. We have also covered an approach to bundling Windows images.There are some minor differences in the way you would bundle a Linux image, based on the distribution. Ubuntu makes it very easy by providing cloud-init package, which can be used to take care of the instance configuration at the time of launch. cloud-init handles importing ssh keys for password-less login, setting hostname etc. 
The instance acquires the instance specific configuration from Nova-compute by connecting to a meta data interface running on 169.254.169.254.While creating the image of a distro that does not have cloud-init or an equivalent package, you may need to take care of importing the keys etc. by running a set of commands at boot time from rc.local.The process used for Ubuntu and Fedora is largely the same with a few minor differences, which are explained below.In both cases, the documentation below assumes that you have a working KVM installation to use for creating the images. We are using the machine called ‘client1′ as explained in the chapter on “Installation and Configuration” for this purpose.The approach explained below will give you disk images that represent a disk without any partitions. Nova-compute can resize such disks ( including resizing the file system) based on the instance type chosen at the time of launching the instance. These images cannot have ‘bootable’ flag and hence it is mandatory to have associated kernel and ramdisk images. These kernel and ramdisk images need to be used by nova-compute at the time of launching the instance.However, we have also added a small section towards the end of the chapter about creating bootable images with multiple partitions that can be be used by nova to launch an instance without the need for kernel and ramdisk images. The caveat is that while nova-compute can re-size such disks at the time of launching the instance, the file system size is not altered and hence, for all practical purposes, such disks are not re-sizable.Creating a Linux Image – Ubuntu & FedoraCreating a Linux Image – Ubuntu & FedoraThe first step would be to create a raw image on Client1. This will represent the main HDD of the virtual machine, so make sure to give it as much space as you will need. + look it up with euca-describe-instances):euca-terminate-instances $instanceidImage managementImage managementCSS Corp- Open Source Servicesby CSS Corp Open Source Services There are several pre-built images for OpenStack available from various sources. You can download such images and use them to get familiar with OpenStack. You can refer to http://docs.openstack.org/cactus/openstack-compute/admin/content/starting-images.html for details on using such images.For any production deployment, you may like to have the ability to bundle custom images, with a custom set of applications or configuration. This chapter will guide you through the process of creating Linux images of Debian and Redhat based distributions from scratch. We have also covered an approach to bundling Windows images.There are some minor differences in the way you would bundle a Linux image, based on the distribution. Ubuntu makes it very easy by providing cloud-init package, which can be used to take care of the instance configuration at the time of launch. cloud-init handles importing ssh keys for password-less login, setting hostname etc. The instance acquires the instance specific configuration from Nova-compute by connecting to a meta data interface running on 169.254.169.254.While creating the image of a distro that does not have cloud-init or an equivalent package, you may need to take care of importing the keys etc. by running a set of commands at boot time from rc.local.The process used for Ubuntu and Fedora is largely the same with a few minor differences, which are explained below.In both cases, the documentation below assumes that you have a working KVM installation to use for creating the images. 
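A quick way to confirm that KVM prerequisite before you begin: kvm-ok is provided by Ubuntu's cpu-checker package, and on other distributions checking for the /dev/kvm device node and the kvm kernel modules is usually enough.
kvm-ok                  # Ubuntu: reports whether KVM acceleration can be used
ls -l /dev/kvm          # the device node should exist on a KVM-capable host
lsmod | grep kvm        # kvm_intel or kvm_amd should appear in the module list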
We are using the machine called ‘client1′ as explained in the chapter on “Installation and Configuration” for this purpose.The approach explained below will give you disk images that represent a disk without any partitions. Nova-compute can resize such disks ( including resizing the file system) based on the instance type chosen at the time of launching the instance. These images cannot have ‘bootable’ flag and hence it is mandatory to have associated kernel and ramdisk images. These kernel and ramdisk images need to be used by nova-compute at the time of launching the instance.However, we have also added a small section towards the end of the chapter about creating bootable images with multiple partitions that can be be used by nova to launch an instance without the need for kernel and ramdisk images. The caveat is that while nova-compute can re-size such disks at the time of launching the instance, the file system size is not altered and hence, for all practical purposes, such disks are not re-sizable.Creating a Linux Image – Ubuntu & FedoraCreating a Linux Image – Ubuntu & FedoraThe first step would be to create a raw image on Client1. This will represent the main HDD of the virtual machine, so make sure to give it as much space as you will need. kvm-img create -f raw server.img 5G -OS InstallationDownload the iso file of the Linux distribution you want installed in the image. The instructions below are tested on Ubuntu 11.04 Natty Narwhal 64-bit server and Fedora 14 64-bit. Most of the instructions refer to Ubuntu. The points of difference between Ubuntu and Fedora are mentioned wherever required. +OS InstallationDownload the iso file of the Linux distribution you want installed in the image. The instructions below are tested on Ubuntu 11.04 Natty Narwhal 64-bit server and Fedora 14 64-bit. Most of the instructions refer to Ubuntu. The points of difference between Ubuntu and Fedora are mentioned wherever required. wget http://releases.ubuntu.com/natty/ubuntu-11.04-server-amd64.iso -Boot a KVM Instance with the OS installer ISO in the virtual CD-ROM. This will start the installation process. The command below also sets up a VNC display at port 0 +Boot a KVM Instance with the OS installer ISO in the virtual CD-ROM. This will start the installation process. The command below also sets up a VNC display at port 0 sudo kvm -m 256 -cdrom ubuntu-11.04-server-amd64.iso -drive file=server.img,if=scsi,index=0 -boot d -net nic -net user -nographic -vnc :0 -Connect to the VM through VNC (use display number :0) and finish the installation.For Example, where 10.10.10.4 is the IP address of client1: +Connect to the VM through VNC (use display number :0) and finish the installation.For Example, where 10.10.10.4 is the IP address of client1: vncviewer 10.10.10.4 :0 -During the installation of Ubuntu, create a single ext4 partition mounted on ‘/’. Do not create a swap partition.In the case of Fedora 14, the installation will not progress unless you create a swap partition. Please go ahead and create a swap partition.After finishing the installation, relaunch the VM by executing the following command. +During the installation of Ubuntu, create a single ext4 partition mounted on ‘/’. Do not create a swap partition.In the case of Fedora 14, the installation will not progress unless you create a swap partition. Please go ahead and create a swap partition.After finishing the installation, relaunch the VM by executing the following command. 
sudo kvm -m 256 -drive file=server.img,if=scsi,index=0,boot=on -boot c -net nic -net user -nographic -vnc :0 -At this point, you can add all the packages you want to have installed, update the installation, add users and make any configuration changes you want in your image.At the minimum, for Ubuntu you may run the following commands +At this point, you can add all the packages you want to have installed, update the installation, add users and make any configuration changes you want in your image.At the minimum, for Ubuntu you may run the following commands sudo apt-get update sudo apt-get upgrade sudo apt-get install openssh-server cloud-init -For Fedora run the following commands as root +For Fedora run the following commands as root yum update yum install openssh-server chkconfig sshd on -Also remove the network persistence rules from /etc/udev/rules.d as their presence will result in the network interface in the instance coming up as an interface other than eth0. +Also remove the network persistence rules from /etc/udev/rules.d as their presence will result in the network interface in the instance coming up as an interface other than eth0. sudo rm -rf /etc/udev/rules.d/70-persistent-net.rules -Shutdown the Virtual machine and proceed with the next steps.Extracting the EXT4 partitionThe image that needs to be uploaded to OpenStack needs to be an ext4 filesystem image. Here are the steps to create a ext4 filesystem image from the raw image i.e server.img +Shutdown the Virtual machine and proceed with the next steps.Extracting the EXT4 partitionThe image that needs to be uploaded to OpenStack needs to be an ext4 filesystem image. Here are the steps to create a ext4 filesystem image from the raw image i.e server.img sudo losetup -f server.img sudo losetup -a -You should see an output like this: +You should see an output like this: /dev/loop0: [0801]:16908388 ($filepath) -Observe the name of the loop device ( /dev/loop0 in our setup) when $filepath is the path to the mounted .raw file.Now we need to find out the starting sector of the partition. Run: +Observe the name of the loop device ( /dev/loop0 in our setup) when $filepath is the path to the mounted .raw file.Now we need to find out the starting sector of the partition. Run: sudo fdisk -cul /dev/loop0 -You should see an output like this: +You should see an output like this: Disk /dev/loop0: 5368 MB, 5368709120 bytes @@ -1799,34 +1842,34 @@ Disk identifier: 0x00072bd4 Device Boot Start End Blocks Id System /dev/loop0p1 * 2048 10483711 5240832 83 Linux -Make a note of the starting sector of the /dev/loop0p1 partition i.e the partition whose ID is 83. This number should be multiplied by 512 to obtain the correct value. In this case: 2048 x 512 = 1048576Unmount the loop0 device: +Make a note of the starting sector of the /dev/loop0p1 partition i.e the partition whose ID is 83. This number should be multiplied by 512 to obtain the correct value. 
In this case: 2048 x 512 = 1048576Unmount the loop0 device: sudo losetup -d /dev/loop0 -Now mount only the partition(/dev/loop0p1) of server.img which we had previously noted down, by adding the -o parameter with value previously calculated value +Now mount only the partition(/dev/loop0p1) of server.img which we had previously noted down, by adding the -o parameter with value previously calculated value sudo losetup -f -o 1048576 server.img sudo losetup -a -You’ll see a message like this: +You’ll see a message like this: /dev/loop0: [0801]:16908388 ($filepath) offset 1048576 -Make a note of the mount point of our device(/dev/loop0 in our setup) when $filepath is the path to the mounted .raw file.Copy the entire partition to a new .raw file +Make a note of the mount point of our device(/dev/loop0 in our setup) when $filepath is the path to the mounted .raw file.Copy the entire partition to a new .raw file sudo dd if=/dev/loop0 of=serverfinal.img -Now we have our ext4 filesystem image i.e serverfinal.imgUnmount the loop0 device +Now we have our ext4 filesystem image i.e serverfinal.imgUnmount the loop0 device sudo losetup -d /dev/loop0 -Tweaking /etc/fstabYou will need to tweak /etc/fstab to make it suitable for a cloud instance. Nova-compute may resize the disk at the time of launch of instances based on the instance type chosen. This can make the UUID of the disk invalid. Hence we have to use File system label as the identifier for the partition instead of the UUID.Loop mount the serverfinal.img, by running +Tweaking /etc/fstabYou will need to tweak /etc/fstab to make it suitable for a cloud instance. Nova-compute may resize the disk at the time of launch of instances based on the instance type chosen. This can make the UUID of the disk invalid. Hence we have to use File system label as the identifier for the partition instead of the UUID.Loop mount the serverfinal.img, by running sudo mount -o loop serverfinal.img /mnt -Edit /mnt/etc/fstab and modify the line for mounting root partition(which may look like the following) +Edit /mnt/etc/fstab and modify the line for mounting root partition(which may look like the following) UUID=e7f5af8d-5d96-45cc-a0fc-d0d1bde8f31c / ext4 errors=remount-ro 0 1 -to +to LABEL=uec-rootfs / ext4 defaults 0 0 -Fetching Metadata in FedoraSince, Fedora does not ship with cloud-init or an equivalent, you will need to take a few steps to have the instance fetch the meta data like ssh keys etc.Edit the /etc/rc.local file and add the following lines before the line “touch /var/lock/subsys/local” +Fetching Metadata in FedoraSince, Fedora does not ship with cloud-init or an equivalent, you will need to take a few steps to have the instance fetch the meta data like ssh keys etc.Edit the /etc/rc.local file and add the following lines before the line “touch /var/lock/subsys/local” depmod -a modprobe acpiphp @@ -1839,25 +1882,25 @@ echo "AUTHORIZED_KEYS:" echo "************************" cat /root/.ssh/authorized_keys echo "************************" -Kernel and Initrd for OpenStackKernel and Initrd for OpenStackCopy the kernel and the initrd image from /mnt/boot to user home directory. These will be used later for creating and uploading a complete virtual image to OpenStack. +Kernel and Initrd for OpenStackKernel and Initrd for OpenStackCopy the kernel and the initrd image from /mnt/boot to user home directory. These will be used later for creating and uploading a complete virtual image to OpenStack. 
sudo cp /mnt/boot/vmlinuz-2.6.38-7-server /home/localadmin sudo cp /mnt/boot/initrd.img-2.6.38-7-server /home/localadmin -Unmount the Loop partition +Unmount the Loop partition sudo umount /mnt -Change the filesystem label of serverfinal.img to ‘uec-rootfs’ +Change the filesystem label of serverfinal.img to ‘uec-rootfs’ sudo tune2fs -L uec-rootfs serverfinal.img -Now, we have all the components of the image ready to be uploaded to OpenStack imaging server.Registering with OpenStackRegistering with OpenStackThe last step would be to upload the images to Openstack Imaging Server glance. The files that need to be uploaded for the above sample setup of Ubuntu are: vmlinuz-2.6.38-7-server, initrd.img-2.6.38-7-server, serverfinal.imgRun the following command +Now, we have all the components of the image ready to be uploaded to OpenStack imaging server.Registering with OpenStackRegistering with OpenStackThe last step would be to upload the images to Openstack Imaging Server glance. The files that need to be uploaded for the above sample setup of Ubuntu are: vmlinuz-2.6.38-7-server, initrd.img-2.6.38-7-server, serverfinal.imgRun the following command uec-publish-image -t image --kernel-file vmlinuz-2.6.38-7-server --ramdisk-file initrd.img-2.6.38-7-server amd64 serverfinal.img bucket1 -For Fedora, the process will be similar. Make sure that you use the right kernel and initrd files extracted above.uec-publish-image, like several other commands from euca2ools, returns the prompt back immediately. However, the upload process takes some time and the images will be usable only after the process is complete. You can keep checking the status using the command ‘euca-describe-images’ as mentioned below.Bootable ImagesBootable ImagesYou can register bootable disk images without associating kernel and ramdisk images. When you do not want the flexibility of using the same disk image with different kernel/ramdisk images, you can go for bootable disk images. This greatly simplifies the process of bundling and registering the images. However, the caveats mentioned in the introduction to this chapter apply. Please note that the instructions below use server.img and you can skip all the cumbersome steps related to extracting the single ext4 partition. +For Fedora, the process will be similar. Make sure that you use the right kernel and initrd files extracted above.uec-publish-image, like several other commands from euca2ools, returns the prompt back immediately. However, the upload process takes some time and the images will be usable only after the process is complete. You can keep checking the status using the command ‘euca-describe-images’ as mentioned below.Bootable ImagesBootable ImagesYou can register bootable disk images without associating kernel and ramdisk images. When you do not want the flexibility of using the same disk image with different kernel/ramdisk images, you can go for bootable disk images. This greatly simplifies the process of bundling and registering the images. However, the caveats mentioned in the introduction to this chapter apply. Please note that the instructions below use server.img and you can skip all the cumbersome steps related to extracting the single ext4 partition. euca-bundle-image -i server.img euca-upload-bundle -b mybucket -m /tmp/server.img.manifest.xml euca-register mybucket/server.img.manifest.xml -Image ListingImage ListingThe status of the images that have been uploaded can be viewed by using euca-describe-images command. 
The output should look like this: +Image ListingImage ListingThe status of the images that have been uploaded can be viewed by using the euca-describe-images command. The output should look like this: localadmin@client1:~$ euca-describe-images @@ -1868,53 +1911,53 @@ IMAGE ami-5e17eb9d bucket1/serverfinal.img.manifest.xml css avail IMAGE aki-3d0aeb08 bucket1/vmlinuz-2.6.38-7-server.manifest.xml css available private x86_64 kernel localadmin@client1:~$ -Creating a Windows ImageCreating a Windows ImageThe first step would be to create a raw image on Client1. This will represent the main HDD of the virtual machine, so make sure to give it as much space as you will need. +Creating a Windows ImageCreating a Windows ImageThe first step would be to create a raw image on Client1. This will represent the main HDD of the virtual machine, so make sure to give it as much space as you will need. kvm-img create -f raw windowsserver.img 20G -OpenStack presents the disk using a VIRTIO interface while launching the instance. Hence the OS needs to have drivers for VIRTIO. By default, the Windows Server 2008 ISO does not have the drivers for VIRTIO. So download a virtual floppy image containing VIRTIO drivers from the following location http://alt.fedoraproject.org/pub/alt/virtio-win/latest/images/bin/ and attach it during the installation.Start the installation by running +OpenStack presents the disk using a VIRTIO interface while launching the instance. Hence the OS needs to have drivers for VIRTIO. By default, the Windows Server 2008 ISO does not have the drivers for VIRTIO. So download a virtual floppy image containing VIRTIO drivers from the following location http://alt.fedoraproject.org/pub/alt/virtio-win/latest/images/bin/ and attach it during the installation.Start the installation by running sudo kvm -m 1024 -cdrom win2k8_dvd.iso -drive file=windowsserver.img,if=virtio,boot=on -fda virtio-win-1.1.16.vfd -boot d -nographic -vnc :0 -When the installation prompts you to choose a hard disk device, you won’t see any devices available. Click on “Load drivers” at the bottom left and load the drivers from A:\i386\Win2008. After the installation is over, boot into it once, install any additional applications you need, and make any configuration changes you want. Also ensure that RDP is enabled, as that is the only way you can connect to a running instance of Windows.
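For reference, the security group side of that RDP access uses the same euca-authorize pattern shown earlier in this guide; the group name default below is an assumption, so substitute whichever group your Windows instances actually run in.
euca-authorize -P icmp -t -1:-1 default     # allow ping
euca-authorize -P tcp -p 3389 default       # allow incoming RDP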
Windows firewall needs to be configured to allow incoming ICMP and RDP connections.For OpenStack to allow incoming RDP Connections, use euca-authorize command to open up port 3389 as described in the chapter on “Security”.Shut-down the VM and upload the image to OpenStack euca-bundle-image -i windowsserver.img euca-upload-bundle -b mybucket -m /tmp/windowsserver.img.manifest.xml euca-register mybucket/windowsserver.img.manifest.xml -Understanding the Compute Service ArchitectureUnderstanding the Compute Service ArchitectureThese basic categories describe the service architecture and what's going on within the cloud controller.API ServerAPI ServerAt the heart of the cloud framework is an API Server. This API Server makes command and control of the hypervisor, storage, and networking programmatically available to users in realization of the definition of cloud computing. +Understanding the Compute Service ArchitectureUnderstanding the Compute Service ArchitectureThese basic categories describe the service architecture and what's going on within the cloud controller.API ServerAPI ServerAt the heart of the cloud framework is an API Server. This API Server makes command and control of the hypervisor, storage, and networking programmatically available to users in realization of the definition of cloud computing. The API endpoints are basic http web services which handle authentication, authorization, and basic command and control functions using various API interfaces under the Amazon, Rackspace, and related models. This enables API compatibility with multiple existing tool sets created for interaction with offerings from other vendors. This broad compatibility prevents vendor lock-in. - Message QueueMessage Queue + Message QueueMessage Queue A messaging queue brokers the interaction between compute nodes (processing), volumes (block storage), the networking controllers (software which controls network infrastructure), API endpoints, the scheduler (determines which physical hardware to allocate to a virtual resource), and similar components. Communication to and from the cloud controller is by HTTP requests through multiple API endpoints. A typical message passing event begins with the API server receiving a request from a user. The API server authenticates the user and ensures that the user is permitted to issue the subject command. Availability of objects implicated in the request is evaluated and, if available, the request is routed to the queuing engine for the relevant workers. Workers continually listen to the queue based on their role, and occasionally their type hostname. When such listening produces a work request, the worker takes assignment of the task and begins its execution. Upon completion, a response is dispatched to the queue which is received by the API server and relayed to the originating user. Database entries are queried, added, or removed as necessary throughout the process. -Compute WorkerCompute WorkerCompute workers manage computing instances on host machines. Through the API, commands are dispatched to compute workers to:Run instancesTerminate instancesReboot instancesAttach volumesDetach volumesGet console outputNetwork ControllerNetwork ControllerThe Network Controller manages the networking resources on host machines. The API server dispatches commands through the message queue, which are subsequently processed by Network Controllers. 
Specific operations include:Allocate fixed IP addressesConfiguring VLANs for projectsConfiguring networks for compute nodesVolume WorkersVolume WorkersVolume Workers interact with iSCSI storage to manage LVM-based instance volumes. Specific functions include: - Create volumesDelete volumesEstablish Compute volumesVolumes may easily be transferred between instances, but may be attached to only a single instance at a time.Managing the CloudManaging the CloudThere are two main tools that a system administrator will find useful to manage their cloud; +Compute WorkerCompute WorkerCompute workers manage computing instances on host machines. Through the API, commands are dispatched to compute workers to:Run instancesTerminate instancesReboot instancesAttach volumesDetach volumesGet console outputNetwork ControllerNetwork ControllerThe Network Controller manages the networking resources on host machines. The API server dispatches commands through the message queue, which are subsequently processed by Network Controllers. Specific operations include:Allocate fixed IP addressesConfiguring VLANs for projectsConfiguring networks for compute nodesVolume WorkersVolume WorkersVolume Workers interact with iSCSI storage to manage LVM-based instance volumes. Specific functions include: + Create volumesDelete volumesEstablish Compute volumesVolumes may easily be transferred between instances, but may be attached to only a single instance at a time.Managing the CloudManaging the CloudThere are two main tools that a system administrator will find useful to manage their cloud; the nova-manage command or the Euca2ools command line commands. With the Diablo release, the nova-manage command has been deprecated and you must specify if you want to use it by using the --use_deprecated_auth flag in nova.conf. You must also use the modified middleware stack that is commented out in the default paste.ini file.The nova-manage command may only be run by users with admin privileges. Commands for euca2ools can be used by all users, though specific commands may be restricted by Role - Based Access Control in the deprecated nova auth system. Using the nova-manage commandUsing the nova-manage commandThe nova-manage command may be used to perform many essential functions for + Based Access Control in the deprecated nova auth system. Using the nova-manage commandUsing the nova-manage commandThe nova-manage command may be used to perform many essential functions for administration and ongoing maintenance of nova, such as user creation, vpn - management, and much more.The standard pattern for executing a nova-manage command is: nova-manage category command [args]For example, to obtain a list of all projects: nova-manage project listRun without arguments to see a list of available command categories: nova-manageCommand categories are: account, agent, config, db, fixed, flavor, floating, host, + management, and much more.The standard pattern for executing a nova-manage command is: nova-manage category command [args]For example, to obtain a list of all projects: nova-manage project listRun without arguments to see a list of available command categories: nova-manageCommand categories are: account, agent, config, db, fixed, flavor, floating, host, instance_type, image, network, project, role, service, shell, user, version, vm, - volume, and vpn. 
You can also run with a category argument such as user to see a list of all commands in that category: nova-manage userManaging Compute UsersManaging Compute UsersAccess to the Euca2ools (ec2) API is controlled by an access and secret key. The + volume, and vpn. You can also run with a category argument such as user to see a list of all commands in that category: nova-manage userManaging Compute UsersManaging Compute UsersAccess to the Euca2ools (ec2) API is controlled by an access and secret key. The user’s access key needs to be included in the request, and the request must be signed with the secret key. Upon receipt of API requests, Compute will verify the signature and execute commands on behalf of the user. In order to begin using nova, you will need to create a user. This can be easily accomplished using the user create or user admin commands in nova-manage. user create will create a regular user, whereas user admin will create an admin user. The syntax of - the command is nova-manage user create username [access] [secretword]. For example: nova-manage user create john my-access-key a-super-secret-keyIf you do not specify an access or secret key, a random uuid will be created - automatically.CredentialsCredentialsNova can generate a handy set of credentials for a user. These credentials include a CA for bundling images and a file for setting environment variables to be used by euca2ools. If you don’t need to bundle images, just the environment script is required. You can export one with the project environment command. The syntax of the command is nova-manage project environment project_id user_id [filename]. If you don’t specify a filename, it will be exported as novarc. After generating the file, you can simply source it in bash to add the variables to your environment: + the command is nova-manage user create username [access] [secretword]. For example: nova-manage user create john my-access-key a-super-secret-keyIf you do not specify an access or secret key, a random uuid will be created + automatically.CredentialsCredentialsNova can generate a handy set of credentials for a user. These credentials include a CA for bundling images and a file for setting environment variables to be used by euca2ools. If you don’t need to bundle images, just the environment script is required. You can export one with the project environment command. The syntax of the command is nova-manage project environment project_id user_id [filename]. If you don’t specify a filename, it will be exported as novarc. After generating the file, you can simply source it in bash to add the variables to your environment: nova-manage project environment john_project john . novarcIf you do need to bundle images, you will need to get all of the credentials using project zipfile. Note that zipfile will give you an error message if networks haven’t been created yet. Otherwise zipfile has the same syntax as environment, only the default file name is nova.zip. Example usage: - + nova-manage project zipfile john_project john unzip nova.zip . novarc - Role Based Access ControlRole Based Access ControlRoles control the API actions that a user is allowed to perform. For example, a user + Role Based Access ControlRole Based Access ControlRoles control the API actions that a user is allowed to perform. For example, a user cannot allocate a public ip without the netadmin role. It is important to remember that a users de facto permissions in a project is the intersection of user (global) roles and project (local) roles. 
So for john to have netadmin permissions in his project, both of these separate roles need to be specified. You can add roles with role add. The syntax is nova-manage role add user_id role [project_id]. Let's give john the netadmin role for his project: nova-manage role add john netadmin
nova-manage role add john netadmin john_projectRole-based access control (RBAC) is an approach to restricting system access to authorized users based on an individual's role within an organization. Various employee functions require certain levels of system access in order to be successful. These functions are mapped to defined roles and individuals are categorized accordingly. Since users are not assigned permissions directly, but only acquire them through their role (or roles), management of individual user rights becomes a matter of assigning appropriate roles to the user. This simplifies common operations, such as adding a user or changing a user's department.
Nova's rights management system employs the RBAC model and currently supports the following roles:Cloud Administrator. (cloudadmin) Users of this class enjoy complete system access.IT Security. (itsec) This role is limited to IT security personnel. It permits role holders to quarantine instances.System Administrator. (sysadmin) The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and launch and terminate instances.Network Administrator. (netadmin) Users with this role are permitted to allocate and assign publicly accessible IP addresses as well as create and modify firewall rules.Developer. This is a general purpose role that is assigned to users by default.Project Manager. (projectmanager) This is a role that is assigned upon project creation and can't be added or removed, but this role can do anything a sysadmin can do.RBAC management is exposed through the dashboard for simplified user management.Managing VolumesManaging VolumesNova-volume is the service that allows you to give extra block level storage to your OpenStack Compute instances. You may recognize this as similar to the offering from Amazon EC2, Elastic Block Storage (EBS). However, nova-volume is not the same implementation that EC2 uses today.
Nova-volume is an iSCSI solution that employs the @@ -1923,14 +1966,14 @@ euca-register mybucket/windowsserver.img.manifest.xml which multiple servers can attach to. Before going any further ; let's present the nova-volume implementation in OpenStack : The nova-volumes service uses iSCSI-exposed LVM volumes to the compute nodes which run instances. Thus, there are two components involved: - 1.lvm2, which works with a VG called "nova-volumes" (Refer to + 1.lvm2, which works with a VG called "nova-volumes" (Refer to http://en.wikipedia.org/wiki/Logical_Volume_Manager_(Linux) - for further details)2.open-iscsi, the iSCSI implementation which manages iSCSI sessions on the compute nodes + for further details)2.open-iscsi, the iSCSI implementation which manages iSCSI sessions on the compute nodes Here is what happens from the volume creation to its attachment (we use here the - euca2ools, but the same explanation goes with the API): 1.The volume is created via $euca-create-volume; which creates an LV into the - volume group (VG) "nova-volumes" 2.The volume is attached to an instance via $euca-attach-volume; which creates a - unique iSCSI IQN that will be exposed to the compute node. 3.The compute node which run the concerned instance has now an active ISCSI - session; and a new local storage (usually a /dev/sdX disk) 4.libvirt uses that local storage as a storage for the instance; the instance + euca2ools, but the same explanation goes with the API): 1.The volume is created via $euca-create-volume; which creates an LV into the + volume group (VG) "nova-volumes" 2.The volume is attached to an instance via $euca-attach-volume; which creates a + unique iSCSI IQN that will be exposed to the compute node. 3.The compute node which run the concerned instance has now an active ISCSI + session; and a new local storage (usually a /dev/sdX disk) 4.libvirt uses that local storage as a storage for the instance; the instance get a new disk (usually a /dev/vdX disk) For this particular walkthrough, there is one cloud controller running nova-api, nova-compute, nova-scheduler, nova-objectstore, nova-network and nova-volume services. There are two additional compute nodes running nova-compute. The walkthrough uses a custom @@ -1939,39 +1982,39 @@ euca-register mybucket/windowsserver.img.manifest.xml but it is essential for nova-volumes to work that the mode you are currently using is set up. Please refer to the Section 7 "Networking" for more details.To set up Compute to use volumes, ensure that nova-volume is installed along with lvm2. The guide will be split in three parts : - A- Installing the nova-volume service on the cloud controller.B- Configuring the nova-volumes volume group on the compute nodes.C- Troubleshooting your nova-volumes installation. - A- Install nova-volumes on the cloud controller.A- Install nova-volumes on the cloud controller. This is simply done by installing the two components on the cloud controller : apt-get install lvm2 nova-volume + A- Installing the nova-volume service on the cloud controller.B- Configuring the nova-volumes volume group on the compute nodes.C- Troubleshooting your nova-volumes installation. + A- Install nova-volumes on the cloud controller.A- Install nova-volumes on the cloud controller. 
This is simply done by installing the two components on the cloud controller : apt-get install lvm2 nova-volume
Configure Volumes for use with nova-volumes If you do not already have LVM volumes on hand, but have free drive space, you will need to create an LVM volume before proceeding. Here is a short rundown of how you would create an LVM volume from free drive space on your system. Start off by issuing an fdisk command to the drive with the free space:
fdisk /dev/sda
Once in fdisk, perform the following commands: 1.Press 'n' to create a new disk partition,2.Press 'p' to create a primary disk partition,3.Press '1' to denote it as the 1st disk partition,4.Either press ENTER twice to accept the default of 1st and last cylinder – to convert the remainder of the hard disk to a single disk partition -OR- press ENTER once to accept the default of the 1st, and then choose how big you want the partition to be by specifying +size{K,M,G} e.g. +5G or +6700M.5.Press 't', then select the new partition you made.6.Enter '8e' to change the new partition's type to 8e, i.e. the Linux LVM partition type.7.Press 'p' to display the hard disk partition setup. Please take note that the first partition is denoted as /dev/sda1 in Linux.8.Press 'w' to write the partition table and exit fdisk upon completion.Refresh your partition table to ensure your new partition shows up, and verify with fdisk. We then inform the OS about the partition table update :
partprobe
Then run fdisk again : fdisk -l (you should see your new partition in this listing)
Here is how you can set up partitioning during the OS install to prepare for this nova-volume configuration:root@osdemo03:~# fdisk -l

Device Boot Start End Blocks Id System
/dev/sda1 * 1 12158 97280 83 Linux
@@ -1989,9 +2032,9 @@ Device Boot Start End Blocks Id System
for LVM use, perform the following steps to configure LVM and prepare it as nova-volumes. You must name your volume group 'nova-volumes' or things will not work as expected:pvcreate /dev/sda5
vgcreate nova-volumes /dev/sda5 B- Configuring nova-volumes on the compute nodes B- Configuring nova-volumes on the compute nodes Since you have created the volume group, you will be able to use the following tools for managing your volumes : euca-create-volumeeuca-attach-volumeeuca-detach-volumeeuca-delete-volume
Installing and configuring the iSCSI initiator Remember that every node will act as the iSCSI initiator while the server running nova-volumes will act as the iSCSI target.
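If you are unsure which machine is currently playing which part, a quick look at the installed packages usually settles it; a minimal sketch, assuming a Debian/Ubuntu system where the target side comes from the iscsitarget package and the initiator side from open-iscsi, as in this guide:
dpkg -l | grep -E "iscsitarget|open-iscsi"
Run it on the cloud controller (or wherever nova-volume lives) and on each compute node; the nova-volumes host should carry the target packages, while the compute nodes only need the initiator.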
So make sure, before
@@ -1999,35 +2042,35 @@ vgcreate nova-volumes /dev/sda5
First install the open-iscsi package on your compute-nodes only :
apt-get install open-iscsi You have to enable it so the startup script (/etc/init.d/open-iscsi) will work :
sed -i 's/false/true/g' /etc/default/iscsitarget
Then run :
service iscsitarget startConfigure nova.conf flag fileEdit your nova.conf to include a new flag, "--iscsi_ip_prefix=192.168." The flag will be used by the compute node when the iSCSI discovery is performed and the session created. The prefix, based on the first two bytes, will allow the iSCSI discovery to use all the available routes (also known as multipathing) to the iSCSI server (e.g. the nova-volumes host) on your network. We will see in the "Troubleshooting" section how to deal with iSCSI sessions.
Start nova-volume and create volumesYou are now ready to fire up nova-volume, and start creating volumes!service nova-volume startOnce the service is started, log in to your controller and ensure you've properly sourced your 'novarc' file. You will be able to use the euca2ools related to volume interactions (see above).One of the first things you should do is make sure that nova-volume is checking in as expected. You can do so using nova-manage:nova-manage service listIf you see a smiling 'nova-volume' in there, you are looking good. Now create a new volume:euca-create-volume -s 7 -z nova (-s refers to the size of the volume in GB, and -z is the default zone (usually nova))You should get some output similar to this:
VOLUME vol-0000000b 7 creating (wayne, None, None, None) 2011-02-11 06:58:46.941818
You can view the status of the volume's creation using 'euca-describe-volumes'. Once that status is 'available', it is ready to be attached to an instance:euca-attach-volume -i i-00000008 -d /dev/vdb vol-00000009 (-i refers to the instance you will attach the volume to, -d is the device name on the compute node, and the final argument is the volume name.)By doing that, the compute node which runs the instance performs an iSCSI connection and creates a session.
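Back on the controller, euca-describe-volumes should now report the volume as attached; the output looks roughly like the following (identifiers and project names are illustrative, reusing the examples above):
euca-describe-volumes
VOLUME vol-00000009 7 nova in-use (wayne, nova-cc1, i-00000008[nova-cn1], /dev/vdb) 2011-02-11T06:58:46Z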
You can ensure that the session has been created by running : iscsiadm -m session
Which should output :
root@nova-cn1:~# iscsiadm -m session
tcp: [1] 172.16.40.244:3260,1 iqn.2010-10.org.openstack:volume-0000000b
If you do not get any errors, it is time to log in to instance 'i-00000008' and see if the new space is there. You can check the volume attachment by running : dmesg | tail
You should see a new disk there. Here is the output from 'fdisk -l' from i-00000008:Disk /dev/vda: 10.7 GB, 10737418240 bytes
16 heads, 63 sectors/track, 20805 cylinders
Units = cylinders of 1008 * 512 = 516096 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
@@ -2038,40 +2081,40 @@ Disk /dev/vda doesn't contain a valid partition table
16 heads, 63 sectors/track, 41610 cylinders
Units = cylinders of 1008 * 512 = 516096 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes Disk identifier: 0x00000000 Now with the space presented, let's configure it for use:fdisk /dev/vdb1.Press 'n' to create a new disk partition.2.Press 'p' to create a primary disk partition.3.Press '1' to denote it as the 1st disk partition.4.Press ENTER twice to accept the default of 1st and last cylinder – to convert the remainder of the hard disk to a single disk partition.5.Press 't', then select the new partition you made.6.Enter '83' to change the new partition's type to 83, i.e. the Linux partition type.7.Press 'p' to display the hard disk partition setup. Please take note that the first partition is denoted as /dev/vdb1 in your instance.8.Press 'w' to write the partition table and exit fdisk upon completion.9.Lastly, make a file system on the partition and mount it.
mkfs.ext3 /dev/vdb1
mkdir /extraspace
mount /dev/vdb1 /extraspace
Your new volume has now been successfully mounted, and is ready for use! The 'euca' commands are pretty self-explanatory, so play around with them and create new volumes, tear them down, attach and reattach, and so on.
C- Troubleshoot your nova-volumes installationC- Troubleshoot your nova-volumes installationIf the volume attachment doesn't work, you should be able to perform different checks in order to see where the issue is.
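Before digging into the logs, a couple of quick checks on the nova-volumes host can narrow things down; a minimal sketch, assuming the volume group is named nova-volumes as described above (the logical volume names mirror the volume IDs, matching the IQNs you see in the iSCSI sessions):
nova-manage service list
lvs nova-volumes
If nova-volume is no longer reporting in with a smiley face, or the logical volume for your volume ID is missing from the lvs output, the problem is on the server side rather than on the compute node.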
The nova-volume.log and nova-compute.log will help you to diagnose the errors you might encounter : nova-compute.log / nova-volume.log
ERROR "15 - already exists"
"ProcessExecutionError: Unexpected error while running command.\nCommand: sudo iscsiadm -m node -T iqn.2010-10.org.openstack:volume-00000001 -p 10.192.12.34:3260 --login\nExit code: 255\nStdout: 'Logging in to [iface: default, target: iqn.2010-10.org.openstack:volume-00000001, portal: 10.192.12.34,3260]\\n'\nStderr: 'iscsiadm: Could not login to [iface: default, target: iqn.2010-10.org.openstack:volume-00000001, portal:10.192.12.34,3260]: openiscsiadm: initiator reported error (15 - already exists)\\n'\n"]
This error sometimes happens when you run an euca-detach-volume and euca-attach-volume, and/or try to attach another volume to an instance. It happens when the compute node already has a running session while you try to attach a volume using the same IQN. You can check that by running : iscsiadm -m session
You should see a session with the same name as the one the compute node is trying to open. This is usually related to the several routes available to reach the iSCSI target; those routes can be seen by running on the compute node :
iscsiadm -m discovery -t st -p $ip_of_nova-volumes
You should see multiple addresses for reaching the same volume. The only known workaround is to change the "--iscsi_ip_prefix" flag and use all 4 bytes (the full IP) of the nova-volumes server, eg : "--iscsi_ip_prefix=192.168.2.1"
You will then have to restart both the nova-compute and nova-volume services. ERROR "Cannot resolve host"
(nova.root): TRACE: ProcessExecutionError: Unexpected error while running command.
(nova.root): TRACE: Command: sudo iscsiadm -m discovery -t sendtargets -p ubuntu03c
(nova.root): TRACE: Exit code: 255
(nova.root): TRACE: Stdout: ''
@@ -2080,15 +2123,15 @@ cannot resolve host name ubuntu03c\niscsiadm: Could not perform SendTargets d
(nova.root): TRACE:This error happens when the compute node is unable to resolve the nova-volume server name. You can either add a record for the server if you have a DNS server, or add it to the "/etc/hosts" file of the nova-compute node. ERROR "No route to host"
iscsiadm: cannot make connection to 172.29.200.37: No route to host\niscsiadm: cannot make connection to 172.29.200.37
This error could be caused by several things, but it means only one thing : open-iscsi is unable to establish communication with your nova-volumes server.The first thing you can do is run a telnet session in order to see if you are able to reach the nova-volumes server. From the
From the - compute-node, run :telnet $ip_of_nova_volumes 3260 If the session times out, check the server firewall ; or try to ping + compute-node, run :telnet $ip_of_nova_volumes 3260 If the session times out, check the server firewall ; or try to ping it. You could also run a tcpdump session which will likely gives you - extra information : tcpdump -nvv -i $iscsi_interface port dest $ip_of_nova_volumes Again, try to manually run an iSCSI discovery via : iscsiadm -m discovery -t st -p $ip_of_nova-volumes"Lost connectivity between nova-volumes and + extra information : tcpdump -nvv -i $iscsi_interface port dest $ip_of_nova_volumes Again, try to manually run an iSCSI discovery via : iscsiadm -m discovery -t st -p $ip_of_nova-volumes"Lost connectivity between nova-volumes and node-compute ; how to restore a clean state ?" Network disconnection can happens, from an "iSCSI view", losing connectivity could be seen as a physical removal of a server's disk. If @@ -2096,7 +2139,7 @@ cannot resolve host name ubuntu03c\​niscsiadm: Could not perform SendTargets d won't be able to detach the volume. You would encounter several errors. Here is how you could clean this : First, from the nova-compute, close the active (but stalled) iSCSI session, refer to the volume attached to get the session, and perform - the following command : iscsiadm -m session -r $session_id -uHere is an iscsi -m session output : + the following command : iscsiadm -m session -r $session_id -uHere is an iscsi -m session output : tcp: [1] 172.​16.​40.​244:3260,​1 iqn.​2010-10.​org.​openstack:volume-0000000e tcp: [2] 172.​16.​40.​244:3260,​1 iqn.​2010-10.​org.​openstack:volume-00000010 tcp: [3] 172.​16.​40.​244:3260,​1 iqn.​2010-10.​org.​openstack:volume-00000011 @@ -2107,10 +2150,10 @@ tcp: [7] 172.​16.​40.​244:3260,​1 iqn.​2010-10.​org.​openstack:vol tcp: [9] 172.​16.​40.​244:3260,​1 iqn.​2010-10.​org.​openstack:volume-00000014 I would close the session number 9 if I want to free the volume 00000014. The cloud-controller is actually unaware about the iSCSI session closing, and will keeps the volume state as "in-use": - VOLUME vol-00000014 30 nova in-use (nuage-and-co,​ nova-cc1,​ i-0000009a[nova-cn1],​ \​/​dev\​/​sdb) 2011-07-18T12:45:39ZYou + VOLUME vol-00000014 30 nova in-use (nuage-and-co,​ nova-cc1,​ i-0000009a[nova-cn1],​ \​/​dev\​/​sdb) 2011-07-18T12:45:39ZYou now have to inform it that the disk can be used. Nova stores the volumes info into the "volumes" table. You will have to update four fields into - the database nova uses (eg. MySQL). First, conect to the database : mysql -uroot -p$password novaThen, we get some information from the table "volumes" : + the database nova uses (eg. MySQL). 
First, connect to the database : mysql -uroot -p$password novaThen, we get some information from the table "volumes" :
mysql> select id, created_at, size, instance_id, status, attach_status, display_name from volumes;
+----+---------------------+------+-------------+----------------+---------------+--------------+
| id | created_at          | size | instance_id | status         | attach_status | display_name |
+----+---------------------+------+-------------+----------------+---------------+--------------+
@@ -2138,28 +2181,28 @@ tcp: [9] 172.16.40.244:3260,1 iqn.2010-10.org.openstack:vol
| 21 | 2011-08-30 15:39:16 |    5 | NULL        | error_deleting | detached      | NULL         |
+----+---------------------+------+-------------+----------------+---------------+--------------+
21 rows in set (0.00 sec) Once you get the volume id, you will have to run the following SQL queries (let's say my volume 14 has the id number 21) :
mysql> update volumes set mountpoint=NULL where id=21;
mysql> update volumes set status="available" where status="error_deleting" and id=21;
mysql> update volumes set attach_status="detached" where id=21;
mysql> update volumes set instance_id=0 where id=21;
Now if you run euca-describe-volumes again from the cloud controller, you should see an available volume : VOLUME vol-00000014 30 nova available (nuage-and-co, nova-cc1, None, None) 2011-07-18T12:45:39ZYou can now proceed to the volume attachment again!
Using Live MigrationUsing Live MigrationBefore starting live migration, check the "Configuring Live Migration" section.Live migration provides a scheme to migrate running instances from one OpenStack Compute server to another OpenStack Compute server. No visible downtime and no transaction loss is the ideal goal. This feature can be used as depicted below. First, check which instances are running on a specific server.
# euca-describe-instances
Reservation:r-2raqmabo
RESERVATION r-2raqmabo admin default
INSTANCE i-00000003 ami-ubuntu-lucid a.b.c.d e.f.g.h running testkey (admin, HostB) 0 m1.small 2011-02-15 07:28:32 nova
In this example, i-00000003 is running on HostB.Second, pick another server to which the instances can be migrated.
# nova-manage service list
HostA nova-scheduler enabled  :-) None
HostA nova-volume enabled  :-) None
HostA nova-network enabled  :-) None
HostB nova-compute enabled  :-) None
HostC nova-compute enabled  :-) None
In this example, HostC can be picked because nova-compute is running on it.Third, check that HostC has enough resources for live migration.
# nova-manage service update_resource HostC
# nova-manage service describe_resource HostC
HOST PROJECT cpu mem(mb) disk(gb)
@@ -2169,13 +2212,13 @@ HostC p1 5 10240 150
HostC p2 5 10240 150
.....
Remember to use update_resource first, then describe_resource. Otherwise, Host(used) is not updated.cpu: the number of CPUsmem(mb): total amount of memory (MB)disk(gb): total amount of NOVA-INST-DIR/instances (GB)The first line shows the total amount of resources the physical server has.The second line shows the currently used resources.The third line and below show the resources used per project.Finally, run the live migration:
# nova-manage vm live_migration i-00000003 HostC
Migration of i-00000003 initiated. Check its progress using euca-describe-instances.
Make sure the instances are migrated successfully with euca-describe-instances. If instances are still running on HostB, check the log files (source and destination nova-compute, and nova-scheduler)Reference for Flags in nova.confReference for Flags in nova.confFor a complete list of all available flags for each OpenStack Compute service, run bin/nova-<servicename> --help. Table 8.1. Description of common nova.conf flags (nova-api, nova-compute)
 Flag 
 Default 
 Description 
@@ -2310,11 +2353,12 @@ Migration of i-00000001 initiated. Check its progress using euca-describe-ins
 --flat_interface 
 default: '' 
- FlatDhcp will bridge into this interface if set 
+ FlatDhcp will bridge into this interface 
 --flat_network_bridge 
- default: 'br100' 
- Bridge for simple network instances 
+ default: '' 
+ Bridge for simple network instances, formerly defaulted to br100; required setting for Flat DHCP 
 --flat_network_dhcp_start 
 default: '10.0.0.2' 
@@ -2367,7 +2411,7 @@ Migration of i-00000001 initiated. Check its progress using euca-describe-ins
 --image_service 
 default: 'nova.image.s3.S3ImageService' 
 The service to use for retrieving and searching for images. Images must be registered using euca2ools. Options: nova.image.s3.S3ImageServiceS3 backend for the Image Service.nova.image.local.LocalImageServiceImage service storing images to local disk. It assumes that image_ids are integers. This is the default setting if no image manager is defined here.nova.image.glance.GlanceImageServiceGlance back end for storing and retrieving images; See http://glance.openstack.org for more info. 
 --image_decryption_dir 
 default: 'tmp/' 
@@ -2406,6 +2450,10 @@ Migration of i-00000001 initiated. Check its progress using euca-describe-ins
 --logging_exception_prefix 
 default: '(%(name)s): TRACE: ' 
 String value; Prefix each line of exception output with this format.
+ + --max_cores + default: '16' + Integer value; Maximum number of instance cores to allow per compute host. --my_ip default: '' @@ -2416,7 +2464,7 @@ Migration of i-00000001 initiated.​ Check its progress using euca-describe-ins Configures how your controller will communicate with additional OpenStack Compute nodes and virtual machines. Options: - nova.network.manager.FlatManagerSimple, non-VLAN networkingnova.network.manager.FlatDHCPManagerFlat networking with DHCPnova.network.manager.VlanManagerVLAN networking with DHCP; This is the Default if no network + nova.network.manager.FlatManagerSimple, non-VLAN networkingnova.network.manager.FlatDHCPManagerFlat networking with DHCPnova.network.manager.VlanManagerVLAN networking with DHCP; This is the Default if no network manager is defined here in nova.conf. @@ -2583,7 +2631,7 @@ Migration of i-00000001 initiated.​ Check its progress using euca-describe-ins --vpn_key_suffix default: '-vpn' This is the interface that VlanManager uses to bind bridges and VLANs to. - Table 8.2. Description of nova.conf flags specific to nova-volume + Table 8.2. Description of nova.conf flags specific to nova-volume Flag Default Description @@ -2603,47 +2651,54 @@ Migration of i-00000001 initiated.​ Check its progress using euca-describe-ins --volume_topic default: 'volume' String value; Name of the topic that volume nodes listen on - OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk 9. OpenStack InterfacesOpenStack has components that provide a view of the OpenStack installation such as a Django-built website that serves as a dashboard and the ability to connect to running instances using a VNC connection via a VNC Proxy.About the DashboardAbout the DashboardYou can use a dashboard interface with an OpenStack Compute installation with a web-based + OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo 9. OpenStack InterfacesOpenStack has components that provide a view of the OpenStack installation such as a Django-built website that serves as a dashboard and the ability to connect to running instances using a VNC connection via a VNC Proxy.About the DashboardAbout the DashboardYou can use a dashboard interface with an OpenStack Compute installation with a web-based console provided by the Openstack-Dashboard project. It provides a reference implementation of a Django site that provides web-based interactions with the OpenStack Compute cloud controller. For more information about the Openstack-Dashboard project, - please visit: http://launchpad.net/openstack-dashboard. These instructions are for a test deployment of an OpenStack Dashboard. They configure your dashboard to use the default Django server. To create a more robust, production-ready installation, you would configure this with an Apache web server. System Requirements for the DashboardSystem Requirements for the DashboardYou should have a running OpenStack Compute installation with the Keystone module enabled. The dashboard needs to be installed on the node that can contact the Keystone service.You should know the URL of your Keystone endpoint. 
You must know the credentials of a valid Keystone tenant.Python 2.6 is required, and these instructions have been tested with Ubuntu 10.10.Installing the OpenStack DashboardInstalling the OpenStack DashboardHere are the overall steps for building the dashboard.1.Get the source for the openstack-dashboard project.2.Build and configure the openstack-dashboard.3.Run the server that starts the dashboard.Before you begin, you must have git installed. It's straightforward to install - it with sudo apt-get install git-core. Create a source directory to house the project:mkdir src -cd src Next, get the openstack-dashboard project, which provides all the look and feel for the OpenStack Dashboard. + please visit: http://launchpad.net/openstack-dashboard. These instructions are for a test deployment of an OpenStack Dashboard. They configure your dashboard to use the default Django server. To create a more robust, production-ready installation, you would configure this with an Apache web server. System Requirements for the DashboardSystem Requirements for the DashboardYou should have a running OpenStack Compute installation with the Keystone module + enabled for identity management. Follow these general instructions for installing Identity Management. For a good all-in-one Nova/Glance/Keystone installation there is the devstack project.The dashboard needs to be installed on the node that can contact the Keystone service.You should know the URL of your Identity endpoint and the Compute endpoint. You must know the credentials of a valid Keystone tenant.You must have git installed. It's straightforward to install it with sudo + apt-get install git-core. Python 2.6 is required, and these instructions have been tested with Ubuntu 10.10. It + should run on any system with Python 2.6 or 2.7 that is capable of running Django + including Mac OS X (installing prerequisites may differ depending on platform). Optional components:an Image Store (Glance) endpoint an Object Store (Swift) endpoint a Quantum (networking) + endpointInstalling the OpenStack DashboardInstalling the OpenStack DashboardHere are the overall steps for building the dashboard.1.Get the source for the openstack-dashboard project.2.Configure the openstack-dashboard.3.Install.4.Run the server that starts the dashboard.Before you begin, you must have git installed. Create a source directory to house the project:mkdir src +cd src Next, get the openstack-dashboard project, which provides all the look and feel for the OpenStack Dashboard. git clone https://github.com/4P/openstack-dashboard -You should now have a directory called openstack-dashboard, which contains the OpenStack Dashboard application.Build and Configure Openstack-DashboardNow you can configure the dashboard application. The first step in configuring the application +You should now have a directory called openstack-dashboard, which contains the OpenStack Dashboard application.Configure Openstack-DashboardNow you can configure the dashboard application. The first step in configuring the application is to create your local_settings.py file. An example is provided that you can copy to local_settings.py and then modify for your environment. 
- -cd openstack-dashboard/openstack-dashboard/local -cp local_settings.py.example local_settings.py + +cd openstack-dashboard/openstack-dashboard +cp local_settings.py.example local/local_settings.py vi local_settings.py - In the new copy of the local_settings.py file, change these important options:OPENSTACK_ADMIN_TOKEN : Token for Keystone endpoint.OPENSTACK_KEYSTONE_URL : URL for the Keystone endpoint.SWIFT_ENABLED : Flag to enable/disable swift support from the dashboard. Now install the openstack-dashboard environment. This installs all the dependencies for - openstack-dashboard. If you don't already have easy_install installed, - use sudo apt-get install python-setuptools. - - -sudo apt-get install -y python-setuptools -sudo easy_install virtualenv -python tools/install_venv.py - - This step takes some time since it downloads a number of dependencies.Run the ServerNow run the server on a high port value so that you can validate the installation.tools/with_venv.sh dashboard/manage.py runserver 0.0.0.0:8000Make sure that your firewall isn't blocking TCP/8000 and just point your browser at this server on port 8000. If you are running the server on the same machine as your browser, this would be "http://localhost:8000". Getting Started with the VNC ProxyGetting Started with the VNC Proxy + In the new copy of the local_settings.py file, change these important options:OPENSTACK_ADMIN_TOKEN : Token for Keystone endpoint.OPENSTACK_KEYSTONE_URL : URL for the Keystone endpoint.Keystone Configuration (required) + + The admin token can be generated by executing something like the following using the keystone-manage command on the Keystone host: keystone-manage token add 999888777666 admin admin 2015-02-05T00:00To use this token you would add the following to local_settings.py:OPENSTACK_ADMIN_TOKEN = "999888777666"The Keystone endpoint setting takes the following form:OPENSTACK_KEYSTONE_URL = "http://mykeystoneurl:5000/v2.0/".Object Storage Configuration (optional)If a Swift endpoint is available and configured in the Keystone service catalog turning on the Swift UI is as simple as adding the following to local_settings.py: + + SWIFT_ENABLED = TrueQuantum Configuration (optional)Quantum currently requires the following settings: +QUANTUM_ENABLED = True +QUANTUM_URL = '127.0.0.1' +QUANTUM_PORT = '9696' +QUANTUM_TENANT = '1234' +QUANTUM_CLIENT_VERSION='0.1' + Install the DashboardAfter Dashboard has been configured install the Dashboard virtual environment using the terminal commands below:NoteNote: the instructions below are for Ubuntu, however, setuptools can be installed on a wide variety of platforms: http://pypi.python.org/pypi/setuptools$ apt-get install -y python-setuptools + $ sudo easy_install virtualenv + $ python tools/install_venv.pyInstalling the virtual environment will take some time depending on download speeds. Run the ServerDashboard is run using the standard Django manage.py script from the context of the virtual environment. Run the server on a high port value so that you can validate the installation.tools/with_venv.sh dashboard/manage.py runserver 0.0.0.0:8000Make sure that your firewall isn't blocking TCP/8000 and just point your browser at this server on port 8000. If you are running the server on the same machine as your browser, this would be "http://localhost:8000". 
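If you want a quick smoke test without a browser, you can ask the development server for the login page from the same machine; a minimal check, assuming the server is still running on port 8000 as above:
curl -I http://localhost:8000
An HTTP response (typically a 200 or a redirect to the login page) tells you the dashboard itself is up, and that any remaining problems are more likely in the Keystone settings in local_settings.py.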
Getting Started with the VNC ProxyGetting Started with the VNC Proxy The VNC Proxy is an OpenStack component that allows users of Nova to access their instances through a websocket enabled browser (like Google Chrome 4.0). See http://caniuse.com/#search=websocket for a reference list of supported web browsers. A VNC Connection works like so: - + User connects over an API and gets a URL like http://ip:port/?token=xyz - + User pastes URL in browser - + Browser connects to VNC Proxy though a websocket enabled client like noVNC - + VNC Proxy authorizes users token, maps the token to a host and port of an instance's VNC server - + VNC Proxy initiates connection to VNC server, and continues proxying until the session ends Configuring the VNC ProxyConfiguring the VNC ProxyThe nova-vncproxy requires a websocket enabled html client to work properly. At this time, @@ -2652,33 +2707,33 @@ python tools/install_venv.py /var/lib/nova/noVNC. nova-vncproxy will fail to launch until this code is properly installed. By default, nova-vncproxy binds 0.0.0.0:6080. This can be configured with: - + --vncproxy_port=[port] - + --vncproxy_host=[host] Enabling VNC Consoles in NovaEnabling VNC Consoles in Nova At the moment, VNC support is supported only when using libvirt. To enable VNC Console, configure the following flags in the nova.conf file: - + --vnc_console_proxy_url=http://[proxy_host]:[proxy_port] - proxy_port defaults to 6080. This URL must point to nova-vncproxy - + --vnc_enabled=[True|False] - defaults to True. If this flag is not set your instances will launch without VNC support. Getting an Instance's VNC ConsoleGetting an Instance's VNC Console You can access an instance's VNC Console URL in the following methods: - + Using the direct api: eg: 'stack --user=admin --project=admin compute get_vnc_console instance_id=1' - + Support for Dashboard, and the Openstack API will be forthcoming At the moment, VNC Consoles are only supported through the web browser, but more general VNC support is in the works. - OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk OpenStack Compute Administration ManualAug 19, 2011trunk 10. OpenStack Compute TutorialsWe want OpenStack to make sense, and sometimes the best way to make sense of the cloud is to try out some basic ideas with cloud computing. Flexible, elastic, and scalable are a few attributes of cloud computing, so these tutorials show various ways to use virtual computing or web-based storage with OpenStack components.Running Your First Elastic Web Application on the CloudRunning Your First Elastic Web Application on the CloudIn this OpenStack Compute tutorial, we’ll walk through the creation of an elastic, + OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo OpenStack Compute Administration ManualSep 22, 2011Diablo 10. OpenStack Compute TutorialsWe want OpenStack to make sense, and sometimes the best way to make sense of the cloud is to try out some basic ideas with cloud computing. 
Flexible, elastic, and scalable are a few attributes of cloud computing, so these tutorials show various ways to use virtual computing or web-based storage with OpenStack components.Running Your First Elastic Web Application on the CloudRunning Your First Elastic Web Application on the CloudIn this OpenStack Compute tutorial, we’ll walk through the creation of an elastic, scalable cloud running a WordPress installation on a few virtual machines.The tutorial assumes you have OpenStack Compute already installed on Ubuntu 10.04. You can tell OpenStack Compute is installed by running "sudo nova-manage service list" to ensure it is installed and the necessary services are running and ready. You should see @@ -2687,27 +2742,28 @@ python tools/install_venv.py user with sudo access.If you haven't installed OpenStack Compute yet, you can use an ISO image that is based on a Ubuntu Linux Server 10.04 LTS distribution containing only the components needed to run OpenStack Compute. See http://sourceforge.net/projects/stackops/files/ for download files and - information, license information, and a README file to get started.We'll go through this tutorial in parts:Setting up a user, project, and network for this cloud.Getting images for your application servers.On the instances you spin up, installing Wordpress and its dependencies, the Memcached plugin, and multiple memcache servers.Part I: Setting Up the Cloud InfrastructurePart I: Setting Up the Cloud InfrastructureIn this part, we'll get the networking layer set up based on what we think most + information, license information, and a README file to get started.We'll go through this tutorial in parts:Setting up a user, project, and network for this cloud.Getting images for your application servers.On the instances you spin up, installing Wordpress and its dependencies, the Memcached plugin, and multiple memcache servers.Part I: Setting Up the Cloud InfrastructurePart I: Setting Up the Cloud InfrastructureIn this part, we'll get the networking layer set up based on what we think most networks would work like. We'll also create a user and a project to house our cloud - and its network. Onward, brave cloud pioneers! Configuring the networkIdeally on large OpenStack Compute deployments, each project is in a protected + and its network. Onward, brave cloud pioneers! Configuring the networkIdeally on large OpenStack Compute deployments, each project is in a protected network segment. Our project in this case is a LAMP stack running Wordpress with the Memcached plugin for added database efficiency. So we need a public IP address for the Wordpress server but we can use flat networking for this. Here's how you set those network settings. Usually networking is set in nova.conf, but VLAN-based networking with DHCP is the default setting when no network manager is defined in nova.conf. To check this network setting, open your nova.conf, typically in /etc/nova/nova.conf and - look for -network_manager. The possible options are:-network_manager=nova.network.manager.FlatManager for a simple, - no-VLAN networking type, -network_manager=nova.network.manager.FlatDHCPManager for flat - networking with a built-in DHCP server, -network_manager= nova.network.manager.VlanManager, which is the most + look for -network_manager. 
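If you prefer not to open the file, you can grep for the setting directly; a one-line check, assuming the default file location:
grep network_manager /etc/nova/nova.conf
If the command prints nothing, no network manager has been set explicitly and the default applies.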
The possible options are:-network_manager=nova.network.manager.FlatManager for a simple, + no-VLAN networking type, -network_manager=nova.network.manager.FlatDHCPManager for flat + networking with a built-in DHCP server, -network_manager= nova.network.manager.VlanManager, which is the most tested in production but requires network hardware with VLAN tagging.Here is an example nova.conf for a single node installation of OpenStack - Compute.# Sets the network type + Compute.# Sets the network type --network_manager=nova.network.manager.FlatManager # Sets whether to use IPV6 addresses --use_ipv6=false # DHCP bridge information --dhcpbridge_flagfile=/etc/nova/nova.conf --dhcpbridge=nova-dhcpbridge +--flat_network_bridge=br100 --logdir=/var/log/nova # Top-level directory for maintaining nova's state --state_path=/var/lib/nova @@ -2733,44 +2789,44 @@ python tools/install_venv.py tutorial. Be careful when setting up --flat_interface in nova.conf, if you specify an interface that already has an IP it will break and if this is the interface you are connecting through with SSH, you cannot fix it unless you have - ipmi/console access. For this tutorial, we set a 24 value for network_size, the number of addresses + ipmi/console access. Also the --flat_network_bridge is now required.For this tutorial, we set a 24 value for network_size, the number of addresses in each private subnet, since that falls inside the /12 CIDR-notated range that's set in ‘fixed-range’ in nova.conf. We probably won't use that many at first, but it's good to have the room to scale.Currently, there can only be one network set in nova.conf. When you issue the nova-manage network create command, it uses the settings in the nova.conf flag file. From the --fixed_range setting, iptables are set. Those iptables are - regenerated each time the nova-network service restarts, also. NoteThe nova-manage service assumes that the first IP address is your network + regenerated each time the nova-network service restarts, also. NoteThe nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db ‘networks’ - table.o but that scenario shouldn't happen for this tutorial.Run this command as root or sudo: nova-manage network create public 192.168.3.0/12 1 256On running this command, entries are made in the ‘networks’ and ‘fixed_ips’ + table.o but that scenario shouldn't happen for this tutorial.Run this command as root or sudo: nova-manage network create public 192.168.3.0/12 1 256On running this command, entries are made in the ‘networks’ and ‘fixed_ips’ table in the nova database. However, one of the networks listed in the ‘networks’ table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. Next you want to integrate this network bridge, named br100, into your - network. A bridge connects two Ethernet segments together.Ensure the Database is Up-to-dateThe first command you run using nova-manage is one called db sync, which - ensures that your database is updated. You must run this as root.nova-manage db syncCreating a userOpenStack Compute can run many projects for many users, so for our tutorial + network. 
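Whether you create the bridge yourself or let nova-network set it up, you can confirm that it is present on the host; a minimal check, assuming the bridge-utils package is installed and the bridge keeps the name br100 used in this tutorial:
brctl show br100
The output lists the bridge and any interfaces currently attached to it; the exact interfaces depend on how your physical network interface is attached.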
A bridge connects two Ethernet segments together.Ensure the Database is Up-to-dateThe first command you run using nova-manage is one called db sync, which + ensures that your database is updated. You must run this as root.nova-manage db syncCreating a userOpenStack Compute can run many projects for many users, so for our tutorial we'll create a user and project just for this scenario. We control the actions a user can take through roles, such as admin for Administrator who has complete system access, itsec for IT Security, netadmin for Network Administrator, and so on.In addition to these roles controlling access to the Eucalyptus API, credentials are supplied and bundled by OpenStack compute in a zip file when you create a project. The user accessing the cloud infrastructure through ec2 commands are given an access and secret key through the project itself. Let's - create a user that has the access we want for this project.To add an admin user named cloudypants, use:nova-manage user admin cloudypantsCreating a project and related credentialsNext we'll create the project, which in turn gives you certifications in a zip + create a user that has the access we want for this project.To add an admin user named cloudypants, use:nova-manage user admin cloudypantsCreating a project and related credentialsNext we'll create the project, which in turn gives you certifications in a zip file.Enter this command to create a project named wpscales as the admin user, - cloudypants, that you created above.nova-manage project create wpscales cloudypantsGreat, now you have a project that is set apart from the rest of the clouds + cloudypants, that you created above.nova-manage project create wpscales cloudypantsGreat, now you have a project that is set apart from the rest of the clouds you might control with OpenStack Compute. Now you need to give the user some credentials so they can run commands for the instances with in that project's cloud. These are the certs you will use to launch instances, bundle images, and all the other assorted API and command-line functions.First, we'll create a directory that'll house these credentials, in this case in the root directory. You need to sudo here or save this to your own directory with 'mkdir -p ~/creds' so that the credentials match the user and are stored in - their home.mkdir –p /root/credsNow, run nova-manage to create a zip file for your project called wpscales - with the user cloudypants (the admin user we created previously). sudo nova-manage project zipfile wpscales cloudypants /root/creds/novacreds.zipNext, you can unzip novacreds.zip in your home directory, and add these - credentials to your environment. unzip /root/creds/novacreds.zip -d /root/creds/Sending that information and sourcing it as part of your .bashrc file - remembers those credentials for next time.cat /root/creds/novarc >> ~/.bashrc + their home.mkdir –p /root/credsNow, run nova-manage to create a zip file for your project called wpscales + with the user cloudypants (the admin user we created previously). sudo nova-manage project zipfile wpscales cloudypants /root/creds/novacreds.zipNext, you can unzip novacreds.zip in your home directory, and add these + credentials to your environment. 
unzip /root/creds/novacreds.zip -d /root/creds/Sending that information and sourcing it as part of your .bashrc file + remembers those credentials for next time.cat /root/creds/novarc >> ~/.bashrc source ~/.bashrcOkay, you've created the basic scaffolding for your cloud so that you can get - some images and run instances. Onward to Part II!Part II: Getting Virtual Machines to Run the Virtual ServersPart II: Getting Virtual Machines to Run the Virtual ServersUnderstanding what you can do with cloud computing means you should have a grasp + some images and run instances. Onward to Part II!Part II: Getting Virtual Machines to Run the Virtual ServersPart II: Getting Virtual Machines to Run the Virtual ServersUnderstanding what you can do with cloud computing means you should have a grasp on the concept of virtualization. With virtualization, you can run operating systems and applications on virtual machines instead of physical computers. To use a virtual machine, you must have an image that contains all the information about which @@ -2779,7 +2835,7 @@ source ~/.bashrcHere are the commands to get your virtual image. Be aware that the download of the - compressed file may take a few minutes.image="ubuntu1010-UEC-localuser-image.tar.gz" + compressed file may take a few minutes.image="ubuntu1010-UEC-localuser-image.tar.gz" wget http://c0179148.cdn1.cloudfiles.rackspacecloud.com/ ubuntu1010-UEC-localuser-image.tar.gz uec-publish-tarball $image wpbucket x86_64What you'll get in return from this command is three references: emi, eri and eki. @@ -2788,25 +2844,25 @@ uec-publish-tarball $image wpbucket x86_64You need to use the emi value when you run the instance. These look something like “ami-zqkyh9th″ - basically a unique identifier.Okay, now that you have your image and it's published, realize that it has to be decompressed before you can launch an image from it. We can realize what state an - image is in using the 'euca-describe-instances' command. Basically, run:euca-describe-instancesand look for the state in the text that returns. You can also use + image is in using the 'euca-describe-instances' command. Basically, run:euca-describe-instancesand look for the state in the text that returns. You can also use euca-describe-images to ensure the image is untarred. Wait until the state shows - "available" so that you know the instances is ready to roll.Part III: Installing the Needed Software for the Web-Scale ScenarioPart III: Installing the Needed Software for the Web-Scale ScenarioOnce that state is "available" you can enter this command, which will use your + "available" so that you know the instances is ready to roll.Part III: Installing the Needed Software for the Web-Scale ScenarioPart III: Installing the Needed Software for the Web-Scale ScenarioOnce that state is "available" you can enter this command, which will use your credentials to start up the instance with the identifier you got by publishing the - image.emi=ami-zqkyh9th + image.emi=ami-zqkyh9th euca-run-instances $emi -k mykey -t m1.tinyNow you can look at the state of the running instances by using euca-describe-instances again. The instance will go from “launching” to “running” in a short time, and you should be able to connect via SSH. 
Look at the IP addresses so that you can connect to the instance once it starts running. Basically, launch a terminal window from any computer, and enter:

ssh -i mykey ubuntu@10.127.35.119

On this particular image, the 'ubuntu' user has been set up as part of the sudoers group, so you can escalate to 'root' via the following command:

sudo -i

On the first VM, install WordPress

Now you can install WordPress. Create and then switch to a blog directory:

mkdir blog
cd blog

Download WordPress directly with wget:

wget http://wordpress.org/latest.tar.gz

Then unzip the package using:

tar -xzvf latest.tar.gz

The WordPress package will extract into a folder called wordpress in the same directory where you downloaded latest.tar.gz. Next, enter "exit" and disconnect from this SSH session.

On a second VM, install MySQL

Next, SSH into another virtual machine and install MySQL, then use these instructions to install the WordPress database using the MySQL client from a command line: Using the MySQL Client - Wordpress Codex.

On a third VM, install Memcache

Memcache makes WordPress database reads and writes more efficient, so your virtual servers can go to work for you in a scalable manner. SSH to a third virtual machine and install Memcache:

apt-get install memcached

Configure the WordPress Memcache plugin

From a web browser, point to the IP address of your WordPress server. Download and install the Memcache plugin. Enter the IP address of your Memcache server.
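Putting the three machines together, here is a condensed sketch of what runs where. The IP address placeholders and the mysql-server package name are assumptions for illustration; the actual WordPress database setup follows the Codex instructions linked above.

# VM 1 - WordPress
ssh -i mykey ubuntu@<wordpress-vm-ip>
mkdir blog && cd blog
wget http://wordpress.org/latest.tar.gz
tar -xzvf latest.tar.gz

# VM 2 - MySQL (then create the WordPress database per the Codex instructions)
ssh -i mykey ubuntu@<mysql-vm-ip>
sudo apt-get install mysql-server

# VM 3 - Memcache
ssh -i mykey ubuntu@<memcache-vm-ip>
sudo apt-get install memcached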
Running a Blog in the Cloud

That's it! You're now running your blog on a cloud server in OpenStack Compute, and you've scaled it horizontally using additional virtual images to run the database and Memcache. Now if your blog gets a big boost of comments, you'll be ready for the extra reads and writes to the database.

11. Support and Troubleshooting

Online resources aid in supporting OpenStack, and community members are willing and able to answer questions and help track down suspected bugs. We are constantly improving and adding to the main features of OpenStack, but if you have any problems, do not hesitate to ask. Here are some ideas for supporting OpenStack and troubleshooting your existing installations.

Community Support

Here are some places you can locate others who want to help.

The Launchpad Answers area

During setup or testing, you may have questions about how to do something, or end up in a situation where you can't seem to get a feature to work correctly. One place to look for help is the Answers section on Launchpad. Launchpad is the "home" for the project code and its developers, and thus is a natural place to ask about the project, and so on. The Launchpad Answers areas are available here:

OpenStack Compute: https://answers.launchpad.net/nova
OpenStack Object Storage: https://answers.launchpad.net/swift

OpenStack mailing list

Posting your question or scenario to the OpenStack mailing list is a great way to get answers and insights. You can learn from and help others who may have the same scenario as you. Go to https://launchpad.net/~openstack and click "Subscribe to mailing list", or view the archives at https://lists.launchpad.net/openstack/.

The OpenStack Wiki search

The OpenStack wiki contains content on a broad range of topics, but some of it sits a bit below the surface. Fortunately, the wiki search feature is very powerful in that it can do both searches by title and by content. If you are searching for specific information, say about "networking" or "api" for nova, you can find lots of content using the search feature. More is being added all the time, so be sure to check back often. You can find the search box in the upper right hand corner of any OpenStack wiki page.

The Launchpad Bugs area

So you think you've found a bug.
That's great! Seriously, it is. The OpenStack community values your setup and testing efforts and wants your feedback. To log a bug you must have a Launchpad account, so sign up at https://launchpad.net/+login if you do not already have a Launchpad ID. You can view existing bugs and report your bug in the Launchpad Bugs area. It is suggested that you first use the search facility to see if the bug you found has already been reported (or even better, already fixed). If it still seems like your bug is new or unreported, then it is time to fill out a bug report.

Some tips:
- Give a clear, concise summary!
- Provide as much detail as possible in the description. Paste in your command output or stack traces, link to screenshots, and so on.
- Be sure to include what version of the software you are using. This is especially critical if you are using a development branch, e.g. "Austin release" vs. lp:nova rev. 396.
- Any deployment-specific info is helpful as well, e.g. Ubuntu 10.04, multi-node install.

The Launchpad Bugs areas are available here:

OpenStack Compute: https://bugs.launchpad.net/nova
OpenStack Object Storage: https://bugs.launchpad.net/swift

The OpenStack IRC channel

The OpenStack community lives and breathes in the #openstack IRC channel on the Freenode network. You can come by to hang out, ask questions, or get immediate feedback for urgent and pressing issues. To get into the IRC channel you need to install an IRC client or use a browser-based client. The channel is: #openstack on irc.freenode.net.

Troubleshooting OpenStack Object Storage

For OpenStack Object Storage, everything is logged in /var/log/syslog (or messages on some distros). Several settings enable further customization of logging, such as log_name, log_facility, and log_level, within the object server configuration files.

Handling Drive Failure

In the event that a drive has failed, the first step is to make sure the drive is unmounted. This will make it easier for OpenStack Object Storage to work around the failure until it has been resolved. If the drive is going to be replaced immediately, then it is just best to replace the drive, format it, remount it, and let replication fill it up.

If the drive can't be replaced immediately, then it is best to leave it unmounted, and remove the drive from the ring. This will allow all the replicas that were on that drive to be replicated elsewhere until the drive is replaced. Once the drive is replaced, it can be re-added to the ring.
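A minimal sketch of that unmount-and-remove step is shown below. The device name, builder file names, ports, and the zone/IP search value are illustrative assumptions; use the values the device was originally added with, rebalance, and then redistribute the updated rings as usual.

# Unmount the failed drive so Object Storage can work around it (device name assumed).
umount /srv/node/sdb1

# Remove the device from each ring and rebalance (search values are illustrative).
swift-ring-builder account.builder remove z1-10.0.0.1:6002/sdb1
swift-ring-builder container.builder remove z1-10.0.0.1:6001/sdb1
swift-ring-builder object.builder remove z1-10.0.0.1:6000/sdb1
swift-ring-builder account.builder rebalance
swift-ring-builder container.builder rebalance
swift-ring-builder object.builder rebalance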
Handling Server Failure

If a server is having hardware issues, it is a good idea to make sure the OpenStack Object Storage services are not running. This will allow OpenStack Object Storage to work around the failure while you troubleshoot.

If the server just needs a reboot, or a small amount of work that should only last a couple of hours, then it is probably best to let OpenStack Object Storage work around the failure and get the machine fixed and back online. When the machine comes back online, replication will make sure that anything that is missing during the downtime will get updated.

If the server has more serious issues, then it is probably best to remove all of the server's devices from the ring. Once the server has been repaired and is back online, the server's devices can be added back into the ring. It is important that the devices are reformatted before putting them back into the ring, as each device is likely to be responsible for a different set of partitions than before.

Detecting Failed Drives

It has been our experience that when a drive is about to fail, error messages will spew into /var/log/kern.log. There is a script called swift-drive-audit that can be run via cron to watch for bad drives. If errors are detected, it will unmount the bad drive so that OpenStack Object Storage can work around it. The script takes a configuration file with the following settings:
[drive-audit]

Option        Default     Description
log_facility  LOG_LOCAL0  Syslog log facility
log_level     INFO        Log level
device_dir    /srv/node   Directory devices are mounted under
minutes       60          Number of minutes to look back in /var/log/kern.log
error_limit   1           Number of errors to find before a device is unmounted

This script has only been tested on Ubuntu 10.04, so if you are using a different distro or OS, some care should be taken before using in production.
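Put together, a drive-audit configuration file and a cron entry to run the script might look like the sketch below. The file location and schedule are assumptions; the option values simply restate the defaults from the table above.

# /etc/swift/drive-audit.conf (location assumed) - values are the documented defaults
[drive-audit]
log_facility = LOG_LOCAL0
log_level = INFO
device_dir = /srv/node
minutes = 60
error_limit = 1

# crontab entry (schedule assumed): check for failing drives once an hour
0 * * * * root /usr/bin/swift-drive-audit /etc/swift/drive-audit.conf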
Troubleshooting OpenStack Compute

Common problems for Compute typically involve misconfigured networking or credentials that are not sourced properly in the environment. Also, most flat networking configurations do not enable ping or ssh from a compute node to the instances running on that node. Another common problem is trying to run 32-bit images on a 64-bit compute node. This section offers more information about how to troubleshoot Compute.

Log files for OpenStack Compute

Log files are stored in /var/log/nova and there is a log file for each service, for example nova-compute.log. You can format the log strings using flags for the nova.log module. The flags used to set format strings are: logging_context_format_string and logging_default_format_string. If the log level is set to debug, you can also specify logging_debug_format_suffix to append extra formatting. For information about what variables are available for the formatter see: http://docs.python.org/library/logging.html#formatter

You have two options for logging for OpenStack Compute based on configuration settings. In nova.conf, include the --logfile flag to enable logging. Alternatively you can set --use_syslog=1, and then the nova daemon logs to syslog.
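For example, the relevant lines in nova.conf might look like one of the two sketches below; the log file name is an illustrative assumption, while the flags themselves come from the text above.

# Option 1: log to a file (file name is illustrative)
--logfile=nova.log

# Option 2: send logs to syslog instead
--use_syslog=1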
Common Errors and Fixes for OpenStack Compute

The Launchpad Answers site offers a place to ask and answer questions, and you can also mark questions as frequently asked questions. This section describes some errors people have posted to Launchpad Answers and IRC. We are constantly fixing bugs, so online resources are a great way to get the most up-to-date errors and fixes.

Credential errors, 401, 403 forbidden errors

A 403 forbidden error is caused by missing credentials. Through current installation methods, there are basically two ways to get the novarc file. The manual method requires getting it from within a project zipfile, and the scripted method just generates novarc out of the project zip file and sources it for you. If you do the manual method through a zip file and then source the novarc alone, you end up losing the creds that are tied to the user you created with nova-manage in the steps before.

When you run nova-api the first time, it generates the certificate authority information, including openssl.cnf. If it gets started out of order, you may not be able to create your zip file. Once your CA information is available, you should be able to go back to nova-manage to create your zipfile. You may also need to check your proxy settings to see if they are causing problems with the novarc creation.

Instance errors

Sometimes a particular instance shows "pending" or you cannot SSH to it. Sometimes the image itself is the problem. For example, when using flat manager networking, you do not have a DHCP server, and an ami-tiny image doesn't support interface injection, so you cannot connect to it. The fix for this type of problem is to use an Ubuntu image, which should obtain an IP address correctly with FlatManager network settings.

To troubleshoot other possible problems with an instance, such as one that stays in a spawning state, first check your instances directory for the i-ze0bnh1q dir to make sure it has the following files:

- libvirt.xml
- disk
- disk-raw
- kernel
- ramdisk
- console.log (once the instance actually starts you should see a console.log)

Check the file sizes to see if they are reasonable. If any are missing/zero/very small, then nova-compute has somehow not completed the download of the images from objectstore. Also check nova-compute.log for exceptions; sometimes they don't show up in the console output.

Next, check the /var/log/libvirt/qemu/i-ze0bnh1q.log file to see if it exists and has any useful error messages in it.

Finally, from the instances/i-ze0bnh1q directory, try virsh create libvirt.xml and see if you get an error there.
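As a quick checklist, the same investigation can be run as a handful of commands. The instances path shown is a typical default and the instance ID is the example from above; treat both as assumptions and adapt them to your deployment.

# Check that the expected instance files exist and have sensible sizes
# (default instances path assumed; substitute your own instance ID).
ls -lh /var/lib/nova/instances/i-ze0bnh1q/

# Look for exceptions that never made it to the console output
grep -i error /var/log/nova/nova-compute.log

# Inspect the libvirt/qemu log for this instance
cat /var/log/libvirt/qemu/i-ze0bnh1q.log

# Try booting the domain by hand to surface libvirt errors
cd /var/lib/nova/instances/i-ze0bnh1q && virsh create libvirt.xml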
zJ51{LE%v|AZ5Ne2y6uv2XFCBcd^WZaS$B3Pz`slnncQI&Xq~kOq5qlSfc`=-&Yhvy zk26&45YD1TV06K-8kic{?xzhfaULMX&Ta*&ch^&Ef4zUE7T~}g!i#`y7P9O2!J`PU zTS&Z(&8_tH%wc+l7N(XkJxfCvc!xj_AdCe3weekQ>~(jS8hhOZxa>`mKjQ*H4)syQ z-o_kebWhL1+WbFsa<`p*ojgjsy~($ayV$P4u0f#*%*~-e{x{=#kgCA5Xotvq_)w5()(Fgo#e_*8ddMpIn^@G?~6B40AH68BP z&YK%BAddiRng6|eyQ90;)NXJ0h${r!^$pnH@&U>8cZ%x4B#<#GV8S+fmIfvVdbLN` zot6(17C7zg4EqjYe zc|bzbz{uL}09AiW_+6^*brq8EI}EhHp+6D_T6A|j3&p`{080J#l7+y4n_$esdbUQ; zuSiK|V`^jr0mUp0tPG*~#jIv(DP(DDx(9%qf`6YEfP3X0aSpa5YGi9*V`^<@g{%y! zT~^NmxaNup2?+@cF^GVL%u2$_&cxI}$fKZx~f?$X0I{=4noN0!@@A#JkJ)r0 z@jNZ;6zoB(TzqS@Z@Pzjp!h_a#OY7ul$2xvPCqNNaaaFJ+_-~hAtP@M5;+4P9ia3S3+!%~m=YJOQV6Im;s9`PuH8U#3 z?ZpMStT6pF+uS9TZOWj>&n^(UoNE>ky)bQBjp~9QErGFtH=?v?Er6BZj`~o@7h{g) zl_}FmyLLIuW777K&()Mu_kvfhzAGjAnZnZ{IZz=Gp#AC+Y++I``JpP+#UboAW4B55 zca~@<5u*ZCci^qI53#J>YJ^3Zf6c;{uLhr75)|c%g=q^`St~1Gclf*dzkYZ<8z-dv zJ;6{r>ep7aHaYI-i&HAoIP+D7xOE@Zc|5fR^_!c|Oyb&PEom$*yZBHEwj6Ut`K}_k z?l$1ob&c`YvG-_brI^03HO^SI)7K?2+!0bMq^I~sE4XKI(2PmtFQ2L-ymYL>xk3lV z*C;q6xQueu?WrVI2byTJkoX98>;u>h-qY5X{820J6Yxp7e={7C^LDKNaQgajO}g?J zlzWP|+;p{A`v^hQZX(Z1*`TH>(M}Y)G4W(o{_b;&9gOTWJ6SpQfi>dK(|7T>yL~#5#Ekn*ZS9jU!{9CEyvzj* zUj_0Ot`%FVmfL}&3T7Q)v2p%|SZNG#`UVz(jMR+Em`~1CUyHGdSoYE!Uw!VnI=j_) z*Er`+Q=q}hIms0zlA!P@%tzJ19~C?@P|S6ou%gf9qU(`t z?AoaPlox@rh_>b>TN^6x*G^x>S1LJE5HppSE*$sz6|h^d;^7l+qPR+4Jzntng0jcs z!Sk{sR>e%22|~Q(6*<^@SIp8eHHsMJR6wS)2E#7%X;xd_FH(kBG zv(S>M?x(1QGEH@^`-*ft8{@2vW4y(Htu>z?bmJklvO{oI)00q2ZtRnThd)Kn)M z+dSvWQb@youWPKa;(}>U;tR=ye2Yt!tG<*>HCDn~$4z7!kLRhxbqe}1q_d-urYpoM z5S!#)V)i$0Rph(Q*S{=6Q;bMq>$`HjC4U%w30F=wDf^gXkKeUg>SK)@uvLX=q7-hF zM|Q<3Vj>eAet4d#DnWIX@hb+Zgv+j{w+OaTOLX$`iY(NRnULSTsD|-w&hShF`wSlD zhebud1{!ZJD+5GbUydT)i)x_ch%37!}cIW9v)CdQ|D)l#?J?$Pjb1(9#R=puKqmtafgWh(7+%$5pjaQXLx+$6Ek@CuHoW9OJ+Qrs$16)_gBVB*^XiUmoEzUO+ z)=ph)e%p{%{Y{WW#g6v@mdw+eS$A%tzkdCAs9BEx@i>3Q5{mBgS9Xs+=+n~R8RJm= zWOXJS7(mH25*vzJkG8<4aE$$0pjOa~n-PtFroU&ZUyb0wYIhlqg!R3pI83gVt4;`t z$rXa?LigunT{mJirZ0#mH`kFq>*43;^y(RXMe(-ln^i<KfDfQvVeK zb_ZgINeVuNuxS%?Eos=Y#6va8EF3f~tO;w|&QUftGGo2w(tzp-Y&Emgi!FmJxvvxH z3QePmyF+@_i!JQs`^>D>7a~*N^j%St5bX`KWNHn))o>Cu-}(8&LA1?Dlrc4okAb6I zjH>FNIAT$Qc<#gRM4ft$@!qsWRRt^B%yKN>>uyAF39L52ztru5qmWCK^0U{@dSzM9 z2X0$3Pcxj=dY>d77i{R&`)$!nTpQRO!< z{ejQb!-$e*Je_Ga`h)NJ8!dRp2xUE9(hur4y2KyKyuurN7ipvD*&ns1Ts+B)psBbuA&el9nFD83-zN>JlS z&x6M;{9(+ovZDqa-p3`njsIYKvhEm9_v1S-qw6g1)-~*H>#w0(+PY~)1RLd<*F)~{gCafAwCC*x=~5V6XWEjl&0C|AWmR;so_rUgSvJbT%4y(~o$Y#=DU8V@Esd6( z_}4oc@mt;{3ki=_vMhPJjYh70Wk|P~=?oa)O?DQE&tNhW=ay!fjI+&g$`uYVi+TC- zkxT<3;)?sK%*kR(Poqxr#)^Dlg*)@qy3Q9#$?dFDJ=_Dh{PL51J+Uo%r+Fg7zoyM6 z8eht`nkmn;K#*Cjn?~{EmwY!`3UIjme$lwOqd>KL>mk{Byrs+J7~K}Jq2+Jy4C3Gr z%vLzRtGzJoaH+JesPjhdBn@2glkTVI7SYkN1{iN}xkt{WTDMSan8e@quV*qJ*I;Yt z@c;f=thjx?+le@rx5EV8*cH(#O4K^_Q$oBqcNI(cYip5qqUOtiHU~Qk;}@dQ#K&SZ zDO*CnX1oi2fAj0xrD0Lk1^e#NlZ>%7-LI5j-q<9Q5?6>uZ0p~?o_mY=PB!{g6@My2 z6#asf-V>#c@vck0l_*{eS>jZ+AKlblh3e}bwckXPi)Yyxb%YX)qq(fx7uQ-ex0tzN zGEIw9-??AW^9V8eUO>KtEA@q-?r_an{Q4-;p=+M^y|q7)T^8+%t_ymUD<@hS7mzLE z9FniKg2UDMl>6j1L9*?t@ERrVEtIuH|d5b)|p7mSb^&-+l3OBd-H`|inwfk`6%gSun0YT zj<$K$l|3|B<>f5_{di2j*JoI%WlATLER|w4*up-~y^?0EqWi8Eu`R=Nik@Py+QlP( zC4h|E=KcgqvB!I97B2BDCDT6X^gy{9l*F@Xk0wWE0u;DIIlhUrQ8i%FV8-M!j{6zZY}1SW;)q`B)kUV>>Ri9v#w&H45FA^Q?i)6c(}@jqA$*N}`HYF|J{0LCXL58*%MiQGC$kY_8#H({qqkV05W3>z~kUu-TkwJ9OusqF^C*QrDyOeC`7rA7YmI}9=eRESv+N;O+C5%Jo$C z3dW|zH0Q0Ry$-bG(WvQ6Hz{h+EUWp#u8Igz*u|y z^ZT=yhFrKbo;p*(KPrTljGoS&?yTe`UJ@wmzPJ%vq3EZmy{ zl(CcE_^$d3?Hp4B+V}7J)O%7iQd>USIi_4>Q%`Bshzx(J-&powz({wnR^xg8BKM2x zgqr-jckaI|GH23v_U^@IzH(=I;`QUJxuenkL@(;+Lo)jPUpxxop=#pect4z=eZKWp 
z^J=d{;>Q;wThiu~#J-M-KSfSypX|yKsgA+VF4KC`P@DJ?6E_a!(VTuGzG8ErLv~rE zmV?eXFRT5ShmFkQ{96wVaMrql~`74^l}?FKW`7=TKG5jsW=6R&gB0YpoWD2=@VnC+?Vy*P=J9M?== zPagk}qMS=_%A0-JKaL>HX8Xlu0;BP5469Yqi-}@S<^}SH{D`Pdf1WZDkPiJxaNj@2 zN~X=!iD^yh4F=ttm2(lAJwg*KzZ{&1wm;~f?@GNVh*MhrMfcZ0UtdXCVVI-9iG?rJ zb24jh!YKXXj;W(q8W4ZGB2<)L6nDAr=8xfFneT(NH+5J_M~7~`&6DW5snY+d0(I+r zz4XYh@~Ia$1Sl|uNm8FbZ7|h5uVrb}T-REH)_QX*QpGeipDXW#@W33CUscMv8_oqT ziM_)&hU>`c$mheII2f)!X$c7QkS1Eqtk?7UVs3vflJyNXg5WH;|BZ`+&S%o_RR+gY z_}Y8*40T?&B0@*@_mAImXK?e{VmsV+c(3jFZYg=PdGicjR@oMhEFXDQKkhpcU0hZ* z($LRdvmMbt`ZXAh>^Y1QzkM%=Elp2>eTb4fAH?8eX6D^-{VeAi5qI>)yy5A(^vckm zRZ_WFx}T3bQ-H7;0uL#jR$Yhj)r-+qmRv(;ZN}GN>(u59B&#gLBHKiP3`4Y!1w<3d zf<-AmxF*&)OrRJ}IK9sKa2HcHyFdij-d(|TBtzUNn=I~EZw1M9^hv5D&7s-MJg$IW zH*-6aeEFinI2fim@os9cCGsfM54d+!E`Awynl>=aO81~ zkE4XJ%bwD@C@7~h7&OPumFDERg8H{iq6zKF#nMziGKZss$jw@Y_@;G_fBRb~;zX1k# zU=FMDjND^k=1JZ}rzh(e^Eg`%KYtEaT=2c05q#|##RAvkR5Z)ZU_!%}XADuhy;7^$ zA_#FjY8)LOU<|5kQ(fb6stas+gS#bSeJ0~9gEKKp*^}l`ho9?)s!bXj-mk_CT*YAW z>ckJjb+k`Uy`wH)t;Ns%s5gB&B^!r+d?^;s*vUbL=2*%T)yyFqN(>@Rd@Aawi6=aH z2Bl@xO_>bjO9go3-4@bQ<6TxopDnRp`2Q&RPD}Ti+0x|ByR<-J8j_e9I<*wIaBY#S zYtzk^JIi&>QI5BwNM zq~^l&!b01sX@9uGtaeYZrk2)NyQIp;fKNYC3k5j+y1IJ5M~^-K;Nm3PAEF)^GZ4b? zd9nr1&wRc0^Vh7*+m{DP(}%t<8^E)PDrMy#;qoZ1PiPzDIk|*KZt}WJW4K&Z_T1d^ znk-wOl;1|9$zIfibh3wxuShNBp6Ytfo8#YxL|)&f#MnA^OsE!n;cr#{F@8YW&Htie zMk-VPsF?q|0(ei!4Aix|T5%>xWc^kumZp$~6vhhQwU_K^cnyIq3)0ov!ZcXfkj4^Z z+zJS1sOvkhJtFMGbD!G#KjQ?$hE%@FRub1lW%R87USU{%8vly{@B2GIJm(MEZr~!2rdGZ1NXXNaL@-R z4?FTUG-wNWcOlc@4mt;@mkYWN4vbM8piT%cZh$u@z=H#{#R)vRpjf7yVC*@8lNW>+ z(DEbu#0q|g>V&BAyN|$HvhyAE4Md}b{@8#46$yj#fj@9P4VebE1E8Y^^xCD77EBAa zFYd_>yX~>h?>U6zv%jkF4?I||72IIi%W_I=c@6tYyBKUC73~Dh89MBHZJ5(bsD?o{ zXdTaB4JWsqsiSi|lm#i&#&Bjy1CpHjoqHDh(?7I) zyna{XtNGapYTKOLQ~zizxw;c%y?qp9ly^@j6@*>5j{ z>u-+6k?QjV+CNV!5=a^`%H`8ooVUMUV79c~nI_mBXE8ASwe-DKw1w)L2ODLuR>!=h zhfT_(d36;$B6$pzIKFARw5RedDX6b$CJCpXhJ-L;E&AQ=dHg9NJyL8q_Zf#1hQhs6 zvEdqa+7g5bhc-$khjvM1Yj%>tX=<|-yJv(k0Z|w+ZXEGHuSK$^JaoSxFLyCN>DH}` z{5J2Rv$tnpY;)-KC^{-}37vf}-C7#3Dlgxe>z94lVk3c3`h=Hl?!N5XN{L9XmQLCg zoX`1l+^@}sZRgY<)Hj6xA|T3}90VX4;vrP-phca3Hk90W;igsAGsQ z|6qpGQgxOlnNP=bi((G#>S?q-!IGKrFY{_ z$W{7(FQz4_&2m$Jmb;F>W@_m66T3UC-?3Ig$uS7~g43yq!HDx$SD3~`FEu)pm*8NP zvfO^a?DVxsFKNqFQg{pXM{HVC%X&{Gn{F;E==lj4v4Hz~eEAFf;Uhnf(O>AZ7Qk6A8M&`W72saYcdoii ziSm?y=!__>z3;n+dHQ3-zc@dVj9#o}^mL57JTkC>E9I``naGn3*PX3YJxiFcn|zAu zWO<}+<=N;UR|(oQ%^WhIrn?KvKsu zw>l+l#XD;i?WjxHSNj!CZZ~JoTh6zl?p$8Z7=ionj&xZz|<1hC|Tx^kxcw zbHnOY%lzvz`1<(Q+HMJi1~$=*BWcE6FUFf1B_gcW0 z^GflHTT4G-BzM*Z1_coEnKoZEEYr2jNMCoVy_gLPFxNh3`2?Mc%F*wkK&fx~!^f2( zZ7y%m^}P-jV>eYz=$FSCU@?5AF>gaQz~w~~QJlEAxp@Dsl!aW}<(z9HEa(qHt~fPh z&CJf?cafrQkldJU<_g8P+Q#p+LC3iJ;Jf$J%PXRR$1W2*6~-0}QodJ5J)STxS1=I90Bn#)%|Q?wXKQwe%^SE(ul(Z7d<^A&Q#b+%lA9H z?9BaGC9_}B!KpH~j9zdxSPWb1Da2>yXOZ>pQWxdgsCqkidLm;8V?%iu+o&VL(rqP* zPZocBJKIQiA>!u5 zx_W#@aPTJ+(T|JQj0;DDH*_L22qyve7VYYueO~a^iZq z2Gt7ZC-KvA7c;KJLLZ6ugH`BZ?D39g)kzS4J z#xJ0uP<@>zfzp{V`nZaBS@m1dl52s&i62~Khbw%FF=hx%zqW|4xQe7XcmANgWp*c` z+ZIh<$mh&Z=xKT9NJ=p!xoP|w)t*)#Q`m zk6ByUf0=+O%JmM;J?)(k8AemV0eYS~8c_H4X_F!@ui zHFAB|bVA+H>HcY*hr{u3_{OJ391}KtCq}^%8lmUyZEh1^oCfT((X>0hy0b()a(~cV zPdfEtNL21p3f8+Dp`V6W&rTL_u(3w<8+IsUew448c&6%qEvxlR7>*H#1?nq`@^P6F zeD_-?@~7NxKj?!6%hxBAxw@%8z1CnCO{~QoAJP>%(ICgxvNWo8(S8fVzA)k9hvb=n zIT0$ZhqhQ_$t0gCPStm`6pe_CCbK$RpC-T!?xje1-G(3xsw|0SeAa2!H(VDmO3Zv^ zXj@gWQB`ey$+b@BhyP7wai7W}?tT)Sl~&XVYV4_~P6W3>(FYTjzQ?Od# z-`iBtlhXc7N4cHgk*aFf{oY${5rL5tV}hgbkgGf)Rx8WHKMI;FWSTCQPP{lZs73ak zZ*>9@apIgte-u{j^OS}~WlwJ+4(ln(ncB=-Y7G{R%Je(9(RykL4cyDW-@YYw97=~%m*Ns6Y0~F&~h+^Ty 
zT9m|(isSH6l`S=XA_ME;=KR6C7&ZoZSQ~Fet_U384!`^j=M4dk=^2X|Y4bd(PNMWj zsp>M5l5#=Gd__z?%(vjTycEu@TyD+?4s1OpQB&E4DiP6s zZqXklKAJXrb-Fr|=WSqcSkZ>F!CfhnSydll@o@M2^Uc#Y9qiP8%D4}FewC4Gm`qq< zOkaE++jo^$rcIJW`HL-SlmL2RUHLchC`|Kizf#}%XmSC~t6k^u7|Uo)S6tFF$GcB* z>Z===yl1(a^MuJ?Ej(r>jBIeFZO!%F-GKGK?OFGG!tEWZc4EceffOE4a70K+0Uhe0H1JdgaX-P4{?MWR zkFVa|5dp@!6Yl??`A_S4!|f{QP``@J)GlUyNg!D8L~yV$v2$x4rLNRyhkS$Z9pbI^ zk3}Na>E}iz;cBHz_IDBd=e4+=e7}k_AsTPOYmBPpZN_rx9FK$Lg^=th(y&iGzw&hq zb5_~4#HRx@&x;1oJ1%O{i7uVCx?=FMdcF-0_v#6$8NO{|Mz7|mn)z?mMNQF}1KaX) zmS?`POEf2$-MyE(Y1aR|?pgD+iGc!_Y+Co0+E4EL1oS!8@Rt>zYP!6IXPWc6Ge*08 z`)$eB)i(zpXvbxkHaVQP9Je4}7o892I6odV(sxezA!CZPvZQ=S3ccXD>JYM4;ym=z zDc;yw9FN)4&MMF25gQ76^5fnXGNLM!RTdVz{uG5S#hQAZZIaKbQQo=FA3>7&gXb z-GDmsMUAaS4$rgigMS4Va$7{j_cD)S3OFQ=k}%cv>-=rHLynk#j(dM5(SJSt3LifG z3R{^Q9zOemPaoIdW_iyV zs^&)2oHfAluEwrDo?I*YExiVQca*)IJq+OszjKbONin&3zRKRXDB)UPb3&Ni7c_#% zT3P>&g=n-(VNbYc-#ZQ#Pew09%|_Y^oxM;k^!O2Mxx7w#d!EjQ?82u<@`fY2Iu5f% zq(6szPoJ8ENqdr=ri_j`)`lM-E6e5L)i~!wG&WMV6>WL@Y~lsamhzQPCd#MSuom!O>e3a=u zUbU`%`zm|cCVVuauGZxydZ?9Avcdh_{}g3sA!~P|i_+P5$Jf$V)Pr$mZL*I|gk8mc>UP`P z%2&7K;dC8C%8T1xli>d;y8IZVKWoY*#&FL%4m;yqzo1wi^s~}WTUfXgv+}{m#=^2m zjdA=0k})=?bEs^1C@qGUPgFm0dUPrglN9ZzTRDOeCjos-=u|l`A5j7^n*LclGw!)) z)Cb&wRMlR_SEfeX(fe#q{4@j)7S0mrS2bg5&$;@_qcuw8x$$Az;9fZ1P4)2X$611`Ymo=L6V-Qm^vTPg<|-MbsGiW_T$_rSJ62(d}B~7hMD*xB{04F&T9AG zsNz!X>Qv<_)jU~4p(Vt1ho>r@T)M$Zo0Q^K+Z&A1OJy`=X#ICiT${<%l?lY@o3&JxZ32h`dD9Qx;*X1 zN`<1>wXUsop{&b8u=*9UOF}lVteSuT&%KxPWoNac)cQnU^9C?BZ;}>&YQNjh@1(CM9P(5}aJH*|+wMg# zVf6{iOW5WCde*mQ?A7AAEYRcuJ#jo83UF$*hZaIVp0gWGsl<#?j3K zjTGhW9In&JQyeTW-Tm=+@0lB>`+vC2s5I@zn8)UYt9$1ms<#Gx!wr!;H=-N{t~|z$ zfpxZhEQn94#8FW88|wRNt2Zpt%&KK-e&52>-c?=8!0o27q;fJQyyFTJ5Q~=9sTiU%5AC$MJ~f!wCKze^-F~qd>${`^hvwI zd`vTyF>f$*bdSb&DELqo+0ES1N%y4c+GvTGNt(hGSjs9jY^^<~gInv&r71weAZ^LQ zbC>y9h<_=JMznld71bv{uA5qD>ybL*87CHFXn!B?gIJ4)c*|Z*rxrA=GJEx!#-d)5 z>3lJeU9}!voFAzzDD`$9sSU4urW^9n z@8onT7pD42??Ff?o9z245gV2ioYYO8uX{5?`g%yI{Tm%}n&hI1tf|G$2T|Is7g-Vr zqqmFTZ`i>rdg*tkK2fDGMUP%6*Dk5CwU(lwx~I^6;T2i%wUKHHe-uYYbB7jNJX@`` ziRGUB&T`lK3pN3%#F^|HS2^K9sB5Oxx}659U5+F?8mqYH4RUi)d1Wr=WKU);7$p(f zGmwlX;lQibohm;_Y%=hO8{-X;WAJ|zGJYl^jTu&qo-6xJW!;l)@@XLa{KVOB&54-V z^S)oI^Dgr*=nS^&^wf=@#bRIZ)~vnEfv>*VoBubV#{TR;yb;o-8*=b< z>$eO9YHzfH`bw>PZ+s{X`q9SzB*sjtr9SUVeLjBEi%bKbi62&(wWLzGGG5gALX2%> zvGD5ifq|O6lE#I(vVQXMRPH$VGgT8Us{QatK189@bo;o07ilJyxTRe5+Rw)7c~m7L z3&-p9y}htptB1CvWB7i=wah#uR@5g$s}c$kWywA#UddGb#E4cF^}Rnjy5iRSCbC<2 zOYq&bH)-txrKDUW)iM>TtJ|SgodG{`RhZm2o|INOECwrkas^-AB-6C5Eta?%7eh+^ za*#Fg9&269SUJ}{tm!dT3tN_&86We9BgCGa)jthq^HN1g(R<=&dTN|P;dELNVGQvS zMR&#grwH_ks7jT?W@#;-+BI}se=JN-Li&+Dy!*Y)5F75Dah5Gx|0_33TwIs{f60U;)Ue)p& zft}Jd%MLt8-Rt!#IWt9~VN$*sb->x6zv`CFCC(h{xVGYwuF)Lz+^ckn&+SJ)u_A2h@=*m+qsn_d>rkvQtP-~^%pX{ein!Y32u~2rS@%rPjuep?r zopbN7eiVM7_}E`RdGg{#o1fDs7nqE4)vR#t+kD`k_tgl>Uc37%guYWqGSeMv0U{G7xYWvy+U+t`fj8!_n|X`lRVS~L__-0N_QzIX~=ulmC6 zj#GMkEKNg=y>Bqv{Bq#B+MSVH*Jmeb-^|}PkEEGS+IHSHxN+{X>hz|A`zrP%gAMtw z6Yl3pUnLukCc{6OqKvAHGjHCZ}uIC)~#sTh$rz zxzlCNZeaeTKdt^Va7s(`W6+aaU+ZmgO8%%I-jSski3odHw4b5w{Hb@~^R07?hVjlO^7wILcjAVlvimkU}StZn?9M%~91k%{kwhe$1KyJc>W zGv?7~E`RV?a(8U_7LlK*;`QTk+pCR@k&Ql^H~QJ^Ew8D`LDygZu{7&Cjd$+m-;NpkZ&&`_NkgiJfAORn@T555a_rcdY4l%zshSpo6+0LA z{;RSAcr9=NB?AZWI^cl36Cf`HFiLPp0Rc{8>_FiGc`U%e)p`z~)L?_6e}ls!GR`}Q zObmHcaDdy3P&p9)9hnEt1{4&K7X$EMfPUNYt^oA`PX`3#p#jQpLJ{!6tx*Ih!}Z&P zffJPB-0_Y8VQG-{g7yz9G>}8NH#EQb6RC3g>pAa$LJcT1EUREhg~p4oR5s;VqEDz) zQt9uv`2a5k^Rvk>&iQ5M)(w)_N@OY~@ZmKc!}K0gzN%6XBRc;BQ>*<_(ZD&mQR~HU zQK`v+%mjj@E<&Y^&d}P7XHPW;Yad?;nZXF6`lxwc0z1gbQ($z=N`KSD`vrw(oaGPK 
z1#c&-7X3~8*P#Pz(|!0_b+YviEP5s}*GmcM99Ub+)4VNtLb1hx<#eqv`>91?_|WI+ew(0YPKuQ+cnWdX+NKqLRu} z(2dqnDb@!WB~W3`2%>$F_^2fBQG(M!rRdK7=pt?k`f1fc4<0`~;zD#i>Fep`34+*Y zW$9=YS=E`;8#d8|F6fgvF&H_Nvci>Fw1RUDL$F~rwhCsi49}QKx@#-re6=p!$?iJ! zXl44C?mFH|ir+CY(aTVllUKH~qes?9)rUeWRV(BwEH8j8Q7H ztWa>$r$D)^^Z_NdKxyucpSBsxFJ54N!{9QmX$T*-rHMKB>oS$fadrT?q z+#_b%yGP7q1TNFE(ZT*PhZYDR0dDF3tIBq-9cw|H1fd8qA9(xVw=4sCwLQcPc7qZy zK|u+akU7y3b+`;+@? z-Vfis;(w_6Kgy#2P(2e8>3tOez<+20%DQvi{ZJzZR?XiO*&q7?>chTJ7zY~I2j2~U zaj<%zJl$>_g(8vch)a1suZbk4Mgb z!RvQuh@%Mo{^`G9@;eV$FOKpe$Nr!NzbE41-t0tiVEP@!;V34DbNm;C4rkqs?{J7T zf0bPO@G{$r(sBTo-_XCe|NCcPT=)wu{wkF2Kp|t8jG;(6OmimB4jun`Rb|eCL7vy(Fq^lQ{kqyqp`Uko< zNm>6CM_PcOoNxropNxAf3*JB32i;?;{l)i2ik>v{={06hvLBiKVf9hAW-8j z;djWnqv!`tu1F&6zRkBw@?8q=5)|20Meqz6GUra5Ek&>|1ccjNjsJ)M=p?aY84=L~ zQ828GApjB&7;?0rq>PcBo+0=ZwzEM75NSQgL)pj@gjm_hEv=_-WNr&X4mbppv$q2g zc))gn73@~QpzY<8f&8?YRcwv+GC)7+xZ;!m-MR25(BJi1p@{4l=w`U~J zAXBVdAc>^PP7(ne!T0^j0T>a1rXbplO1?BBg;Xkh*+6h*(ibK!DzP?QKMUW_U3kq62h>Z`amx5-!7Nz%@~M5 zW$i712w(x_KsSH@rh~W%u*hC|7r^!hunU3jLBK`;D#E^(2D?ru&9Rq;@1!~R((F5F zuDvwJP8zY7=G;kh@1?nr>4Q=P$$ZePh0xp!qO?n$-B4Y-5ug5q$hl`@bPF4}wQ_P| zv;6)CCKGhty9L{UAy^@QcI2-x%ik~%&gC$S4Jq`XjKA}N@8He2gJl5-+9W~${^Vmr zKwHT}Fm5P-+W{CH!3j*~2Vn>{=+t=-#=`y=T)=aIVBinb$AVyiWXxe0IH4YfA>*?h z%Et<^;$avh@DIV*px{!6V9+B+hhdP<|6v#xG)WJYg-*(cVcd`eJeZFgxp*Cdu^kyV z7H)P|81c@jA49GRD3IF8iE$$E5* z!0V+)_zJ*|@FXV)w0L+tfLw-_xI_3scXtlMxc*WW4BX*->?}ukje{K-6!~Bq>`(}@ zLv64_?nj4V?9evzP}#rGl${IO;vLF|U_H`bcIf`op?n-bVjY38LNT)s*2e+8V|f_H zendWV0KPw(kMoEO;{eLZBYgwP%p)*P7HEfd5Eo9?BkK%s+Jbhphw{OX%wG;pj-z7) z9vD76FF81&`(TIb0~78DjQi*qaUu0s2XWy#I_8|<9CNr`PBwtdp*BE1=m2w|K2A2o z-(XO5^8@)1Y+OfpoRjS+AAnnMN7gM)IO`EUIPWIQ<8S&pndob0Se+W>Rx2rlgKBYefl&VFPK051a{k+EPEJhC=& zvU4Ay3l|F;v}HWd4=%7KA0A6C7WN}^0kF%FzJaCUh)e{_!V$T~#m)|$Sr4|$&H){u z55c&gFp>vhVD29&3s~*2+~5LUlZW|~3&C|{%()PVBWoxZcxw5uJOHmv9~oaR(6uAH z3-&{Y>4yMxK0Fr?fX;{U1F$1=6#-~}xNis+=+5Xtx`1u$k#!Kkf;b{K5G>qB<{$!W zZI9H)%65c)VC6otb_0ohL>>UxQCxt=;;?*10Dk!k3~~lOFy}uJ4ypqFd|3t zA)xT5hsq+LGx8qH&PESJx3mGLCU9UjeE@D&gToNBqLq~$3~&KB%`+o!Fau1H-$h{> zeB2084i;8XgebcZ3p+R*iHWkafpwl+L_~x|TuhjYANJ2#kRH%TEgSL-2815awNO)w I$%$kCKc+mOVgLXD diff --git a/doc/target/docbkx/pdf/openstack-identity-service-starter/os-identity-starter-guide.fo b/doc/target/docbkx/pdf/openstack-identity-service-starter/os-identity-starter-guide.fo deleted file mode 100644 index a67a1790c4..0000000000 --- a/doc/target/docbkx/pdf/openstack-identity-service-starter/os-identity-starter-guide.fo +++ /dev/null @@ -1,150 +0,0 @@ -TrueCopyright © 2010, 2011 OpenStack LLC All rights reserved.OpenStack Identity Starter GuideCloud API Docs PluginOpenStack Identity Starter GuideTable of Contents1. Quick Guide to Getting Started with KeystoneDependenciesCreating your first global admin and tenant adminCurl examples2. Installing KeystoneInstalling from packagesDebian/UbuntuInstalling from source tarballsInstalling from a Github BranchDebian/Ubuntu3. Identity Service Concepts OpenStack Identity Starter GuideSep 19, 2011trunk OpenStack Identity Starter GuideSep 19, 2011trunk OpenStack Identity Starter GuideSep 19, 2011trunk OpenStack Identity Starter Guide trunk (2011-09-19)Copyright © 2010, 2011 OpenStack LLC All rights reserved.OpenStack™ Identity Service offers open source software for identity management - for cloud users and administrators. This manual provides guidance for installing, - managing, and understanding the software that runs OpenStack Identity Service. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - OpenStack Identity Starter GuideSep 19, 2011trunk OpenStack Identity Starter GuideSep 19, 2011trunk OpenStack Identity Starter GuideSep 19, 2011trunk Table of Contents1. Quick Guide to Getting Started with Keystone Dependencies Creating your first global admin and tenant admin Curl examples 2. Installing Keystone Installing from packages Debian/Ubuntu Installing from source tarballs Installing from a Github Branch Debian/Ubuntu 3. Identity Service Concepts OpenStack Identity Starter GuideSep 19, 2011trunk OpenStack Identity Starter GuideSep 19, 2011trunk OpenStack Identity Starter GuideSep 19, 2011trunk 1. Quick Guide to Getting Started with Keystone - First, you will need to install keystone, if you haven't done so - already. Refer to Installing for more information.DependenciesDependenciesOnce Keystone is installed you need to initialize the database. You can do so with the keystone-manage command line utility. The keystone-manage utility helps with managing and configuring a Keystone installation. You configure the keystone-manage utility itself with a SQL Alchemy connection configuration via a parameter passed to the utility:--sql_connection=CONN_STRINGWhere the CONN_STRING is a proper SQLAlchemy connection string as described in - http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html?highlight=engine#sqlalchemy.create_engine.One important use of keystone-manage is to setup the database. To do so, run: - keystone-manage db_syncCreating your first global admin and tenant adminCreating your first global admin and tenant admin - Change directory to your Keystone install path. - 1. - Run the following to create the first tenant: - - $> bin/​keystone-manage tenant add "MyTenant" - 2. - Run the following to create the first tenant admin: - - $> bin/​keystone-manage user add MyAdmin P@ssw0rd MyTenant - Note -Some reserved roles are defined (and can be modified) through the keystone.conf in the /etc folder.3. 
- Associate your tenant admin with the Admin role: - - $> bin/​keystone-manage role grant Admin MyAdmin - Curl examplesCurl examplesAll examples assume default port usage (5001) and use the example admin account created above.Admin Initial GETRetrieves version, full API url, pdf doc link, and wadl link:$> curl http:/​/​0.​0.​0.​0:5001or:$> curl http:/​/​0.​0.​0.​0:5001/​v2.​0/Retrieve token:To retrieve the token and expiration date for a user:$> curl -d '{"passwordCredentials":{"username": "MyAdmin",​ "password": "P@ssw0rd"}}' -H "Content-type: application/​json" http:/​/​localhost:5001/​v2.​0/​tokensThis will return something like:$> {"auth": {"token": {"expires": "2011-08-10T17:45:22.​838440",​ "id": "0eed0ced-4667-4221-a0b2-24c91f242b0b"}}}NoteSave the “id” value as you’ll be using it in the calls below.To retrieve a list of tenants:Run:$> curl -H "X-Auth-Token:999888777666" http:/​/​localhost:5001/​v2.​0/​tenantsThis will return something like:$> {"tenants": {"values": [{"enabled": 1,​ "id": "MyTenant",​ "description": null}],​ "links": []}}Retrieve a list of users: Run:$> curl -H "X-Auth-Token:999888777666" http:/​/​localhost:5001/​v2.​0/​usersThis will return something like:$> {"users": {"values": [{"email": null,​ "enabled": true,​ "id": "MyAdmin",​ "tenantId": "MyTenant"}],​ "links": []}}Retrieve information about the token:Run:$> curl -H "X-Auth-Token:999888777666" http:/​/​localhost:5001/​v2.​0/​tokens/​0eed0ced-4667-4221-a0b2-24c91f242b0b This will return something like:$> {"auth": {"token": {"expires": "2011-08-11T04:26:58.​145171",​ "id": "0eed0ced-4667-4221-a0b2-24c91f242b0b"},​ "user": {"username": "MyAdmin",​ "roleRefs": [{"roleId": "Admin",​ "id": 1}],​ "tenantId": "MyTenant"}}} Revoking a token:Run:$> curl -X DELETE -H "X-Auth-Token:999888777666" http:/​/​localhost:5001/​tokens/​0eed0ced-4667-4221-a0b2-24c91f242b0bCreating a tenant:Run: $> curl -H "X-Auth-Token:999888777666" -H "Content-type: application/​json" -d '{"tenant":{"id":"MyTenant2",​ "description":"My 2nd Tenant",​ "enabled":true}}' http:/​/​localhost:5001/​tenants This will return something like:$> {"tenant": {"enabled": true,​ "id": "MyTenant2",​ "description": "My 2nd Tenant"}}Verifying the tenant:Run:$> curl -H "X-Auth-Token:999888777666" http:/​/​localhost:5001/​v2.​0/​tenants/​MyTenant2This will return something like:$> {"tenant": {"enabled": 1,​ "id": "MyTenant2",​ "description": "My 2nd Tenant"}}Updating the tenant:Run:$> curl -X PUT -H "X-Auth-Token:999888777666" -H "Content-type: application/​json" -d '{"tenant":{"description":"My NEW 2nd Tenant"}}' http:/​/​localhost:5001/​v2.​0/​tenants/​MyTenant2 -This will return something like: -$> {"tenant": {"enabled": true,​ "id": "MyTenant2",​ "description": "My NEW 2nd Tenant"}} -Deleting the tenant:Run:$> curl -X DELETE -H "X-Auth-Token:999888777666" http:/​/​localhost:5001/​v2.​0/​tenants/​MyTenant2 OpenStack Identity Starter GuideSep 19, 2011trunk OpenStack Identity Starter GuideSep 19, 2011trunk OpenStack Identity Starter GuideSep 19, 2011trunk 2. Installing KeystoneYou can install the Identity service from packages or from source.Installing from packagesInstalling from packages - To install the latest version of Keystone from the Github - repositories, following the following instructions. - Debian/UbuntuDebian/Ubuntu1. - Add the Keystone PPA to your sources.lst: - - :: - - $> sudo add-apt-repository ppa:keystone-core/trunk $> - sudo apt-get update - 2. 
- Install Keystone: - - :: - - $> sudo apt-get install keystone - Installing from source tarballsInstalling from source tarballs - To install the latest version of Keystone from the Launchpad - Bazaar repositories, following the following instructions. - 1. - Grab the source tarball from - Github - 2. - Untar the source tarball: - - :: - - $> tar -xzf <FILE> - 3. - Change into the package directory and build/install: - - :: - - $> cd keystone-<RELEASE> $> sudo python setup.py - install - Installing from a Github BranchInstalling from a Github Branch - To install the latest version of Keystone from the Github - repositories, see the following instructions. - Debian/UbuntuDebian/Ubuntu1. - Install Git and build dependencies: - - :: - - $> sudo apt-get install git python-eventlet python-routes - python-greenlet swift $> sudo apt-get install - python-argparse python-sqlalchemy python-wsgiref - python-pastedeploy - - ..note: - -If you want to build the Keystone documentation locally,​ you will also want -to install the python-sphinx package -1. - Branch Keystone's trunk branch:: (see - http://wiki.openstack.org/GerritWorkflow - to get the project initially setup): - - :: - - $> git checkout master $> git pull origin master - 2. - Install Keystone: - - :: - - $> sudo python setup.py install - OpenStack Identity Starter GuideSep 19, 2011trunk OpenStack Identity Starter GuideSep 19, 2011trunk OpenStack Identity Starter GuideSep 19, 2011trunk 3. Identity Service Concepts - The Keystone Identity Service has several key concepts which are - important to understand: - UserA digital representation of a person, system, or service who uses OpenStack cloud services. - Keystone authentication services will validate that incoming request are being made by the user - who claims to be making the call. Users have a login and may be assigned tokens to access - resources. Users may be directly assigned to a particular tenant and - behave as if they are contained in that tenant.Credentials - Data that belongs to, is owned by, and generally only known by a user that the user can present - to prove they are who they are (since nobody else should know that data). - Examples are: - a matching username and passworda matching username and API keyyourself and a driver's license with a picture of youa token that was issued to you that nobody else knows of - Authentication - In the context of Keystone, authentication is the act of confirming the identity of a - user or the truth of a claim. - Keystone will confirm that incoming request are being made by the user - who claims to be making the call by validating a set of claims that the user is making. - These claims are initially in the form of a set of credentials (username & password, - or username and API key). After initial confirmation, Keystone will issue the user a token - which the user can then provide to demonstrate that their identity has been authenticated - when making subsequent requests. - Token - A token is an arbitrary bit of text that is used to access - resources. Each token has a scope which describes which - resources are accessible with it. A token may be - revoked at anytime and is valid for a finite duration. - - While Keystone supports token-based authentication in this release, - the intention is for it to support additional protocols in the - future. The intent is for it to be an integration service foremost, and not - a aspire to be a full-fledged identity store and management solution. 
- Tenant - A container used to group or isolate resources and/or identity - objects. Depending on the service operator, a tenant may map to a customer, - account, organization, or project. - Service - An OpenStack service, such as Compute (Nova), Object Storage (Swift), or Image Service (Glance). A service provides - one or more endpoints through which users can access resources and perform - (presumably useful) operations. - Endpoint - An network-accessible address, usually described by URL, where a service may be accessed. If using an extension for templates, you can create an endpoint template, which represents the templates of all the consumable services that are available across the regions. - Role A personality that a user assumes when performing a specific set of operations. - A role includes a set of right and privileges. A user assuming that role inherits - those rights and privileges. - - In Keystone, a token that is issued to a user includes the list of roles that user - can assume. Services that are being called by that user determine how they interpret the set - of roles a user has and which operations or resources each roles grants access to. - \ No newline at end of file diff --git a/doc/target/docbkx/pdf/openstack-image-service-admin/os-image-adminguide.fo b/doc/target/docbkx/pdf/openstack-image-service-admin/os-image-adminguide.fo index 80883d818b..05c990013c 100644 --- a/doc/target/docbkx/pdf/openstack-image-service-admin/os-image-adminguide.fo +++ b/doc/target/docbkx/pdf/openstack-image-service-admin/os-image-adminguide.fo @@ -1,4 +1,4 @@ -TrueCopyright © 2010, 2011 OpenStack LLC All rights reserved.OpenStack Image Service Admin ManualCloud API Docs PluginOpenStack Image Service Admin ManualTable of Contents1. Quick Guide to Getting Started with GlanceOverview of Glance ArchitectureGlance API ServerGlance Registry Servers2. Installing GlanceInstalling from packagesDebian/UbuntuInstalling from source tarballsInstalling from a Bazaar BranchDebian/Ubuntu3. Image Identifiers4. Image RegistriesGlance Registry APIAPI in SummaryFiltering Images Returned via GET /images and GET /images/detailPOST /imagesExamples5. Image Statuses6. Disk and Container FormatsDisk FormatContainer Format7. Controlling Glance ServersStarting a serverManually starting the serverUsing the glance-control program to start the serverStopping a serverRestarting a server8. Configuring GlanceCommon Configuration Options in GlanceConfiguring Logging in GlanceLogging Options Available Only in Configuration FilesConfiguring Glance Storage BackendsConfiguring the Filesystem Storage BackendConfiguring the Swift Storage BackendConfiguring the S3 Storage BackendConfiguring the Glance RegistryConfiguring Notifications9. Using the Glance CLI ToolThe help commandThe add commandImportant Information about Uploading ImagesStore virtual machine image data and metadataRegister a virtual machine image in another locationThe update commandThe delete commandThe index commandThe details commandThe show commandThe clear commandThe image-members CommandThe member-images CommandThe member-add CommandThe member-delete CommandThe members-replace Command10. 
Using Glance Programmatically with Glance's ClientRequesting a List of Public VM ImagesRequesting Detailed Metadata on Public VM ImagesFiltering Images Returned via get_images() and get_images_detailed()Sorting Images Returned via get_images() and get_images_detailed()Requesting Detailed Metadata on a Specific ImageRetrieving a Virtual Machine ImageAdding a New Virtual Machine ImageRequesting Image MembershipsRequesting Member ImagesAdding a Member To an ImageRemoving a Member From an ImageReplacing a Membership List For an Image11. Glance Authentication With KeystoneSharing Images With Others OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin Manual trunk (2011-09-18)Copyright © 2010, 2011 OpenStack LLC All rights reserved.OpenStack™ Image Service offers a service for discovering, registering, and retrieving virtual machine images. Glance has a RESTful API that allows querying of VM image metadata as well as retrieval of the actual image. This manual provides guidance for installing, managing, and understanding the software that runs OpenStack Image Service. +TrueCopyright © 2010, 2011 OpenStack LLC All rights reserved.OpenStack Image Service Admin ManualCloud API Docs PluginOpenStack Image Service Admin ManualTable of Contents1. Quick Guide to Getting Started with GlanceOverview of Glance ArchitectureGlance API ServerGlance Registry Servers2. Installing GlanceInstalling from packagesDebian/UbuntuInstalling from source tarballsInstalling from a Bazaar BranchDebian/Ubuntu3. Image Identifiers4. Image RegistriesGlance Registry APIAPI in SummaryFiltering Images Returned via GET /images and GET /images/detailPOST /imagesExamples5. Image Statuses6. Disk and Container FormatsDisk FormatContainer Format7. Controlling Glance ServersStarting a serverManually starting the serverUsing the glance-control program to start the serverStopping a serverRestarting a server8. Configuring GlanceCommon Configuration Options in GlanceConfiguring Logging in GlanceLogging Options Available Only in Configuration FilesConfiguring Glance Storage BackendsConfiguring the Filesystem Storage BackendConfiguring the Swift Storage BackendConfiguring the S3 Storage BackendConfiguring the Glance RegistryConfiguring Notifications9. Using the Glance CLI ToolThe help commandThe add commandImportant Information about Uploading ImagesStore virtual machine image data and metadataRegister a virtual machine image in another locationThe update commandThe delete commandThe index commandThe details commandThe show commandThe clear commandThe image-members CommandThe member-images CommandThe member-add CommandThe member-delete CommandThe members-replace Command10. Using Glance Programmatically with Glance's ClientRequesting a List of Public VM ImagesRequesting Detailed Metadata on Public VM ImagesFiltering Images Returned via get_images() and get_images_detailed()Sorting Images Returned via get_images() and get_images_detailed()Requesting Detailed Metadata on a Specific ImageRetrieving a Virtual Machine ImageAdding a New Virtual Machine ImageRequesting Image MembershipsRequesting Member ImagesAdding a Member To an ImageRemoving a Member From an ImageReplacing a Membership List For an Image11. 
Glance Authentication With KeystoneSharing Images With Others OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin Manual Diablo (2011-09-22)Copyright © 2010, 2011 OpenStack LLC All rights reserved.OpenStack™ Image Service offers a service for discovering, registering, and retrieving virtual machine images. Code-named Glance, it has a RESTful API that allows querying of VM image metadata as well as retrieval of the actual image. This manual provides guidance for installing, managing, and understanding the software that runs OpenStack Image Service. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -8,13 +8,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk Table of Contents1. Quick Guide to Getting Started with Glance Overview of Glance Architecture Glance API Server Glance Registry Servers 2. Installing Glance Installing from packages Debian/Ubuntu Installing from source tarballs Installing from a Bazaar Branch Debian/Ubuntu 3. Image Identifiers 4. Image Registries Glance Registry API API in Summary Filtering Images Returned via GET /images - and GET /images/detail POST /images Examples 5. Image Statuses 6. Disk and Container Formats Disk Format Container Format 7. Controlling Glance Servers Starting a server Manually starting the server Using the glance-control program to - start the server Stopping a server Restarting a server 8. Configuring Glance Common Configuration Options in Glance Configuring Logging in Glance Logging Options Available Only in Configuration - Files Configuring Glance Storage Backends Configuring the Filesystem Storage Backend Configuring the Swift Storage Backend Configuring the S3 Storage Backend Configuring the Glance Registry Configuring Notifications 9. Using the Glance CLI Tool The help command The add command Important Information about Uploading Images Store virtual machine image data and metadata Register a virtual machine image in another - location The update command The delete command The index command The details command The show command The clear command The image-members Command The member-images Command The member-add Command The member-delete Command The members-replace Command 10. Using Glance Programmatically with Glance's Client Requesting a List of Public VM Images Requesting Detailed Metadata on Public VM Images Filtering Images Returned via get_images() + OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo Table of Contents1. Quick Guide to Getting Started with Glance Overview of Glance Architecture Glance API Server Glance Registry Servers 2. Installing Glance Installing from packages Debian/Ubuntu Installing from source tarballs Installing from a Bazaar Branch Debian/Ubuntu 3. Image Identifiers 4. Image Registries Glance Registry API API in Summary Filtering Images Returned via GET /images + and GET /images/detail POST /images Examples 5. Image Statuses 6. 
Disk and Container Formats Disk Format Container Format 7. Controlling Glance Servers Starting a server Manually starting the server Using the glance-control program to + start the server Stopping a server Restarting a server 8. Configuring Glance Common Configuration Options in Glance Configuring Logging in Glance Logging Options Available Only in Configuration + Files Configuring Glance Storage Backends Configuring the Filesystem Storage Backend Configuring the Swift Storage Backend Configuring the S3 Storage Backend Configuring the Glance Registry Configuring Notifications 9. Using the Glance CLI Tool The help command The add command Important Information about Uploading Images Store virtual machine image data and metadata Register a virtual machine image in another + location The update command The delete command The index command The details command The show command The clear command The image-members Command The member-images Command The member-add Command The member-delete Command The members-replace Command 10. Using Glance Programmatically with Glance's Client Requesting a List of Public VM Images Requesting Detailed Metadata on Public VM Images Filtering Images Returned via get_images() and get_images_detailed() Sorting Images Returned via get_images() - and get_images_detailed() Requesting Detailed Metadata on a Specific Image Retrieving a Virtual Machine Image Adding a New Virtual Machine Image Requesting Image Memberships Requesting Member Images Adding a Member To an Image Removing a Member From an Image Replacing a Membership List For an Image 11. Glance Authentication With Keystone Sharing Images With Others OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk 1. Quick Guide to Getting Started with Glance + and get_images_detailed() Requesting Detailed Metadata on a Specific Image Retrieving a Virtual Machine Image Adding a New Virtual Machine Image Requesting Image Memberships Requesting Member Images Adding a Member To an Image Removing a Member From an Image Replacing a Membership List For an Image 11. Glance Authentication With Keystone Sharing Images With Others OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo 1. Quick Guide to Getting Started with Glance Glance is a server that provides the following services: Ability to store and retrieve virtual machine images @@ -33,7 +33,7 @@ However, Glance includes a Client class that makes working with Glance easy and straightforward. - In the Cactus release, there will be also command-line tools for + As of the Cactus release, there are also command-line tools for interacting with Glance. Overview of Glance ArchitectureOverview of Glance Architecture There are two main parts to Glance's architecture: @@ -80,77 +80,61 @@ For more details on Glance's architecture see the Architecture section. For more information on what a Glance registry server is, see the Registries section. - OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk 2. Installing GlanceInstalling from packagesInstalling from packages + OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo 2. 
Installing GlanceInstalling from packagesInstalling from packages To install the latest version of Glance from the Launchpad Bazaar repositories, following the following instructions. Debian/UbuntuDebian/Ubuntu1. Add the Glance PPA to your sources.lst: - - :: $> sudo add-apt-repository ppa:glance-core/trunk $> sudo apt-get update - 2. + 2. Install Glance: - - :: $> sudo apt-get install glance Installing from source tarballsInstalling from source tarballs To install the latest version of Glance from the Launchpad Bazaar repositories, following the following instructions. - 1. + 1. Grab the source tarball from Launchpad - 2. + 2. Untar the source tarball: - - :: - - $> tar -xzf <FILE> - 3. + $> tar -xzf <FILE> 3. Change into the package directory and build/install: - - :: $> cd glance-<RELEASE> $> sudo python setup.py install Installing from a Bazaar BranchInstalling from a Bazaar Branch To install the latest version of Glance from the Launchpad Bazaar repositories, following the following instructions. - Debian/UbuntuDebian/Ubuntu1. + Debian/UbuntuDebian/Ubuntu1. Install Bazaar and build dependencies: - - :: $> sudo apt-get install bzr python-eventlet python-routes python-greenlet swift $> sudo apt-get install python-argparse python-sqlalchemy python-wsgiref python-pastedeploy - NoteIf you want to build the Glance documentation locally, you will also want -to install the python-sphinx package.1. + NoteIf you want to build the Glance documentation locally, you will also want +to install the python-sphinx package.1. Branch Glance's trunk branch: - - :: $> bzr branch lp:glance - 2. + 2. Install Glance: - - :: $> sudo python setup.py install - OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk 3. Image Identifiers + OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo 3. Image Identifiers Images are uniquely identified by way of a URI that matches the following signature: - + <Glance Server Location>/​images/​<ID> where `<Glance Server Location>` is the resource location of the Glance service that knows about an image, and `<ID>` is the image's identifier that is unique to that Glance server. - OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk 4. Image Registries + OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo 4. Image Registries Image metadata made available through Glance can be stored in image `registries`. Image registries are any web service that adheres to the Glance REST-like API for image metadata. @@ -167,7 +151,7 @@ to install the python-sphinx package.< registry. API in SummaryAPI in Summary The following is a brief description of the Glance API: - + GET /​images Return brief information about public images GET /​images/​detail Return detailed information about public images GET /​images/​<ID> Return metadata about an image in HTTP headers @@ -181,39 +165,39 @@ DELETE /​images/​<ID> Remove an image's metadata from the registry GET /images/detail requests take query parameters that serve to filter the returned list of images. The following list details these query parameters. 
- + name=NAME Filters images having a name attribute matching NAME. - + container_format=FORMAT Filters images having a container_format attribute matching FORMAT - + disk_format=FORMAT Filters images having a disk_format attribute matching FORMAT - + status=STATUS Filters images having a status attribute matching STATUS - + size_min=BYTES Filters images having a size attribute greater than or equal to BYTES - + size_max=BYTES Filters images having a size attribute less than or equal to BYTES These two resources also accept sort parameters: - + sort_key=KEY Results will be ordered by the specified image attribute @@ -223,7 +207,7 @@ DELETE /​images/​<ID> Remove an image's metadata from the registry container_format, size, created_at (default) and updated_at. - + sort_dir=DIR Results will be sorted in the direction @@ -234,7 +218,7 @@ DELETE /​images/​<ID> Remove an image's metadata from the registry The body of the request will be a JSON-encoded set of data about the image to add to the registry. It will be in the following format: - + {'image': {'id': <ID>|None,​ 'name': <NAME>,​ @@ -248,13 +232,13 @@ DELETE /​images/​<ID> Remove an image's metadata from the registry The request shall validate the following conditions and return a 400 Bad request when any of the conditions are not met: - + status must be non-empty, and must be one of active, saving, queued, or killed - + disk_format must be non-empty, and must be one of ari, aki, @@ -265,14 +249,14 @@ DELETE /​images/​<ID> Remove an image's metadata from the registry vdi, qcow2, or vmdk - + container_format must be non-empty, and must be on of ari, aki, ami, bare, or ovf - + If disk_format or container_format is ari, @@ -282,14 +266,14 @@ DELETE /​images/​<ID> Remove an image's metadata from the registry container_format must be the same. ExamplesExamples ..todo:: Complete examples for Glance registry API - OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk 5. Image Statuses + OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo 5. Image Statuses Images in Glance can be in one the following statuses: - + queued The image identifier has been reserved for an image in the Glance registry. No image data has been uploaded to Glance. - + saving Denotes that an image's raw data is currently being uploaded to @@ -297,27 +281,27 @@ DELETE /​images/​<ID> Remove an image's metadata from the registry /images` and there is an `x-image-meta-location` header present, that image will never be in the `saving` status (as the image data is already available in some other location). - + active Denotes an image that is fully available in Glance. - + killed Denotes that an error occurred during the uploading of an image's data, and that the image is not readable. - + deleted Glance has retained the information about the image, but it is no longer available to use. An image in this state will be removed automatically at a later date. - + pending_delete This is similiar to `deleted`, however, Glance has not yet removed the image data. An image in this state is recoverable. - OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk 6. 
Disk and Container Formats + OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo 6. Disk and Container Formats When adding an image to Glance, you are may specify what the virtual machine image's disk format and container format are. @@ -330,47 +314,47 @@ DELETE /​images/​<ID> Remove an image's metadata from the registry machine disk image. You can set your image's container format to one of the following: - + raw This is an unstructured disk image format - + vhd This is the VHD disk format, a common disk format used by virtual machine monitors from VMWare, Xen, Microsoft, VirtualBox, and others - + vmdk Another common disk format supported by many common virtual machine monitors - + vdi A disk format supported by VirtualBox virtual machine monitor and the QEMU emulator - + iso An archive format for the data contents of an optical disc (e.g. CDROM). - + qcow2 A disk format supported by the QEMU emulator that can expand dynamically and supports Copy on Write - + aki This indicates what is stored in Glance is an Amazon kernel image - + ari This indicates what is stored in Glance is an Amazon ramdisk image - + ami This indicates what is stored in Glance is an Amazon machine @@ -386,40 +370,40 @@ DELETE /​images/​<ID> Remove an image's metadata from the registry data... You can set your image's container format to one of the following: - + ovf This is the OVF container format - + bare This indicates there is no container or metadata envelope for the image - + aki This indicates what is stored in Glance is an Amazon kernel image - + ari This indicates what is stored in Glance is an Amazon ramdisk image - + ami This indicates what is stored in Glance is an Amazon machine image - OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk 7. Controlling Glance Servers + OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo 7. Controlling Glance Servers This section describes the ways to start, stop, and reload Glance's server programs. Starting a serverStarting a server There are two ways to start a Glance server (either the API server or the reference implementation registry server that ships with Glance): - + Manually calling the server program - + Using the glance-control server daemon wrapper program @@ -429,22 +413,22 @@ DELETE /​images/​<ID> Remove an image's metadata from the registry command-line options and a single argument for a paste.deploy configuration file to use when configuring the server application. - NoteGlance ships with an ``etc/`` directory that contains sample ``paste.deploy`` + NoteGlance ships with an ``etc/`` directory that contains sample ``paste.deploy`` configuration files that you can copy to a standard configuration directory and adapt for your own uses. Specifically, bind_host must be set properly. If you do `not` specify a configuration file on the command line, Glance will do its best to locate a configuration file in one of the following directories, stopping at the first config file it finds: - + $CWD - + ~/.glance - + ~/ - + /etc/glance - + /etc The filename that is searched for depends on the server @@ -453,14 +437,14 @@ adapt for your own uses. Specifically, bind_host must be set properly.glance-registry.conf. 
If no configuration file is found, you will see an error, like: - + $> glance-api ERROR: Unable to locate any configuration file.​ Cannot load application glance-api Here is an example showing how you can manually start the glance-api server and glance-registry in a shell.: - + $ sudo glance-api glance-api.​conf --debug &​ jsuh@mc-ats1:~$ 2011-04-13 14:50:12 DEBUG [glance-api] ******************************************************************************** 2011-04-13 14:50:12 DEBUG [glance-api] Configuration options gathered from config file: @@ -550,11 +534,11 @@ jsuh 20017 0.​0 0.​0 3368 744 pts/​1 S+ 12:47 0:00 grep g the word "start", followed by any command-line options you wish to provide. Start the server with glance-control in the following way: - + $> sudo glance-control <SERVER> start [CONFPATH] ..note: - + You must use the ``sudo`` program to run ``glance-control`` currently,​ as the pid files for the server programs are written to /​var/​run/​glance/​ @@ -591,21 +575,21 @@ pid files for the server programs are written to /​var/​run/​glance/​ glance-control program, you can use the glance-control program to stop it. Simply do the following: - + $> sudo glance-control <SERVER> stop as this example shows: - + $> sudo glance-control registry stop Stopping glance-registry pid: 17602 signal: 15 Restarting a serverRestarting a server You can restart a server with the glance-control program, as demonstrated here: - + $> sudo glance-control registry restart etc/​glance-registry.​conf Stopping glance-registry pid: 17611 signal: 15 Starting glance-registry with /​home/​jpipes/​repos/​glance/​trunk/​etc/​glance-registry.​conf - OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk 8. Configuring Glance + OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo 8. Configuring Glance Glance has a number of options that you can use to configure the Glance API server, the Glance Registry server, and the various storage backends that Glance can use to store images. @@ -620,13 +604,13 @@ Starting glance-registry with /​home/​jpipes/​repos/​glance/​trunk/​ not specify a configuration file, Glance will look in the following directories for a configuration file, in order: - + ~/.glance - + ~/ - + /etc/glance - + /etc The Glance API server configuration file should be named @@ -646,7 +630,7 @@ Starting glance-registry with /​home/​jpipes/​repos/​glance/​trunk/​ Common Configuration Options in GlanceCommon Configuration Options in Glance Glance has a few command-line options that are common to all Glance programs: - + --verbose Optional. Default: False @@ -655,7 +639,7 @@ Starting glance-registry with /​home/​jpipes/​repos/​glance/​trunk/​ Turns on the INFO level in logging and prints more verbose command-line interface printouts. - + --debug Optional. Default: False @@ -663,7 +647,7 @@ Starting glance-registry with /​home/​jpipes/​repos/​glance/​trunk/​ Can be specified on the command line and in configuration files. Turns on the DEBUG level in logging. - + --config-file=PATH Optional. Default: None @@ -676,13 +660,13 @@ Starting glance-registry with /​home/​jpipes/​repos/​glance/​trunk/​ as the configuration file. 
If there is no file or there were no arguments, we search for a configuration file in the following order: - + ~/.glance - + ~/ - + /etc/glance - + /etc The filename that is searched for depends on the server @@ -692,7 +676,7 @@ Starting glance-registry with /​home/​jpipes/​repos/​glance/​trunk/​ Configuring Logging in GlanceConfiguring Logging in Glance There are a number of configuration options in Glance that control how Glance servers log messages. - + --log-config=PATH Optional. Default: None @@ -709,10 +693,10 @@ Starting glance-registry with /​home/​jpipes/​repos/​glance/​trunk/​ application configuration file. As an example, you might do the following for the API server, in a configuration file called etc/glance-api.conf: - + [DEFAULT] log_file =​ /​var/​log/​glance/​api.​log - + log_file The filepath of the file to use for logging messages from @@ -721,13 +705,13 @@ log_file =​ /​var/​log/​glance/​api.​log servers in a daemon mode (using glance-control) you should make sure that the log_file option is set appropriately. - + log_dir The filepath of the directory to use for log files. If not specified (the default) the log_file is used as an absolute filepath. - + log_date_format The format string for timestamps in the log output. @@ -741,7 +725,7 @@ log_file =​ /​var/​log/​glance/​api.​log how Glance stores disk images. These configuration options are specified in the glance-api.conf config file in the section [DEFAULT]. - + default_store=STORE Optional. Default: file @@ -752,7 +736,7 @@ log_file =​ /​var/​log/​glance/​api.​log Glance. Available options for this option are (file, swift, or s3). - Configuring the Filesystem Storage BackendConfiguring the Filesystem Storage Backend + Configuring the Filesystem Storage BackendConfiguring the Filesystem Storage Backend filesystem_store_datadir=PATH Optional. Default: /var/lib/glance/images/ @@ -766,7 +750,7 @@ log_file =​ /​var/​log/​glance/​api.​log create this directory if it does not exist. Ensure that the user that glance-api runs under has write permissions to this directory. - Configuring the Swift Storage BackendConfiguring the Swift Storage Backend + Configuring the Swift Storage BackendConfiguring the Swift Storage Backend swift_store_auth_address=URL Required when using the Swift storage backend. @@ -782,7 +766,7 @@ log_file =​ /​var/​log/​glance/​api.​log auth documentation and the overview of Swift authentication. - + swift_store_user=USER Required when using the Swift storage backend. @@ -793,7 +777,7 @@ log_file =​ /​var/​log/​glance/​api.​log Sets the user to authenticate against the swift_store_auth_address with. - + swift_store_key=KEY Required when using the Swift storage backend. @@ -805,7 +789,7 @@ log_file =​ /​var/​log/​glance/​api.​log Sets the authentication key to authenticate against the swift_store_auth_address with for the user swift_store_user. - + swift_store_container=CONTAINER Optional. Default: glance @@ -816,7 +800,7 @@ log_file =​ /​var/​log/​glance/​api.​log Sets the name of the container to use for Glance images in Swift. - + swift_store_create_container_on_put Optional. Default: False @@ -827,7 +811,7 @@ log_file =​ /​var/​log/​glance/​api.​log If true, Glance will attempt to create the container swift_store_container if it does not exist. - + swift_store_large_object_size=SIZE_IN_MB Optional. Default: 5120 @@ -839,7 +823,7 @@ log_file =​ /​var/​log/​glance/​api.​log What size, in MB, should Glance start chunking image files and do a large object manifest in Swift? 
By default, this is the maximum object size in Swift, which is 5GB - + swift_store_large_object_chunk_size=SIZE_IN_MB Optional. Default: 200 @@ -852,7 +836,7 @@ log_file =​ /​var/​log/​glance/​api.​log Glance write chunks to Swift? This amount of data is written to a temporary disk buffer during the process of chunking the image file, and the default is 200MB - Configuring the S3 Storage BackendConfiguring the S3 Storage Backend + Configuring the S3 Storage BackendConfiguring the S3 Storage Backend s3_store_host=URL Required when using the S3 storage backend. @@ -868,7 +852,7 @@ log_file =​ /​var/​log/​glance/​api.​log authentication system, please see the S3 documentation - + s3_store_access_key=ACCESS_KEY Required when using the S3 storage backend. @@ -881,7 +865,7 @@ log_file =​ /​var/​log/​glance/​api.​log s3_store_host with. You should set this to your 20-character Amazon AWS access key. - + s3_store_secret_key=SECRET_KEY Required when using the S3 storage backend. @@ -895,7 +879,7 @@ log_file =​ /​var/​log/​glance/​api.​log s3_store_access_key. You should set this to your 40-character Amazon AWS secret key. - + s3_store_bucket=BUCKET Required when using the S3 storage backend. @@ -920,7 +904,7 @@ log_file =​ /​var/​log/​glance/​api.​log then make your bucket value be: abcdefghijklmnopqrstglance - + s3_store_create_bucket_on_put Optional. Default: False @@ -938,7 +922,7 @@ log_file =​ /​var/​log/​glance/​api.​log options are specified in the glance-registry.conf config file in the section [DEFAULT]. - + sql_connection=CONNECTION_STRING (--sql-connection when specified on command line) @@ -952,7 +936,7 @@ log_file =​ /​var/​log/​glance/​api.​log the registry database. Please see the documentation for SQLAlchemy connection strings online. - + sql_timeout=SECONDS on command line) Optional. Default: 3600 @@ -966,7 +950,7 @@ log_file =​ /​var/​log/​glance/​api.​log to a RabbitMQ queue. The configuration options are specified in the glance-api.conf config file in the section [DEFAULT]. - + notifier_strategy Optional. Default: noop @@ -974,91 +958,91 @@ log_file =​ /​var/​log/​glance/​api.​log Sets the strategy used for notifications. Options are logging, rabbit and noop. - + rabbit_host Optional. Default: localhost Host to connect to when using rabbit strategy. - + rabbit_port Optional. Default: 5672 Port to connect to when using rabbit strategy. - + rabbit_use_ssl Optional. Default: false Boolean to use SSL for connecting when using rabbit strategy. - + rabbit_userid Optional. Default: guest Userid to use for connection when using rabbit strategy. - + rabbit_password Optional. Default: guest Password to use for connection when using rabbit strategy. - + rabbit_virtual_host Optional. Default: / Virtual host to use for connection when using rabbit strategy. - + rabbit_notification_topic Optional. Default: glance_notifications Topic to use for connection when using rabbit strategy. - OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk 9. Using the Glance CLI Tool + OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo 9. 
Using the Glance CLI Tool Glance ships with a command-line tool for querying and managing Glance It has a fairly simple but powerful interface of the form: - + Usage: glance <command> [options] [args] Where <command> is one of the following: - + help Show detailed help information about a specific command - + add Adds an image to Glance - + update Updates an image's stored metadata in Glance - + delete Deletes an image and its metadata from Glance - + index Lists brief information about public images that Glance knows about - + details Lists detailed information about public images that Glance knows about - + show Lists detailed information about a specific image - + clear Destroys all public images @@ -1071,7 +1055,7 @@ Usage: glance <command> [options] [args] <COMMAND> argument shows detailed help about a specific command. Running glance without any arguments shows a brief help message, like so: - + $> glance Usage: glance <command> [options] [args] @@ -1114,7 +1098,7 @@ Options: With a <COMMAND> argument, more information on the command is shown, like so: - + $> glance help update glance update [options] <ID> <field1=​value1 field2=​value2 .​.​.​> @@ -1137,10 +1121,10 @@ to spell field names correctly.​ :) The add commandThe add command The add command is used to do both of the following: - + Store virtual machine image data and metadata about that image in Glance - + Let Glance know about an existing virtual machine image that may be stored somewhere else @@ -1185,20 +1169,20 @@ to spell field names correctly.​ :) Here is how we'd upload this image to Glance. Change example ip number to your server ip number.: - + $> glance add name=​"My Image" is_public=​true < /​tmp/​images/​myimage.​iso --host=​65.​114.​169.​29 If Glance was able to successfully upload and store your VM image data and metadata attributes, you would see something like this: - + $> glance add name=​"My Image" is_public=​true < /​tmp/​images/​myimage.​iso --host=​65.​114.​169.​29 Added new image with ID: 2 You can use the --verbose (or -v) command-line option to print some more information about the metadata that was saved with the image: - + $> glance --verbose add name=​"My Image" is_public=​true < /​tmp/​images/​myimage.​iso --host=​65.​114.​169.​29 Added new image with ID: 4 Returned the following metadata for the new image: @@ -1220,7 +1204,7 @@ Completed in 0.​6141 sec.​ If you are unsure about what will be added, you can use the --dry-run command-line option, which will simply show you what would have happened: - + $> glance --dry-run add name=​"Foo" distro=​"Ubuntu" is_publi=​True < /​tmp/​images/​myimage.​iso --host=​65.​114.​169.​29 Dry run.​ We would have done the following: Add new image with metadata: @@ -1259,7 +1243,7 @@ Add new image with metadata: the URL http://example.com/images/myimage.vhd. We can register this image with Glance using the following: - + $> glance --verbose add name=​"Some web image" disk_format=​vhd container_format=​ovf\​ location=​"http:/​/​example.​com/​images/​myimage.​vhd" Added new image with ID: 1 @@ -1287,19 +1271,19 @@ Completed in 0.​0356 sec.​ The update command allows you to update the metadata fields of a stored image. You use this command like so: - + glance update <ID> [field1=​value1 field2=​value2 .​.​.​] Let's say we have an image with identifier 5 that we wish to change the is_public attribute of the image from False to True. 
The following would accomplish this: - + $> glance update 5 is_public=​true --host=​65.​114.​169.​29 Updated image 5 Using the --verbose flag will show you all the updated data about the image: - + $> glance --verbose update 5 is_public=​true --host=​65.​114.​169.​29 Updated image 5 Updated image metadata for image 5: @@ -1315,14 +1299,14 @@ Completed in 0.​0596 sec.​ The delete commandThe delete command You can delete an image by using the delete command, shown below: - + $> glance --verbose delete 5 --host=​65.​114.​169.​29 Deleted image 5 The index commandThe index command The index command displays brief information about the public images available in Glance, as shown below: - + $> glance index --host=​65.​114.​169.​29 ID Name Disk Format Container Format Size ---------------- ------------------------------ -------------------- -------------------- -------------- @@ -1354,7 +1338,7 @@ ID Name Disk Format Container F The details command displays detailed information about the public images available in Glance, as shown below: - + $> glance details --host=​65.​114.​169.​29 =​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​=​ URI: http:/​/​example.​com/​images/​1 @@ -1407,7 +1391,7 @@ Container format: bare The show command displays detailed information about a specific image, specified with <ID>, as shown below: - + $> glance show 3 --host=​65.​114.​169.​29 URI: http:/​/​example.​com/​images/​3 Id: 3 @@ -1426,7 +1410,7 @@ Property 'distro': Fedora image metadata. Passing the --verbose command will print brief information about all the images that were deleted, as shown below: - + $> glance --verbose clear --host=​65.​114.​169.​29 Deleting image 1 "Some web image" .​.​.​ done Deleting image 2 "Some other web image" .​.​.​ done @@ -1435,7 +1419,7 @@ Completed in 0.​0328 sec.​ The image-members command displays the list of members with which a specific image, specified with <ID>, is shared, as shown below: - + $> glance image-members 3 --host=​65.​114.​169.​29 tenant1 tenant2 * @@ -1445,7 +1429,7 @@ tenant2 * The member-images command displays the list of images which are shared with a specific member, specified with <MEMBER>, as shown below: - + $> glance member-images tenant1 --host=​65.​114.​169.​29 1 2 * @@ -1457,7 +1441,7 @@ $> glance member-images tenant1 --host=​65.​114.​169.​29 private image, specified with <ID>. The --can-share flag can be given to allow the member to share the image, as shown below: - + $> glance member-add 1 tenant1 --host=​65.​114.​169.​29 $> glance member-add 1 tenant2 --can-share --host=​65.​114.​169.​29 The member-delete CommandThe member-delete Command @@ -1465,7 +1449,7 @@ $> glance member-add 1 tenant2 --can-share --host=​65.​114.​169.​29 a member, specified with <MEMBER>, to a private image, specified with <ID>, as shown below: - + $> glance member-delete 1 tenant1 $> glance member-delete 1 tenant2 The members-replace CommandThe members-replace Command @@ -1475,12 +1459,12 @@ $> glance member-delete 1 tenant2 for one member, specified with <MEMBER>. The --can-share flag can be given to allow the member to share the image, as shown below: - + $> glance members-replace 1 tenant1 --can-share --host=​65.​114.​169.​29 The command is given in plural form to make it clear that all existing memberships are affected by the command. 
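Putting the membership commands together, a typical sharing workflow might look like the following sketch. It reuses the illustrative image ID and --host address from the examples above; the tenant names are placeholders, and the exact listing output depends on your images and members:

$> glance member-add 1 tenant1 --host=65.114.169.29
$> glance member-add 1 tenant2 --can-share --host=65.114.169.29
$> glance image-members 1 --host=65.114.169.29
tenant1
tenant2 *
$> glance members-replace 1 tenant3 --host=65.114.169.29
$> glance image-members 1 --host=65.114.169.29
tenant3

After the members-replace call, only tenant3 retains access, because the replace operation revokes every existing membership that is not named in the new list.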
- OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk 10. Using Glance Programmatically with Glance's Client + OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo 10. Using Glance Programmatically with Glance's Client While it is perfectly acceptable to issue HTTP requests directly to Glance via its RESTful API, sometimes it is better to be able to access and modify image resources via a client class that removes @@ -1530,32 +1514,32 @@ $> glance members-replace 1 tenant1 --can-share --host=​65.​114.​169. When calling, simply pass an optional dictionary to the method containing the filters by which you wish to limit results, with the filter keys being one or more of the below: - + name: NAME Filters images having a name attribute matching NAME. - + container_format: FORMAT Filters images having a container_format attribute matching FORMAT - + disk_format: FORMAT Filters images having a disk_format attribute matching FORMAT - + status: STATUS Filters images having a status attribute matching STATUS - + size_min: BYTES Filters images having a size attribute greater than or equal to BYTES - + size_max: BYTES Filters images having a size attribute less @@ -1575,7 +1559,7 @@ $> glance members-replace 1 tenant1 --can-share --host=​65.​114.​169. and get_images_detailed() Two parameters are available to sort the list of images returned by these methods. - + sort_key: KEY Images can be ordered by the image attribute @@ -1585,7 +1569,7 @@ $> glance members-replace 1 tenant1 --can-share --host=​65.​114.​169. container_format, disk_format, created_at (default) and updated_at. - + sort_dir: DIR The direction of the sort may be defined by @@ -1649,15 +1633,15 @@ $> glance members-replace 1 tenant1 --can-share --host=​65.​114.​169. f = open('some_local_file', 'wb') for chunk in image_<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="file:">file:</link> f.write(chunk) f.close() - NoteThe return from Client.get_image() is a tuple of (`metadata`, `file`) + NoteThe return from Client.get_image() is a tuple of (`metadata`, `file`) where `metadata` is a mapping of metadata about the image and `file` is a generator that yields chunks of image data.Adding a New Virtual Machine ImageAdding a New Virtual Machine Image We have created a new virtual machine image in some way (created a "golden image" or snapshotted/backed up an existing image) and we wish to do two things: - + Store the disk image data in Glance - + Store metadata about this image in Glance We can do the above two activities in a single call to the Glance @@ -1666,7 +1650,7 @@ generator that yields chunks of image data. The method signature is as follows: - + glance.​client.​Client.​add_image(image_meta,​ image_data=​None) The `image_meta` argument is a mapping containing various image @@ -1675,7 +1659,7 @@ glance.​client.​Client.​add_image(image_meta,​ image_data=​None) The list of metadata that `image_meta` can contain are listed below. - + `name` This key/value is required. Its value should be the name of @@ -1685,7 +1669,7 @@ glance.​client.​Client.​add_image(image_meta,​ image_data=​None) Glance node. It would be an unrealistic expectation of users to know all the unique names of all other user's images. - + `id` This key/value is optional. 
@@ -1697,7 +1681,7 @@ glance.​client.​Client.​add_image(image_meta,​ image_data=​None) When this key/value is not present, Glance will generate an identifier for the image and return this identifier in the response (see below) - + `store` This key/value is optional. Valid values are one of `file`, @@ -1711,12 +1695,12 @@ glance.​client.​Client.​add_image(image_meta,​ image_data=​None) When not present, Glance will store the disk image data in the backing store that is marked default. See the configuration option `default_store` for more information. - + `type` This key/values is required. Valid values are one of `kernel`, `machine`, `raw`, or `ramdisk`. - + `size` This key/value is optional. @@ -1728,7 +1712,7 @@ glance.​client.​Client.​add_image(image_meta,​ image_data=​None) When not present, Glance will calculate the image's size based on the size of the request body. - + `is_public` This key/value is optional. @@ -1740,7 +1724,7 @@ glance.​client.​Client.​add_image(image_meta,​ image_data=​None) When not present, the image is assumed to be not public and specific to a user. - + `properties` This key/value is optional. @@ -1750,7 +1734,7 @@ glance.​client.​Client.​add_image(image_meta,​ image_data=​None) For example, if the following is the value of the `properties` key in the `image_meta` argument: - + {'distro': 'Ubuntu 10.​10'} Then a key/value pair of "distro"/"Ubuntu @@ -1774,7 +1758,7 @@ glance.​client.​Client.​add_image(image_meta,​ image_data=​None) from glance.client import Client c = Client("glance.example.com", 9292) - + meta = {'name': 'Ubuntu 10.10 5G', 'type': 'machine', 'is_public': True, 'properties': @@ -1795,7 +1779,7 @@ glance.​client.​Client.​add_image(image_meta,​ image_data=​None) c = Client("glance.example.com", 9292) members = c.get_image_members(1) - NoteThe return from Client.get_image_members() is a list of dictionaries. Each + NoteThe return from Client.get_image_members() is a list of dictionaries. Each dictionary has a `member_id` key, mapping to the tenant the image is shared with, and a `can_share` key, mapping to a boolean value that identifies whether the member can further share the image.Requesting Member ImagesRequesting Member Images @@ -1810,7 +1794,7 @@ whether the member can further share the image. images = c.get_member_images('tenant1') - NoteThe return from Client.get_member_images() is a list of dictionaries. Each + NoteThe return from Client.get_member_images() is a list of dictionaries. Each dictionary has an `image_id` key, mapping to an image shared with the member, and a `can_share` key, mapping to a boolean value that identifies whether the member can further share the image.Adding a Member To an ImageAdding a Member To an Image @@ -1828,7 +1812,7 @@ the member can further share the image. 
..note: - + The Client.​add_member() function takes one optional argument,​ the `can_share` value.​ If one is not provided and the membership already exists,​ its current `can_share` setting is left alone.​ If the membership does not already exist,​ @@ -1850,7 +1834,7 @@ it.​ The return value of Client.​add_member() is not significant.​ c.delete_member(1, 'tenant1') ..note: - + The return value of Client.​delete_member() is not significant.​ Replacing a Membership List For an ImageReplacing a Membership List For an Image All existing image memberships may be revoked and replaced in a @@ -1865,17 +1849,17 @@ The return value of Client.​delete_member() is not significant.​ from glance.client import Client c = Client("glance.example.com", 9292) - + c.replace_members(1, {'member_id': 'tenant1', 'can_share': False}, {'member_id': 'tenant2', 'can_share': True}) - NoteThe first argument to Client.replace_members() is the opaque identifier of + NoteThe first argument to Client.replace_members() is the opaque identifier of the image; the remaining arguments are dictionaries with the keys `member_id` (mapping to a tenant name) and `can_share`. Note that `can_share` may be omitted, in which case any existing membership for the specified member will be preserved through the replace operation. -The return value of Client.replace_members() is not significant. OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk OpenStack Image Service Admin ManualSep 18, 2011trunk 11. Glance Authentication With Keystone +The return value of Client.replace_members() is not significant. OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo OpenStack Image Service Admin ManualSep 22, 2011Diablo 11. Glance Authentication With Keystone Glance may optionally be integrated with Keystone. Setting this up is relatively straightforward: the Keystone distribution includes the requisite middleware and examples of appropriately modified @@ -1887,7 +1871,7 @@ specified member will be preserved through the replace operation. authenticated users, and the `is_public` attribute will cause access to those images for which it is `false` to be restricted to only the owner. - NoteThe exception is those images for which `owner` is set to `null`, + NoteThe exception is those images for which `owner` is set to `null`, which may only be done by those users having the ``Admin`` role. These images may still be accessed by the public, but will not appear in the list of public images. This allows the Glance diff --git a/doc/target/docbkx/pdf/openstack-object-storage-admin/os-objectstorage-adminguide.fo b/doc/target/docbkx/pdf/openstack-object-storage-admin/os-objectstorage-adminguide.fo index e42161e983..5f4a79e1e6 100644 --- a/doc/target/docbkx/pdf/openstack-object-storage-admin/os-objectstorage-adminguide.fo +++ b/doc/target/docbkx/pdf/openstack-object-storage-admin/os-objectstorage-adminguide.fo @@ -1,4 +1,4 @@ -TrueCopyright © 2010, 2011 OpenStack LLC All rights reserved.OpenStack Object Storage Administrator ManualCloud API Docs PluginOpenStack Object Storage Administrator ManualTable of Contents1. Getting Started with OpenStackWhat is OpenStack?Components of OpenStackOpenStack Project Architecture OverviewCloud Provider Conceptual ArchitectureOpenStack Compute Logical ArchitectureNova Conceptual MappingWhy Cloud?2. 
Introduction to OpenStack Object StorageAccounts and Account ServersAuthentication and Access PermissionsContainers and ObjectsOperationsLanguage-Specific API Bindings3. Installing and Configuring OpenStack Object StorageSystem RequirementsInstalling OpenStack Object Storage on UbuntuBefore You BeginExample Installation ArchitectureNetwork Setup NotesGeneral Installation StepsConfiguring OpenStack Object StorageInstalling and Configuring an Auth NodeInstalling and Configuring the Proxy NodeInstalling and Configuring the Storage NodesCreate OpenStack Object Storage admin Account and Verify the InstallationAdding an Additional Proxy ServerTroubleshooting Notes4. System Administration for OpenStack Object StorageUnderstanding How Object Storage WorksConfiguring and Tuning OpenStack Object StoragePreparing the RingServer Configuration ReferenceObject Server ConfigurationContainer Server ConfigurationAccount Server ConfigurationProxy Server ConfigurationConsiderations and TuningMemcached ConsiderationsSystem TimeGeneral Service TuningFilesystem ConsiderationsGeneral System TuningLogging ConsiderationsWorking with RingsManaging Rings with the Ring BuilderAbout the Ring Data StructureList of Devices in the RingPartition Assignment ListPartition Shift ValueBuilding the RingHistory of the Ring DesignThe Account ReaperAccount Reaper Background and HistoryReplicationDatabase ReplicationObject ReplicationManaging Large Objects (Greater than 5 GB)Using swift to Manage Segmented ObjectsDirect API Management of Large ObjectsAdditional Notes on Large ObjectsLarge Object Storage History and BackgroundThrottling Resources by Setting Rate LimitsConfiguration for Rate LimitingConfiguring Object Storage with the S3 APIManaging OpenStack Object Storage with CLI SwiftSwift CLI BasicsAnalyzing Log Files with Swift CLI5. OpenStack Object Storage TutorialsStoring Large Photos or Videos on the CloudPart I: Setting Up Secure AccessPart II: Configuring CyberduckPart III: Creating Containers (Folders) and Uploading Files6. Support and TroubleshootingCommunity SupportTroubleshooting OpenStack Object StorageHandling Drive FailureHandling Server FailureDetecting Failed DrivesTroubleshooting OpenStack ComputeLog files for OpenStack ComputeCommon Errors and Fixes for OpenStack Compute OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator Manual 1.4.1 (2011-08-12)Copyright © 2010, 2011 OpenStack LLC All rights reserved.OpenStack Object Storage offers open source software for cloud-based object +TrueCopyright © 2010, 2011 OpenStack LLC All rights reserved.OpenStack Object Storage Administrator ManualCloud API Docs PluginOpenStack Object Storage Administrator ManualTable of Contents1. Getting Started with OpenStackWhat is OpenStack?Components of OpenStackOpenStack Project Architecture OverviewCloud Provider Conceptual ArchitectureOpenStack Compute Logical ArchitectureNova Conceptual MappingWhy Cloud?2. Introduction to OpenStack Object StorageAccounts and Account ServersAuthentication and Access PermissionsContainers and ObjectsOperationsLanguage-Specific API Bindings3. 
Installing and Configuring OpenStack Object StorageSystem RequirementsInstalling OpenStack Object Storage on UbuntuBefore You BeginExample Installation ArchitectureNetwork Setup NotesGeneral Installation StepsConfiguring OpenStack Object StorageInstalling and Configuring an Auth NodeInstalling and Configuring the Proxy NodeInstalling and Configuring the Storage NodesCreate OpenStack Object Storage admin Account and Verify the InstallationAdding an Additional Proxy ServerTroubleshooting Notes4. System Administration for OpenStack Object StorageUnderstanding How Object Storage WorksConfiguring and Tuning OpenStack Object StoragePreparing the RingServer Configuration ReferenceObject Server ConfigurationContainer Server ConfigurationAccount Server ConfigurationProxy Server ConfigurationConsiderations and TuningMemcached ConsiderationsSystem TimeGeneral Service TuningFilesystem ConsiderationsGeneral System TuningLogging ConsiderationsWorking with RingsManaging Rings with the Ring BuilderAbout the Ring Data StructureList of Devices in the RingPartition Assignment ListPartition Shift ValueBuilding the RingHistory of the Ring DesignThe Account ReaperAccount Reaper Background and HistoryReplicationDatabase ReplicationObject ReplicationManaging Large Objects (Greater than 5 GB)Using swift to Manage Segmented ObjectsDirect API Management of Large ObjectsAdditional Notes on Large ObjectsLarge Object Storage History and BackgroundThrottling Resources by Setting Rate LimitsConfiguration for Rate LimitingConfiguring Object Storage with the S3 APIManaging OpenStack Object Storage with CLI SwiftSwift CLI BasicsAnalyzing Log Files with Swift CLI5. OpenStack Object Storage TutorialsStoring Large Photos or Videos on the CloudPart I: Setting Up Secure AccessPart II: Configuring CyberduckPart III: Creating Containers (Folders) and Uploading Files6. Support and TroubleshootingCommunity SupportTroubleshooting OpenStack Object StorageHandling Drive FailureHandling Server FailureDetecting Failed DrivesTroubleshooting OpenStack ComputeLog files for OpenStack ComputeCommon Errors and Fixes for OpenStack Compute OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator Manual 1.4.3 (2011-09-22)Copyright © 2010, 2011 OpenStack LLC All rights reserved.OpenStack Object Storage offers open source software for cloud-based object storage for any organization. This manual provides guidance for installing, managing, and understanding the software that runs OpenStack Object Storage. Licensed under the Apache License, Version 2.0 (the "License"); @@ -10,15 +10,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 Table of Contents1. Getting Started with OpenStack What is OpenStack? Components of OpenStack OpenStack Project Architecture Overview Cloud Provider Conceptual Architecture OpenStack Compute Logical Architecture Nova Conceptual Mapping Why Cloud? 2. Introduction to OpenStack Object Storage Accounts and Account Servers Authentication and Access Permissions Containers and Objects Operations Language-Specific API Bindings 3. 
Installing and Configuring OpenStack Object Storage System Requirements Installing OpenStack Object Storage on Ubuntu Before You Begin Example Installation Architecture Network Setup Notes General Installation Steps Configuring OpenStack Object Storage Installing and Configuring an Auth Node Installing and Configuring the Proxy Node Installing and Configuring the Storage Nodes 4. System Administration for OpenStack Object Storage Understanding How Object Storage Works Configuring and Tuning OpenStack Object Storage Preparing the Ring Server Configuration Reference Object Server Configuration Container Server Configuration Account Server Configuration Proxy Server Configuration Considerations and Tuning Memcached Considerations System Time General Service Tuning Filesystem Considerations General System Tuning Logging Considerations Working with Rings The Account Reaper Replication Database Replication Object Replication Managing Large Objects (Greater than 5 GB) Using swift to Manage Segmented Objects Direct API Management of Large Objects Additional Notes on Large Objects Large Object Storage History and Background Throttling Resources by Setting Rate Limits Configuration for Rate Limiting Configuring Object Storage with the S3 API Managing OpenStack Object Storage with CLI Swift Swift CLI Basics Analyzing Log Files with Swift CLI 5. OpenStack Object Storage Tutorials Storing Large Photos or Videos on the Cloud Part I: Setting Up Secure Access Part II: Configuring Cyberduck Part III: Creating Containers (Folders) and Uploading Files 6. Support and Troubleshooting Community Support Troubleshooting OpenStack Object Storage Handling Drive Failure Handling Server Failure Detecting Failed Drives Troubleshooting OpenStack Compute Log files for OpenStack Compute Common Errors and Fixes for OpenStack Compute OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 List of Figures5.1. Example Cyberduck Swift Connection 5.2. Example Cyberduck Swift Showing Uploads OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 List of Tables4.1. object-server.conf Default Options in the [DEFAULT] section 4.2. object-server.conf Server Options in the [object-server] section 4.3. object-server.conf Replicator Options in the [object-replicator] section 4.4. object-server.conf Updater Options in the [object-updater] section 4.5. object-server.conf Auditor Options in the [object-auditor] section 4.6. container-server.conf Default Options in the [DEFAULT] section 4.7. container-server.conf Server Options in the [container-server] section 4.8. container-server.conf Replicator Options in the [container-replicator] section 4.9. container-server.conf Updater Options in the [container-updater] section 4.10. container-server.conf Auditor Options in the [container-auditor] section 4.11. account-server.conf Default Options in the [DEFAULT] section 4.12. account-server.conf Server Options in the [account-server] section 4.13. account-server.conf Replicator Options in the [account-replicator] section 4.14. account-server.conf Auditor Options in the [account-auditor] section 4.15. account-server.conf Reaper Options in the [account-reaper] section 4.16. proxy-server.conf Default Options in the [DEFAULT] section 4.17. 
proxy-server.conf Server Options in the [proxy-server] section 4.18. proxy-server.conf Paste.deploy Options in the [filter:swauth] section 4.19. List of Devices and Keys 4.20. Configuration options for rate limiting in proxy-server.conf - file 4.21. Values for Rate Limiting with Sample Configuration Settings OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 1. Getting Started with OpenStackOpenStack is a collection of open source technology that provides massively scalable open + OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 Table of Contents1. Getting Started with OpenStack What is OpenStack? Components of OpenStack OpenStack Project Architecture Overview Cloud Provider Conceptual Architecture OpenStack Compute Logical Architecture Nova Conceptual Mapping Why Cloud? 2. Introduction to OpenStack Object Storage Accounts and Account Servers Authentication and Access Permissions Containers and Objects Operations Language-Specific API Bindings 3. Installing and Configuring OpenStack Object Storage System Requirements Installing OpenStack Object Storage on Ubuntu Before You Begin Example Installation Architecture Network Setup Notes General Installation Steps Configuring OpenStack Object Storage Installing and Configuring an Auth Node Installing and Configuring the Proxy Node Installing and Configuring the Storage Nodes 4. System Administration for OpenStack Object Storage Understanding How Object Storage Works Configuring and Tuning OpenStack Object Storage Preparing the Ring Server Configuration Reference Object Server Configuration Container Server Configuration Account Server Configuration Proxy Server Configuration Considerations and Tuning Memcached Considerations System Time General Service Tuning Filesystem Considerations General System Tuning Logging Considerations Working with Rings The Account Reaper Replication Database Replication Object Replication Managing Large Objects (Greater than 5 GB) Using swift to Manage Segmented Objects Direct API Management of Large Objects Additional Notes on Large Objects Large Object Storage History and Background Throttling Resources by Setting Rate Limits Configuration for Rate Limiting Configuring Object Storage with the S3 API Managing OpenStack Object Storage with CLI Swift Swift CLI Basics Analyzing Log Files with Swift CLI 5. OpenStack Object Storage Tutorials Storing Large Photos or Videos on the Cloud Part I: Setting Up Secure Access Part II: Configuring Cyberduck Part III: Creating Containers (Folders) and Uploading Files 6. Support and Troubleshooting Community Support Troubleshooting OpenStack Object Storage Handling Drive Failure Handling Server Failure Detecting Failed Drives Troubleshooting OpenStack Compute Log files for OpenStack Compute Common Errors and Fixes for OpenStack Compute OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 List of Figures5.1. Example Cyberduck Swift Connection 5.2. 
Example Cyberduck Swift Showing Uploads OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 List of Tables4.1. object-server.conf Default Options in the [DEFAULT] section 4.2. object-server.conf Server Options in the [object-server] section 4.3. object-server.conf Replicator Options in the [object-replicator] section 4.4. object-server.conf Updater Options in the [object-updater] section 4.5. object-server.conf Auditor Options in the [object-auditor] section 4.6. container-server.conf Default Options in the [DEFAULT] section 4.7. container-server.conf Server Options in the [container-server] section 4.8. container-server.conf Replicator Options in the [container-replicator] section 4.9. container-server.conf Updater Options in the [container-updater] section 4.10. container-server.conf Auditor Options in the [container-auditor] section 4.11. account-server.conf Default Options in the [DEFAULT] section 4.12. account-server.conf Server Options in the [account-server] section 4.13. account-server.conf Replicator Options in the [account-replicator] section 4.14. account-server.conf Auditor Options in the [account-auditor] section 4.15. account-server.conf Reaper Options in the [account-reaper] section 4.16. proxy-server.conf Default Options in the [DEFAULT] section 4.17. proxy-server.conf Server Options in the [proxy-server] section 4.18. proxy-server.conf Paste.deploy Options in the [filter:swauth] section 4.19. List of Devices and Keys 4.20. Configuration options for rate limiting in proxy-server.conf + file 4.21. Values for Rate Limiting with Sample Configuration Settings OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 1. Getting Started with OpenStackOpenStack is a collection of open source technology that provides massively scalable open source cloud computing software. Currently OpenStack develops two related projects: OpenStack Compute, which offers computing power through virtual machine and network management, and OpenStack Object Storage which is software for redundant, scalable object storage capacity. Closely related to the OpenStack Compute project is the Image Service project, named Glance. OpenStack can be used by corporations, service providers, VARS, SMBs, researchers, and global data centers looking to deploy large-scale cloud deployments for - private or public clouds. What is OpenStack?What is OpenStack?OpenStack offers open source software to build public and private clouds. OpenStack is + private or public clouds. What is OpenStack?What is OpenStack?OpenStack offers open source software to build public and private clouds. OpenStack is a community and a project as well as open source software to help organizations run clouds for virtual computing or storage. OpenStack contains a collection of open source projects that are community-maintained including OpenStack Compute (code-named Nova), @@ -33,7 +33,7 @@ during the on-ramp process. Because the project is so new and constantly changing, be aware of the revision time for all information. If you are reading a document that is a few months old and you feel that it isn't entirely accurate, then please let us know - through the mailing list at https://launchpad.net/~openstack so it can be updated or removed. 
Components of OpenStackComponents of OpenStackThere are currently three main components of OpenStack: Compute, Object Storage, and + through the mailing list at https://launchpad.net/~openstack so it can be updated or removed. Components of OpenStackComponents of OpenStackThere are currently three main components of OpenStack: Compute, Object Storage, and Image Service. Let's look at each in turn.OpenStack Compute is a cloud fabric controller, used to start up virtual instances for either a user or a group. It's also used to configure networking for each instance or project that contains multiple instances for a particular project. OpenStack Object Storage is a system to store objects in a massively scalable large @@ -46,23 +46,23 @@ It can be configured in three ways: using OpenStack Object Store to store images; using Amazon's Simple Storage Solution (S3) storage directly; or using S3 storage with Object Store as the intermediate for S3 access.The following diagram shows the basic relationships between the projects, how they - relate to each other, and how they can fulfill the goals of open source cloud computing. OpenStack Project Architecture OverviewOpenStack Project Architecture Overviewby Ken PeppleBefore we dive into the conceptual and logic architecture, let’s take a second to explain the OpenStack project: OpenStack is a collection of open source technologies delivering a massively scalable cloud operating system.You can think of it as software to power your own Infrastructure as a Service (IaaS) offering like Amazon Web Services. It currently encompasses three main projects:Swift which provides object/blob storage. This is roughly analogous to Rackspace Cloud Files (from which it is derived) or Amazon S3.Glance which provides discovery, storage and retrieval of virtual machine images for OpenStack Nova.Nova which provides virtual servers upon + relate to each other, and how they can fulfill the goals of open source cloud computing. OpenStack Project Architecture OverviewOpenStack Project Architecture Overviewby Ken PeppleBefore we dive into the conceptual and logic architecture, let’s take a second to explain the OpenStack project: OpenStack is a collection of open source technologies delivering a massively scalable cloud operating system.You can think of it as software to power your own Infrastructure as a Service (IaaS) offering like Amazon Web Services. It currently encompasses three main projects:Swift which provides object/blob storage. This is roughly analogous to Rackspace Cloud Files (from which it is derived) or Amazon S3.Glance which provides discovery, storage and retrieval of virtual machine images for OpenStack Nova.Nova which provides virtual servers upon demand. This is similar to Rackspace Cloud Servers or Amazon EC2.While these three projects provide the core of the cloud infrastructure, OpenStack is open and evolving — there will be more projects (there are already related projects for web interfaces and a queue service). With that brief introduction, let’s delve into a conceptual architecture and then - examine how OpenStack Compute could map to it. Cloud Provider Conceptual ArchitectureCloud Provider Conceptual ArchitectureKen, PeppleImagine that we are going to build our own IaaS cloud and offer it to customers. 
To achieve this, we would need to provide several high level features:1.Allow application owners to register for our cloud services, view their usage and see their bill (basic customer relations management functionality)2.Allow Developers/DevOps folks to create and store custom images for their applications (basic build-time functionality)3.Allow DevOps/Developers to launch, monitor and terminate instances (basic run-time functionality)4.Allow the Cloud Operator to configure and operate the cloud infrastructureWhile there are certainly many, many other features that we would need to offer (especially if we were to follow are more complete industry framework like eTOM), these four get to the very heart of providing IaaS. Now assuming that you agree with these four top level features, you might put together a conceptual architecture that looks something like this:In this model, I’ve imagined four sets of users (developers, devops, owners and operators) + examine how OpenStack Compute could map to it. Cloud Provider Conceptual ArchitectureCloud Provider Conceptual ArchitectureKen, PeppleImagine that we are going to build our own IaaS cloud and offer it to customers. To achieve this, we would need to provide several high level features:1.Allow application owners to register for our cloud services, view their usage and see their bill (basic customer relations management functionality)2.Allow Developers/DevOps folks to create and store custom images for their applications (basic build-time functionality)3.Allow DevOps/Developers to launch, monitor and terminate instances (basic run-time functionality)4.Allow the Cloud Operator to configure and operate the cloud infrastructureWhile there are certainly many, many other features that we would need to offer (especially if we were to follow are more complete industry framework like eTOM), these four get to the very heart of providing IaaS. Now assuming that you agree with these four top level features, you might put together a conceptual architecture that looks something like this:In this model, I’ve imagined four sets of users (developers, devops, owners and operators) that need to interact with the cloud and then separated out the functionality needed for each. From there, I’ve followed a pretty common tiered approach to the architecture (presentation, logic and resources) with two orthogonal areas (integration and management). Let’s explore each a little further: As with presentation layers in more typical application architectures, components here interact with users to accept and present information. In this layer, you will find web portals to provide graphical interfaces for non-developers and API endpoints for developers. For more advanced architectures, you might find load balancing, console proxies, security and naming services present here also.The logic tier would provide the intelligence and control functionality for our cloud. This tier would house orchestration (workflow for complex tasks), scheduling (determining mapping of jobs to resources), policy (quotas and such) , image registry (metadata about instance images), logging (events and metering). There will need to integration functions within the architecture. It is assumed that most service providers will already have a customer identity and billing systems. Any cloud architecture would need to integrate with these systems.As with any complex environment, we will need a management tier to operate the environment. 
This should include an API to access the cloud administration features as well as some forms of monitoring. It is likely that the monitoring functionality will take the form of integration into an existing tool. While I’ve highlighted monitoring and an admin API for our fictional provider, in a more complete architecture you would see a vast array of operational support functions like provisioning and configuration management.Finally, since this is a compute cloud, we will need actual compute, network and storage resources to provide to our customers. This tier provides these services, whether they be servers, network switches, network attached storage or other resources.With this model in place, let’s shift gears and look at OpenStack Compute’s logical - architecture.OpenStack Compute Logical ArchitectureOpenStack Compute Logical ArchitectureNow that we’ve looked at a proposed conceptual architecture, let’s see how OpenStack Compute - is logically architected. Since Cactus is the newest release, I will concentrate - there (which means if you are viewing this after around July 2011, this will be out - of date). There are several logical components of OpenStack Compute architecture but + architecture.OpenStack Compute Logical ArchitectureOpenStack Compute Logical ArchitectureNow that we’ve looked at a proposed conceptual architecture, let’s see how OpenStack Compute + is logically architected. At the time of this writing, Cactus was the newest release + (which means if you are viewing this after around July 2011, this may be out of + date). There are several logical components of OpenStack Compute architecture but the majority of these components are custom written python daemons of two - varieties:WSGI applications to receive and mediate API calls (nova-api, glance-api, etc.)Worker daemons to carry out orchestration tasks (nova-compute, nova-network, nova-schedule, etc.)However, there are two essential pieces of the logical architecture are neither custom written nor Python based: the messaging queue and the database. These two components facilitate the asynchronous orchestration of complex tasks through message passing and information sharing. Putting this all together we get a picture like this:This complicated, but not overly informative, diagram as it can be summed up in three sentences:End users (DevOps, Developers and even other OpenStack components) talk to + varieties:WSGI applications to receive and mediate API calls (nova-api, glance-api, etc.)Worker daemons to carry out orchestration tasks (nova-compute, nova-network, nova-schedule, etc.)However, there are two essential pieces of the logical architecture are neither custom written nor Python based: the messaging queue and the database. These two components facilitate the asynchronous orchestration of complex tasks through message passing and information sharing. 
Putting this all together we get a picture like this:This complicated, but not overly informative, diagram as it can be summed up in three sentences:End users (DevOps, Developers and even other OpenStack components) talk to nova-api to interface with OpenStack ComputeOpenStack Compute daemons exchange info through the queue (actions) and database (information) to carry out API requestsOpenStack Glance is basically a completely separate infrastructure which OpenStack Compute interfaces through the Glance APINow that we see the overview of the processes and their interactions, let’s take a closer look at each component.The nova-api daemon is the heart of the OpenStack Compute. You may see it @@ -106,7 +106,7 @@ application.This logical architecture represents just one way to architect OpenStack Compute. With its pluggable architecture, we could easily swap out OpenStack Glance with another image service or use another dashboard. In the coming releases of OpenStack, expect to see - more modularization of the code especially in the network and volume areas.Nova Conceptual MappingNova Conceptual MappingNow that we’ve seen a conceptual architecture for a fictional cloud provider and examined the logical architecture of OpenStack Nova, it is fairly easy to map the OpenStack components to the conceptual areas to see what we are lacking:As you can see from the illustration, I’ve overlaid logical components of OpenStack Nova, Glance and Dashboard to denote functional coverage. For each of the overlays, I’ve added the name of the logical component within the project that provides the functionality. While all of these judgements are highly subjective, you can see that we have a majority coverage of the functional areas with a few notable exceptions:The largest gap in our functional coverage is logging and billing. At the moment, OpenStack Nova doesn’t have a billing component that can mediate logging events, rate the logs and create/present bills. That being said, most service providers will already have one (or many) of these so the focus is really on the logging and integration with billing. This could be remedied in a variety of ways: augmentations of the code (which should happen in the next release “Diablo”), integration with commercial products or services (perhaps Zuora) or custom log parsing. Identity is also a point which will likely need to be augmented. Unless we are running a stock + more modularization of the code especially in the network and volume areas.Nova Conceptual MappingNova Conceptual MappingNow that we’ve seen a conceptual architecture for a fictional cloud provider and examined the logical architecture of OpenStack Nova, it is fairly easy to map the OpenStack components to the conceptual areas to see what we are lacking:As you can see from the illustration, I’ve overlaid logical components of OpenStack Nova, Glance and Dashboard to denote functional coverage. For each of the overlays, I’ve added the name of the logical component within the project that provides the functionality. While all of these judgements are highly subjective, you can see that we have a majority coverage of the functional areas with a few notable exceptions:The largest gap in our functional coverage is logging and billing. At the moment, OpenStack Nova doesn’t have a billing component that can mediate logging events, rate the logs and create/present bills. That being said, most service providers will already have one (or many) of these so the focus is really on the logging and integration with billing. 
This could be remedied in a variety of ways: augmentations of the code (which should happen in the next release “Diablo”), integration with commercial products or services (perhaps Zuora) or custom log parsing. Identity is also a point which will likely need to be augmented. Unless we are running a stock LDAP for our identity system, we will need to integrate our solution with OpenStack Compute. Having said that, this is true of almost all cloud solutions.The customer portal will also be an integration point. While OpenStack Compute provides a user @@ -133,7 +133,8 @@ are distributed schedulers and schedulers that understand heterogeneous hosts (for support of GPUs and differing CPU architectures).As you can see, OpenStack Compute provides a fair basis for our mythical service provider, as long as the mythical service providers are willing to do some integration here and - there. Why Cloud?Why Cloud?In data centers today, many computers suffer the same underutilization in computing + there. Note that since the time of this writing, OpenStack Identity Service has been + added.Why Cloud?Why Cloud?In data centers today, many computers suffer the same underutilization in computing power and networking bandwidth. For example, projects may need a large amount of computing capacity to complete a computation, but no longer need the computing power after completing the computation. You want cloud computing when you want a service @@ -144,17 +145,17 @@ diagrams contains the services that afford computing power harnessed to get work done. Much like the electrical power we receive each day, cloud computing provides subscribers or users with access to a shared collection of computing resources: networks for - transfer, servers for storage, and applications or services for completing tasks. These are the compelling features of a cloud:On-demand self-service: Users can provision servers and networks with little - human intervention. Network access: Any computing capabilities are available over the network. - Many different devices are allowed access through standardized mechanisms. Resource pooling: Multiple users can access clouds that serve other consumers - according to demand. Elasticity: Provisioning is rapid and scales out or in based on need. Metered or measured service: Just like utilities that are paid for by the + transfer, servers for storage, and applications or services for completing tasks. These are the compelling features of a cloud:On-demand self-service: Users can provision servers and networks with little + human intervention. Network access: Any computing capabilities are available over the network. + Many different devices are allowed access through standardized mechanisms. Resource pooling: Multiple users can access clouds that serve other consumers + according to demand. Elasticity: Provisioning is rapid and scales out or in based on need. Metered or measured service: Just like utilities that are paid for by the hour, clouds should optimize resource use and control it for the level of service or type of servers such as storage or processing.Cloud computing offers different service models depending on the capabilities a - consumer may require. SaaS: Software as a Service. Provides the consumer the ability to use the software - in a cloud environment, such as web-based email for example. PaaS: Platform as a Service. Provides the consumer the ability to deploy + consumer may require. SaaS: Software as a Service. 
Provides the consumer the ability to use the software + in a cloud environment, such as web-based email for example. PaaS: Platform as a Service. Provides the consumer the ability to deploy applications through a programming language or tools supported by the cloud platform provider. An example of platform as a service is an Eclipse/Java programming - platform provided with no downloads required. IaaS: Infrastructure as a Service. Provides infrastructure such as computer + platform provided with no downloads required. IaaS: Infrastructure as a Service. Provides infrastructure such as computer instances, network connections, and storage so that people can run any software or operating system. When you hear terms such as public cloud or private cloud, these refer to the deployment model for the cloud. A private cloud operates for a single organization, but @@ -173,18 +174,18 @@ drives on each users's desktop and enabling access to huge data storage capacity online in the cloud. For a more detailed discussion of cloud computing's essential characteristics and its models of service and deployment, see http://www.nist.gov/itl/cloud/, published by the US - National Institute of Standards and Technology. OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 2. Introduction to OpenStack Object StorageOpenStack Object Storage is a scalable object storage system - it is not a file system in + National Institute of Standards and Technology. OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 2. Introduction to OpenStack Object StorageOpenStack Object Storage is a scalable object storage system - it is not a file system in the traditional sense. You will not be able to mount this system like traditional SAN or NAS volumes. Since OpenStack Object Storage is a different way of thinking when it comes to - storage, take a few moments to review the key concepts listed below.Accounts and Account ServersAccounts and Account ServersThe OpenStack Object Storage system is designed to be used by many different storage + storage, take a few moments to review the key concepts listed below.Accounts and Account ServersAccounts and Account ServersThe OpenStack Object Storage system is designed to be used by many different storage consumers or customers. Each user must identify themselves using an authentication system.Nodes that run the Account service are a separate concept from individual accounts. Account servers are part of the storage system and must be configured along with - Container servers and Object servers.Authentication and Access PermissionsAuthentication and Access PermissionsYou must authenticate against an Authentication service to receive OpenStack Object + Container servers and Object servers.Authentication and Access PermissionsAuthentication and Access PermissionsYou must authenticate against an Authentication service to receive OpenStack Object Storage connection parameters and an authentication token. The token must be passed in for all subsequent container/object operations. 
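For example, a typical token exchange over the ReST API looks like the following. This is a sketch only, assuming a swauth-style /auth/v1.0 endpoint such as the one described below; the host name, port, account, user, and key are placeholders:

$> curl -k -v -H 'X-Auth-User: myaccount:myuser' -H 'X-Auth-Key: mykey' https://auth.example.com:8080/auth/v1.0

The response headers include X-Storage-Url and X-Auth-Token; every later container and object request is then sent to that storage URL with the token attached:

$> curl -k -v -H 'X-Auth-Token: <token>' <storage-url>/mycontainer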
One authentication service that you can use as a middleware example is called swauth and you can download it from - https://github.com/gholt/swauth.NoteTypically the language-specific APIs handle authentication, token passing, and HTTPS + https://github.com/gholt/swauth. You can also integrate with the OpenStack Identity Service, code-named Keystone, which you can download from https://github.com/openstack/keystone. NoteTypically the language-specific APIs handle authentication, token passing, and HTTPS request/response communication.You can implement access control for objects either for users or accounts using X-Container-Read: accountname and X-Container-Write: accountname:username, which allows any user from the accountname account to read but only allows the username user from the @@ -198,7 +199,7 @@ that account. Users must authenticate with their credentials as described above, but once authenticated they can create/delete containers and objects within that account. The only way a user can access the content from another account is if they share an API - access key or a session token provided by your authentication system.Containers and ObjectsContainers and ObjectsA container is a storage compartment for your data and provides a way for you to + access key or a session token provided by your authentication system.Containers and ObjectsContainers and ObjectsA container is a storage compartment for your data and provides a way for you to organize your data. You can think of a container as a folder in Windows® or a directory in UNIX®. The primary difference between a container and these other file system concepts is that containers cannot be nested. You can, however, create an unlimited number of containers @@ -219,11 +220,11 @@ rather than the expected 16.The maximum allowable size for a storage object upon upload is 5 gigabytes (GB) and the minimum is zero bytes. You can use the built-in large object support and the swift utility to retrieve objects larger than 5 GB. For metadata, you should not exceed 90 individual key/value pairs for any one object - and the total byte length of all key/value pairs should not exceed 4KB (4096 bytes).OperationsOperationsOperations are the actions you perform within an OpenStack Object Storage system such + and the total byte length of all key/value pairs should not exceed 4KB (4096 bytes).OperationsOperationsOperations are the actions you perform within an OpenStack Object Storage system such as creating or deleting containers, uploading or downloading objects, and so on. The full list of operations is documented in the Developer Guide. Operations may be performed via the ReST web service API or a language-specific API; currently, we support - Python, PHP, Java, Ruby, and C#/.NET.Important All operations must include a valid authorization token from your authorization system. Language-Specific API BindingsLanguage-Specific API BindingsA set of supported API bindings in several popular languages are available from the + Python, PHP, Java, Ruby, and C#/.NET.Important All operations must include a valid authorization token from your authorization system. Language-Specific API BindingsLanguage-Specific API BindingsA set of supported API bindings in several popular languages are available from the Rackspace Cloud Files product, which uses OpenStack Object Storage code for its implementation. 
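To make the container ACL headers described above concrete, here is a sketch using curl; the token and storage URL placeholders are the ones returned by your authentication service, and mycontainer stands in for any container name. The headers are applied with a POST to the container:
 # Allow any user in accountname to read, but only username in that account to write
 curl -k -v -X POST -H 'X-Auth-Token: <token-from-x-auth-token-above>' -H 'X-Container-Read: accountname' -H 'X-Container-Write: accountname:username' <url-from-x-storage-url-above>/mycontainer
The language bindings described next wrap these same requests.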
These bindings provide a layer of abstraction on top of the base ReST API, allowing programmers to work with a container and object model instead of working @@ -236,9 +237,9 @@ cloudfiles@rackspacecloud.com. Just make sure to indicate which language and version you modified and send a unified diff. Each binding includes its own documentation (either HTML, PDF, or CHM). They also include code snippets and examples to help you get started. The currently supported API - binding for OpenStack Object Storage are:PHP (requires 5.x and the modules: cURL, FileInfo, mbstring)Python (requires 2.4 or newer)Java (requires JRE v1.5 or newer)C#/.NET (requires .NET Framework v3.5)Ruby (requires 1.8 or newer and mime-tools module)There are no other supported language-specific bindings at this time. You are welcome + binding for OpenStack Object Storage are:PHP (requires 5.x and the modules: cURL, FileInfo, mbstring)Python (requires 2.4 or newer)Java (requires JRE v1.5 or newer)C#/.NET (requires .NET Framework v3.5)Ruby (requires 1.8 or newer and mime-tools module)There are no other supported language-specific bindings at this time. You are welcome to create your own language API bindings and we can help answer any questions during - development, host your code if you like, and give you full credit for your work. OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 3. Installing and Configuring OpenStack Object StorageSystem RequirementsSystem RequirementsHardware: OpenStack Object Storage specifically is designed + development, host your code if you like, and give you full credit for your work. OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 3. Installing and Configuring OpenStack Object StorageSystem RequirementsSystem RequirementsHardware: OpenStack Object Storage specifically is designed to run on commodity hardware. At Rackspace, our storage servers are currently running fairly generic 4U servers with 24 2T SATA drives and 8 cores of processing power. RAID on the storage drives is not required and @@ -252,23 +253,23 @@ SQLite database is part of the OpenStack Object Storage container and account management process.Permissions: You can install OpenStack Object Storage either as root or as a user with sudo permissions if you configure - the sudoers file to enable all the permissions.Installing OpenStack Object Storage on UbuntuInstalling OpenStack Object Storage on UbuntuThough you can install OpenStack Object Storage for development or testing purposes on a single server, a multiple-server installation enables the high availability and redundancy you want in a production distributed object storage system.If you would like to perform a single node installation on Ubuntu for + the sudoers file to enable all the permissions.Installing OpenStack Object Storage on UbuntuInstalling OpenStack Object Storage on UbuntuThough you can install OpenStack Object Storage for development or testing purposes on a single server, a multiple-server installation enables the high availability and redundancy you want in a production distributed object storage system.If you would like to perform a single node installation on Ubuntu for development purposes from source code, use the Swift All In One instructions. 
See - http://swift.openstack.org/development_saio.html.Before You BeginBefore You BeginHave a copy of the Ubuntu Server 10.04 LTS installation media on hand + http://swift.openstack.org/development_saio.html.Before You BeginBefore You BeginHave a copy of the Ubuntu Server 10.04 LTS installation media on hand if you are installing on a new server. This document demonstrates installing a cluster using the following - types of nodes:One Proxy node which runs the swift-proxy-server + types of nodes:One Proxy node which runs the swift-proxy-server processes and may also run optional swauth services. It serves proxy requests to the appropriate Storage - nodes.Five Storage nodes that run the swift-account-server, + nodes.Five Storage nodes that run the swift-account-server, swift-container-server, and swift-object-server processes which control storage of the account databases, the container databases, as well as the - actual stored objects.NoteFewer Storage nodes can be used initially, but a minimum of 5 - is recommended for a production cluster.Example Installation ArchitectureExample Installation Architecturenode - a host machine running one or more OpenStack - Object Storage servicesProxy node - node that runs Proxy servicesAuth node - an optional node that runs the Auth - service separately from the Proxy servicesStorage node - node that runs Account, Container, and - Object servicesring - a set of mappings of OpenStack Object Storage + actual stored objects.NoteFewer Storage nodes can be used initially, but a minimum of 5 + is recommended for a production cluster.Example Installation ArchitectureExample Installation Architecturenode - a host machine running one or more OpenStack + Object Storage servicesProxy node - node that runs Proxy servicesAuth node - an optional node that runs the Auth + service separately from the Proxy servicesStorage node - node that runs Account, Container, and + Object servicesring - a set of mappings of OpenStack Object Storage data to physical devicesTo increase reliability, you may want to add additional Proxy servers for performance. This document describes each Storage node as a separate zone in the ring. It is recommended to have a minimum of 5 zones. A zone is a @@ -276,26 +277,26 @@ (separate servers, network, power, even geography). The ring guarantees that every replica is stored in a separate zone. This diagram shows one possible configuration for a minimal - installation.Network Setup NotesNetwork Setup NotesThis document refers to two networks. An external network for + installation.Network Setup NotesNetwork Setup NotesThis document refers to two networks. An external network for connecting to the Proxy server, and a storage network that is not accessible from outside the cluster, to which all of the nodes are connected. 
All of the OpenStack Object Storage services, as well as the rsync daemon on the Storage nodes are configured to listen on - their STORAGE_LOCAL_NET IP addresses.General Installation Steps General Installation Steps 1.Install the baseline Ubuntu Server 10.04 LTS on all - nodes.2.Install common OpenStack Object Storage software and + their STORAGE_LOCAL_NET IP addresses.General Installation Steps General Installation Steps 1.Install the baseline Ubuntu Server 10.04 LTS on all + nodes.2.Install common OpenStack Object Storage software and pre-requisites: - apt-get install python-software-properties + apt-get install python-software-properties add-apt-repository ppa:swift-core/ppa apt-get update apt-get install swift openssh-server - Configuring OpenStack Object Storage Configuring OpenStack Object Storage 1.Create and populate configuration directories on all nodes:mkdir -p /etc/swift -chown -R swift:swift /etc/swift/2.Create /etc/swift/swift.conf:[swift-hash] + Configuring OpenStack Object Storage Configuring OpenStack Object Storage 1.Create and populate configuration directories on all nodes:mkdir -p /etc/swift +chown -R swift:swift /etc/swift/2.Create /etc/swift/swift.conf:[swift-hash] # random unique string that can never change, keep it secret and do NOT lose it swift_hash_path_suffix = changeme - NoteThe suffix value in /etc/swift/swift.conf should be set to some random + NoteThe suffix value in /etc/swift/swift.conf should be set to some random string of text to be used as a salt when hashing to determine mappings in the ring. This file should be the same on every node in - the cluster!Installing and Configuring an Auth NodeInstalling and Configuring an Auth NodeThere are options for running an authorization node to authorize requests against a swift + the cluster!Installing and Configuring an Auth NodeInstalling and Configuring an Auth NodeThere are options for running an authorization node to authorize requests against a swift cluster. Swauth is one implementation, an auth service for Swift as WSGI middleware that uses Swift itself as a backing store. Swauth, the example authorization system that was bundled with the Cactus @@ -303,22 +304,22 @@ swift_hash_path_suffix = changeme Swift, at https://github.com/gholt/swauth. You can install it on the proxy server, or on a separate server, but you need to point to swauth from the proxy-server.conf file in the following - line:[filter:swauth] -use = egg:swauth#swauth In the Diablo release, the Keystone project at http://github.com/rackspace/keystone should become the auth standard for OpenStack, but swauth may be used as an alternative.Installing and Configuring the Proxy NodeInstalling and Configuring the Proxy NodeThe proxy server takes each request and looks up locations for the + line:[filter:swauth] +use = egg:swauth#swauth In the Diablo release, the Keystone project at http://github.com/rackspace/keystone should become the auth standard for OpenStack, but swauth may be used as an alternative.Installing and Configuring the Proxy NodeInstalling and Configuring the Proxy NodeThe proxy server takes each request and looks up locations for the account, container, or object and routes the requests correctly. The proxy server also handles API requests. You enable account - management by configuring it in the proxy-server.conf file. 
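Account management is governed by the allow_account_management option, which appears in the proxy-server.conf reference tables later in this guide. A minimal, illustrative fragment (assuming the usual [app:proxy-server] application section) might look like:
 [app:proxy-server]
 use = egg:swift#proxy
 allow_account_management = true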
NoteIt is assumed that all commands are run as the root user.1.Install swift-proxy service:apt-get install swift-proxy memcached2.Create self-signed cert for SSL: - cd /etc/swift + management by configuring it in the proxy-server.conf file. NoteIt is assumed that all commands are run as the root user.1.Install swift-proxy service:apt-get install swift-proxy memcached2.Create self-signed cert for SSL: + cd /etc/swift openssl req -new -x509 -nodes -out cert.crt -keyout cert.key - 3.Modify memcached to listen on the default interfaces. + 3.Modify memcached to listen on the default interfaces. Preferably this should be on a local, non-public network. Edit the following line in /etc/memcached.conf, changing: - -l 127.0.0.1 + -l 127.0.0.1 to -l <PROXY_LOCAL_NET_IP> - 4.Restart the memcached server:service memcached restart5.Create /etc/swift/proxy-server.conf: - [DEFAULT] + 4.Restart the memcached server:service memcached restart5.Create /etc/swift/proxy-server.conf: + [DEFAULT] # Enter these next two values if using SSL certifications cert_file = /etc/swift/cert.crt key_file = /etc/swift/cert.key @@ -346,48 +347,48 @@ use = egg:swift#healthcheck [filter:cache] use = egg:swift#memcache memcache_servers = <PROXY_LOCAL_NET_IP>:11211 - NoteIf you run multiple memcache servers, put the multiple + NoteIf you run multiple memcache servers, put the multiple IP:port listings in the [filter:cache] section of the proxy-server.conf file like: - 10.1.2.3:11211,10.1.2.4:11211Only - the proxy server uses memcache.6.Create the account, container and object rings: - cd /etc/swift + 10.1.2.3:11211,10.1.2.4:11211Only + the proxy server uses memcache.6.Create the account, container and object rings: + cd /etc/swift swift-ring-builder account.builder create 18 3 1 swift-ring-builder container.builder create 18 3 1 swift-ring-builder object.builder create 18 3 1 - 7.For every storage device on each node add entries to each + 7.For every storage device on each node add entries to each ring: - swift-ring-builder account.builder add z<ZONE>-<STORAGE_LOCAL_NET_IP>:6002/<DEVICE> 100 + swift-ring-builder account.builder add z<ZONE>-<STORAGE_LOCAL_NET_IP>:6002/<DEVICE> 100 swift-ring-builder container.builder add z<ZONE>-<STORAGE_LOCAL_NET_IP_1>:6001/<DEVICE> 100 swift-ring-builder object.builder add z<ZONE>-<STORAGE_LOCAL_NET_IP_1>:6000/<DEVICE> 100 For example, if you were setting up a storage node with a partition of /dev/sdb1 in Zone 1 on IP 10.0.0.1, the DEVICE would be sdb1 and the commands would look like: - swift-ring-builder account.builder add z1-10.0.0.1:6002/sdb1 100 + swift-ring-builder account.builder add z1-10.0.0.1:6002/sdb1 100 swift-ring-builder container.builder add z1-10.0.0.1:6001/sdb1 100 -swift-ring-builder object.builder add z1-10.0.0.1:6000/sdb1 100NoteAssuming there are 5 zones with 1 node per zone, ZONE +swift-ring-builder object.builder add z1-10.0.0.1:6000/sdb1 100NoteAssuming there are 5 zones with 1 node per zone, ZONE should start at 1 and increment by one for each - additional node.8.Verify the ring contents for each ring: - swift-ring-builder account.builder + additional node.8.Verify the ring contents for each ring: + swift-ring-builder account.builder swift-ring-builder container.builder swift-ring-builder object.builder - 9.Rebalance the rings: - swift-ring-builder account.builder rebalance + 9.Rebalance the rings: + swift-ring-builder account.builder rebalance swift-ring-builder container.builder rebalance swift-ring-builder object.builder rebalance - NoteRebalancing rings can take 
some time.10.Copy the account.ring.gz, container.ring.gz, and + NoteRebalancing rings can take some time.10.Copy the account.ring.gz, container.ring.gz, and object.ring.gz files to each of the Proxy and Storage nodes - in /etc/swift.11.Make sure all the config files are owned by the swift - user:chown -R swift:swift /etc/swift12.Start Proxy services:swift-init proxy startInstalling and Configuring the Storage NodesInstalling and Configuring the Storage NodesNoteOpenStack Object Storage should work on any modern filesystem that supports + in /etc/swift.11.Make sure all the config files are owned by the swift + user:chown -R swift:swift /etc/swift12.Start Proxy services:swift-init proxy startInstalling and Configuring the Storage NodesInstalling and Configuring the Storage NodesNoteOpenStack Object Storage should work on any modern filesystem that supports Extended Attributes (XATTRS). We currently recommend XFS as it demonstrated the best overall performance for the swift use case after considerable testing and benchmarking at Rackspace. It is also the -only filesystem that has been thoroughly tested.1.Install Storage node packages:apt-get install swift-account swift-container swift-object xfsprogs2.For every device on the node, setup the XFS volume (/dev/sdb is used -as an example):fdisk /dev/sdb (set up a single partition) +only filesystem that has been thoroughly tested.1.Install Storage node packages:apt-get install swift-account swift-container swift-object xfsprogs2.For every device on the node, setup the XFS volume (/dev/sdb is used +as an example):fdisk /dev/sdb (set up a single partition) mkfs.xfs -i size=1024 /dev/sdb1 echo "/dev/sdb1 /srv/node/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab mkdir -p /srv/node/sdb1 mount /srv/node/sdb1 -chown -R swift:swift /srv/node3.Create /etc/rsyncd.conf:uid = swift +chown -R swift:swift /srv/node3.Create /etc/rsyncd.conf:uid = swift gid = swift log file = /var/log/rsyncd.log pid file = /var/run/rsyncd.pid @@ -409,8 +410,8 @@ lock file = /var/lock/container.lock max connections = 2 path = /srv/node/ read only = false -lock file = /var/lock/object.lock4.Edit the following line in /etc/default/rsync:RSYNC_ENABLE = true5.Start rsync daemon:service rsync startNoteThe rsync daemon requires no authentication, so it should be run on -a local, private network.6.Create /etc/swift/account-server.conf:[DEFAULT] +lock file = /var/lock/object.lock4.Edit the following line in /etc/default/rsync:RSYNC_ENABLE = true5.Start rsync daemon:service rsync startNoteThe rsync daemon requires no authentication, so it should be run on +a local, private network.6.Create /etc/swift/account-server.conf:[DEFAULT] bind_ip = <STORAGE_LOCAL_NET_IP> workers = 2 @@ -424,7 +425,7 @@ use = egg:swift#account [account-auditor] -[account-reaper]7.Create /etc/swift/container-server.conf:[DEFAULT] +[account-reaper]7.Create /etc/swift/container-server.conf:[DEFAULT] bind_ip = <STORAGE_LOCAL_NET_IP> workers = 2 @@ -438,7 +439,7 @@ use = egg:swift#container [container-updater] -[container-auditor]8.Create /etc/swift/object-server.conf:[DEFAULT] +[container-auditor]8.Create /etc/swift/object-server.conf:[DEFAULT] bind_ip = <STORAGE_LOCAL_NET_IP> workers = 2 @@ -452,7 +453,7 @@ use = egg:swift#object [object-updater] -[object-auditor]9.Start the storage services:swift-init object-server start +[object-auditor]9.Start the storage services:swift-init object-server start swift-init object-replicator start swift-init object-updater start swift-init object-auditor start @@ 
-462,10 +463,10 @@ swift-init container-updater start swift-init container-auditor start swift-init account-server start swift-init account-replicator start -swift-init account-auditor startCreate OpenStack Object Storage admin Account and Verify the InstallationYou can run these commands from the proxy server if you have installed swauth there. Look for the default_swift_cluster setting +swift-init account-auditor startCreate OpenStack Object Storage admin Account and Verify the InstallationYou can run these commands from the proxy server if you have installed swauth there. Look for the default_swift_cluster setting in the proxy-server.conf and match the URLs (including http - or https) when issuing swauth commands.1.Prepare the system for authorization commands - by telling it the key and the URL for auth. swauth-prep -K key -A http://<AUTH_HOSTNAME>:8080/auth/2.Create a user with administrative privileges + or https) when issuing swauth commands.1.Prepare the system for authorization commands + by telling it the key and the URL for auth. swauth-prep -K key -A http://<AUTH_HOSTNAME>:8080/auth/2.Create a user with administrative privileges (account = system, username = root, password = testpass). Make sure to replace key in the swauth-add-user command below with whatever @@ -473,30 +474,30 @@ swift-init account-auditor start - swauth-add-user -K key -A http://<AUTH_HOSTNAME>:8080/auth/ -a system root testpass - 3.Get an X-Storage-Url and X-Auth-Token:curl -k -v -H 'X-Storage-User: system:root' -H 'X-Storage-Pass: testpass' http://<AUTH_HOSTNAME>:8080/auth/v1.04.Check that you can HEAD the account:curl -k -v -H 'X-Auth-Token: <token-from-x-auth-token-above>' <url-from-x-storage-url-above>5.Check that the Swift Tool, swift, works: swift -A http://<AUTH_HOSTNAME>:8080/auth/v1.0 -U system:root -K testpass stat6.Use swift to upload a few files named ‘bigfile[1-2].tgz’ to a container named - ‘myfiles’:swift -A http://<AUTH_HOSTNAME>:8080/auth/v1.0 -U system:root -K testpass upload myfiles bigfile1.tgz -swift -A http://<AUTH_HOSTNAME>:8080/auth/v1.0 -U system:root -K testpass upload myfiles bigfile2.tgz7.Use swift to download all files from the ‘myfiles’ container:swift -A http://<AUTH_HOSTNAME>:8080/auth/v1.0 -U system:root -K testpass download myfilesAdding an Additional Proxy ServerFor reliability’s sake you may want to have more than one proxy server. You can set up the additional proxy node in the same manner that you set up the first proxy node but with additional configuration steps.Once you have more than two proxies, you also want to load balance between the two, which means your storage endpoint also changes. You can select from different strategies for load balancing. For example, you could use round robin dns, or an actual load balancer (like pound) in front of the two proxies, and point your storage url to the load balancer.See Configure the Proxy node for the initial setup, and then follow these additional steps.1.Update the list of memcache servers in /etc/swift/proxy-server.conf for all the added proxy servers. 
If you run multiple memcache servers, use this pattern for the multiple IP:port listings: 10.1.2.3:11211,10.1.2.4:11211 in each proxy server’s conf file:
 [filter:cache]
 use = egg:swift#memcache
 memcache_servers = <PROXY_LOCAL_NET_IP>:11211
2. Change the default_cluster_url to point to the load-balanced URL, rather than the first proxy server you created, in /etc/swift/proxy-server.conf:
 [app:auth-server]
 use = egg:swift#auth
 default_cluster_url = https://<LOAD_BALANCER_HOSTNAME>/v1
 # Highly recommended to change this key to something else!
 super_admin_key = devauth
3. After you change the default_cluster_url setting, you have to delete the auth database and recreate the OpenStack Object Storage users, or manually update the auth database with the correct URL for each account.
4. Next, copy all the ring information to all the nodes, including your new proxy nodes, and ensure the ring info gets to all the storage nodes as well.
5. After you sync all the nodes, make sure the admin has the keys in /etc/swift and the ownership for the ring file is correct.
Troubleshooting Notes
If you see problems, look in /var/log/syslog (or messages on some distros). Also, at Rackspace we have seen hints at drive failures by looking at error messages in /var/log/kern.log.
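A couple of ordinary shell commands are often enough to surface these messages; this is only a sketch, and the exact log paths vary by distribution:
 # Watch syslog for swift-related errors as they happen
 tail -f /var/log/syslog | grep -i swift
 # Check the kernel log for signs of failing drives
 grep -i error /var/log/kern.log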
4. System Administration for OpenStack Object Storage
By understanding the concepts inherent to the Object Storage system you can better monitor and administer your storage solution.
Understanding How Object Storage Works
This section offers a brief overview of each concept in administering Object Storage.
The RingThe RingA ring represents a mapping between the names of entities stored on disk and their physical location. There are separate rings for accounts, containers, and objects. When other components need to perform any operation on an object, container, or account, they need to interact with the appropriate ring to determine its location in the cluster. + The Ring maintains this mapping using zones, devices, partitions, and replicas. Each partition in the ring is replicated, by default, 3 times across the cluster, and the locations for a partition are stored in the mapping maintained by the ring. The ring is also responsible for determining which devices are used for handoff in failure scenarios.Data can be isolated with the concept of zones in the ring. Each replica of a partition is guaranteed to reside in a different zone. A zone could represent a drive, a server, a cabinet, a switch, or even a datacenter.The partitions of the ring are equally divided among all the devices in the OpenStack Object Storage installation. When partitions need to be moved around (for example if a device is added to the cluster), the ring ensures that a minimum number of partitions are moved at a time, and only one replica of a partition is moved at a time.Weights can be used to balance the distribution of partitions on drives across the cluster. This can be useful, for example, when different sized drives are used in a cluster.The ring is used by the Proxy server and several background processes (like replication).Proxy ServerProxy ServerThe Proxy Server is responsible for tying together the rest of the OpenStack Object Storage architecture. For each request, it will look up the location of the account, container, or object in the ring (see below) and route the request accordingly. The public API is also exposed through the Proxy Server. A large number of failures are also handled in the Proxy Server. For example, if a server is unavailable for an object PUT, it will ask the ring for a hand-off server and route there instead. When objects are streamed to or from an object server, they are streamed directly through the proxy server to or from the user – the proxy server does not spool them.You can use a proxy server with account management enabled by configuring it in - the proxy server configuration file.Object ServerObject ServerThe Object Server is a very simple blob storage server that can store, retrieve and delete objects stored on local devices. Objects are stored as binary files on the filesystem with metadata stored in the file’s extended attributes (xattrs). This requires that the underlying filesystem choice for object servers support xattrs on files. Some filesystems, like ext3, have xattrs turned off by default.Each object is stored using a path derived from the object name’s hash and the operation’s timestamp. Last write always wins, and ensures that the latest object version will be served. A deletion is also treated as a version of the file (a 0 byte file ending with “.ts”, which stands for tombstone). This ensures that deleted files are replicated correctly and older versions don’t magically reappear due to failure scenarios.Container ServerContainer ServerThe Container Server’s primary job is to handle listings of objects. It doesn’t know where those object’s are, just what objects are in a specific container. The listings are stored as sqlite database files, and replicated across the cluster similar to how objects are. 
Statistics are also tracked that include the total number of objects, and total storage usage for that container.Account ServerAccount ServerThe Account Server is very similar to the Container Server, excepting that it is - responsible for listings of containers rather than objects.ReplicationReplicationReplication is designed to keep the system in a consistent state in the face of temporary error conditions like network outages or drive failures. + the proxy server configuration file.Object ServerObject ServerThe Object Server is a very simple blob storage server that can store, retrieve and delete objects stored on local devices. Objects are stored as binary files on the filesystem with metadata stored in the file’s extended attributes (xattrs). This requires that the underlying filesystem choice for object servers support xattrs on files. Some filesystems, like ext3, have xattrs turned off by default.Each object is stored using a path derived from the object name’s hash and the operation’s timestamp. Last write always wins, and ensures that the latest object version will be served. A deletion is also treated as a version of the file (a 0 byte file ending with “.ts”, which stands for tombstone). This ensures that deleted files are replicated correctly and older versions don’t magically reappear due to failure scenarios.Container ServerContainer ServerThe Container Server’s primary job is to handle listings of objects. It doesn’t know where those object’s are, just what objects are in a specific container. The listings are stored as sqlite database files, and replicated across the cluster similar to how objects are. Statistics are also tracked that include the total number of objects, and total storage usage for that container.Account ServerAccount ServerThe Account Server is very similar to the Container Server, excepting that it is + responsible for listings of containers rather than objects.ReplicationReplicationReplication is designed to keep the system in a consistent state in the face of temporary error conditions like network outages or drive failures. The replication processes compare local data with each remote copy to ensure they all contain the latest version. Object replication uses a hash list to quickly compare subsections of each partition, and container and account replication use a combination of hashes and shared high water marks. Replication updates are push based. For object replication, updating is just a matter of rsyncing files to the peer. Account and container replication push missing records over HTTP or rsync whole database files. The replicator also ensures that data is removed from the system. When an item (object, container, or account) is deleted, a tombstone is set as the latest version of the item. The replicator will see the tombstone and ensure that the item is removed from the entire system. - UpdatersUpdatersThere are times when container or account data can not be immediately updated. + UpdatersUpdatersThere are times when container or account data can not be immediately updated. This usually occurs during failure scenarios or periods of high load. If an update fails, the update is queued locally on the file system, and the updater will process the failed updates. 
This is where an eventual consistency window will most likely
@@ -505,10 +506,10 @@ super_admin_key = devauth
 contain the object. In practice, the consistency window is only as large as the frequency at which the updater runs and may not even be noticed as the proxy server will route listing requests to the first container server which responds. The server under load may not be the one that serves subsequent listing requests – one of the other two replicas may handle the listing.
 Auditors
 Auditors crawl the local server checking the integrity of the objects, containers, and accounts. If corruption is found (in the case of bit rot, for example), the file is quarantined, and replication will replace the bad file from another replica. If other errors are found they are logged (for example, an object’s listing cannot be found on any container server where it should be).
 Configuring and Tuning OpenStack Object Storage
 This section walks through deployment options and considerations. You have multiple deployment options to choose from. The swift services run completely autonomously, which provides for a lot of flexibility when designing the hardware deployment for swift. The 4 main services are: Proxy Services, Object Services, Container Services, and Account Services. The Proxy Services are more CPU and network I/O intensive. If you are using 10g networking to the proxy, or are terminating SSL traffic at the proxy, greater CPU power will be required. The Object, Container, and Account Services (Storage Services) are more disk and network I/O intensive. The easiest deployment is to install all services on each server. There is
@@ -521,7 +522,7 @@ Proxies.
 Load balancing and network design is left as an exercise to the reader, but this is a very important part of the cluster, so time should be spent designing the network for a Swift cluster.
 Preparing the Ring
 The first step is to determine the number of partitions that will be in the ring. We recommend that there be a minimum of 100 partitions per drive to ensure even distribution across the drives.
A good starting point might be to figure out the maximum number of drives the cluster will contain, and then @@ -544,9 +545,9 @@ network connectivity. For example, in a small cluster you might decide to split the zones up by cabinet, with each cabinet having its own power and network connectivity. The zone concept is very abstract, so feel free to use it in whatever way best isolates your data from failure. Zones are referenced -by number, beginning with 1.You can now start building the ring with:swift-ring-builder <builder_file> create <part_power> <replicas> <min_part_hours>This will start the ring build process creating the <builder_file> with +by number, beginning with 1.You can now start building the ring with:swift-ring-builder <builder_file> create <part_power> <replicas> <min_part_hours>This will start the ring build process creating the <builder_file> with 2^<part_power> partitions. <min_part_hours> is the time in hours before a -specific partition can be moved in succession (24 is a good value for this).Devices can be added to the ring with:swift-ring-builder <builder_file> add z<zone>-<ip>:<port>/<device_name>_<meta> <weight>This will add a device to the ring where <builder_file> is the name of the +specific partition can be moved in succession (24 is a good value for this).Devices can be added to the ring with:swift-ring-builder <builder_file> add z<zone>-<ip>:<port>/<device_name>_<meta> <weight>This will add a device to the ring where <builder_file> is the name of the builder file that was created previously, <zone> is the number of the zone this device is in, <ip> is the ip address of the server the device is in, <port> is the port number that the server is running on, <device_name> is @@ -554,7 +555,7 @@ the name of the device on the server (for example: sdb1), <meta> is a stri of metadata for the device (optional), and <weight> is a float weight that determines how many partitions are put on the device relative to the rest of the devices in the cluster (a good starting point is 100.0 x TB on the drive). -Add each device that will be initially in the cluster.Once all of the devices are added to the ring, run:swift-ring-builder <builder_file> rebalanceThis will distribute the partitions across the drives in the ring. It is +Add each device that will be initially in the cluster.Once all of the devices are added to the ring, run:swift-ring-builder <builder_file> rebalanceThis will distribute the partitions across the drives in the ring. It is important whenever making changes to the ring to make all the changes required before running rebalance. This will ensure that the ring stays as balanced as possible, and as few partitions are moved as possible.The above process should be done to make a ring for each storage service @@ -563,10 +564,10 @@ changes to the ring, so it is very important that these be kept and backed up. The resulting .tar.gz ring file should be pushed to all of the servers in the cluster. For more information about building rings, running swift-ring-builder with no options will display help text with available -commands and options. Server Configuration ReferenceServer Configuration ReferenceSwift uses paste.deploy to manage server configurations. Default configuration +commands and options. Server Configuration ReferenceServer Configuration ReferenceSwift uses paste.deploy to manage server configurations. 
Default configuration options are set in the [DEFAULT] section, and any options specified there -can be overridden in any of the other sections.Object Server ConfigurationObject Server ConfigurationAn Example Object Server configuration can be found at - etc/object-server.conf-sample in the source code repository.The following configuration options are available:Table 4.1. object-server.conf Default Options in the [DEFAULT] section +can be overridden in any of the other sections.Object Server ConfigurationObject Server ConfigurationAn Example Object Server configuration can be found at + etc/object-server.conf-sample in the source code repository.The following configuration options are available:Table 4.1. object-server.conf Default Options in the [DEFAULT] section Option Default Description @@ -595,7 +596,7 @@ can be overridden in any of the other sections. workers 1 Number of workers to fork - Table 4.2. object-server.conf Server Options in the [object-server] section + Table 4.2. object-server.conf Server Options in the [object-server] section Option Default Description @@ -649,7 +650,7 @@ can be overridden in any of the other sections. 0 If > 0, Minimum time in seconds for a PUT or DELETE request to complete - Table 4.3. object-server.conf Replicator Options in the [object-replicator] section + Table 4.3. object-server.conf Replicator Options in the [object-replicator] section Option Default Description @@ -690,7 +691,7 @@ can be overridden in any of the other sections. reclaim_age 604800 Time elapsed in seconds before an object can be reclaimed - Table 4.4. object-server.conf Updater Options in the [object-updater] section + Table 4.4. object-server.conf Updater Options in the [object-updater] section Option Default Description @@ -726,7 +727,7 @@ can be overridden in any of the other sections. slowdown 0.01 Time in seconds to wait between objects - Table 4.5. object-server.conf Auditor Options in the [object-auditor] section + Table 4.5. object-server.conf Auditor Options in the [object-auditor] section Option Default Description @@ -752,8 +753,8 @@ can be overridden in any of the other sections. 10000000 Maximum bytes audited per second. Should be tuned according to individual system specs. 0 is unlimited. - Container Server ConfigurationContainer Server ConfigurationAn example Container Server configuration can be found at - etc/container-server.conf-sample in the source code repository.The following configuration options are available:Table 4.6. container-server.conf Default Options in the [DEFAULT] section + Container Server ConfigurationContainer Server ConfigurationAn example Container Server configuration can be found at + etc/container-server.conf-sample in the source code repository.The following configuration options are available:Table 4.6. container-server.conf Default Options in the [DEFAULT] section Option Default Description @@ -786,7 +787,7 @@ can be overridden in any of the other sections. user swift User to run as - Table 4.7. container-server.conf Server Options in the [container-server] section + Table 4.7. container-server.conf Server Options in the [container-server] section Option Default Description @@ -815,7 +816,7 @@ can be overridden in any of the other sections. conn_timeout 0.5 Connection timeout to external services - Table 4.8. container-server.conf Replicator Options in the [container-replicator] section + Table 4.8. 
container-server.conf Replicator Options in the [container-replicator] section Option Default Description @@ -855,7 +856,7 @@ can be overridden in any of the other sections. reclaim_age 604800 Time elapsed in seconds before a container can be reclaimed - Table 4.9. container-server.conf Updater Options in the [container-updater] section + Table 4.9. container-server.conf Updater Options in the [container-updater] section Option Default Description @@ -891,7 +892,7 @@ can be overridden in any of the other sections. slowdown 0.01 Time in seconds to wait between containers - Table 4.10. container-server.conf Auditor Options in the [container-auditor] section + Table 4.10. container-server.conf Auditor Options in the [container-auditor] section Option Default Description @@ -911,8 +912,8 @@ can be overridden in any of the other sections. interval 1800 Minimum time for a pass to take - Account Server ConfigurationAccount Server ConfigurationAn example Account Server configuration can be found at - etc/account-server.conf-sample in the source code repository.The following configuration options are available:Table 4.11. account-server.conf Default Options in the [DEFAULT] section + Account Server ConfigurationAccount Server ConfigurationAn example Account Server configuration can be found at + etc/account-server.conf-sample in the source code repository.The following configuration options are available:Table 4.11. account-server.conf Default Options in the [DEFAULT] section Option Default Description @@ -945,7 +946,7 @@ can be overridden in any of the other sections. user swift User to run as - Table 4.12. account-server.conf Server Options in the [account-server] section + Table 4.12. account-server.conf Server Options in the [account-server] section Option Default Description @@ -966,7 +967,7 @@ can be overridden in any of the other sections. log_level INFO Logging level - Table 4.13. account-server.conf Replicator Options in the [account-replicator] section + Table 4.13. account-server.conf Replicator Options in the [account-replicator] section Option Default Description @@ -1006,7 +1007,7 @@ can be overridden in any of the other sections. reclaim_age 604800 Time elapsed in seconds before an account can be reclaimed - Table 4.14. account-server.conf Auditor Options in the [account-auditor] section + Table 4.14. account-server.conf Auditor Options in the [account-auditor] section Option Default Description @@ -1026,7 +1027,7 @@ can be overridden in any of the other sections. interval 1800 Minimum time for a pass to take - Table 4.15. account-server.conf Reaper Options in the [account-reaper] section + Table 4.15. account-server.conf Reaper Options in the [account-reaper] section Option Default Description @@ -1058,8 +1059,8 @@ can be overridden in any of the other sections. conn_timeout 0.5 Connection timeout to external services - Proxy Server ConfigurationProxy Server ConfigurationAn example Proxy Server configuration can be found at etc/proxy-server.conf-sample - in the source code repository.The following configuration options are available:Table 4.16. proxy-server.conf Default Options in the [DEFAULT] section + Proxy Server ConfigurationProxy Server ConfigurationAn example Proxy Server configuration can be found at etc/proxy-server.conf-sample + in the source code repository.The following configuration options are available:Table 4.16. proxy-server.conf Default Options in the [DEFAULT] section Option Default Description @@ -1091,7 +1092,7 @@ can be overridden in any of the other sections. 
key_file Path to the ssl .key - Table 4.17. proxy-server.conf Server Options in the [proxy-server] section + Table 4.17. proxy-server.conf Server Options in the [proxy-server] section Option Default Description @@ -1161,7 +1162,7 @@ can be overridden in any of the other sections. allow_account_management false Whether account PUTs and DELETEs are even callable - Table 4.18. proxy-server.conf Paste.deploy Options in the [filter:swauth] section + Table 4.18. proxy-server.conf Paste.deploy Options in the [filter:swauth] section Option Default Description @@ -1214,18 +1215,18 @@ can be overridden in any of the other sections. super_admin_key None The key for the .super_admin account. - Considerations and TuningConsiderations and TuningFine-tuning your deployment and installation may take some time and effort. Here are some considerations for improving performance of an OpenStack Object Storage installation.Memcached ConsiderationsMemcached ConsiderationsSeveral of the Services rely on Memcached for caching certain types of + Considerations and TuningConsiderations and TuningFine-tuning your deployment and installation may take some time and effort. Here are some considerations for improving performance of an OpenStack Object Storage installation.Memcached ConsiderationsMemcached ConsiderationsSeveral of the Services rely on Memcached for caching certain types of lookups, such as auth tokens, and container/account existence. Swift does not do any caching of actual object data. Memcached should be able to run on any servers that have available RAM and CPU. At Rackspace, we run Memcached on the proxy servers. The memcache_servers config option - in the proxy-server.conf should contain all memcached servers.System TimeSystem TimeTime may be relative but it is relatively important for Swift! Swift uses + in the proxy-server.conf should contain all memcached servers.System TimeSystem TimeTime may be relative but it is relatively important for Swift! Swift uses timestamps to determine which is the most recent version of an object. It is very important for the system time on each server in the cluster to by synced as closely as possible (more so for the proxy server, but in general it is a good idea for all the servers). At Rackspace, we use NTP with a local NTP server to ensure that the system times are as close as possible. This - should also be monitored to ensure that the times do not vary too much.General Service TuningGeneral Service TuningMost services support either a worker or concurrency value in the settings. + should also be monitored to ensure that the times do not vary too much.General Service TuningGeneral Service TuningMost services support either a worker or concurrency value in the settings. This allows the services to make effective use of the cores available. A good starting point to set the concurrency level for the proxy and storage services to 2 times the number of cores available. If more than one service is @@ -1238,7 +1239,7 @@ can be overridden in any of the other sections. 
at a concurrency of 1, with the exception of the replicators which are run at a concurrency of 2.The above configuration setting should be taken as suggestions and testing of configuration settings should be done to ensure best utilization of CPU, - network connectivity, and disk I/O.Filesystem ConsiderationsFilesystem ConsiderationsSwift is designed to be mostly filesystem agnostic–the only requirement + network connectivity, and disk I/O.Filesystem ConsiderationsFilesystem ConsiderationsSwift is designed to be mostly filesystem agnostic–the only requirement being that the filesystem supports extended attributes (xattrs). After thorough testing with our use cases and hardware configurations, XFS was the best all-around choice. If you decide to use a filesystem other than @@ -1255,8 +1256,8 @@ can be overridden in any of the other sections. (as can be seen in the above example of mounting /dev/sda1 as /srv/node/sda). If you choose to mount the drives in another directory, be sure to set the devices config option in all of the server configs to point to the - correct directory.General System TuningGeneral System TuningRackspace currently runs Swift on Ubuntu Server 10.04, and the following - changes have been found to be useful for our use cases.The following settings should be in /etc/sysctl.conf: + correct directory.General System TuningGeneral System TuningRackspace currently runs Swift on Ubuntu Server 10.04, and the following + changes have been found to be useful for our use cases.The following settings should be in /etc/sysctl.conf: # disable TIME_WAIT.. wait.. net.ipv4.tcp_tw_recycle=1 @@ -1273,10 +1274,10 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 received. During high usage, and with the number of connections that are created, it is easy to run out of ports. We can change this since we are in control of the network. If you are not in control of the network, or - do not expect high loads, then you may not want to adjust those values.Logging ConsiderationsLogging ConsiderationsSwift is set up to log directly to syslog. Every service can be configured with + do not expect high loads, then you may not want to adjust those values.Logging ConsiderationsLogging ConsiderationsSwift is set up to log directly to syslog. Every service can be configured with the log_facility option to set the syslog log facility destination. We recommend using syslog-ng to route the logs to specific log files locally on the - server and also to remote log collecting servers.Working with RingsWorking with RingsThe rings determine where data should reside in the cluster. There is a + server and also to remote log collecting servers.Working with RingsWorking with RingsThe rings determine where data should reside in the cluster. There is a separate ring for account databases, container databases, and individual objects but each ring works in the same way. These rings are externally managed, in that the server processes themselves do not modify the rings, they @@ -1291,7 +1292,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 number, each replica's device will not be in the same zone as any other replica's device. Zones can be used to group devices based on physical locations, power separations, network separations, or any other attribute that - would lessen multiple replicas being unavailable at the same time.Managing Rings with the Ring BuilderThe rings are built and managed manually by a utility called the ring-builder. 
+ would lessen multiple replicas being unavailable at the same time.Managing Rings with the Ring BuilderThe rings are built and managed manually by a utility called the ring-builder. The ring-builder assigns partitions to devices and writes an optimized Python structure to a gzipped, pickled file on disk for shipping out to the servers. The server processes just check the modification time of the file occasionally @@ -1307,11 +1308,11 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 partitions will end up assigned to different devices, and therefore nearly all data stored will have to be replicated to new locations. So, recovery from a builder file loss is possible, but data will definitely be unreachable for an - extended time.About the Ring Data StructureThe ring data structure consists of three top level fields: a list of devices + extended time.About the Ring Data StructureThe ring data structure consists of three top level fields: a list of devices in the cluster, a list of lists of device ids indicating partition to device assignments, and an integer indicating the number of bits to shift an MD5 hash - to calculate the partition for the hash.List of Devices in the RingThe list of devices is known internally to the Ring class as devs. Each item in - the list of devices is a dictionary with the following keys:Table 4.19. List of Devices and Keys + to calculate the partition for the hash.List of Devices in the RingThe list of devices is known internally to the Ring class as devs. Each item in + the list of devices is a dictionary with the following keys:Table 4.19. List of Devices and Keys Key Type Descriptionid @@ -1355,17 +1356,17 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 Note: The list of devices may contain holes, or indexes set to None, for devices that have been removed from the cluster. Generally, device ids are not reused. Also, some devices may be temporarily disabled by setting their weight - to 0.0. Partition Assignment ListThis is a list of array(‘I') of devices ids. The outermost list contains an + to 0.0. Partition Assignment ListThis is a list of array(‘I') of devices ids. The outermost list contains an array(‘I') for each replica. Each array(‘I') has a length equal to the partition count for the ring. Each integer in the array(‘I') is an index into the above list of devices. The partition list is known internally to the Ring class as _replica2part2dev_id.So, to create a list of device dictionaries assigned to a partition, the Python code would look like: devices = [self.devs[part2dev_id[partition]] for part2dev_id in self._replica2part2dev_id]array(‘I') is used for memory conservation as there may be millions of - partitions.Partition Shift ValueThe partition shift value is known internally to the Ring class as _part_shift. + partitions.Partition Shift ValueThe partition shift value is known internally to the Ring class as _part_shift. This value used to shift an MD5 hash to calculate the partition on which the data for that hash should reside. Only the top four bytes of the hash is used in this process. 
For example, to compute the partition for the path - /account/container/object the Python code might look like: partition = unpack_from('>I', md5('/account/container/object').digest())[0] >>self._part_shiftBuilding the RingThe initial building of the ring first calculates the number of partitions that + /account/container/object the Python code might look like: partition = unpack_from('>I', md5('/account/container/object').digest())[0] >>self._part_shiftBuilding the RingThe initial building of the ring first calculates the number of partitions that should ideally be assigned to each device based the device's weight. For example, if the partition power of 20 the ring will have 1,048,576 partitions. If there are 1,000 devices of equal weight they will each desire 1,048.576 @@ -1391,7 +1392,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 ring, the rebalance process is repeated until near perfect (less 1% off) or when the balance doesn't improve by at least 1% (indicating we probably can't get perfect balance due to wildly imbalanced zones or too many partitions - recently moved).History of the Ring DesignThe ring code went through many iterations before arriving at what it is now + recently moved).History of the Ring DesignThe ring code went through many iterations before arriving at what it is now and while it has been stable for a while now, the algorithm may be tweaked or perhaps even fundamentally changed if new ideas emerge. This section will try to describe the previous ideas attempted and attempt to explain why they were @@ -1441,7 +1442,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 faster, but MD5 was built-in and hash computation is a small percentage of the overall request handling time. In all, once it was decided the servers wouldn't be maintaining the rings themselves anyway and only doing hash lookups, MD5 was - chosen for its general availability, good distribution, and adequate speed.The Account ReaperThe Account ReaperThe Account Reaper removes data from deleted accounts in the background.An account is marked for deletion by a reseller through the services server's + chosen for its general availability, good distribution, and adequate speed.The Account ReaperThe Account ReaperThe Account Reaper removes data from deleted accounts in the background.An account is marked for deletion by a reseller through the services server's remove_storage_account XMLRPC call. This simply puts the value DELETED into the status column of the account_stat table in the account database (and replicas), indicating the data for the account should be deleted later. There is no set @@ -1462,7 +1463,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 failure so that it doesn't get hung up reclaiming cluster space because of one troublesome spot. The account reaper will keep trying to delete an account until it eventually becomes empty, at which point the database reclaim process - within the db_replicator will eventually remove the database files.Account Reaper Background and HistoryAt first, a simple approach of deleting an account through completely external + within the db_replicator will eventually remove the database files.Account Reaper Background and HistoryAt first, a simple approach of deleting an account through completely external calls was considered as it required no changes to the system. All data would simply be deleted in the same way the actual user would, through the public ReST API. 
However, the downside was that it would use proxy resources and log @@ -1482,16 +1483,16 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 scanning all the containers for those marked for deletion when the majority wouldn't be seemed wasteful. The db_replicator could do this work while performing its replication scan, but it would have to spawn and track deletion - processes which seemed needlessly complex.In the end, an account server centric approach seemed best, as described above.ReplicationReplicationSince each replica in OpenStack Object Storage functions independently, and clients generally require only a simple majority of nodes responding to consider an operation successful, transient failures like network partitions can quickly cause replicas to diverge. These differences are eventually reconciled by asynchronous, peer-to-peer replicator processes. The replicator processes traverse their local filesystems, concurrently performing operations in a manner that balances load across physical disks.Replication uses a push model, with records and files generally only being copied from local to remote replicas. This is important because data on the node may not belong there (as in the case of handoffs and ring changes), and a replicator can't know what data exists elsewhere in the cluster that it should pull in. It's the duty of any node that contains data to ensure that data gets to where it belongs. Replica placement is handled by the ring.Every deleted record or file in the system is marked by a tombstone, so that deletions can be replicated alongside creations. These tombstones are cleaned up by the replication process after a period of time referred to as the consistency window, which is related to replication duration and how long transient failures can remove a node from the cluster. Tombstone cleanup must be tied to replication to reach replica convergence.If a replicator detects that a remote drive is has failed, it will use the ring's “get_more_nodes” interface to choose an alternate node to synchronize with. The replicator can generally maintain desired levels of replication in the face of hardware failures, though some replicas may not be in an immediately usable location.Replication is an area of active development, and likely rife with potential improvements to speed and correctness.There are two major classes of replicator - the db replicator, which replicates accounts and containers, and the object replicator, which replicates object data.Database ReplicationDatabase ReplicationThe first step performed by db replication is a low-cost hash comparison to find out whether or not two replicas already match. Under normal operation, this check is able to verify that most databases in the system are already synchronized very quickly. If the hashes differ, the replicator brings the databases in sync by sharing records added since the last sync point.This sync point is a high water mark noting the last record at which two databases were known to be in sync, and is stored in each database as a tuple of the remote database id and record id. Database ids are unique amongst all replicas of the database, and record ids are monotonically increasing integers. 
After all new records have been pushed to the remote database, the entire sync table of the local database is pushed, so the remote database knows it's now in sync with everyone the local database has previously synchronized with.If a replica is found to be missing entirely, the whole local database file is transmitted to the peer using rsync(1) and vested with a new unique id.In practice, DB replication can process hundreds of databases per concurrency setting per second (up to the number of available CPUs or disks) and is bound by the number of DB transactions that must be performed.Object ReplicationObject ReplicationThe initial implementation of object replication simply performed an rsync to push data from a local partition to all remote servers it was expected to exist on. While this performed adequately at small scale, replication times skyrocketed once directory structures could no longer be held in RAM. We now use a modification of this scheme in which a hash of the contents for each suffix directory is saved to a per-partition hashes file. The hash for a suffix directory is invalidated when the contents of that suffix directory are modified.The object replication process reads in these hash files, calculating any invalidated hashes. It then transmits the hashes to each remote server that should hold the partition, and only suffix directories with differing hashes on the remote server are rsynced. After pushing files to the remote server, the replication process notifies it to recalculate hashes for the rsynced suffix directories.Performance of object replication is generally bound by the number of uncached directories it has to traverse, usually as a result of invalidated suffix directory hashes. Using write volume and partition counts from our running systems, it was designed so that around 2% of the hash space on a normal node will be invalidated per day, which has experimentally given us acceptable replication speeds.Managing Large Objects (Greater than 5 GB)Managing Large Objects (Greater than 5 GB)OpenStack Object Storage has a limit on the size of a single uploaded object; by default this is + processes which seemed needlessly complex.In the end, an account server centric approach seemed best, as described above.ReplicationReplicationSince each replica in OpenStack Object Storage functions independently, and clients generally require only a simple majority of nodes responding to consider an operation successful, transient failures like network partitions can quickly cause replicas to diverge. These differences are eventually reconciled by asynchronous, peer-to-peer replicator processes. The replicator processes traverse their local filesystems, concurrently performing operations in a manner that balances load across physical disks.Replication uses a push model, with records and files generally only being copied from local to remote replicas. This is important because data on the node may not belong there (as in the case of handoffs and ring changes), and a replicator can't know what data exists elsewhere in the cluster that it should pull in. It's the duty of any node that contains data to ensure that data gets to where it belongs. Replica placement is handled by the ring.Every deleted record or file in the system is marked by a tombstone, so that deletions can be replicated alongside creations. 
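As a hypothetical illustration of what a deletion looks like on disk (the device, partition, and hash directory below are made up), a deleted object leaves behind a file named for the deletion timestamp with a .ts extension in its hash directory:

/srv/node/sda1/objects/139/fe1/<hash>/1316794433.12025.ts

The replicators propagate this .ts file in the same way they propagate .data files, which is how the deletion eventually reaches replicas that were unreachable when the DELETE was issued.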
These tombstones are cleaned up by the replication process after a period of time referred to as the consistency window, which is related to replication duration and how long transient failures can remove a node from the cluster. Tombstone cleanup must be tied to replication to reach replica convergence.If a replicator detects that a remote drive is has failed, it will use the ring's “get_more_nodes” interface to choose an alternate node to synchronize with. The replicator can generally maintain desired levels of replication in the face of hardware failures, though some replicas may not be in an immediately usable location.Replication is an area of active development, and likely rife with potential improvements to speed and correctness.There are two major classes of replicator - the db replicator, which replicates accounts and containers, and the object replicator, which replicates object data.Database ReplicationDatabase ReplicationThe first step performed by db replication is a low-cost hash comparison to find out whether or not two replicas already match. Under normal operation, this check is able to verify that most databases in the system are already synchronized very quickly. If the hashes differ, the replicator brings the databases in sync by sharing records added since the last sync point.This sync point is a high water mark noting the last record at which two databases were known to be in sync, and is stored in each database as a tuple of the remote database id and record id. Database ids are unique amongst all replicas of the database, and record ids are monotonically increasing integers. After all new records have been pushed to the remote database, the entire sync table of the local database is pushed, so the remote database knows it's now in sync with everyone the local database has previously synchronized with.If a replica is found to be missing entirely, the whole local database file is transmitted to the peer using rsync(1) and vested with a new unique id.In practice, DB replication can process hundreds of databases per concurrency setting per second (up to the number of available CPUs or disks) and is bound by the number of DB transactions that must be performed.Object ReplicationObject ReplicationThe initial implementation of object replication simply performed an rsync to push data from a local partition to all remote servers it was expected to exist on. While this performed adequately at small scale, replication times skyrocketed once directory structures could no longer be held in RAM. We now use a modification of this scheme in which a hash of the contents for each suffix directory is saved to a per-partition hashes file. The hash for a suffix directory is invalidated when the contents of that suffix directory are modified.The object replication process reads in these hash files, calculating any invalidated hashes. It then transmits the hashes to each remote server that should hold the partition, and only suffix directories with differing hashes on the remote server are rsynced. After pushing files to the remote server, the replication process notifies it to recalculate hashes for the rsynced suffix directories.Performance of object replication is generally bound by the number of uncached directories it has to traverse, usually as a result of invalidated suffix directory hashes. 
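If object replication is falling behind, or conversely is consuming too much disk I/O, its pace can be adjusted in the [object-replicator] section of object-server.conf on the storage nodes; a minimal sketch with illustrative values (your defaults and needs may differ):

[object-replicator]
concurrency = 2
run_pause = 30

Here concurrency controls how many replication workers run in parallel and run_pause is the number of seconds to wait between replication passes; the General Service Tuning section above notes that Rackspace runs the replicators at a concurrency of 2.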
Using write volume and partition counts from our running systems, it was designed so that around 2% of the hash space on a normal node will be invalidated per day, which has experimentally given us acceptable replication speeds.Managing Large Objects (Greater than 5 GB)Managing Large Objects (Greater than 5 GB)OpenStack Object Storage has a limit on the size of a single uploaded object; by default this is 5GB. However, the download size of a single object is virtually unlimited with the concept of segmentation. Segments of the larger object are uploaded and a special manifest file is created that, when downloaded, sends all the segments concatenated as a single object. This also offers much greater upload speed - with the possibility of parallel uploads of the segments.Using swift to Manage Segmented ObjectsUsing swift to Manage Segmented ObjectsThe quickest way to try out this feature is use the included swift OpenStack - Object Storage Tool. You can use the -S option to specify the segment size to use - when splitting a large file. For example:swift upload test_container -S 1073741824 large_fileThis would split the large_file into 1G segments and begin uploading those + with the possibility of parallel uploads of the segments.Using swift to Manage Segmented ObjectsUsing swift to Manage Segmented ObjectsThe quickest way to try out this feature is use the included swift OpenStack + Object Storage client tool. You can use the -S option to specify the segment size to use + when splitting a large file. For example:swift upload test_container -S 1073741824 large_fileThis would split the large_file into 1G segments and begin uploading those segments in parallel. Once all the segments have been uploaded, swift will then - create the manifest file so the segments can be downloaded as one.So now, the following st command would download the entire large object:swift download test_container large_fileThe swift CLI uses a strict convention for its segmented object support. In the + create the manifest file so the segments can be downloaded as one.So now, the following st command would download the entire large object:swift download test_container large_fileThe swift CLI uses a strict convention for its segmented object support. In the above example it will upload all the segments into a second container named test_container_segments. These segments will have names like large_file/1290206778.25/21474836480/00000000, @@ -1502,7 +1503,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 first until the last moment when the manifest file is updated.The swift CLI will manage these segment files for you, deleting old segments on deletes and overwrites, etc. You can override this behavior with the --leave-segments option if desired; this is useful if you want to have multiple - versions of the same large object available.Direct API Management of Large ObjectsDirect API Management of Large ObjectsYou can also work with the segments and manifests directly with HTTP requests + versions of the same large object available.Direct API Management of Large ObjectsDirect API Management of Large ObjectsYou can also work with the segments and manifests directly with HTTP requests instead of having swift do that for you. 
You can just upload the segments like you would any other object and the manifest is just a zero-byte file with an extra X-Object-Manifest header.All the object segments need to be in the same container, have a common object @@ -1516,7 +1517,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 the upload is complete. Also, you can upload a new set of segments to a second location and then update the manifest to point to this new location. During the upload of the new segments, the original manifest will still be available to - download the first set of segments.Here's an example using curl with tiny 1-byte segments: + download the first set of segments.Here's an example using curl with tiny 1-byte segments: # First, upload the segments curl -X PUT -H 'X-Auth-Token: <token>' \ http://<storage_url>/container/myobject/1 --data-binary '1' @@ -1532,22 +1533,22 @@ curl -X PUT -H 'X-Auth-Token: <token>' \ # And now we can download the segments as a single object curl -H 'X-Auth-Token: <token>' \ - http://<storage_url>/container/myobjectAdditional Notes on Large ObjectsAdditional Notes on Large ObjectsWith a GET or HEAD of a manifest file, the X-Object-Manifest: + http://<storage_url>/container/myobjectAdditional Notes on Large ObjectsAdditional Notes on Large ObjectsWith a GET or HEAD of a manifest file, the X-Object-Manifest: <container>/<prefix> header will be returned with the concatenated object - so you can tell where it's getting its segments from.The response's Content-Length for a GET or HEAD on the manifest + so you can tell where it's getting its segments from.The response's Content-Length for a GET or HEAD on the manifest file will be the sum of all the segments in the <container>/<prefix> listing, dynamically. So, uploading additional segments after the manifest is created will cause the concatenated object to be that much larger; there's no - need to recreate the manifest file.The response's Content-Type for a GET or HEAD on the manifest + need to recreate the manifest file.The response's Content-Type for a GET or HEAD on the manifest will be the same as the Content-Type set during the PUT request that created the manifest. You can easily change the Content-Type by reissuing - the PUT.The response's ETag for a GET or HEAD on the manifest file will + the PUT.The response's ETag for a GET or HEAD on the manifest file will be the MD5 sum of the concatenated string of ETags for each of the segments in the <container>/<prefix> listing, dynamically. Usually in OpenStack Object Storage the ETag is the MD5 sum of the contents of the object, and that holds true for each segment independently. But, it's not feasible to generate such an ETag for the manifest itself, so this method was chosen to at least offer change - detection.Large Object Storage History and BackgroundLarge Object Storage History and BackgroundLarge object support has gone through various iterations before settling on + detection.Large Object Storage History and BackgroundLarge Object Storage History and BackgroundLarge object support has gone through various iterations before settling on this implementation.The primary factor driving the limitation of object size in OpenStack Object Storage is maintaining balance among the partitions of the ring. To maintain an even dispersion of disk usage throughout the cluster the obvious storage pattern @@ -1581,12 +1582,12 @@ curl -H 'X-Auth-Token: <token>' \ term. 
In reality you're unlikely to encounter this scenario unless you're running very high concurrency uploads against a small testing environment which isn't running the object-updaters or container-replicators.Like all of OpenStack Object Storage, Large Object Support is living feature which will continue - to improve and may change over time.Throttling Resources by Setting Rate LimitsThrottling Resources by Setting Rate LimitsRate limiting in OpenStack Object Storage is implemented as a pluggable middleware that you configure on the proxy server. Rate + to improve and may change over time.Throttling Resources by Setting Rate LimitsThrottling Resources by Setting Rate LimitsRate limiting in OpenStack Object Storage is implemented as a pluggable middleware that you configure on the proxy server. Rate limiting is performed on requests that result in database writes to the account and container sqlite dbs. It uses memcached and is dependent on the proxy servers having highly synchronized time. The rate limits are - limited by the accuracy of the proxy server clocks.Configuration for Rate LimitingConfiguration for Rate LimitingAll configuration is optional. If no account or container limits are provided - there will be no rate limiting. Configuration available:Table 4.20. Configuration options for rate limiting in proxy-server.conf + limited by the accuracy of the proxy server clocks.Configuration for Rate LimitingConfiguration for Rate LimitingAll configuration is optional. If no account or container limits are provided + there will be no rate limiting. Configuration available:Table 4.20. Configuration options for rate limiting in proxy-server.conf fileOption Default Description @@ -1634,7 +1635,7 @@ curl -H 'X-Auth-Token: <token>' \ /account_name/container_name/object_name The container rate limits are linearly interpolated from the values given. A - sample container rate limiting could be:container_ratelimit_100 = 100container_ratelimit_200 = 50container_ratelimit_500 = 20This would result inTable 4.21. Values for Rate Limiting with Sample Configuration SettingsContainer Size + sample container rate limiting could be:container_ratelimit_100 = 100container_ratelimit_200 = 50container_ratelimit_500 = 20This would result inTable 4.21. Values for Rate Limiting with Sample Configuration SettingsContainer Size Rate Limit 0-99 No limiting @@ -1646,21 +1647,21 @@ curl -H 'X-Auth-Token: <token>' \ 20 1000 20 - Configuring Object Storage with the S3 APIConfiguring Object Storage with the S3 APIThe Swift3 middleware emulates the S3 REST API on top of Object Storage.The following operations are currently supported:GET ServiceDELETE BucketGET Bucket (List Objects)PUT BucketDELETE ObjectGET ObjectHEAD ObjectPUT ObjectPUT Object (Copy)To add this middleware to your configuration, add the swift3 middleware in front of the auth middleware, and before any other middleware that look at swift requests (like rate limiting). + Configuring Object Storage with the S3 APIConfiguring Object Storage with the S3 APIThe Swift3 middleware emulates the S3 REST API on top of Object Storage.The following operations are currently supported:GET ServiceDELETE BucketGET Bucket (List Objects)PUT BucketDELETE ObjectGET ObjectHEAD ObjectPUT ObjectPUT Object (Copy)To add this middleware to your configuration, add the swift3 middleware in front of the auth middleware, and before any other middleware that look at swift requests (like rate limiting). 
Ensure that your proxy-server.conf file contains swift3 in the pipeline and the - [filter:swift3] section, as shown below:[pipeline:main] + [filter:swift3] section, as shown below:[pipeline:main] pipeline = healthcheck cache swift3 swauth proxy-server [filter:swift3] use = egg:swift#swift3 - Next, configure the tool that you use to connect to the S3 API. For S3curl, for example, you'll need to add your host IP information by adding y our host IP to the @endpoints array (line 33 in s3curl.pl):my @endpoints = ( '1.2.3.4');Now you can send commands to the endpoint, such as: + Next, configure the tool that you use to connect to the S3 API. For S3curl, for example, you'll need to add your host IP information by adding y our host IP to the @endpoints array (line 33 in s3curl.pl):my @endpoints = ( '1.2.3.4');Now you can send commands to the endpoint, such as: ./s3curl.pl - 'myacc:myuser' -key mypw -get - -s -v http://1.2.3.4:8080 To set up your client, the access key will be the concatenation of the account and user strings that should look like test:tester, and the secret access key is the account password. The host should also point to the Swift storage node's hostname. It also will have to use the old-style calling format, and not the hostname-based container format. Here is an example client setup using the Python boto library on a locally installed - all-in-one Swift installation. + all-in-one Swift installation. connection = boto.s3.Connection( aws_access_key_id='test:tester', aws_secret_access_key='testing', @@ -1668,41 +1669,41 @@ connection = boto.s3.Connection( host='127.0.0.1', is_secure=False, calling_format=boto.s3.connection.OrdinaryCallingFormat()) - Managing OpenStack Object Storage with CLI SwiftManaging OpenStack Object Storage with CLI SwiftIn the Object Store (swift) project there is a tool that can perform a variety of tasks on + Managing OpenStack Object Storage with CLI SwiftManaging OpenStack Object Storage with CLI SwiftIn the Object Store (swift) project there is a tool that can perform a variety of tasks on your storage cluster named swift. This client utility can be used for adhoc processing, to gather statistics, list items, update metadata, upload, download and delete files. It is based on the native swift client library client.py. Incorporating client.py into swift provides many benefits such as seamlessly re-authorizing if the current token expires in the middle of processing, retrying operations up to five times and a processing concurrency of 10. All of these things help make the swift tool robust and - great for operational use.Swift CLI BasicsSwift CLI BasicsThe command line usage for swift, the CLI tool is: - swift (command) [options] [args]Here are the available commands for swift. stat [container] [object]Displays information for the account, container, or object depending on the - args given (if any).list [options] [container]Lists the containers for the account or the objects for a container. -p or -prefix + great for operational use.Swift CLI BasicsSwift CLI BasicsThe command line usage for swift, the CLI tool is: + swift (command) [options] [args]Here are the available commands for swift. stat [container] [object]Displays information for the account, container, or object depending on the + args given (if any).list [options] [container]Lists the containers for the account or the objects for a container. -p or -prefix is an option that will only list items beginning with that prefix. 
-d or -delimiter is option (for container listings only) that will roll up items with the given delimiter, or character that can act as a nested directory - organizer.upload [options] container file_or_directory [file_or_directory] […] Uploads to the given container the files and directories specified by the remaining args. -c + organizer.upload [options] container file_or_directory [file_or_directory] […] Uploads to the given container the files and directories specified by the remaining args. -c or -changed is an option that will only upload files that have changed since the - last upload.post [options] [container] [object]Updates meta information for the account, container, or object depending on the + last upload.post [options] [container] [object]Updates meta information for the account, container, or object depending on the args given. If the container is not found, it will be created automatically; but this is not true for accounts and objects. Containers also allow the -r (or -read-acl) and -w (or -write-acl) options. The -m or -meta option is allowed on all and used to define the user meta data items to set in the form Name:Value. - This option can be repeated. Example: post -m Color:Blue -m Size:Largedownload —all OR download container [object] [object] …Downloads everything in the account (with —all), or everything in a + This option can be repeated. Example: post -m Color:Blue -m Size:Largedownload —all OR download container [object] [object] …Downloads everything in the account (with —all), or everything in a container, or a list of objects depending on the args given. For a single object download, you may use the -o [—output] (filename) option to redirect the output to a specific file or if “-” then just redirect to - stdout.delete —all OR delete container [object] [object] …Deletes everything in the account (with —all), or everything in a + stdout.delete —all OR delete container [object] [object] …Deletes everything in the account (with —all), or everything in a container, or a list of objects depending on the args given. Example: swift -A https://auth.api.rackspacecloud.com/v1.0 -U user -K key - statOptions for swift-version show program’s version number and exit-h, -help show this help message and exit-s, -snet Use SERVICENET internal network-v, -verbose Print more info-q, -quiet Suppress status output-A AUTH, -auth=AUTH URL for obtaining an auth token-U USER, -user=USER User name for obtaining an auth token-K KEY, -key=KEY Key for obtaining an auth tokenAnalyzing Log Files with Swift CLIAnalyzing Log Files with Swift CLIWhen you want quick, command-line answers to questions about logs, you can use + statOptions for swift-version show program’s version number and exit-h, -help show this help message and exit-s, -snet Use SERVICENET internal network-v, -verbose Print more info-q, -quiet Suppress status output-A AUTH, -auth=AUTH URL for obtaining an auth token-U USER, -user=USER User name for obtaining an auth token-K KEY, -key=KEY Key for obtaining an auth tokenAnalyzing Log Files with Swift CLIAnalyzing Log Files with Swift CLIWhen you want quick, command-line answers to questions about logs, you can use swift with the -o or -output option. The -o —output option can only be used with a single object download to redirect the data stream to either a different file name or to STDOUT (-). The ability to redirect the output to STDOUT allows you to pipe “|” data without saving it to disk first. One common use case is being able to do some quick log file analysis. 
First let’s use swift to setup some data for the examples. The “logtest” directory contains four log files with the following line - format.files: + format.files: 2010-11-16-21_access.log 2010-11-16-22_access.log 2010-11-15-21_access.log @@ -1711,7 +1712,7 @@ connection = boto.s3.Connection( log lines: Nov 15 21:53:52 lucid64 proxy-server - 127.0.0.1 15/Nov/2010/22/53/52 DELETE /v1/AUTH_cd4f57824deb4248a533f2c28bf156d3/2eefc05599d44df38a7f18b0b42ffedd HTTP/1.0 204 - - test%3Atester%2CAUTH_tkcdab3c6296e249d7b7e2454ee57266ff - - - txaba5984c-aac7-460e-b04b-afc43f0c6571 - 0.0432The swift tool can easily upload the four log files into a container named “logtest”: - + $ cd logs $ swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K \ testing upload logtest *.log @@ -1750,7 +1751,7 @@ connection = boto.s3.Connection( everything during 2200 on November 16th, 2010. Based on the log line format column 9 is the type of request and column 12 is the return code. After awk processes the data stream it is piped to sort and then uniq -c to sum up the number of occurrences - for each combination of request type and return code.$ swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K \ + for each combination of request type and return code.$ swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K \ testing download -o - logtest 2010-11-16-22_access.log \ | awk ‘{ print $9”-“$12}’ | sort | uniq -c @@ -1789,7 +1790,7 @@ connection = boto.s3.Connection( First create a list of objects by running swift with the list command on the “logtest” container; then for each item in the list run swift with download -o - then pipe the output into grep to filter the put requests and finally into wc -l to - count the lines.$ for f in `swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K testing list logtest` ; \ + count the lines.$ for f in `swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K testing list logtest` ; \ do echo -ne “PUTS - ” ; swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K \ testing download -o - logtest $f | grep PUT | wc -l ; done @@ -1804,7 +1805,7 @@ connection = boto.s3.Connection( on the “logtest” container with the prefix option -p 2010-11-15. Then on each of item(s) returned run swift with the download -o - then pipe the output to grep and wc as in the previous example. The echo command is added to display the object - name.$ for f in `swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K testing list \ + name.$ for f in `swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K testing list \ -p 2010-11-15 logtest` ; do echo -ne “$f - PUTS - ” ; \ swift -A http://127.0.0.1:11000/v1.0 -U test:tester -K testing \ download -o - logtest $f | grep PUT | wc -l ; done @@ -1814,20 +1815,20 @@ connection = boto.s3.Connection( The swift utility is simple, scalable, flexible and provides useful solutions all of which are core principles of cloud computing; with the -o output - option being just one of its many features. OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 5. OpenStack Object Storage TutorialsWe want people to use OpenStack for practical problem solving, and the increasing size and + option being just one of its many features. 
OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 5. OpenStack Object Storage TutorialsWe want people to use OpenStack for practical problem solving, and the increasing size and density of web content makes for a great use-case for object storage. These tutorials show you how to use your OpenStack Object Storage installation for practical purposes, and it - assumes Object Storage is already installed.Storing Large Photos or Videos on the CloudStoring Large Photos or Videos on the CloudIn this OpenStack tutorial, we’ll walk through using an Object Storage installation to + assumes Object Storage is already installed.Storing Large Photos or Videos on the CloudStoring Large Photos or Videos on the CloudIn this OpenStack tutorial, we’ll walk through using an Object Storage installation to back up all your photos or videos. As the sensors on consumer-grade and pro-sumer grade cameras generate more and more megapixels, we all need a place to back our files to and - know they are safe. We'll go through this tutorial in parts:Setting up secure access to Object Storage.Configuring Cyberduck for connecting to OpenStack Object Storage.Copying files to the cloud.Part I: Setting Up Secure AccessPart I: Setting Up Secure AccessIn this part, we'll get the proxy server running with SSL on the Object Storage + know they are safe. We'll go through this tutorial in parts:Setting up secure access to Object Storage.Configuring Cyberduck for connecting to OpenStack Object Storage.Copying files to the cloud.Part I: Setting Up Secure AccessPart I: Setting Up Secure AccessIn this part, we'll get the proxy server running with SSL on the Object Storage installation. It's a requirement for using Cyberduck as a client interface to Object Storage.You will need a key and certificate to do this, which we can create as a self-signed for the tutorial since we can do the extra steps to have Cyberduck accept it. Creating a self-signed cert can usually be done with these commands on - the proxy server: cd /etc/swift + the proxy server: cd /etc/swift openssl req -new -x509 -nodes -out cert.crt -keyout cert.keyEnsure these generated files are in /etc/swift/cert.crt and /etc/swift/cert.key. You also should configure your iptables to enable https traffic. Here's an example - setup that works.Chain INPUT (policy ACCEPT 0 packets, 0 bytes) + setup that works.Chain INPUT (policy ACCEPT 0 packets, 0 bytes) pkts bytes target prot opt in out source destination 76774 1543M ACCEPT all -- lo any localhost anywhere 416K 537M ACCEPT all -- any any anywhere anywhere state RELATED,ESTABLISHED @@ -1842,24 +1843,24 @@ Chain FORWARD (policy ACCEPT 0 packets, 0 bytes) Chain OUTPUT (policy ACCEPT 397K packets, 1561M bytes) pkts bytes target prot opt in out source destination If you don't have access to the Object Storage installation to configure these - settings, ask your service provider to set up secure access for you. Then, edit your proxy-server.conf file to include the following in the [DEFAULT] sections. [DEFAULT] + settings, ask your service provider to set up secure access for you. Then, edit your proxy-server.conf file to include the following in the [DEFAULT] sections. 
[DEFAULT] bind_port = 443 cert_file = /etc/swift/cert.crt key_file = /etc/swift/cert.key Also, make sure you use https: for all references to the URL for the server in the .conf files as needed.Verify that you can connect using the Public URL to Object Storage by using the "swift" tool: - swift -A https://yourswiftinstall.com:11000/v1.0 -U test:tester -K testing stat + swift -A https://yourswiftinstall.com:11000/v1.0 -U test:tester -K testing stat Okay, you've created the access that Cyberduck expects for your Object Storage - installation. Let's start configuring the Cyberduck side of things.Part II: Configuring CyberduckPart II: Configuring CyberduckNext, you want to change the context of the URL from the default /v1.0 by opening + installation. Let's start configuring the Cyberduck side of things.Part II: Configuring CyberduckPart II: Configuring CyberduckNext, you want to change the context of the URL from the default /v1.0 by opening a Terminal window and using defaults write ch.sudo.cyberduck cf.authentication.context <string> to change the URL. Substitute /auth/v1.0 for the <string> (Mac OSX). Cyberduck 3.8.1 includes a drop-down for selecting Swift (OpenStack Object Storage) when opening a connection. Launch Cyberduck, and then click the New Connection toolbar button or choose File > Open Connection. Select Swift (OpenStack Object Storage). Enter the following values:Server: Enter the URL of the installed Swift server.Port: Enter 443 since you are connecting via https.Username: Enter the account name followed by a colon and then the user name, for - example test:tester.Password: Enter the password for the account and user name entered above.Figure 5.1. Example Cyberduck Swift ConnectionPart III: Creating Containers (Folders) and Uploading FilesPart III: Creating Containers (Folders) and Uploading FilesNow you want to create containers to hold your files. Without containers, Object + example test:tester.Password: Enter the password for the account and user name entered above.Figure 5.1. Example Cyberduck Swift ConnectionPart III: Creating Containers (Folders) and Uploading FilesPart III: Creating Containers (Folders) and Uploading FilesNow you want to create containers to hold your files. Without containers, Object Storage doesn't know where to put the files. In the Action menu, choose New Folder and name the folder.Next you can drag and drop files into the created folder or select File > Upload - to select files to upload to the OpenStack Object Storage service. Figure 5.2. Example Cyberduck Swift Showing UploadsEt voila! You can back up terabytes of data if you just have the space and the data. That's a lot of pictures or video, so get snapping and rolling! OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 OpenStack Object Storage Administrator ManualAug 12, 20111.4.1 6. Support and TroubleshootingOnline resources aid in supporting OpenStack and the community members are willing and able to answer questions and help with bug suspicions. We are constantly improving and adding to the main features of OpenStack, but if you have any problems, do not hesitate to ask. 
Here are some ideas for supporting OpenStack and troubleshooting your existing installations.Community SupportCommunity SupportHere are some places you can locate others who want to help.The Launchpad Answers areaThe Launchpad Answers areaDuring setup or testing, you may have questions about how to do something, or end up in + to select files to upload to the OpenStack Object Storage service. Figure 5.2. Example Cyberduck Swift Showing UploadsEt voila! You can back up terabytes of data if you just have the space and the data. That's a lot of pictures or video, so get snapping and rolling! OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 OpenStack Object Storage Administrator ManualSep 22, 20111.4.3 6. Support and TroubleshootingOnline resources aid in supporting OpenStack and the community members are willing and able to answer questions and help with bug suspicions. We are constantly improving and adding to the main features of OpenStack, but if you have any problems, do not hesitate to ask. Here are some ideas for supporting OpenStack and troubleshooting your existing installations.Community SupportCommunity SupportHere are some places you can locate others who want to help.The Launchpad Answers areaThe Launchpad Answers areaDuring setup or testing, you may have questions about how to do something, or end up in a situation where you can't seem to get a feature to work correctly. One place to look for help is the Answers section on Launchpad. Launchpad is the "home" for the project code and its developers and thus is a natural place to ask about the @@ -1868,29 +1869,29 @@ key_file = /etc/swift/cert.key https://answers.launchpad.net/nova OpenStack Object Storage: https://answers.launchpad.net/swift. OpenStack mailing listOpenStack mailing listPosting your question or scenario to the OpenStack mailing list is a great way to get + so on. The Launchpad Answers areas are available here - OpenStack Compute: https://answers.launchpad.net/nova OpenStack Object Storage: https://answers.launchpad.net/swift. OpenStack mailing listOpenStack mailing listPosting your question or scenario to the OpenStack mailing list is a great way to get answers and insights. You can learn from and help others who may have the same scenario as you. Go to https://launchpad.net/~openstack and click "Subscribe to mailing list" - or view the archives at https://lists.launchpad.net/openstack/.The OpenStack Wiki search The OpenStack Wiki search The OpenStack wiki contains content + or view the archives at https://lists.launchpad.net/openstack/.The OpenStack Wiki search The OpenStack Wiki search The OpenStack wiki contains content on a broad range of topics, but some of it sits a bit below the surface. Fortunately, the wiki search feature is very powerful in that it can do both searches by title and by content. If you are searching for specific information, say about "networking" or "api" for nova, you can find lots of content using the search feature. More is being added all the time, so be sure to check back often. You can find the search box in the upper right hand corner of any OpenStack wiki - page. The Launchpad Bugs area The Launchpad Bugs area So you think you've found a bug. That's great! Seriously, it is. The OpenStack community + page. The Launchpad Bugs area The Launchpad Bugs area So you think you've found a bug. That's great! Seriously, it is. The OpenStack community values your setup and testing efforts and wants your feedback. 
To log a bug you must have a Launchpad account, so sign up at https://launchpad.net/+login if you do not already have a Launchpad ID. You can view existing bugs and report your bug in the Launchpad Bugs area. It is suggested that you first use the search facility to see if the bug you found has already been reported (or even better, already fixed). If it still seems like your bug is new or unreported then it is time to fill out a bug - report. Some tips: Give a clear, concise summary! Provide as much detail as possible + report. Some tips: Give a clear, concise summary! Provide as much detail as possible in the description. Paste in your command output or stack traces, link to - screenshots, etc. Be sure to include what version of the software you are using. + screenshots, etc. Be sure to include what version of the software you are using. This is especially critical if you are using a development branch eg. "Austin - release" vs lp:nova rev.396. Any deployment specific info is helpful as well. eg. + release" vs lp:nova rev.396. Any deployment specific info is helpful as well. eg. Ubuntu 10.04, multi-node install.The Launchpad Bugs areas are available here - OpenStack Compute: https://bugs.launchpad.net/nova OpenStack Object Storage: https://bugs.launchpad.net/swift - The OpenStack IRC channel The OpenStack IRC channel The OpenStack community lives and breathes in the #openstack IRC channel on the + The OpenStack IRC channel The OpenStack IRC channel The OpenStack community lives and breathes in the #openstack IRC channel on the Freenode network. You can come by to hang out, ask questions, or get immediate feedback for urgent and pressing issues. To get into the IRC channel you need to install an IRC client or use a browser-based client by going to @@ -1900,8 +1901,8 @@ key_file = /etc/swift/cert.key Troubleshooting OpenStack Object StorageTroubleshooting OpenStack Object StorageFor OpenStack Object Storage, everything is logged in /var/log/syslog (or messages on some distros). Several settings enable further customization of logging, such as log_name, log_facility, and log_level, within the object server configuration files.Handling Drive FailureHandling Drive Failure In the event that a drive has failed, the first step is to make sure the drive is unmounted. This will make it easier for OpenStack Object Storage to work around the failure until it has been resolved. If the drive is going to be replaced immediately, then it is just best to replace the drive, format it, remount it, and let replication fill it up.If the drive can’t be replaced immediately, then it is best to leave it unmounted, and remove the drive from the ring. This will allow all the replicas that were on that drive to be replicated elsewhere until the drive is replaced. Once the drive is replaced, it can be re-added to the ring.Handling Server FailureHandling Server FailureIf a server is having hardware issues, it is a good idea to make sure the OpenStack Object Storage services are not running. This will allow OpenStack Object Storage to work around the failure while you troubleshoot.If the server just needs a reboot, or a small amount of work that should only last a couple of hours, then it is probably best to let OpenStack Object Storage work around the failure and get the machine fixed and back online. 
When the machine comes back online, replication will make sure that anything that is missing during the downtime will get updated.If the server has more serious issues, then it is probably best to remove all of the server’s devices from the ring. Once the server has been repaired and is back online, the server’s devices can be added back into the ring. It is important that the devices are reformatted before putting them back into the ring as it is likely to be responsible for a different set of partitions than before.Detecting Failed DrivesDetecting Failed DrivesIt has been our experience that when a drive is about to fail, error messages will spew into /var/log/kern.log. There is a script called swift-drive-audit that can be run via cron to watch for bad drives. If errors are detected, it will unmount the bad drive, so that OpenStack Object Storage can work around it. The script takes a configuration file with the following settings: - + channel is: #openstack on irc.freenode.net. Troubleshooting OpenStack Object StorageTroubleshooting OpenStack Object StorageFor OpenStack Object Storage, everything is logged in /var/log/syslog (or messages on some distros). Several settings enable further customization of logging, such as log_name, log_facility, and log_level, within the object server configuration files.Handling Drive FailureHandling Drive Failure In the event that a drive has failed, the first step is to make sure the drive is unmounted. This will make it easier for OpenStack Object Storage to work around the failure until it has been resolved. If the drive is going to be replaced immediately, then it is just best to replace the drive, format it, remount it, and let replication fill it up.If the drive can’t be replaced immediately, then it is best to leave it unmounted, and remove the drive from the ring. This will allow all the replicas that were on that drive to be replicated elsewhere until the drive is replaced. Once the drive is replaced, it can be re-added to the ring.Handling Server FailureHandling Server FailureIf a server is having hardware issues, it is a good idea to make sure the OpenStack Object Storage services are not running. This will allow OpenStack Object Storage to work around the failure while you troubleshoot.If the server just needs a reboot, or a small amount of work that should only last a couple of hours, then it is probably best to let OpenStack Object Storage work around the failure and get the machine fixed and back online. When the machine comes back online, replication will make sure that anything that is missing during the downtime will get updated.If the server has more serious issues, then it is probably best to remove all of the server’s devices from the ring. Once the server has been repaired and is back online, the server’s devices can be added back into the ring. It is important that the devices are reformatted before putting them back into the ring as it is likely to be responsible for a different set of partitions than before.Detecting Failed DrivesDetecting Failed DrivesIt has been our experience that when a drive is about to fail, error messages will spew into /var/log/kern.log. There is a script called swift-drive-audit that can be run via cron to watch for bad drives. If errors are detected, it will unmount the bad drive, so that OpenStack Object Storage can work around it. 
The script takes a configuration file with the following settings: + [drive-audit] Option Default Description log_facility LOG_LOCAL0 Syslog log facility @@ -1910,6 +1911,6 @@ key_file = /etc/swift/cert.key This script has only been tested on Ubuntu 10.04, so if you are using a different distro or OS, some care should be taken before using in production. - Troubleshooting OpenStack ComputeTroubleshooting OpenStack ComputeLog files for OpenStack ComputeLog files for OpenStack ComputeLog files are stored in /var/log/nova and there is a log file for each service, for example nova-compute.log. You can format the log strings using flags for the nova.log module. The flags used to set format strings are: logging_context_format_string and logging_default_format_string. If the log level is set to debug, you can also specify logging_debug_format_suffix to append extra formatting. For information about what variables are available for the formatter see: http://docs.python.org/library/logging.html#formatter You have two options for logging for OpenStack Compute based on configuration settings. In nova.conf, include the --logfile flag to enable logging. Alternatively you can set --use_syslog=1, and then the nova daemon logs to syslog.Common Errors and Fixes for OpenStack ComputeCommon Errors and Fixes for OpenStack ComputeThe Launchpad Answers site offers a place to ask and answer questions, and you can also mark questions as frequently asked questions. This section describes some errors people have posted to Launchpad Answers and IRC. We are constantly fixing bugs, so online resources are a great way to get the most up-to-date errors and fixes.Credential errors, 401, 403 forbidden errorsA 403 forbidden error is caused by missing credentials. Through current installation methods, there are basically two ways to get the novarc file. The manual method requires getting it from within a project zipfile, and the scripted method just generates novarc out of the project zip file and sources it for you. If you do the manual method through a zip file, then the following novarc alone, you end up losing the creds that are tied to the user you created with nova-manage in the steps before.When you run nova-api the first time, it generates the certificate authority information, including openssl.cnf. If it gets started out of order, you may not be able to create your zip file. Once your CA information is available, you should be able to go back to nova-manage to create your zipfile. You may also need to check your proxy settings to see if they are causing problems with the novarc creation.Instance errorsSometimes a particular instance shows "pending" or you cannot SSH to it. Sometimes the image itself is the problem. For example, when using flat manager networking, you do not have a dhcp server, and an ami-tiny image doesn't support interface injection so you cannot connect to it. The fix for this type of problem is to use an Ubuntu image, which should obtain an IP address correctly with FlatManager network settings. To troubleshoot other possible problems with an instance, such as one that stays in a spawning state, first check your instances directory for i-ze0bnh1q dir to make sure it has the following files:libvirt.xmldiskdisk-rawkernelramdiskconsole.log (Once the instance actually starts you should see a console.log.)Check the file sizes to see if they are reasonable. If any are missing/zero/very small then nova-compute has somehow not completed download of the images from objectstore. 
Troubleshooting OpenStack Compute

Common problems for Compute typically involve misconfigured networking or credentials that are not sourced properly in the environment. Also, most flat networking configurations do not enable ping or ssh from a compute node to the instances running on that node. Another common problem is trying to run 32-bit images on a 64-bit compute node. This section offers more information about how to troubleshoot Compute.

Log files for OpenStack Compute

Log files are stored in /var/log/nova and there is a log file for each service, for example nova-compute.log. You can format the log strings using flags for the nova.log module. The flags used to set format strings are: logging_context_format_string and logging_default_format_string. If the log level is set to debug, you can also specify logging_debug_format_suffix to append extra formatting. For information about what variables are available for the formatter see: http://docs.python.org/library/logging.html#formatter

You have two options for logging for OpenStack Compute based on configuration settings. In nova.conf, include the --logfile flag to enable logging. Alternatively you can set --use_syslog=1, and then the nova daemon logs to syslog.
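For example, the relevant lines in nova.conf might look like the following; the log file path is an assumption for illustration, and you would normally choose either file-based logging or syslog rather than both:

# Log to a file (the path shown here is illustrative).
--logfile=/var/log/nova/nova.log
# Or, instead, send logs to syslog.
# --use_syslog=1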
Common Errors and Fixes for OpenStack Compute

The Launchpad Answers site offers a place to ask and answer questions, and you can also mark questions as frequently asked questions. This section describes some errors people have posted to Launchpad Answers and IRC. We are constantly fixing bugs, so online resources are a great way to get the most up-to-date errors and fixes.

Credential errors, 401, and 403 forbidden errors

A 403 forbidden error is caused by missing credentials. Through current installation methods, there are basically two ways to get the novarc file. The manual method requires getting it from within a project zipfile, and the scripted method just generates novarc out of the project zip file and sources it for you. If you do the manual method through a zip file and then rely on the novarc alone, you end up losing the credentials that are tied to the user you created with nova-manage in the steps before.

When you run nova-api the first time, it generates the certificate authority information, including openssl.cnf. If it gets started out of order, you may not be able to create your zip file. Once your CA information is available, you should be able to go back to nova-manage to create your zipfile. You may also need to check your proxy settings to see if they are causing problems with the novarc creation.

Instance errors

Sometimes a particular instance shows "pending" or you cannot SSH to it. Sometimes the image itself is the problem. For example, when using flat manager networking, you do not have a dhcp server, and an ami-tiny image doesn't support interface injection so you cannot connect to it. The fix for this type of problem is to use an Ubuntu image, which should obtain an IP address correctly with FlatManager network settings.

To troubleshoot other possible problems with an instance, such as one that stays in a spawning state, first check your instances directory for the i-ze0bnh1q directory to make sure it has the following files:

libvirt.xml
disk
disk-raw
kernel
ramdisk
console.log (once the instance actually starts you should see a console.log)

Check the file sizes to see if they are reasonable. If any are missing, zero, or very small, then nova-compute has somehow not completed the download of the images from objectstore. Also check nova-compute.log for exceptions; sometimes they don't show up in the console output.

Next, check the /var/log/libvirt/qemu/i-ze0bnh1q.log file to see if it exists and has any useful error messages in it.

Finally, from the instances/i-ze0bnh1q directory, try virsh create libvirt.xml and see if you get an error there.
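A quick way to walk through those checks from a shell on the compute node follows; the instance id and the instances path (/var/lib/nova/instances is a common default, but yours may differ) are assumptions for illustration:

cd /var/lib/nova/instances/i-ze0bnh1q
# Confirm the expected files exist and have sensible sizes.
ls -l libvirt.xml disk disk-raw kernel ramdisk console.log
# Look for exceptions from the compute service.
grep -i error /var/log/nova/nova-compute.log | tail
# Check the hypervisor log for this instance.
tail /var/log/libvirt/qemu/i-ze0bnh1q.log
# As a last resort, try starting the instance by hand to surface libvirt errors.
virsh create libvirt.xml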
 


diff --git a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/about-the-dashboard.html b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/about-the-dashboard.html
index 5a10a4d8f4..f268f5a1f6 100644
--- a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/about-the-dashboard.html
+++ b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/about-the-dashboard.html
@@ -5,7 +5,7 @@ About the Dashboard
diff --git a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/allocating-associating-ip-addresses.html b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/allocating-associating-ip-addresses.html
index b1933951d9..d9c1771ee5 100644
--- a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/allocating-associating-ip-addresses.html
+++ b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/allocating-associating-ip-addresses.html
@@ -5,7 +5,7 @@ Allocating and Associating IP Addresses with Instances
diff --git a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/associating-public-ip.html b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/associating-public-ip.html
index 7ba1b061eb..37c364dda5 100644
--- a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/associating-public-ip.html
+++ b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/associating-public-ip.html
@@ -5,7 +5,7 @@ Associating a Public IP Address
diff --git a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/build-and-configure-openstack-dashboard.html b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/build-and-configure-openstack-dashboard.html
index ae6d850cb2..3b422bc561 100644
--- a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/build-and-configure-openstack-dashboard.html
+++ b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/build-and-configure-openstack-dashboard.html
@@ -3,9 +3,9 @@ PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-Build and Configure Openstack-Dashboard
diff --git a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/certificates-and-revocation.html b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/certificates-and-revocation.html
index 9f5c5ef7c7..cc4b4820f1 100644
--- a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/certificates-and-revocation.html
+++ b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/certificates-and-revocation.html
@@ -5,7 +5,7 @@ Certificates and Revocation
diff --git a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_configuring-openstack-compute.html b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_configuring-openstack-compute.html
index 61b464b67a..4768d65022 100644
--- a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_configuring-openstack-compute.html
+++ b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_configuring-openstack-compute.html
@@ -3,9 +3,9 @@ PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-Chapter 4. Configuring OpenStack Compute
    The OpenStack system has several key projects that are separate installations but can work together depending on your cloud needs: OpenStack Compute, OpenStack Object Storage, and OpenStack Image Store. You can install any of these projects separately and then configure them either as standalone or connected entities.



diff --git a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_getting-started-with-openstack.html b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_getting-started-with-openstack.html
index e8881c3ed5..55b4a42e93 100644
--- a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_getting-started-with-openstack.html
+++ b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_getting-started-with-openstack.html
@@ -5,7 +5,7 @@ Chapter 1. Getting Started with OpenStack
OpenStack is a collection of open source technology that provides massively scalable open source cloud computing software. Currently OpenStack develops two related projects: OpenStack Compute, which offers computing power through virtual machine and network management, and OpenStack Object Storage which is software for redundant, scalable object
@@ -45,6 +45,6 @@
researchers, and global data centers looking to deploy large-scale cloud deployments for private or public clouds.



diff --git a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_hypervisors.html b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_hypervisors.html
index 2ec1b28251..0b78f1a9dc 100644
--- a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_hypervisors.html
+++ b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_hypervisors.html
@@ -3,9 +3,9 @@ PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-Chapter 5. Hypervisors
    This section assumes you have a working installation of OpenStack Compute and want to select a particular hypervisor or run with multiple hypervisors. Before you try to get a VM running within OpenStack Compute, be sure you have installed a hypervisor and used the hypervisor's documentation to run a test VM and get it working.



diff --git a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_installing-openstack-compute.html b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_installing-openstack-compute.html
index f09d480cd6..b63fe57bca 100644
--- a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_installing-openstack-compute.html
+++ b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_installing-openstack-compute.html
@@ -5,7 +5,7 @@ Chapter 3. Installing OpenStack Compute
diff --git a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_introduction-to-openstack-compute.html b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_introduction-to-openstack-compute.html
index a039fb5103..484cd80081 100644
--- a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_introduction-to-openstack-compute.html
+++ b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_introduction-to-openstack-compute.html
@@ -5,7 +5,7 @@ Chapter 2. Introduction to OpenStack Compute
OpenStack Compute gives you a tool to orchestrate a cloud, including running instances, managing networks, and controlling access to the cloud through users and projects. The underlying open source project's name is Nova, and it provides the software that can control an Infrastructure as a Service (IaaS) cloud computing platform. It is similar in scope to
@@ -45,6 +45,6 @@
virtualization mechanisms that run on your host operating system, and exposes functionality over a web-based API.



diff --git a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_networking.html b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_networking.html
index e5c5942fc9..6aa6b99e06 100644
--- a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_networking.html
+++ b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_networking.html
@@ -5,7 +5,7 @@ Chapter 7. Networking
diff --git a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_openstack-compute-automated-installations.html b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_openstack-compute-automated-installations.html
index 46d478b4a9..5f0aac0011 100644
--- a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_openstack-compute-automated-installations.html
+++ b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_openstack-compute-automated-installations.html
@@ -5,7 +5,7 @@ Chapter 6. OpenStack Compute Automated Installations
    In a large-scale cloud deployment, automated installations are a requirement for successful, efficient, repeatable installations. Automation for installation also helps with continuous integration and testing. This chapter offers some tested methods for deploying OpenStack Compute with either Puppet (an infrastructure management platform) or Chef (an infrastructure management framework) paired with Vagrant (a tool for building and distributing virtualized development environments).



diff --git a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_openstack-compute-tutorials.html b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_openstack-compute-tutorials.html
index 733a88ed64..f4bdfade9e 100644
--- a/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_openstack-compute-tutorials.html
+++ b/doc/target/docbkx/webhelp/trunk/openstack-compute/admin/content/ch_openstack-compute-tutorials.html
@@ -5,7 +5,7 @@ Chapter 10. OpenStack Compute Tutorials