Adds admin manuals for Identity, Image, Compute, and Object Storage

Change-Id: I368af8a7b843b1f1cba0d8a2ccdaa0f40634db3d
This commit is contained in:
Anne Gentle 2011-09-20 08:50:34 -05:00
parent 02c976ff1d
commit ba65f6e287
1757 changed files with 250414 additions and 0 deletions

9
README.rst Normal file
View File

@ -0,0 +1,9 @@
This repository contains the cloud administrator documentation for the OpenStack project. It includes documentation for OpenStack Compute, OpenStack Identity Service, OpenStack Image Service, and OpenStack Object Storage as well as the Dashboard.
Contributing
============
Our community welcomes all people interested in open source cloud computing, and there are no formal membership requirements. The best way to join the community is to talk with others online or at a meetup and offer contributions through Launchpad, the OpenStack wiki, or blogs. We welcome all types of contributions, from blueprint designs to documentation to testing to deployment scripts.
Installing
==========
Refer to http://docs.openstack.org to see where these documents are published and to learn more about the OpenStack project.

192
doc/pom.xml Normal file
View File

@ -0,0 +1,192 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>org.openstack.docs</groupId>
    <artifactId>openstack-guide</artifactId>
    <version>1.0.0-SNAPSHOT</version>
    <packaging>jar</packaging>
    <name>OpenStack Guides</name>
    <!-- ################################################ -->
    <!-- USE "mvn clean generate-sources" to run this POM -->
    <!-- ################################################ -->
    <profiles>
        <profile>
            <!-- Active by default so the repository hosting the
                 clouddocs-maven-plugin is always available. -->
            <id>Rackspace Research Repositories</id>
            <activation>
                <activeByDefault>true</activeByDefault>
            </activation>
            <repositories>
                <repository>
                    <id>rackspace-research</id>
                    <name>Rackspace Research Repository</name>
                    <url>http://maven.research.rackspacecloud.com/content/groups/public/</url>
                </repository>
            </repositories>
            <pluginRepositories>
                <pluginRepository>
                    <id>rackspace-research</id>
                    <name>Rackspace Research Repository</name>
                    <url>http://maven.research.rackspacecloud.com/content/groups/public/</url>
                </pluginRepository>
            </pluginRepositories>
        </profile>
    </profiles>
    <build>
        <resources>
            <resource>
                <directory>target/docbkx/pdf</directory>
                <excludes>
                    <!-- The intermediate XSL-FO files are build artifacts only;
                         exclude them from the packaged resources. -->
                    <exclude>**/*.fo</exclude>
                </excludes>
            </resource>
        </resources>
        <plugins>
            <plugin>
                <groupId>com.rackspace.cloud.api</groupId>
                <artifactId>clouddocs-maven-plugin</artifactId>
                <version>1.0.4</version>
                <executions>
                    <!-- Execution 1: PDF output for all guides listed in the
                         shared <includes> below. -->
                    <execution>
                        <id>goal1</id>
                        <goals>
                            <goal>generate-pdf</goal>
                        </goals>
                        <phase>generate-sources</phase>
                        <configuration>
                            <highlightSource>false</highlightSource>
                            <!-- The following elements set the autonumbering of sections in output for chapter numbers but no numbered sections -->
                            <sectionAutolabel>0</sectionAutolabel>
                            <sectionLabelIncludesComponentLabel>0</sectionLabelIncludesComponentLabel>
                        </configuration>
                    </execution>
                    <!-- Execution 2: webhelp (HTML) output, then post-processing
                         to lay the output out as published on docs.openstack.org. -->
                    <execution>
                        <id>goal2</id>
                        <goals>
                            <goal>generate-webhelp</goal>
                        </goals>
                        <phase>generate-sources</phase>
                        <configuration>
                            <!-- These parameters only apply to webhelp -->
                            <enableDisqus>1</enableDisqus>
                            <disqusShortname>openstackdocs</disqusShortname>
                            <enableGoogleAnalytics>1</enableGoogleAnalytics>
                            <googleAnalyticsId>UA-17511903-6</googleAnalyticsId>
                            <generateToc>
                                appendix toc,title
                                article/appendix nop
                                article toc,title
                                book title,figure,table,example,equation
                                chapter toc,title
                                part toc,title
                                preface toc,title
                                qandadiv toc
                                qandaset toc
                                reference toc,title
                                set toc,title
                            </generateToc>
                            <!-- The following elements set the autonumbering of sections in output for chapter numbers but no numbered sections -->
                            <sectionAutolabel>0</sectionAutolabel>
                            <sectionLabelIncludesComponentLabel>0</sectionLabelIncludesComponentLabel>
                            <postProcess>
                                <!-- Copies the figures to the correct location for webhelp.
                                     NOTE(review): a second, byte-identical copy of the
                                     compute-admin figures was removed here; copying the
                                     same fileset to the same todir twice is redundant. -->
                                <copy todir="${basedir}/target/docbkx/webhelp/openstack-compute-admin/os-compute-adminguide/figures">
                                    <fileset dir="${basedir}/src/docbkx/figures">
                                        <include name="**/*.*" />
                                    </fileset>
                                </copy>
                                <copy todir="${basedir}/target/docbkx/webhelp/openstack-image-service-admin/os-image-adminguide/figures">
                                    <fileset dir="${basedir}/src/docbkx/openstack-image-service-admin/figures">
                                        <include name="**/*.png" />
                                    </fileset>
                                </copy>
                                <copy todir="${basedir}/target/docbkx/webhelp/openstack-object-storage-admin/os-objectstorage-adminguide/figures">
                                    <fileset dir="${basedir}/src/docbkx/figures">
                                        <include name="**/*.png" />
                                    </fileset>
                                </copy>
                                <!-- Copies webhelp (HTML output) to desired URL location on docs.openstack.org -->
                                <copy todir="${basedir}/target/docbkx/webhelp/trunk/openstack-compute/admin/">
                                    <fileset
                                        dir="${basedir}/target/docbkx/webhelp/openstack-compute-admin/os-compute-adminguide/">
                                        <include name="**/*" />
                                    </fileset>
                                </copy>
                                <copy todir="${basedir}/target/docbkx/webhelp/trunk/openstack-identity/admin/">
                                    <fileset
                                        dir="${basedir}/target/docbkx/webhelp/openstack-identity-service-starter/os-identity-starter-guide/">
                                        <include name="**/*" />
                                    </fileset>
                                </copy>
                                <copy
                                    todir="${basedir}/target/docbkx/webhelp/trunk/openstack-object-storage/admin">
                                    <fileset
                                        dir="${basedir}/target/docbkx/webhelp/openstack-object-storage-admin/os-objectstorage-adminguide/">
                                        <include name="**/*" />
                                    </fileset>
                                </copy>
                                <copy
                                    todir="${basedir}/target/docbkx/webhelp/trunk/openstack-image-service/admin">
                                    <fileset
                                        dir="${basedir}/target/docbkx/webhelp/openstack-image-service-admin/os-image-adminguide/">
                                        <include name="**/*" />
                                    </fileset>
                                </copy>
                                <!-- Moves PDFs to the needed placement.
                                     failonerror="false" because a guide's PDF may not
                                     exist if its generation was skipped or failed. -->
                                <move failonerror="false"
                                    file="${basedir}/target/docbkx/pdf/openstack-compute-admin/os-compute-adminguide.pdf"
                                    tofile="${basedir}/target/docbkx/webhelp/trunk/openstack-compute/admin/os-compute-adminguide-trunk.pdf"/>
                                <move failonerror="false"
                                    file="${basedir}/target/docbkx/pdf/openstack-image-service-admin/os-image-adminguide.pdf"
                                    tofile="${basedir}/target/docbkx/webhelp/trunk/openstack-image-service/admin/os-image-adminguide-trunk.pdf"/>
                                <move failonerror="false"
                                    file="${basedir}/target/docbkx/pdf/openstack-object-storage-admin/os-objectstorage-adminguide.pdf"
                                    tofile="${basedir}/target/docbkx/webhelp/trunk/openstack-object-storage/admin/os-objectstorage-adminguide-trunk.pdf"/>
                                <move failonerror="false"
                                    file="${basedir}/target/docbkx/pdf/openstack-identity-service-starter/os-identity-starter-guide.pdf"
                                    tofile="${basedir}/target/docbkx/webhelp/trunk/openstack-identity/admin/os-identity-starter-guide-trunk.pdf"/>
                                <!-- Deletes leftover unneeded directories -->
                                <delete dir="${basedir}/target/docbkx/webhelp/openstack-compute-admin"/>
                                <delete dir="${basedir}/target/docbkx/webhelp/openstack-object-storage-admin"/>
                                <delete dir="${basedir}/target/docbkx/webhelp/openstack-image-service-admin"/>
                                <delete dir="${basedir}/target/docbkx/webhelp/openstack-identity-service-starter"/>
                            </postProcess>
                        </configuration>
                    </execution>
                </executions>
                <configuration>
                    <!-- These parameters apply to pdf and webhelp -->
                    <xincludeSupported>true</xincludeSupported>
                    <sourceDirectory>src/docbkx</sourceDirectory>
                    <includes>
                        openstack-compute-admin/os-compute-adminguide.xml,
                        openstack-object-storage-admin/os-objectstorage-adminguide.xml,
                        openstack-image-service-admin/os-image-adminguide.xml,
                        openstack-identity-service-starter/os-identity-starter-guide.xml
                    </includes>
                    <profileSecurity>reviewer</profileSecurity>
                    <branding>openstack</branding>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>

View File

@ -0,0 +1,239 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
xml:id="ch_getting-started-with-openstack">
<title>Getting Started with OpenStack</title>
<para>OpenStack is a collection of open source technology that provides massively scalable open
source cloud computing software. Currently OpenStack develops two related projects:
OpenStack Compute, which offers computing power through virtual machine and network
management, and OpenStack Object Storage which is software for redundant, scalable object
storage capacity. Closely related to the OpenStack Compute project is the Image Service
project, named Glance. OpenStack can be used by corporations, service providers, VARS, SMBs,
researchers, and global data centers looking to deploy large-scale cloud deployments for
private or public clouds. </para>
<section xml:id="what-is-openstack">
<title>What is OpenStack?</title>
<para>OpenStack offers open source software to build public and private clouds. OpenStack is
a community and a project as well as open source software to help organizations run
clouds for virtual computing or storage. OpenStack contains a collection of open source
projects that are community-maintained including OpenStack Compute (code-named Nova),
OpenStack Object Storage (code-named Swift), and OpenStack Image Service (code-named
Glance). OpenStack provides an operating platform, or toolkit, for orchestrating clouds. </para>
<para>OpenStack is more easily defined once the concepts of cloud computing become
apparent, but we are on a mission: to provide scalable, elastic cloud computing for
both public and private clouds, large and small. At the heart of our mission is a
pair of basic requirements: clouds must be simple to implement and massively
scalable.</para>
<para>If you are new to OpenStack, you will undoubtedly have questions about installation,
deployment, and usage. It can seem overwhelming at first. But don't fear, there are
places to get information to guide you and to help resolve any issues you may run into
during the on-ramp process. Because the project is so new and constantly changing, be
aware of the revision time for all information. If you are reading a document that is a
few months old and you feel that it isn't entirely accurate, then please let us know
through the mailing list at <link xlink:href="https://launchpad.net/~openstack"
>https://launchpad.net/~openstack</link> so it can be updated or removed. </para>
</section>
<section xml:id="components-of-openstack"><title>Components of OpenStack</title>
<para>There are currently three main components of OpenStack: Compute, Object Storage, and
Image Service. Let's look at each in turn.</para>
<para>OpenStack Compute is a cloud fabric controller, used to start up virtual instances for
either a user or a group. It's also used to configure networking for each instance or
project that contains multiple instances for a particular project. </para>
<para>OpenStack Object Storage is a system to store objects in a massively scalable large
capacity system with built-in redundancy and failover. Object Storage has a variety of
applications, such as backing up or archiving data, serving graphics or videos
(streaming data to a user's browser), storing secondary or tertiary static data,
developing new applications with data storage integration, storing data when predicting
storage capacity is difficult, and creating the elasticity and flexibility of
cloud-based storage for your web applications.</para>
<para>OpenStack Image Service is a lookup and retrieval system for virtual machine images.
It can be configured in three ways: using OpenStack Object Store to store images; using
Amazon's Simple Storage Solution (S3) storage directly; or using S3 storage with Object
Store as the intermediate for S3 access.</para>
<para>The following diagram shows the basic relationships between the projects, how they
relate to each other, and how they can fulfill the goals of open source cloud computing. </para>
<informalfigure>
<mediaobject>
<imageobject>
<imagedata fileref="../figures/OpenStackCore.png"/>
</imageobject>
</mediaobject></informalfigure>
</section>
<section xml:id="openstack-architecture-overview"><title>OpenStack Project Architecture Overview</title>
<para>by <link xlink:href="http://ken.pepple.info">Ken Pepple</link></para><para>Before we dive into the conceptual and logic architecture, lets take a second to explain the OpenStack project: </para><blockquote><para>OpenStack is a collection of open source technologies delivering a massively scalable cloud operating system.</para></blockquote><para>You can think of it as software to power your own Infrastructure as a Service (IaaS) offering like <link xlink:href="http://aws.amazon.com">Amazon Web Services</link>. It currently encompasses three main projects:</para><itemizedlist><listitem><para><link xlink:href="https://launchpad.net/swift">Swift</link> which provides object/blob storage. This is roughly analogous to Rackspace Cloud Files (from which it is derived) or Amazon S3.</para></listitem><listitem><para><link xlink:href="https://launchpad.net/glance">Glance</link> which provides discovery, storage and retrieval of virtual machine images for OpenStack Nova.</para></listitem><listitem><para><link xlink:href="https://launchpad.net/nova">Nova</link> which provides virtual servers upon
demand. This is similar to Rackspace Cloud Servers or Amazon EC2.</para></listitem></itemizedlist><para>While these three projects provide the core of the cloud infrastructure, OpenStack is open and
evolving — <link xlink:href="http://wiki.openstack.org/Projects">there will be more
projects</link> (there are already related projects for <link
xlink:href="https://launchpad.net/openstack-dashboard">web interfaces</link> and a
<link xlink:href="http://wiki.openstack.org/QueueService">queue service</link>).
With that brief introduction, let's delve into a conceptual architecture and then
examine how OpenStack Compute could map to it. </para>
<section xml:id="cloud-provider-conceptual-architecture">
<info><author><personname><firstname>Ken</firstname><lineage>Pepple</lineage></personname></author><title>Cloud Provider Conceptual Architecture</title></info><para>Imagine that we are going to build our own IaaS cloud and offer it to customers. To achieve this, we would need to provide several high level features:</para><orderedlist><listitem><para>Allow application owners to register for our cloud services, view their usage and see their bill (basic customer relations management functionality)</para></listitem><listitem><para>Allow Developers/DevOps folks to create and store custom images for their applications (basic build-time functionality)</para></listitem><listitem><para>Allow DevOps/Developers to launch, monitor and terminate instances (basic run-time functionality)</para></listitem><listitem><para>Allow the Cloud Operator to configure and operate the cloud infrastructure</para></listitem></orderedlist><para>While there are certainly many, many other features that we would need to offer (especially if we were to follow are more complete industry framework like <link xlink:href="http://www.tmforum.org/BusinessProcessFramework/1647/home.html">eTOM</link>), these four get to the very heart of providing IaaS. Now assuming that you agree with these four top level features, you might put together a conceptual architecture that looks something like this:</para>
<informalfigure><mediaobject><imageobject><imagedata scale="70" fileref="../figures/nova-cactus-conceptual.png"/></imageobject></mediaobject></informalfigure>
<para>In this model, I've imagined four sets of users (developers, devops, owners and operators)
that need to interact with the cloud and then separated out the functionality needed
for each. From there, I've followed a pretty common tiered approach to the
architecture (presentation, logic and resources) with two orthogonal areas
(integration and management). Lets explore each a little further: </para><itemizedlist><listitem><para>As with presentation layers in more typical application architectures, components here interact with users to accept and present information. In this layer, you will find web portals to provide graphical interfaces for non-developers and API endpoints for developers. For more advanced architectures, you might find load balancing, console proxies, security and naming services present here also.</para></listitem><listitem><para>The logic tier would provide the intelligence and control functionality for our cloud. This tier would house orchestration (workflow for complex tasks), scheduling (determining mapping of jobs to resources), policy (quotas and such) , image registry (metadata about instance images), logging (events and metering). </para></listitem><listitem><para>There will need to integration functions within the architecture. It is assumed that most service providers will already have a customer identity and billing systems. Any cloud architecture would need to integrate with these systems.</para></listitem><listitem><para>As with any complex environment, we will need a management tier to operate the environment. This should include an API to access the cloud administration features as well as some forms of monitoring. It is likely that the monitoring functionality will take the form of integration into an existing tool. While Ive highlighted monitoring and an admin API for our fictional provider, in a more complete architecture you would see a vast array of operational support functions like provisioning and configuration management.</para></listitem><listitem><para>Finally, since this is a compute cloud, we will need actual compute, network and storage resources to provide to our customers. 
This tier provides these services, whether they be servers, network switches, network attached storage or other resources.</para></listitem></itemizedlist><para>With this model in place, let's shift gears and look at OpenStack Compute's logical
architecture.</para></section><section xml:id="openstack-nova-logical-architecture"><title>OpenStack Compute Logical Architecture</title><para>Now that we've looked at a proposed conceptual architecture, let's see how OpenStack Compute
is logically architected. At the time of this writing, Cactus was the newest release
(which means if you are viewing this after around July 2011, this may be out of
date). There are several logical components of OpenStack Compute architecture but
the majority of these components are custom written python daemons of two
varieties:</para><itemizedlist><listitem><para>WSGI applications to receive and mediate API calls (<code>nova-api</code>, <code>glance-api</code>, etc.)</para></listitem><listitem><para>Worker daemons to carry out orchestration tasks (<code>nova-compute</code>, <code>nova-network</code>, <code>nova-schedule</code>, etc.)</para></listitem></itemizedlist><para>However, there are two essential pieces of the logical architecture are neither custom written nor Python based: the messaging queue and the database. These two components facilitate the asynchronous orchestration of complex tasks through message passing and information sharing. Putting this all together we get a picture like this:</para><informalfigure><mediaobject><imageobject><imagedata scale="70" fileref="../figures/nova-cactus-logical.png"/></imageobject></mediaobject></informalfigure><para>This complicated, but not overly informative, diagram as it can be summed up in three sentences:</para><itemizedlist><listitem><para>End users (DevOps, Developers and even other OpenStack components) talk to
<code>nova-api</code> to interface with OpenStack Compute</para></listitem><listitem><para>OpenStack Compute daemons exchange info through the queue (actions) and database (information)
to carry out API requests</para></listitem><listitem><para>OpenStack Glance is basically a completely separate infrastructure which OpenStack Compute
interfaces through the Glance API</para></listitem></itemizedlist><para>Now that we see the overview of the processes and their interactions, lets take a closer look at each component.</para><itemizedlist><listitem><para>The <code>nova-api</code> daemon is the heart of the OpenStack Compute. You may see it
illustrated on many pictures of OpenStack Compute as API and “Cloud
Controller”. While this is partly true, cloud controller is really just a
class (specifically the CloudController in trunk/nova/api/ec2/cloud.py)
within the <code>nova-api</code> daemon. It provides an endpoint for all API
queries (either <link xlink:href="http://docs.rackspacecloud.com/api/"
>OpenStack API</link> or <link
xlink:href="http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/"
>EC2 API</link>), initiates most of the orchestration activities (such
as running an instance) and also enforces some policy (mostly quota
checks).</para></listitem><listitem><para>The <code>nova-schedule</code> process is conceptually the simplest piece of code in OpenStack
Compute: take a virtual machine instance request from the queue and
determines where it should run (specifically, which compute server host it
should run on). In practice however, I am sure this will grow to be the most
complex as it needs to factor in current state of the entire cloud
infrastructure and apply complicated algorithm to ensure efficient usage. To
that end, <code>nova-schedule</code> implements a pluggable architecture
that lets you choose (or write) your own algorithm for scheduling.
Currently, there are several to choose from (simple, chance, etc) and it is
a area of hot development for the future releases of OpenStack
Compute.</para></listitem><listitem><para>The <code>nova-compute</code> process is primarily a worker daemon that creates and terminates virtual machine instances. The process by which it does so is fairly complex (<link xlink:href="http://www.laurentluce.com/?p=227">see this blog post by Laurence Luce for the gritty details</link>) but the basics are simple: accept actions from the queue and then perform a series of system commands (like launching a KVM instance) to carry them out while updating state in the database.</para></listitem><listitem><para>As you can gather by the name, <code>nova-volume</code> manages the creation, attaching and detaching of persistent volumes to compute instances (similar functionality to <link xlink:href="http://aws.amazon.com/ebs/">Amazons Elastic Block Storage</link>). It can use volumes from a variety of providers such as iSCSI or AoE.</para></listitem><listitem><para>The <code>nova-network</code> worker daemon is very similar to <code>nova-compute</code> and <code>nova-volume</code>. It accepts networking tasks from the queue and then performs tasks to manipulate the network (such as setting up bridging interfaces or changing <code>iptables</code> rules).</para></listitem><listitem><para>The queue provides a central hub for passing messages between daemons. This is currently implemented with <link xlink:href="http://www.rabbitmq.com/">RabbitMQ</link> today, but theoretically could be any <link xlink:href="http://www.amqp.org/confluence/display/AMQP/Advanced+Message+Queuing+Protocol">AMPQ message queue</link> supported by the python <link xlink:href="http://barryp.org/software/py-amqplib/">ampqlib</link>.</para></listitem><listitem><para>The <link xlink:href="http://en.wikipedia.org/wiki/SQL">SQL database</link> stores most of the
build-time and run-time state for a cloud infrastructure. This includes the
instance types that are available for use, instances in use, networks
available and projects. Theoretically, OpenStack Compute can support any
database supported by <link xlink:href="http://www.sqlalchemy.org/"
>SQL-Alchemy</link> but the only databases currently being widely used
are <link xlink:href="http://www.sqlite.org/">sqlite3</link> (only
appropriate for test and development work), <link
xlink:href="http://mysql.com/">MySQL</link> and <link
xlink:href="http://www.postgresql.org/">PostgreSQL</link>.</para></listitem><listitem><para>OpenStack Glance is a separate project from OpenStack Compute, but as shown above,
complementary. While it is an optional part of the overall compute
architecture, I can't imagine that most OpenStack Compute installations will
not be using it (or a complementary product). There are three pieces to
Glance: <code>glance-api</code>, <code>glance-registry</code> and the image
store. As you can probably guess, <code>glance-api</code> accepts API calls,
much like <code>nova-api</code>, and the actual image blobs are placed in
the image store. The <code>glance-registry</code> stores and retrieves
metadata about images. The image store can be a number of different object
stores, include OpenStack Object Storage (Swift).</para></listitem><listitem><para>Finally, another optional project that we will need for our fictional service provider is an
user dashboard. I have picked the OpenStack Dashboard here, but there are
also several other web front ends available for OpenStack Compute. The
OpenStack Dashboard provides a web interface into OpenStack Compute to give
application developers and devops staff similar functionality to the API. It
is currently implemented as a <link
xlink:href="http://www.djangoproject.com/">Django</link> web
application.</para></listitem></itemizedlist><para>This logical architecture represents just one way to architect OpenStack Compute. With its
pluggable architecture, we could easily swap out OpenStack Glance with another image
service or use another dashboard. In the coming releases of OpenStack, expect to see
more modularization of the code especially in the network and volume areas.</para></section>
<section xml:id="nova-conceptual-mapping"><title>Nova Conceptual Mapping</title><para>Now that weve seen a conceptual architecture for a fictional cloud provider and examined the logical architecture of OpenStack Nova, it is fairly easy to map the OpenStack components to the conceptual areas to see what we are lacking:</para><informalfigure><mediaobject><imageobject><imagedata scale="50" fileref="../figures/nova-cactus-conceptual-coverage.png"/></imageobject></mediaobject></informalfigure><para>As you can see from the illustration, Ive overlaid logical components of OpenStack Nova, Glance and Dashboard to denote functional coverage. For each of the overlays, Ive added the name of the logical component within the project that provides the functionality. While all of these judgements are highly subjective, you can see that we have a majority coverage of the functional areas with a few notable exceptions:</para><itemizedlist><listitem><para>The largest gap in our functional coverage is logging and billing. At the moment, OpenStack Nova doesnt have a billing component that can mediate logging events, rate the logs and create/present bills. That being said, most service providers will already have one (or <emphasis>many</emphasis>) of these so the focus is really on the logging and integration with billing. This could be remedied in a variety of ways: augmentations of the code (which should happen in the next release “Diablo”), integration with commercial products or services (perhaps <link xlink:href="http://www.zuora.com/">Zuora</link>) or custom log parsing. </para></listitem><listitem><para>Identity is also a point which will likely need to be augmented. Unless we are running a stock
LDAP for our identity system, we will need to integrate our solution with
OpenStack Compute. Having said that, this is true of almost all cloud
solutions.</para></listitem><listitem><para>The customer portal will also be an integration point. While OpenStack Compute provides a user
dashboard (to see running instances, launch new instances, etc.), it doesn't
provide an interface to allow application owners to signup for service,
track their bills and lodge trouble tickets. Again, this is probably
something that is already in place at our imaginary service provider. </para></listitem><listitem><para>Ideally, the Admin API would replicate all functionality that we'd be able to do via the
command line interface (which in this case is mostly exposed through the
nova-manage command). This will get better in the Diablo release with the
<link xlink:href="http://wiki.openstack.org/NovaAdminAPI">Admin
API</link> work.</para></listitem><listitem><para>Cloud monitoring and operations will be an important area of focus for our service provider. A
key to any good operations approach is good tooling. While OpenStack Compute
provides nova-instancemonitor, which tracks compute node utilization, we're
really going to need a number of third party tools for monitoring. </para></listitem><listitem><para>Policy is an extremely important area but very provider specific. Everything from quotas
(which are supported) to quality of service (QoS) to privacy controls can
fall under this. I've given OpenStack Nova partial coverage here, but that
might vary depending on the intricacies of the provider's needs. For the
record, the Cactus release of OpenStack Compute provides quotas for instances
(number and cores used), volumes (size and number), floating IP addresses and
metadata.</para></listitem><listitem><para>Scheduling within OpenStack Compute is fairly rudimentary for larger installations today. The
pluggable scheduler supports chance (random host assignment), simple (least
loaded) and zone (random nodes within an availability zone). As within most
areas on this list, this will be greatly augmented in Diablo. In development
are distributed schedulers and schedulers that understand heterogeneous
hosts (for support of GPUs and differing CPU architectures).</para></listitem></itemizedlist><para>As you can see, OpenStack Compute provides a fair basis for our mythical service provider, as
long as the mythical service providers are willing to do some integration here and
there. </para>
<para>Note that since the time of this writing, OpenStack Identity Service has been
added.</para></section></section>
<section xml:id="why-cloud">
<title>Why Cloud?</title>
<para>In data centers today, many computers suffer the same underutilization in computing
power and networking bandwidth. For example, projects may need a large amount of
computing capacity to complete a computation, but no longer need the computing power
after completing the computation. You want cloud computing when you want a service
that's available on-demand with the flexibility to bring it up or down through
automation or with little intervention. The phrase "cloud computing" is often
represented with a diagram that contains a cloud-like shape indicating a layer where
responsibility for service goes from user to provider. The cloud in these types of
diagrams contains the services that afford computing power harnessed to get work done.
Much like the electrical power we receive each day, cloud computing provides subscribers
or users with access to a shared collection of computing resources: networks for
transfer, servers for storage, and applications or services for completing tasks. </para>
<para>These are the compelling features of a cloud:</para>
<itemizedlist spacing="compact">
<listitem>
<para>On-demand self-service: Users can provision servers and networks with little
human intervention. </para></listitem>
<listitem>
<para>Network access: Any computing capabilities are available over the network.
Many different devices are allowed access through standardized mechanisms. </para></listitem>
<listitem>
<para>Resource pooling: Multiple users can access clouds that serve other consumers
according to demand. </para></listitem>
<listitem>
<para>Elasticity: Provisioning is rapid and scales out or in based on need. </para></listitem>
<listitem>
<para>Metered or measured service: Just like utilities that are paid for by the
hour, clouds should optimize resource use and control it for the level of
service or type of servers such as storage or processing.</para></listitem>
</itemizedlist>
<para>Cloud computing offers different service models depending on the capabilities a
consumer may require. </para>
<itemizedlist>
<listitem><para>SaaS: Software as a Service. Provides the consumer the ability to use the software
in a cloud environment, such as web-based email for example. </para></listitem>
<listitem><para>PaaS: Platform as a Service. Provides the consumer the ability to deploy
applications through a programming language or tools supported by the cloud platform
provider. An example of platform as a service is an Eclipse/Java programming
platform provided with no downloads required. </para></listitem>
<listitem><para>IaaS: Infrastructure as a Service. Provides infrastructure such as computer
instances, network connections, and storage so that people can run any software or
operating system. </para></listitem>
</itemizedlist>
<para>When you hear terms such as public cloud or private cloud, these refer to the
deployment model for the cloud. A private cloud operates for a single organization, but
can be managed on-premise or off-premise. A public cloud has an infrastructure that is
available to the general public or a large industry group and is likely owned by a cloud
services company. The NIST also defines community cloud as shared by several
organizations supporting a specific community with shared concerns. </para>
<para>Clouds can also be described as hybrid. A hybrid cloud can be a deployment model, as a
composition of both public and private clouds, or a hybrid model for cloud computing may
involve both virtual and physical servers. </para>
<para>What have people done with cloud computing? Cloud computing can help with large-scale
computing needs or can lead consolidation efforts by virtualizing servers to make more
use of existing hardware and potentially release old hardware from service. People also
use cloud computing for collaboration because of its high availability through networked
computers. Productivity suites for word processing, number crunching, email
communications, and more are also available through cloud computing. Cloud computing
also avails additional storage to the cloud user, avoiding the need for additional hard
drives on each user's desktop and enabling access to huge data storage capacity online
in the cloud. </para>
<para>For a more detailed discussion of cloud computing's essential
characteristics and its models of service and deployment, see <link
xlink:href="http://www.nist.gov/itl/cloud/"
>http://www.nist.gov/itl/cloud/</link>, published by the US
National Institute of Standards and Technology.</para>
</section>
</chapter>

View File

@ -0,0 +1,139 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="ch_support-and-troubleshooting">
<title>Support and Troubleshooting</title>
<para>Online resources aid in supporting OpenStack, and the community members are willing and able to answer questions and help with suspected bugs. We are constantly improving and adding to the main features of OpenStack, but if you have any problems, do not hesitate to ask. Here are some ideas for supporting OpenStack and troubleshooting your existing installations.</para>
<section xml:id="community-support">
<title>Community Support</title> <para>Here are some places you can locate others who want to help.</para>
<simplesect><title>The Launchpad Answers area</title>
<para>During setup or testing, you may have questions about how to do something, or end up in
a situation where you can't seem to get a feature to work correctly. One place to
look for help is the Answers section on Launchpad. Launchpad is the "home" for the
project code and its developers and thus is a natural place to ask about the
project. When visiting the Answers section, it is usually good to at least scan over
recently asked questions to see if your question has already been answered. If that
is not the case, then proceed to adding a new question. Be sure you give a clear,
concise summary in the title and provide as much detail as possible in the
description. Paste in your command output or stack traces, link to screenshots, and
so on. The Launchpad Answers areas are available here - OpenStack Compute: <link
xlink:href="https://answers.launchpad.net/nova"
>https://answers.launchpad.net/nova</link> OpenStack Object Storage: <link
xlink:href="https://answers.launchpad.net/swift"
>https://answers.launchpad.net/swift</link>. </para></simplesect>
<simplesect><title>OpenStack mailing list</title>
<para>Posting your question or scenario to the OpenStack mailing list is a great way to get
answers and insights. You can learn from and help others who may have the same
scenario as you. Go to <link xlink:href="https://launchpad.net/~openstack"
>https://launchpad.net/~openstack</link> and click "Subscribe to mailing list"
or view the archives at <link xlink:href="https://lists.launchpad.net/openstack/"
>https://lists.launchpad.net/openstack/</link>.</para></simplesect><simplesect>
<title>The OpenStack Wiki search </title>
<para>The <link xlink:href="http://wiki.openstack.org/">OpenStack wiki</link> contains content
on a broad range of topics, but some of it sits a bit below the surface. Fortunately, the wiki
search feature is very powerful in that it can do both searches by title and by content. If
you are searching for specific information, say about "networking" or "api" for nova, you can
find lots of content using the search feature. More is being added all the time, so be sure to
check back often. You can find the search box in the upper right hand corner of any OpenStack wiki
page. </para></simplesect>
<simplesect><title>The Launchpad Bugs area </title>
<para>So you think you've found a bug. That's great! Seriously, it is. The OpenStack community
values your setup and testing efforts and wants your feedback. To log a bug you must
have a Launchpad account, so sign up at https://launchpad.net/+login if you do not
already have a Launchpad ID. You can view existing bugs and report your bug in the
Launchpad Bugs area. It is suggested that you first use the search facility to see
if the bug you found has already been reported (or even better, already fixed). If
it still seems like your bug is new or unreported then it is time to fill out a bug
report. </para>
<para>Some tips: </para>
<itemizedlist><listitem><para>Give a clear, concise summary! </para></listitem>
<listitem><para>Provide as much detail as possible
in the description. Paste in your command output or stack traces, link to
screenshots, etc. </para></listitem>
<listitem><para>Be sure to include what version of the software you are using.
                    This is especially critical if you are using a development branch, e.g. "Austin
release" vs lp:nova rev.396. </para></listitem>
                <listitem><para>Any deployment-specific info is helpful as well, e.g.
Ubuntu 10.04, multi-node install.</para></listitem> </itemizedlist>
<para>The Launchpad Bugs areas are available here - OpenStack Compute: <link
xlink:href="https://bugs.launchpad.net/nova"
>https://bugs.launchpad.net/nova</link> OpenStack Object Storage: <link
xlink:href="https://bugs.launchpad.net/swift"
>https://bugs.launchpad.net/swift</link>
</para></simplesect>
<simplesect><title>The OpenStack IRC channel </title>
<para>The OpenStack community lives and breathes in the #openstack IRC channel on the
Freenode network. You can come by to hang out, ask questions, or get immediate
feedback for urgent and pressing issues. To get into the IRC channel you need to
install an IRC client or use a browser-based client by going to
http://webchat.freenode.net/. You can also use Colloquy (Mac OS X,
http://colloquy.info/) or mIRC (Windows, http://www.mirc.com/) or XChat (Linux).
When you are in the IRC channel and want to share code or command output, the
generally accepted method is to use a Paste Bin, the OpenStack project has one at
http://paste.openstack.org. Just paste your longer amounts of text or logs in the
web form and you get a URL you can then paste into the channel. The OpenStack IRC
channel is: #openstack on irc.freenode.net. </para></simplesect>
</section>
<section xml:id="troubleshooting-openstack-object-storage"><title>Troubleshooting OpenStack Object Storage</title>
<para>For OpenStack Object Storage, everything is logged in /var/log/syslog (or messages on some distros). Several settings enable further customization of logging, such as log_name, log_facility, and log_level, within the object server configuration files.</para>
<section xml:id="handling-drive-failure">
<title>Handling Drive Failure</title>
<para> In the event that a drive has failed, the first step is to make sure the drive is unmounted. This will make it easier for OpenStack Object Storage to work around the failure until it has been resolved. If the drive is going to be replaced immediately, then it is just best to replace the drive, format it, remount it, and let replication fill it up.</para>
<para>If the drive can't be replaced immediately, then it is best to leave it unmounted, and remove the drive from the ring. This will allow all the replicas that were on that drive to be replicated elsewhere until the drive is replaced. Once the drive is replaced, it can be re-added to the ring.</para></section>
<section xml:id="handling-server-failure">
<title>Handling Server Failure</title>
<para>If a server is having hardware issues, it is a good idea to make sure the OpenStack Object Storage services are not running. This will allow OpenStack Object Storage to work around the failure while you troubleshoot.</para>
<para>If the server just needs a reboot, or a small amount of work that should only last a couple of hours, then it is probably best to let OpenStack Object Storage work around the failure and get the machine fixed and back online. When the machine comes back online, replication will make sure that anything that is missing during the downtime will get updated.</para>
<para>If the server has more serious issues, then it is probably best to remove all of the server's devices from the ring. Once the server has been repaired and is back online, the server's devices can be added back into the ring. It is important that the devices are reformatted before putting them back into the ring as it is likely to be responsible for a different set of partitions than before.</para>
</section>
<section xml:id="detecting-failed-drives">
<title>Detecting Failed Drives</title>
<para>It has been our experience that when a drive is about to fail, error messages will spew into /var/log/kern.log. There is a script called swift-drive-audit that can be run via cron to watch for bad drives. If errors are detected, it will unmount the bad drive, so that OpenStack Object Storage can work around it. The script takes a configuration file with the following settings:
</para>
<literallayout>
[drive-audit]
Option Default Description
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Log level
device_dir /srv/node Directory devices are mounted under
minutes 60 Number of minutes to look back in /var/log/kern.log
error_limit 1 Number of errors to find before a device is unmounted
</literallayout>
<para>This script has only been tested on Ubuntu 10.04, so if you are using a different distro or OS, some care should be taken before using in production.
</para></section></section>
<section xml:id="troubleshooting-openstack-compute"><title>Troubleshooting OpenStack Compute</title>
<para>Common problems for Compute typically involve misconfigured networking or credentials that are not sourced properly in the environment. Also, most flat networking configurations do not enable ping or ssh from a compute node to the instances running on that node. Another common problem is trying to run 32-bit images on a 64-bit compute node. This section offers more information about how to troubleshoot Compute.</para>
<section xml:id="log-files-for-openstack-compute"><title>Log files for OpenStack Compute</title><para>Log files are stored in /var/log/nova and there is a log file for each service, for example nova-compute.log. You can format the log strings using flags for the nova.log module. The flags used to set format strings are: logging_context_format_string and logging_default_format_string. If the log level is set to debug, you can also specify logging_debug_format_suffix to append extra formatting. For information about what variables are available for the formatter see: http://docs.python.org/library/logging.html#formatter </para>
<para>You have two options for logging for OpenStack Compute based on configuration settings. In nova.conf, include the --logfile flag to enable logging. Alternatively you can set --use_syslog=1, and then the nova daemon logs to syslog.</para></section>
<section xml:id="common-errors-and-fixes-for-openstack-compute">
<title>Common Errors and Fixes for OpenStack Compute</title>
<para>The Launchpad Answers site offers a place to ask and answer questions, and you can also mark questions as frequently asked questions. This section describes some errors people have posted to Launchpad Answers and IRC. We are constantly fixing bugs, so online resources are a great way to get the most up-to-date errors and fixes.</para>
<para>Credential errors, 401, 403 forbidden errors</para>
<para>A 403 forbidden error is caused by missing credentials. Through current installation methods, there are basically two ways to get the novarc file. The manual method requires getting it from within a project zipfile, and the scripted method just generates novarc out of the project zip file and sources it for you. If you use the manual method and then source only the novarc file from the zip, you end up losing the credentials that are tied to the user you created with nova-manage in the earlier steps.</para>
<para>When you run nova-api the first time, it generates the certificate authority information, including openssl.cnf. If it gets started out of order, you may not be able to create your zip file. Once your CA information is available, you should be able to go back to nova-manage to create your zipfile. </para><para>You may also need to check your proxy settings to see if they are causing problems with the novarc creation.</para>
<para>Instance errors</para>
<para>Sometimes a particular instance shows "pending" or you cannot SSH to it. Sometimes the image itself is the problem. For example, when using flat manager networking, you do not have a dhcp server, and an ami-tiny image doesn't support interface injection so you cannot connect to it. The fix for this type of problem is to use an Ubuntu image, which should obtain an IP address correctly with FlatManager network settings. To troubleshoot other possible problems with an instance, such as one that stays in a spawning state, first check your instances directory for i-ze0bnh1q dir to make sure it has the following files:</para>
<itemizedlist><listitem><para>libvirt.xml</para></listitem>
<listitem><para>disk</para></listitem>
<listitem><para>disk-raw</para></listitem>
<listitem><para>kernel</para></listitem>
<listitem><para>ramdisk</para></listitem>
<listitem><para>console.log (Once the instance actually starts you should see a console.log.)</para></listitem>
</itemizedlist>
<para>Check the file sizes to see if they are reasonable. If any are missing/zero/very small then nova-compute has somehow not completed download of the images from objectstore. </para>
<para>Also check nova-compute.log for exceptions. Sometimes they don't show up in the
console output. </para><para>
Next, check the /var/log/libvirt/qemu/i-ze0bnh1q.log file to see if it exists and has any useful error messages in it.</para>
<para>Finally, from the instances/i-ze0bnh1q directory, try <code>virsh create libvirt.xml</code> and see if you get an error there.</para>
</section></section>
</chapter>

View File

@ -0,0 +1,14 @@
README
This docbkx-example folder is provided for those who want to use the maven mojo supplied with the project to build their own documents to PDF and HTML (webhelp) format. It's intended to be a template and model.
You can edit the src/docbkx/example.xml file using vi, emacs, or another DocBook editor. At Rackspace we use Oxygen. Both Oxygen and XML Mind offer free licenses to those working on open source project documentation.
To build the output, install Apache Maven (https://maven.apache.org/) and then run:
mvn clean generate-sources
in the directory containing the pom.xml file.
Feel free to ask questions of the openstack-docs team at https://launchpad.net/~openstack-doc.

View File

@ -0,0 +1,38 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>my-groupid</groupId>
<artifactId>my-guide</artifactId>
<version>1.0.0-SNAPSHOT</version>
<packaging>jar</packaging>
<name>OpenStack stand alone documentation examples</name>
<build>
<plugins>
<plugin>
<groupId>com.agilejava.docbkx</groupId>
<artifactId>docbkx-maven-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>generate-pdf</goal>
<goal>generate-webhelp</goal>
</goals>
<phase>generate-sources</phase>
</execution>
</executions>
<configuration>
<xincludeSupported>true</xincludeSupported>
<chunkSectionDepth>100</chunkSectionDepth>
<postProcess>
<copy todir="target/docbkx/webhelp/example/content/figures">
<fileset dir="src/docbkx/figures">
<include name="**/*.png" />
</fileset>
</copy>
</postProcess>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,318 @@
<book xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:svg="http://www.w3.org/2000/svg"
xmlns:m="http://www.w3.org/1998/Math/MathML" xmlns:html="http://www.w3.org/1999/xhtml" version="5.0" status="DRAFT">
<title>Maven Example Documentation</title>
<info>
<author>
<personname>
<firstname/>
<surname/>
</personname>
<affiliation>
<orgname>Badges! We don't need any stinking badges!</orgname>
</affiliation>
</author>
<copyright>
<year>2011</year>
<holder>Timothy D. Witham</holder>
</copyright>
<releaseinfo>Example v0.1</releaseinfo>
<productname>Product Name Doesn't Exist - it's an example!™</productname>
<pubdate>2011-01-01</pubdate>
<legalnotice role="rs-api">
<annotation>
<remark>Copyright details are filled in by the template. Change the value of the role
attribute on the legalnotice element to change the license. </remark>
</annotation>
</legalnotice>
<abstract>
        <para> This document is intended for individuals who wish to produce documentation using Maven that has
            the same "feel" as the documentation produced by the mainline OpenStack projects.
</para>
</abstract>
<cover>
<para>this is a placeholder for the front cover</para>
</cover>
<cover>
<para>this is a placeholder for the back cover</para>
</cover>
</info>
<chapter>
<title>Overview</title>
        <para>Welcome to the getting started with Maven documentation. Congratulations, you have
successfully downloaded and built the example.
</para>
<para>For more details on the Product Name service, please refer to <link
xlink:href="http://www.rackspacecloud.com/cloud_hosting_products/files"
>http://www.rackspacecloud.com/cloud_hosting_products/product name</link>
</para>
<para>We welcome feedback, comments, and bug reports at <link
xlink:href="mailto:support@rackspacecloud.com">support@rackspacecloud.com</link>. </para>
<section>
<title>Intended Audience</title>
            <para>This guide is intended for individuals who want to develop standalone documentation
to use within an OpenStack deployment. Using this tool chain will give you the look and
feel of the mainline OpenStack documentation.
</para>
</section>
<section>
<title>Document Change History</title>
<para>This version of the Maven Getting Started Guide replaces and obsoletes all previous versions. The
most recent changes are described in the table below:</para>
<informaltable rules="all">
<thead>
<tr>
<td align="center" colspan="1">Revision Date</td>
<td align="center" colspan="4">Summary of Changes</td>
</tr>
</thead>
<tbody>
<tr>
<td colspan="1" align="center">July. 14, 2011</td>
<td colspan="4">
<itemizedlist spacing="compact">
<listitem>
<para>Initial document creation.</para>
</listitem>
</itemizedlist>
</td>
</tr>
</tbody>
</informaltable>
</section>
<section>
<title>Additional Resources</title>
<itemizedlist spacing="compact">
<listitem>
<para>
<link xlink:href="http://www.openstack.org">
                        OpenStack - Cloud Software
</link>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="http://www.docbook.org">
Docbook Main Web Site
</link>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="http://docbook.org/tdg/en/html/quickref.html">
Docbook Quick Reference
</link>
</para>
</listitem>
</itemizedlist>
</section>
</chapter>
<chapter>
<title>Concepts</title>
<para>
Need to put something here.
</para>
</chapter>
<chapter>
<title>How do I?</title>
<section>
<title>Notes and including images</title>
<para>So I want an note and an image in this section ...</para>
<note>
<para>This is an example of a note. </para>
</note>
<para>Here's a sample figure in svg and png formats:</para>
<figure xml:id="CFinterfaces">
<title>Sample Image</title>
<mediaobject>
<imageobject role="fo">
<imagedata fileref="figures/example.svg" contentwidth="5in"/>
</imageobject>
<imageobject role="html">
<imagedata fileref="figures/example.png"/>
</imageobject>
</mediaobject>
</figure>
</section>
<section>
<title>Multiple Related Documents</title>
<para>
What you need to do in order to have multiple documents fit within the
build structure.
</para>
</section>
<section>
<title>Using multiple files for a document</title>
<para>
What you need to do in order to have a single document that is made up of multiple
files.
</para>
</section>
<section>
<title>Who, What, Where, When and Why of pom.xml</title>
<para>
            You will have noticed the <emphasis>pom.xml</emphasis> file at the root directory.
This file is used to set the project parameters for the documentation. Including
what type of documentation to produce and any post processing that needs to happen.
If you want to know more about
<link
xlink:href="http://www.openstack.org">
pom.xml - need a link
</link>
then follow the link.
</para>
        <para> For the <emphasis>pom.xml</emphasis> file that was included in this distribution we will
            parse the individual lines and explain the meaning.
</para>
<para>
<programlisting language="xml"> <xi:include href="../../pom.xml" parse="text" /></programlisting>
</para>
<section>
<title> &lt;project&gt; </title>
<para>
What is all of this stuff and why is it important?
</para>
</section>
<section>
<title> &lt;modelVersion&gt; </title>
<para>
What goes in here and why?
</para>
</section>
<section>
<title> &lt;groupId&gt; </title>
<para>
What goes in here and why?
</para>
</section>
<section>
<title> &lt;artifactId&gt; </title>
<para>
What goes in here and why?
</para>
</section>
<section>
<title> &lt;version&gt; </title>
<para>
What goes in here and why?
</para>
</section>
<section>
<title> &lt;packaging&gt; </title>
<para>
What goes in here and why?
</para>
</section>
<section>
<title> &lt;name&gt; </title>
<para>
Name of your document.
</para>
</section>
<section>
<title> &lt;build&gt; </title>
<para>
Make some documents.
</para>
<section>
<title> &lt;plugin(s)&gt; </title>
<para>
What does this do and why?
</para>
<section>
<title> &lt;groupId&gt; </title>
<para>
What goes in here and why?
</para>
</section>
<section>
<title> &lt;artifactId&gt; </title>
<para>
What goes in here and why?
</para>
</section>
<section>
<title> &lt;execution(s)&gt; </title>
<para>
What goes in here and why?
</para>
<section>
<title> &lt;goal(s)&gt; </title>
<para>
Different types of goals and why you use them.
</para>
</section>
<section>
<title> &lt;phase&gt; </title>
<para>
What does this section do? What phases can you specify.
</para>
</section>
</section>
<section>
<title> &lt;configuration&gt; </title>
<para>
What does this section do?
</para>
<section>
<title> &lt;xincludeSupported&gt; </title>
<para>
What does this do and why?
</para>
</section>
<section>
<title> &lt;chunkSectionDepth&gt; </title>
<para>
What does this do and why?
</para>
</section>
<section>
                                <title> &lt;postProcess&gt; </title>
<para>
What does this section do? What are possible pieces?
</para>
<section>
<title> &lt;copy&gt; </title>
<para>
What does this section do? What are possible pieces?
</para>
<section>
<title> &lt;fileset&gt; </title>
<para>
What does this section do? What are possible pieces?
</para>
<section>
<title> &lt;include&gt; </title>
<para>
What does this section do? What are possible pieces?
</para>
</section>
</section>
</section>
</section>
</section>
</section>
</section>
</section>
<section>
<title>Who, What, Where, When and Why of build.xml</title>
<para>
            You will have noticed the <emphasis>build.xml</emphasis> file at the root directory.
This file is used to set the project parameters for the documentation. Including
what type of documentation to produce and any post processing that needs to happen.
If you want to know more about
<link
xlink:href="http://www.openstack.org">
                build.xml - need a link
</link>
then follow the link.
</para>
</section>
</chapter>
<chapter>
<title>Troubleshooting</title>
<para>Sometimes things go wrong...</para>
</chapter>
</book>

Binary file not shown.

After

Width:  |  Height:  |  Size: 49 KiB

View File

@ -0,0 +1,79 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<diagram>
<source><![CDATA[bfs:BFS[a]
/queue:FIFO
someNode:Node
node:Node
adjList:List
adj:Node
bfs:queue.new
bfs:someNode.setLevel(0)
bfs:queue.insert(someNode)
[c:loop while queue != ()]
bfs:node=queue.remove()
bfs:level=node.getLevel()
bfs:adjList=node.getAdjacentNodes()
[c:loop 0 <= i < #adjList]
bfs:adj=adjList.get(i)
bfs:nodeLevel=adj.getLevel()
[c:alt nodeLevel IS NOT defined]
bfs:adj.setLevel(level+1)
bfs:queue.insert(adj)
--[else]
bfs:nothing to do
[/c]
[/c]
[/c]
bfs:queue.destroy()]]></source>
<configuration>
<property name="actorWidth" value="25"/>
<property name="allowMessageProperties" value="false"/>
<property name="arrowSize" value="6"/>
<property name="colorizeThreads" value="true"/>
<property name="destructorWidth" value="30"/>
<property family="Dialog" name="font" size="12" style="0"/>
<property name="fragmentMargin" value="8"/>
<property name="fragmentPadding" value="10"/>
<property name="fragmentTextPadding" value="3"/>
<property name="glue" value="10"/>
<property name="headHeight" value="35"/>
<property name="headLabelPadding" value="5"/>
<property name="headWidth" value="100"/>
<property name="initialSpace" value="10"/>
<property name="leftMargin" value="5"/>
<property name="lineWrap" value="false"/>
<property name="lowerMargin" value="5"/>
<property name="mainLifelineWidth" value="8"/>
<property name="messageLabelSpace" value="3"/>
<property name="messagePadding" value="6"/>
<property name="noteMargin" value="6"/>
<property name="notePadding" value="6"/>
<property name="opaqueMessageText" value="false"/>
<property name="returnArrowVisible" value="true"/>
<property name="rightMargin" value="5"/>
<property name="selfMessageHorizontalSpace" value="15"/>
<property name="separatorBottomMargin" value="8"/>
<property name="separatorTopMargin" value="15"/>
<property name="shouldShadowParticipants" value="true"/>
<property name="spaceBeforeActivation" value="2"/>
<property name="spaceBeforeAnswerToSelf" value="10"/>
<property name="spaceBeforeConstruction" value="6"/>
<property name="spaceBeforeSelfMessage" value="7"/>
<property name="subLifelineWidth" value="6"/>
<property name="tc0" value="-1118482"/>
<property name="tc1" value="-256"/>
<property name="tc2" value="-65536"/>
<property name="tc3" value="-16776961"/>
<property name="tc4" value="-16711936"/>
<property name="tc5" value="-4144960"/>
<property name="tc6" value="-65281"/>
<property name="tc7" value="-14336"/>
<property name="tc8" value="-20561"/>
<property name="tc9" value="-12566464"/>
<property name="threadNumbersVisible" value="false"/>
<property name="threaded" value="true"/>
<property name="upperMargin" value="5"/>
<property name="verticallySplit" value="true"/>
</configuration>
</diagram>

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 207 KiB

File diff suppressed because it is too large Load Diff

After

Width:  |  Height:  |  Size: 115 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 56 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 50 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 61 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 66 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 187 KiB

File diff suppressed because it is too large Load Diff

After

Width:  |  Height:  |  Size: 204 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 245 KiB

File diff suppressed because it is too large Load Diff

After

Width:  |  Height:  |  Size: 589 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 63 KiB

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 629 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 76 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 53 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 56 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 188 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 105 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 181 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 198 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 58 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 62 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 87 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 41 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 72 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 223 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 36 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 21 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 38 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 85 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 30 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 59 KiB

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 621 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 175 KiB

View File

@ -0,0 +1,155 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
<?dbhtml filename="ch_introduction-to-openstack-compute.html" ?>
<title>Introduction to OpenStack Compute</title>
<para>OpenStack Compute gives you a tool to orchestrate a cloud, including running instances,
managing networks, and controlling access to the cloud through users and projects. The
underlying open source project's name is Nova, and it provides the software that can control
an Infrastructure as a Service (IaaS) cloud computing platform. It is similar in scope to
Amazon EC2 and Rackspace Cloud Servers. OpenStack Compute does not include any
virtualization software; rather it defines drivers that interact with underlying
virtualization mechanisms that run on your host operating system, and exposes functionality
over a web-based API.</para>
<section>
<?dbhtml filename="hypervisors.html" ?>
<title>Hypervisors</title>
<para>OpenStack Compute requires a hypervisor and Compute controls the hypervisors through an
API server. The process for selecting a hypervisor usually means prioritizing and making
decisions based on budget and resource constraints as well as the inevitable list of
supported features and required technical specifications. With OpenStack Compute, you
can orchestrate clouds using multiple hypervisors in different zones. The types of
virtualization standards that may be used with Compute include:</para>
<itemizedlist>
<listitem>
<para><link
xlink:href="https://www.microsoft.com/windowsserver2008/en/us/hyperv-main.aspx"
>Hyper-V 2008</link>
</para>
</listitem>
<listitem>
<para><link xlink:href="http://www.linux-kvm.org/page/Main_Page">KVM</link> -
Kernel-based Virtual Machine</para>
</listitem>
<listitem>
<para><link xlink:href="http://lxc.sourceforge.net/">LXC</link> - Linux Containers
(through libvirt)</para>
</listitem>
<listitem>
<para><link xlink:href="http://wiki.qemu.org/Manual">QEMU</link> - Quick
EMUlator</para>
</listitem>
<listitem>
<para><link xlink:href="http://user-mode-linux.sourceforge.net/">UML</link> - User
Mode Linux</para>
</listitem>
<listitem>
<para><link
xlink:href="http://www.vmware.com/products/vsphere-hypervisor/support.html"
                    >VMware ESX/ESXi</link> 4.1 update 1</para>
</listitem>
<listitem>
<para><link xlink:href="http://www.xen.org/support/documentation.html">Xen</link> -
XenServer 5.5, Xen Cloud Platform (XCP)</para>
</listitem>
</itemizedlist>
</section>
<section><?dbhtml filename="users-and-projects.html" ?>
<title>Users and Projects</title>
<para>The OpenStack Compute system is designed to be used by many different cloud computing
consumers or customers, using role-based access assignments. Roles control the
actions that a user is allowed to perform. For example, a user cannot allocate a public
IP without the netadmin or admin role. There are both global roles and per-project role assignments.
A user's access to particular images is limited
by project, but the access key and secret key are assigned per user. Key pairs granting
access to an instance are enabled per user, but quotas to control resource consumption
across available hardware resources are per project. </para>
<para>OpenStack Compute uses a rights management system that employs a Role-Based Access
Control (RBAC) model and supports the following five roles:</para>
<itemizedlist>
<listitem><para>Cloud Administrator (admin): Global role. Users of this class enjoy complete system access.</para></listitem>
<listitem><para>IT Security (itsec): Global role. This role is limited to IT security personnel. It permits role holders to
quarantine instances on any project.</para></listitem>
<listitem><para>Project Manager (projectmanager): Project role. The default for project owners, this role affords users the
ability to add other users to a project, interact with project images, and
launch and terminate instances.</para></listitem>
<listitem><para>Network Administrator (netadmin): Project role. Users with this role are permitted to allocate and assign
publicly accessible IP addresses as well as create and modify firewall
rules.</para></listitem>
<listitem><para>Developer (developer): Project role. This is a general purpose role that is assigned to users by
default.</para></listitem></itemizedlist>
<para>While the original EC2 API supports users, OpenStack Compute adds the concept of projects.
Projects are isolated resource containers forming the principal organizational structure
within Nova. They consist of a separate VLAN, volumes, instances, images, keys, and
users. A user can specify which project he or she wishes to use by appending :project_id
to his or her access key. If no project is specified in the API request, Compute
attempts to use a project with the same id as the user. </para>
<para>For projects, quota controls are available to limit the: <itemizedlist>
<listitem>
<para>Number of volumes which may be created</para>
</listitem>
<listitem>
<para>Total size of all volumes within a project as measured in GB</para>
</listitem>
<listitem>
<para>Number of instances which may be launched</para>
</listitem>
<listitem>
<para>Number of processor cores which may be allocated</para>
</listitem>
<listitem>
<para>Publicly accessible IP addresses</para>
</listitem>
</itemizedlist></para>
</section><section><?dbhtml filename="images-and-instances.html" ?>
<title>Images and Instances</title>
<para>An image is a file containing information about a virtual disk that completely
replicates all information about a working computer at a point in time including
operating system information and file system information. Compute can use certificate
management for decrypting bundled images. For now, Compute relies on using the euca2ools
command-line tools distributed by the Eucalyptus Team for adding, bundling, and deleting
images. </para>
<para>There are two methods for managing images. Images can be served through the OpenStack
            Image Service, a project named Glance, or through the nova-objectstore service.
With an OpenStack Image Service server in place, the Image Service fetches the image
on to the host machine and then OpenStack Compute boots the image from the host machine.
To place images into the service, you would use a ReST interface to stream them, and the
service, in turn, streams that into a back end which could be S3, OpenStack Object
Storage (which can use an S3), or the local file system on the server where OpenStack
Image Service is installed.</para>
<para>An instance is a running virtual machine within the cloud. An instance has a life
cycle that is controlled by OpenStack Compute. Compute creates the instances and it is
responsible for building a disk image, launching it, reporting the state, attaching
persistent storage, and terminating it. </para>
</section><section>
<?dbhtml filename="system-architecture.html" ?>
<title>System Architecture</title><para>OpenStack Compute consists of several main components. A "cloud controller" contains many of these components,
and it represents the global state and interacts with all other components. An API Server
acts as the web services front end for the cloud controller. The compute controller
        provides compute server resources and typically contains the compute service. The Object Store component optionally provides storage
services. An auth manager provides authentication and authorization services. A volume
controller provides fast and permanent block-level storage for the compute servers. A
network controller provides virtual networks to enable compute servers to interact with
each other and with the public network. A scheduler selects the most suitable compute
controller to host an instance. </para><para>OpenStack Compute is built on a shared-nothing, messaging-based architecture. You can run all
of the major components on multiple servers including a compute controller, volume
controller, network controller, and object store. A cloud controller communicates with
the internal object store via HTTP (Hyper Text Transfer Protocol), but it communicates
with a scheduler, network controller, and volume controller via AMQP (Advanced Message
Queue Protocol). To avoid blocking each component while waiting for a response,
OpenStack Compute uses asynchronous calls, with a call-back that gets triggered when a
response is received.</para>
<para>To achieve the shared-nothing property with multiple copies of the same component, OpenStack Compute keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)</para></section><section>
<?dbhtml filename="storage-and-openstack-compute.html" ?>
<title>Storage and OpenStack Compute</title><para>A volume is a detachable block storage device. You can think of it as a USB hard drive. It
can only be attached to one instance at a time, so it does not work like a SAN. If you
wish to expose the same volume to multiple instances, you will have to use an NFS or
SAMBA share from an existing instance. </para><para>Every instance larger than m1.tiny starts with some local storage (up to 160GB for m1.xlarge).
This storage is currently the second partition on the root drive. </para></section></chapter>

View File

@ -0,0 +1,46 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
<?dbhtml filename="ch_introduction-to-openstack-imaging-service.html" ?>
    <title>Introduction to the OpenStack Image Service</title>
<para>You can use OpenStack Image Services for discovering, registering, and retrieving virtual machine images. The service includes a RESTful API that allows users to query VM image metadata and retrieve the actual image with HTTP requests, or you can use a client class in your Python code to accomplish the same tasks.
</para><para>
VM images made available through OpenStack Image Service can be stored in a variety of locations from simple file systems to object-storage systems like the OpenStack Object Storage project, or even use S3 storage either on its own or through an OpenStack Object Storage S3 interface.</para>
<section>
<?dbhtml filename="overview-of-architecture.html" ?>
<title>Overview of Architecture</title>
<para>There are two main parts to the Image Services architecture:</para>
<itemizedlist><listitem><para>API server</para></listitem>
<listitem><para>Registry server(s)</para>
</listitem>
</itemizedlist>
        <para>OpenStack Image Service is designed to be as adaptable as possible for various back-end storage and registry database solutions. There is a main API server (the ``glance-api`` program) that serves as the communications hub between various client programs, the registry of image metadata, and the storage systems that actually contain the virtual machine image data.</para>
</section>
<section>
<?dbhtml filename="openstack-imaging-service-api-server.html" ?>
<title>OpenStack Image Service API Server</title>
<para>The API server is the main interface for OpenStack Image Service. It routes requests from clients to registries of image metadata and to its backend stores, which are the mechanisms by which OpenStack Image Service actually saves incoming virtual machine images.</para>
<para>The backend stores that OpenStack Image Service can work with are as follows:</para>
<itemizedlist><listitem><para>OpenStack Object Storage - OpenStack Object Storage is the highly-available object storage project in OpenStack.</para></listitem>
<listitem><para>Filesystem - The default backend that OpenStack Image Service uses to store virtual machine images is the filesystem backend. This simple backend writes image files to the local filesystem.</para></listitem>
            <listitem><para>S3 - This backend allows OpenStack Image Service to store virtual machine images in Amazon's S3 service.</para></listitem>
            <listitem><para>HTTP - OpenStack Image Service can read virtual machine images that are available via HTTP somewhere on the Internet. This store is read-only.</para></listitem></itemizedlist>
</section>
<section>
<?dbhtml filename="openstack-imaging-service-registry-servers.html" ?>
<title>OpenStack Image Service Registry Servers</title>
<para>OpenStack Image Service registry servers are servers that conform to the OpenStack Image Service Registry API. OpenStack Image Service ships with a reference implementation of a registry server that complies with this API (bin/OpenStack Image Service-registry).</para></section>
</chapter>

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,382 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE chapter [
<!-- Some useful entities borrowed from HTML -->
<!ENTITY ndash "&#x2013;">
<!ENTITY mdash "&#x2014;">
<!ENTITY hellip "&#x2026;">
<!ENTITY nbsp "&#160;">
<!ENTITY CHECK '<inlinemediaobject xmlns="http://docbook.org/ns/docbook">
<imageobject>
<imagedata fileref="img/Check_mark_23x20_02.svg"
format="SVG" scale="60"/>
</imageobject>
</inlinemediaobject>'>
<!ENTITY ARROW '<inlinemediaobject xmlns="http://docbook.org/ns/docbook">
<imageobject>
<imagedata fileref="img/Arrow_east.svg"
format="SVG" scale="60"/>
</imageobject>
</inlinemediaobject>'>
]>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
<?dbhtml filename="ch_openstack-compute-automated-installations.html" ?>
<title>OpenStack Compute Automated Installations</title>
<para>In a large-scale cloud deployment, automated installations are a requirement for
successful, efficient, repeatable installations. Automation for installation also helps with
continuous integration and testing. This chapter offers some tested methods for deploying
OpenStack Compute with either Puppet (an infrastructure management platform) or Chef (an
infrastructure management framework) paired with Vagrant (a tool for building and
distributing virtualized development environments).</para>
<section>
<?dbhtml filename="openstack-compute-deployment-tool-with-puppet.html" ?>
<title>Deployment Tool for OpenStack using Puppet</title>
<para>Thanks to a new project available that couples Puppet automation with a configuration
file and deployment tool, you can install many servers automatically by simply editing
the configuration file (deploy.conf) and running the deployment tool (deploy.py in the
nova-deployment-tool project in Launchpad).</para>
<simplesect>
<title>Prerequisites</title>
<itemizedlist>
<listitem>
<para>Networking: The servers must be connected to a subnet. </para>
</listitem>
<listitem>
<para>Networking: Ensure that the puppet server can access nova component
servers by name. The command examples in this document identify the user as
                        "nii". You should change the name but you need to create the same users on
all Nova, Glance and Swift component servers in ~/DeploymentTool/conf/deploy.conf
(ssh_user=user). </para>
</listitem>
<listitem>
<para>Permissions: You must have root user permission for installation and
service provision. </para></listitem>
<listitem><para>Software: You must configure the installation server to access the Puppet server by name.
(Puppet 0.25 or higher)</para></listitem>
<listitem>
<para>Software: You must configure LVM if you do not change the default setting
of the VolumeManager in the nova-volume service. </para>
</listitem>
<listitem>
<para>Software: Python 2.6 or higher</para>
</listitem>
<listitem>
<para>Software: Because of the current Nova implementation architecture, the
binaries for nova-api, nova-objectstore, and euca2ools must have been loaded
in one server.</para>
</listitem>
<listitem>
<para>Operating system: Ubuntu 10.04, 10.10 or 11.04</para>
</listitem>
</itemizedlist>
<para>The tool does not support system configurations other than those listed above. If you want
to use other configurations, you have to change the configuration after running the
deployment tool or modify the deployment tool. </para>
<para>This deployment tool has been tested under the following configurations. </para>
<itemizedlist>
<listitem><para>Nova-compute components are installed on multiple servers. </para></listitem>
<listitem><para>OS: Ubuntu10.04, Ubuntu10.10 or Ubuntu 11.04 </para></listitem>
<listitem><para>Multiple network modes (VLAN Mode, Flat Mode)</para></listitem>
</itemizedlist>
<para>Although we conducted extensive tests, we were unable to test every configuration.
Please let us know any problems that occur in your environment by contacting us at
https://answers.launchpad.net/nova-deployment-tool. We will try to resolve any
problem you send us and make the tool better for Stackers. </para>
<para>
<note>
<para>The configurations, which are not described on this document, are Nova
default settings. Note also that, although we have not done so ourselves,
you should be able to change the network mode to flat DHCP mode and
hypervisor to Xen if you follow the instructions in the Notes section
below.</para>
</note>
</para>
</simplesect>
<simplesect><title>Overview of Deployment Tool Steps</title>
<para>You can install/test/uninstall Nova, Glance and Swift with the Nova deployment tool as follows,
which is simply an overview. The detailed steps are in the sections that
follow.</para>
<para>Deploy.py takes care of the details using puppet. Puppet is an automation tool
with standardized scripts that manage a machine's configuration. See an
Introduction to Puppet on the PuppetLabs site.</para>
<para>Install by typing the following command.</para>
<literallayout class="monospaced">python deploy.py install</literallayout>
<para>Confirm that the installation succeeded by typing the following
command.</para>
<literallayout class="monospaced">python deploy.py test</literallayout>
<para>Uninstall by typing the following command.</para>
<literallayout class="monospaced">python deploy.py uninstall
python deploy.py all = python deploy.py uninstall; python deploy.py install; python deploy.py test </literallayout>
<para>Uninstall/install/test only Nova.</para>
<literallayout class="monospaced">python deploy.py all nova</literallayout>
<para>Uninstall/install/test only Swift.</para>
<literallayout class="monospaced">python deploy.py all swift</literallayout>
<para>Uninstall/install/test only Glance.</para>
<literallayout class="monospaced">python deploy.py all glance</literallayout></simplesect>
<simplesect><title>Installing the Deployment Tool</title>
<para>Type or copy/paste the following command to use the OpenStack PPA on all component servers.</para>
<literallayout class="monospaced">
sudo apt-get install python-software-properties -y
sudo add-apt-repository ppa:openstack-release/2011.2
sudo apt-get update</literallayout>
</simplesect>
<simplesect><title>Set permissions to the deployment 'user'</title>
<para>Edit sudoers file to give the correct permissions to the 'user' running all the components.
Type or copy/paste the visudo command to set user (= nii in this document) as a sudouer on all nova component servers.
</para>
<literallayout class="monospaced">sudo visudo</literallayout>
<para>Append the following lines to the visudo file, and then save the file.</para>
<literallayout class="monospaced">nii ALL=(ALL) NOPASSWD:ALL
nova ALL=(ALL) NOPASSWD:ALL</literallayout></simplesect>
<simplesect><title>Configure SSH</title><para>Next, we'll configure the system so that SSH works by generating public and private key pairs that provide credentials without a password intervention. </para>
<para> The Deployment tool needs to connect to all nova, glance and swift component servers without having the operator enter a password for any of the servers.</para>
<para>Type or copy/paste the following command to generate public and private key pairs on the server running the Nova deployment tool.</para>
<literallayout class="monospaced">ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa</literallayout>
<para>Copy this generated public key to all nova component servers.</para>
<para>Next, type or copy/paste the following commands to register the public keys on all nova component servers.</para>
<literallayout class="monospaced">ssh-copy-id nii@(each nova component server name) </literallayout>
<para>Download the code for the deployment tool next, and extract the contents of the
compressed file. </para>
<literallayout class="monospaced">wget http://launchpad.net/nova-deployment-tool/cactus/cactus1.3/+download/nova-deployment-tool-cactus.tgz
tar xzvf nova-deployment-tool-cactus.tgz</literallayout>
</simplesect>
<simplesect><title>Create Swift storage folder and mount device</title>
<para>First, create a Swift-storage folder and mount device on each swift-storage server. </para>
<para>The commands vary depending on which destination (Partition or Lookback device) is to be used. </para>
<para>The steps are detailed in the sections that follow. </para>
                <para>"$storage_path" and "$storage_dev" are defined in "deploy.conf".</para>
<para>Partition</para>
<literallayout class="monospaced">
sudo apt-get install xfsprogs -y
sudo sh -c "echo '/dev/$storage_dev $storage_path/$storage_dev xfs noatime,nodiratime,nobarrier,logbufs=8 0 0' >> /etc/fstab"
sudo mount $storage_path/$storage_dev
</literallayout>
<para>Loopback device</para>
<literallayout class="monospaced">
sudo apt-get install xfsprogs -y
sudo mkdir -p $storage_path/$storage_dev
sudo dd if=/dev/zero of=/srv/swift-disk bs=1024 count=0 seek=1000000
sudo mkfs.xfs -i size=1024 /srv/swift-disk
sudo sh -c "echo '/srv/swift-disk $storage_path/$storage_dev xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0' >> /etc/fstab"
sudo mount $storage_path/$storage_dev
</literallayout>
</simplesect>
<simplesect><title>Configuring the Deployment Tool</title>
<para>You must change the configuration file in order to execute the Nova deployment tool according to your environment and configuration design. In the unzipped files, edit conf/deploy.conf to change the settings according to your environment and desired installation (single or multiple servers, for example). </para>
<para>Here are the definitions of the values which are used in deploy.conf.</para>
<para>default section</para>
<itemizedlist>
<listitem><para>puppet_server Name of server in which the puppet server is installed</para></listitem>
                    <listitem><para>ssh_user User name that is used to SSH into a nova component</para></listitem>
</itemizedlist>
<para>nova section</para>
<itemizedlist>
                    <listitem><para>nova_api Name of server in which the nova-api component is installed</para></listitem>
<listitem><para>nova_objectstore Name of server in which the nova-objectstore component is installed*</para></listitem>
<listitem><para>nova_compute Name of server in which the nova-compute component is installed</para></listitem>
<listitem><para>nova_scheduler Name of server in which the nova-scheduler component is installed</para></listitem>
<listitem><para>nova_network Name of server in which the nova-network component is installed</para></listitem>
<listitem><para>nova_volume Name of server in which the nova-volume component is installed</para></listitem>
<listitem><para>euca2ools Name of server that runs the test sequence</para></listitem>
<listitem><para>mysql Name of server in which mysql is installed</para></listitem>
<listitem><para>glance_host Glance server name</para></listitem>
<listitem><para>libvirt_type Virtualization type</para></listitem>
<listitem><para>network_manager Network management class name</para></listitem>
<listitem><para>image_service Image management class name</para></listitem>
<listitem><para>network_interface Network interface that is used in the nova-compute component</para></listitem>
<listitem><para>network_ip_range IP address range used by guest VMS. This value should be included in the values of fixed_range.</para></listitem>
<listitem><para>volume_group LVM volume group name that is used in the nova volume component</para></listitem>
<listitem><para>fixed_range Range of IP addresses used in all projects. If you want to change the value, please also change the IP addresses X.X.X.X of the command "nova-manage network create X.X.X.X ..." in file setup-network.sh, and the IP addresses should include the new value.</para></listitem>
<listitem><para>network_size Number of IP addresses used by Guest VM in all projects</para></listitem>
</itemizedlist>
<para>glance section</para>
<itemizedlist>
<listitem><para>glance Name of server in which the glance is installed</para></listitem>
<listitem><para>default_store Image store that is used in glance. Available value: file, swift, s3</para></listitem>
</itemizedlist>
<para>swift section</para>
<itemizedlist>
                    <listitem><para>swift_proxy Name of server in which the swift proxy is installed</para></listitem>
                    <listitem><para>swift_storage Name of server in which the swift-storage component is installed</para></listitem>
<listitem><para>account swift account name</para></listitem>
<listitem><para>username swift user name</para></listitem>
<listitem><para>password swift password</para></listitem>
<listitem><para>storage_path Folder for saving account, container and object information in swift storage server</para></listitem>
<listitem><para>storage_dev Device holding account, container and object information</para></listitem>
<listitem><para>ring_builder_replicas Number of account, container, and object copies. The value has to be equal or less than the number of swift-storage servers.</para></listitem>
<listitem><para>super_admin_key A key for creating swift users</para></listitem>
</itemizedlist>
                <para> If you install swift on Ubuntu 11.04, due to the bug https://bugs.launchpad.net/swift/+bug/796404 swift_proxy should be installed on a different machine from the machine where swift_storage will be installed.</para>
<para>Because of the current implementation architecture, you must load nova-api, nova-objectstore and euca2ools on a single server.</para>
<para>The following configuration information is an example. If you want to have multiple
nova-computes, you can do so by nova_compute=ubuntu3, ubuntu8, for example. And if
you want to have multiple swift storage, you can do so by swift_storage=ubuntu3,
ubuntu8, for example.</para>
<literallayout class="monospaced">
&lt;begin ~/DeploymentTool/conf/deploy.conf>
[default]
puppet_server=ubuntu7
ssh_user=nii
[nova]
nova_api=ubuntu7
nova_objectstore=ubuntu7
nova_compute=ubuntu7
nova_scheduler=ubuntu7
nova_network=ubuntu7
nova_volume=ubuntu7
euca2ools=ubuntu7
mysql=ubuntu7
glance_host=ubuntu7
libvirt_type=kvm
network_manager=nova.network.manager.VlanManager
image_service=nova.image.glance.GlanceImageService
network_interface=eth0
network_ip_range=10.0.0.0/24
volume_group=ubuntu7
fixed_range=10.0.0.0/8
network_size=5000
[glance]
glance=ubuntu7
default_store=swift
[swift]
swift_proxy=ubuntu7
swift_storage=ubuntu7
account=system
username=root
password=testpass
storage_path=/srv/node
storage_dev=sdb1
ring_builder_replicas=1
super_admin_key=swauth
&lt;end ~/DeploymentTool/conf/deploy.conf></literallayout>
</simplesect>
</section>
<section>
<?dbhtml filename="openstack-compute-installation-using-virtualbox-vagrant-and-chef.html" ?>
<title>OpenStack Compute Installation Using VirtualBox, Vagrant, And Chef</title>
<para>Integration testing for distributed systems that have many dependencies can be a huge challenge. Ideally, you would have a cluster of machines that you could PXE boot to a base OS install and run a complete install of the system. Unfortunately not everyone has a bunch of extra hardware sitting around. For those of us that are a bit on the frugal side, a whole lot of testing can be done with Virtual Machines. Read on for a simple guide to installing OpenStack Compute (Nova) with VirtualBox and Vagrant.</para>
<simplesect><title>Installing VirtualBox</title>
<para>VirtualBox is virtualization software by Oracle. It runs on Mac/Linux/Windows and can be controlled from the command line. Note that we will be using VirtualBox 4.0 and the vagrant prerelease.</para>
<para>OSX</para>
<literallayout class="monospaced">curl -O http://download.virtualbox.org/virtualbox/4.0.2/VirtualBox-4.0.2-69518-OSX.dmg&#x000A;open VirtualBox-4.0.2-69518-OSX.dmg</literallayout>
<para>Ubuntu Maverick</para>
<literallayout class="monospaced">wget -q http://download.virtualbox.org/virtualbox/debian/oracle_vbox.asc -O- | sudo apt-key add -&#x000A;echo &quot;deb http://download.virtualbox.org/virtualbox/debian maverick contrib&quot; | sudo tee /etc/apt/sources.list.d/virtualbox.list&#x000A;sudo apt-get update&#x000A;sudo apt-get install -y virtualbox-4.0</literallayout>
<para>Ubuntu Lucid</para>
<literallayout class="monospaced">wget -q http://download.virtualbox.org/virtualbox/debian/oracle_vbox.asc -O- | sudo apt-key add -&#x000A;echo &quot;deb http://download.virtualbox.org/virtualbox/debian lucid contrib&quot; | sudo tee /etc/apt/sources.list.d/virtualbox.list&#x000A;sudo apt-get update&#x000A;sudo apt-get install -y virtualbox-4.0</literallayout></simplesect>
<simplesect><title>Install RubyGems</title>
<para>The following instructions for installing Vagrant use RubyGems for the installation commands. You can download RubyGems from <link xlink:href="http://rubygems.org/pages/download">http://rubygems.org/pages/download</link>. </para>
</simplesect>
<simplesect><title>Get the Vagrant Pre-release</title>
<para>OSX</para>
<literallayout class="monospaced">sudo gem update -- system&#x000A;sudo gem install vagrant -- pre</literallayout>
<para>Ubuntu Maverick</para>
<literallayout class="monospaced">sudo gem install vagrant --pre&#x000A;sudo ln -s /var/lib/gems/1.8/bin/vagrant /usr/local/bin/vagrant</literallayout>
<para>Ubuntu Lucid</para>
<literallayout class="monospaced">wget http://production.cf.rubygems.org/rubygems/rubygems-1.3.6.zip&#x000A;sudo apt-get install -y unzip&#x000A;unzip rubygems-1.3.6.zip&#x000A;cd rubygems-1.3.6&#x000A;sudo ruby setup.rb&#x000A;sudo gem1.8 install vagrant --pre</literallayout></simplesect>
<simplesect> <title>Get the Chef Recipes</title>
<literallayout class="monospaced">cd ~&#x000A;git clone https://github.com/ansolabs/openstack-cookbooks/openstack-cookbooks.git</literallayout>
</simplesect>
<simplesect><title>Set Up Some Directories</title>
<literallayout class="monospaced">mkdir aptcache&#x000A;mkdir chef&#x000A;cd chef</literallayout>
</simplesect>
<simplesect><title>Get the chef-solo Vagrant file</title>
<para>Provisioning for vagrant can use chef-solo, chef-server, or puppet. We&#8217;re going to use chef-solo for the installation of OpenStack Compute.</para>
<literallayout class="monospaced">curl -o Vagrantfile https://raw.github.com/gist/786945/solo.rb</literallayout>
</simplesect>
<simplesect><title>Running OpenStack Compute within a Vagrant Instance</title>
<para>Installing and running OpenStack Compute is as simple as typing "vagrant up"</para>
<literallayout class="monospaced">vagrant up</literallayout>
<para>In 3-10 minutes, your vagrant instance should be running. NOTE: Some people report an
error from vagrant complaining about MAC addresses the first time they vagrant up.
Doing <code>vagrant up</code> again seems to resolve the problem.</para>
<literallayout class="monospaced">vagrant ssh</literallayout>
<para>Now you can run an instance and connect to it:</para>
<literallayout class="monospaced">. /vagrant/novarc&#x000A;euca-add-keypair test &gt; test.pem&#x000A;chmod 600 test.pem&#x000A;euca-run-instances -t m1.tiny -k test ami-tty&#x000A;# wait for boot (euca-describe-instances should report running)&#x000A;ssh -i test.pem root@10.0.0.3</literallayout>
<para>Yo, dawg, your VMs have VMs! That is, you are now running an instance inside of OpenStack Compute, which itself is running inside a VirtualBox VM.</para>
            <para>When you are finished, you can destroy the entire system with vagrant destroy. You will also need to remove the .pem files and the novarc if you want to run the system again.</para>
<literallayout class="monospaced">vagrant destroy&#x000A;rm *.pem novarc</literallayout></simplesect>
<simplesect><title>Using the dashboard
</title><para>The OpenStack Dashboard should be running on 192.168.86.100. You can login using username: admin, password: vagrant.</para>
</simplesect></section>
</chapter>

View File

@ -0,0 +1,997 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
<?dbhtml filename="ch_configuring-openstack-compute.html" ?>
<title>Configuring OpenStack Compute</title>
    <para>The OpenStack system has several key projects that are separate installations but can
        work together depending on your cloud needs: OpenStack Compute, OpenStack Object
        Storage, and OpenStack Image Service. You can install any of these projects separately and
        then configure them either as standalone or connected entities.</para>
<section>
<?dbhtml filename="general-compute-configuration-overview.html" ?>
<title>General Compute Configuration Overview</title>
<para>Most configuration information is available in the nova.conf flag file. Here are
some general purpose flags that you can use to learn more about the flag file and the
node. The configuration file nova.conf is typically stored in
/etc/nova/nova.conf.</para>
<para>You can use a particular flag file by using the --flagfile (nova.conf) parameter when
running one of the nova- services. This inserts flag definitions from the given configuration file name, which may be useful for debugging or performance tuning. Here are some general purpose flags. </para>
<table rules="all">
<caption>Description of general purpose nova.conf flags </caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead>
<tbody><tr>
<td>--my_ip</td>
<td>None</td>
<td>IP address; Calculated to contain the host IP address.</td>
</tr>
<tr>
<td>--host</td>
<td>None</td>
<td>String value; Calculated to contain the name of the node where the cloud controller is hosted</td>
</tr>
<tr>
<td>-?, --[no]help</td>
<td>None</td>
<td>Show this help.</td>
</tr>
<tr>
<td>--[no]helpshort</td>
<td>None</td>
<td>Show usage only for this module.</td>
</tr>
<tr>
<td>--[no]helpxml</td>
<td>None</td>
<td>Show this help, but with XML output instead of text</td>
</tr>
</tbody>
</table>
<para>If you want to maintain the state of all the services, you can use the --state_path flag to indicate a top-level directory for storing data related to the state of Compute including images if you are using the Compute object store. Here are additional flags that apply to all nova- services.</para>
<table rules="all">
<caption>Description of nova.conf flags for all services </caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead>
<tbody><tr>
<td>--state_path</td>
<td>'/Users/username/p/nova/nova/../'</td>
<td>Directory path; Top-level directory for maintaining nova's state.</td>
</tr>
<tr>
<td>--periodic_interval</td>
<td>default: '60'</td>
<td>Integer value; Seconds between running periodic tasks.</td>
</tr>
<tr>
<td>--report_interval</td>
<td>default: '10'</td>
<td>Integer value; Seconds between nodes reporting state to the data store.</td>
</tr>
</tbody>
</table>
</section>
<section>
<?dbhtml filename="sample-nova-configuration-files.html" ?>
<title>Example nova.conf Configuration Files</title>
        <para>The following sections describe many of the flag settings that can go into the nova.conf files. These need to be copied to each compute node. Here are some sample nova.conf files that offer examples of specific configurations.</para>
<simplesect><title>Configuration using KVM, FlatDHCP, MySQL, Glance, LDAP, and optionally sheepdog, API is EC2</title>
<para>From <link xlink:href="http://wikitech.wikimedia.org/view/OpenStack#On_the_controller_and_all_compute_nodes.2C_configure_.2Fetc.2Fnova.2Fnova.conf">wikimedia.org</link>, used with permission. Where you see parameters passed in, it's likely an IP address you need. </para><literallayout class="monospaced">
# configured using KVM, FlatDHCP, MySQL, Glance, LDAP, and optionally sheepdog, API is EC2
--verbose
--daemonize=1
--logdir=/var/log/nova
--state_path=/var/lib/nova
--lock_path=/var/lock/nova
--sql_connection=mysql://$nova_db_user:$nova_db_pass@$nova_db_host/$nova_db_name
--image_service=nova.image.glance.GlanceImageService
--s3_host=$nova_glance_host
--glance_api_servers=$nova_glance_host
--rabbit_host=$nova_rabbit_host
--network_host=$nova_network_host
--ec2_url=http://$nova_api_host:8773/services/Cloud
--libvirt_type=kvm
--dhcpbridge=/usr/bin/nova-dhcpbridge
--network_manager=nova.network.manager.FlatDHCPManager
--flat_interface=$nova_network_flat_interface
--public_interface=$nova_network_public_interface
--routing_source_ip=$nova_network_public_ip
--ajax_console_proxy_url=$nova_ajax_proxy_url
--volume_driver=nova.volume.driver.SheepdogDriver
--auth_driver=nova.auth.ldapdriver.LdapDriver
--ldap_url=ldap://$nova_ldap_host
--ldap_password=$nova_ldap_user_pass
--ldap_user_dn=$nova_ldap_user_dn
--ldap_user_unit=people
--ldap_user_subtree=ou=people,$nova_ldap_base_dn
--ldap_project_subtree=ou=groups,$nova_ldap_base_dn
--role_project_subtree=ou=groups,$nova_ldap_base_dn
--ldap_cloudadmin=cn=cloudadmins,ou=groups,$nova_ldap_base_dn
--ldap_itsec=cn=itsec,ou=groups,$nova_ldap_base_dn
--ldap_sysadmin=cn=sysadmins,$nova_ldap_base_dn
--ldap_netadmin=cn=netadmins,$nova_ldap_base_dn
--ldap_developer=cn=developers,$nova_ldap_base_dn
</literallayout></simplesect>
<simplesect><title>KVM, Flat, MySQL, and Glance, OpenStack or EC2 API</title><para>This example nova.conf file is from an internal Rackspace test system used for demonstrations. </para>
<literallayout class="monospaced">
# configured using KVM, Flat, MySQL, and Glance, API is OpenStack (or EC2)
--daemonize=1
--dhcpbridge_flagfile=/etc/nova/nova.conf
--dhcpbridge=/usr/bin/nova-dhcpbridge
--lock_path=/var/lock/nova
--logdir=/var/log/nova
--state_path=/var/lib/nova
--verbose
--network_manager=nova.network.manager.FlatManager
--sql_connection=mysql://$nova_db_user:$nova_db_pass@$nova_db_host/$nova_db_name
--osapi_host=$nova_api_host
--rabbit_host=$rabbit_api_host
--ec2_host=$nova_api_host
--image_service=nova.image.glance.GlanceImageService
--glance_api_servers=$nova_glance_host
# first 3 octets of the network your volume service is on, substitute with real numbers
--iscsi_ip_prefix=nnn.nnn.nnn
</literallayout></simplesect>
</section>
<section><?dbhtml filename="configuring-logging.html" ?>
<title>Configuring Logging</title>
<para>You can use nova.conf flags to indicate where Compute will log events, the level of logging, and customize log formats.</para>
<table rules="all">
<caption>Description of nova.conf flags for logging </caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead>
<tbody><tr>
<td>--logdir</td>
<td>'/var/logs/nova'</td>
<td>Directory path; Output to a per-service log file in the named directory.</td>
</tr>
<tr>
<td>--logfile</td>
<td>default: ''</td>
<td>File name; Output to named file.</td>
</tr>
<tr>
<td>--[no]use_syslog</td>
<td>default: 'false'</td>
<td>Output to syslog using their file naming system.</td>
</tr><tr>
<td>--default_log_levels</td>
<td>default: 'amqplib=WARN,sqlalchemy=WARN,eventlet.wsgi.server=WARN'</td>
<td>Pair of named loggers and level of message to be logged; List of
logger=LEVEL pairs</td>
</tr>
<tr>
<td>--verbose</td>
<td>default: 'false'</td>
<td>Set to 1 or true to turn on; Shows debug output - optional but helpful during initial setup.</td>
</tr>
</tbody>
</table>
<para>To customize log formats for OpenStack Compute, use these flag settings.</para>
<table rules="all">
<caption>Description of nova.conf flags for customized log formats </caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>--logging_context_format_string</td>
<td>default: '%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user)s
%(project)s] %(message)s'</td>
<td>The format string to use for log messages with additional context.</td>
</tr>
<tr>
<td>--logging_debug_format_suffix</td>
<td>default: 'from %(processName)s (pid=%(process)d) %(funcName)s
%(pathname)s:%(lineno)d'</td>
<td>The data to append to the log format when level is DEBUG</td>
</tr>
<tr>
<td>--logging_default_format_string</td>
<td>default: '%(asctime)s %(levelname)s %(name)s [-] %(message)s'</td>
<td>The format string to use for log messages without context.</td>
</tr>
<tr>
<td>--logging_exception_prefix</td>
<td>default: '(%(name)s): TRACE: '</td>
<td>String value; Prefix each line of exception output with this format.</td>
</tr>
</tbody>
</table>
</section>
<section>
<?dbhtml filename="configuring-hypervisors.html" ?>
<title>Configuring Hypervisors </title>
<para>OpenStack Compute requires a hypervisor and supports several hypervisors and virtualization standards. Configuring and running OpenStack Compute to use a particular hypervisor takes several installation and configuration steps. </para>
</section>
<section>
<?dbhtml filename="configuring-compute-to-use-ipv6-addresses.html" ?>
<title>Configuring Compute to use IPv6 Addresses </title>
<para>You can configure Compute to use both IPv4 and IPv6 addresses for communication by
putting it into a IPv4/IPv6 dual stack mode. In IPv4/IPv6 dual stack mode, instances can
acquire their IPv6 global unicast address by stateless address autoconfiguration
mechanism [RFC 4862/2462]. IPv4/IPv6 dual stack mode works with VlanManager and
FlatDHCPManager networking modes, though floating IPs are not supported in the Bexar
release. In VlanManager, different 64bit global routing prefix is used for each project.
In FlatDHCPManager, one 64bit global routing prefix is used for all instances. The
Cactus release includes support for the FlatManager networking mode with a required
database migration.</para>
<para>This configuration has been tested on Ubuntu 10.04 with VM images that have IPv6
stateless address autoconfiguration capability (must use EUI-64 address for stateless
address autoconfiguration), a requirement for any VM you want to run with an IPv6
address. Each node that executes a nova- service must have python-netaddr and radvd
installed. </para>
<para>On all nova-nodes, install python-netaddr:</para>
<para>
<literallayout class="monospaced">sudo apt-get install -y python-netaddr</literallayout>
</para>
<para>On all nova-network nodes install radvd and configure IPv6 networking: </para>
<literallayout class="monospaced">sudo apt-get install -y radvd
sudo bash -c "echo 1 > /proc/sys/net/ipv6/conf/all/forwarding"
sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra"</literallayout>
<para>Edit the nova.conf file on all nodes to set the --use_ipv6 flag to True. Restart all
nova- services. </para>
<para>When using the command 'nova-manage network create' you can add a fixed range for IPv6
addresses. You must specify public or private after the create parameter.</para>
<para>
<literallayout class="monospaced">nova-manage network create public fixed_range num_networks network_size [vlan_start] [vpn_start] [fixed_range_v6]</literallayout>
</para>
        <para>You can set the IPv6 global routing prefix by using the fixed_range_v6 parameter. The
            default is: fd00::/48. When you use FlatDHCPManager, the command uses the original value
            of fixed_range_v6. When you use VlanManager, the command creates subnet prefixes by
            incrementing the subnet id. Guest VMs use this prefix for generating their IPv6 global
            unicast address. </para>
<para>Here is a usage example for VlanManager: </para>
<para>
<literallayout class="monospaced">nova-manage network create public 10.0.1.0/24 3 32 100 1000 fd00:1::/48 </literallayout>
</para>
<para>Here is a usage example for FlatDHCPManager: </para>
<para>
<literallayout class="monospaced">nova-manage network create public 10.0.2.0/24 3 32 0 0 fd00:1::/48 </literallayout>
</para>
<para>Note that [vlan_start] and [vpn_start] parameters are not used by
FlatDHCPManager.</para>
<table rules="all">
<caption>Description of nova.conf flags for configuring IPv6</caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr><td>--use_ipv6</td>
<td>default: 'false'</td>
<td>Set to 1 or true to turn on; Determines whether to use IPv6 network addresses </td>
</tr>
<tr>
<td>--flat_injected</td>
<td>default: 'false'</td>
                    <td>Cactus only: Indicates whether Compute (Nova) should attempt to inject IPv6 network configuration information into the guest. It attempts to modify /etc/network/interfaces and currently only works on Debian-based systems. </td>
</tr>
</tbody>
</table>
</section>
<section><?dbhtml filename="configuring-compute-to-use-the-image-service.html" ?>
<title>Configuring Image Service and Storage for Compute</title>
<para>You can either use a local image storage system or install Glance for storing and retrieving images. After you have installed a Glance server, you can configure nova-compute to
use Glance for image storage and retrieval. You must change the --image_service flag to
'nova.image.glance.GlanceImageService' in order to use Glance to store and retrieve
images for OpenStack Compute.</para>
<table rules="all">
<caption>Description of nova.conf flags for the Glance image service and
storage</caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>--image_service</td>
<td>default: 'nova.image.local.LocalImageService'</td>
<td><para>The service to use for retrieving and searching for images. Images must be registered using
euca2ools. Options: </para><itemizedlist>
<listitem>
<para>nova.image.s3.S3ImageService</para>
<para>S3 backend for the Image Service.</para>
</listitem>
<listitem>
<para>nova.image.local.LocalImageService</para>
<para>Image service storing images to local disk. It assumes that image_ids are integers. This is the default setting if no image manager is defined here.</para>
</listitem>
<listitem>
<para><emphasis role="bold"
>nova.image.glance.GlanceImageService</emphasis></para>
<para>Glance back end for storing and retrieving images; See <link
xlink:href="http://glance.openstack.org"
>http://glance.openstack.org</link> for more info.</para>
</listitem>
</itemizedlist></td>
</tr>
<tr>
<td>--glance_api_servers</td>
<td>default: '$my_ip:9292'</td>
<td>List of Glance API hosts. Each item may contain a host (or IP address) and
port of an OpenStack Compute Image Service server (project's name is
Glance)</td>
</tr>
<tr>
<td>--s3_dmz</td>
<td>default: '$my_ip'</td>
<td>IP address; For instances internal IP (a DMZ is shorthand for a
demilitarized zone)</td>
</tr>
<tr>
<td>--s3_host</td>
<td>default: '$my_ip'</td>
<td>IP address: IP address of the S3 host for infrastructure. Location where
OpenStack Compute is hosting the objectstore service, which will contain the
virtual machine images and buckets.</td>
</tr>
<tr>
<td>--s3_port</td>
<td>default: '3333'</td>
<td>Integer value; Port where S3 host is running</td>
</tr>
<tr><td>--use_s3</td>
<td>default: 'true'</td>
<td>Set to 1 or true to turn on; Determines whether to get images from s3 or use a local copy </td></tr>
</tbody>
</table>
<para>If you choose not to use Glance for the image service, you can use the object store
that maintains images in a particular location, namely the state path on the server
local to the nova.conf file. You can also use a set of S3 buckets to store
images.</para>
<table rules="all">
<caption>Description of nova.conf flags for local image storage</caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>--image_service</td>
<td>default: 'nova.image.local.LocalImageService'</td>
<td><para>The service to use for retrieving and searching for images. Images must be registered using
euca2ools. Options: </para><itemizedlist>
<listitem>
<para>nova.image.s3.S3ImageService</para>
<para>S3 backend for the Image Service; In Cactus, the S3
image service wraps the other image services for use by the EC2
API. The EC2 api will always use the S3 image service by default
so setting the flag is not necessary.</para>
</listitem>
<listitem>
<para><emphasis role="bold">
nova.image.local.LocalImageService
</emphasis></para>
<para>Image service storing images to local disk. It assumes that
image_ids are integers.</para>
</listitem>
<listitem>
<para>nova.image.glance.GlanceImageService</para>
<para>Glance back end for storing and retrieving images; See <link
xlink:href="http://glance.openstack.org"
>http://glance.openstack.org</link> for more info.</para>
</listitem>
</itemizedlist></td>
</tr>
<tr>
<td>--state_path</td>
<td>'/Users/username/p/nova/nova/../'</td>
<td>Directory path; Top-level directory for maintaining nova's state.</td>
</tr>
<tr>
<td>--buckets_path</td>
<td>'$state_path/buckets'</td>
<td>Directory path; Directory established for S3-style buckets.</td>
</tr>
<tr>
<td>--images_path</td>
<td>'$state_path/images'</td>
<td>Directory path; Directory that stores images when using object store.</td>
</tr>
</tbody>
</table>
</section>
<section><?dbhtml filename="configuring-live-migrations.html" ?>
<title>Configuring Live Migrations </title>
        <para>The live migration feature is useful when you need to upgrade or install patches to hypervisors/BIOS and you need the machines to keep running. For example, when one of the HDD volumes in a RAID array or one of the bonded NICs fails. Also, for regular periodic maintenance, you may need to migrate VM instances. When many VM instances are running on a specific physical machine, you can redistribute the high load. Sometimes, when VM instances are scattered, you can move VM instances to a physical machine to arrange them more logically.</para>
<para>
<emphasis role="bold">Environments</emphasis>
<itemizedlist>
<listitem>
<para><emphasis role="bold">OS:</emphasis> Ubuntu 10.04/10.10
for both instances and host.</para>
</listitem>
<listitem>
<para><emphasis role="bold">Shared storage:</emphasis>
NOVA-INST-DIR/instances/ has to be mounted by shared storage
(tested using NFS).</para>
</listitem>
<listitem>
<para><emphasis role="bold">Instances:</emphasis> Instance can
be migrated with ISCSI/AoE based volumes</para>
</listitem>
<listitem>
<para><emphasis role="bold">Hypervisor:</emphasis>
KVM with libvirt</para>
</listitem>
<listitem>
                    <para><emphasis role="bold">(NOTE1)</emphasis>
                        "NOVA-INST-DIR/instances" is where the VM images are expected to be
                        placed; see "flags.instances_path" in nova.compute.manager for the
                        default value</para>
</listitem>
<listitem>
<para><emphasis role="bold">(NOTE2)</emphasis> This feature is
admin only, since nova-manage is necessary.</para>
</listitem>
</itemizedlist>
</para>
<para>
<emphasis role="bold">Sample Nova Installation before starting</emphasis>
<itemizedlist>
<listitem>
                    <para>Prepare at least 3 servers; let's say HostA, HostB,
                        and HostC</para>
</listitem>
<listitem>
<para>nova-api/nova-network/nova-volume/nova-objectstore/
nova-scheduler(and other daemon) are running on HostA.</para>
</listitem>
<listitem>
<para>nova-compute is running on both HostB and HostC.</para>
</listitem>
<listitem>
                    <para>HostA exports NOVA-INST-DIR/instances; HostB and HostC
                        mount it.</para>
</listitem>
<listitem>
                    <para>To avoid any confusion, NOVA-INST-DIR is the same on
                        HostA/HostB/HostC ("NOVA-INST-DIR" refers to the top of the install directory). </para>
</listitem>
<listitem>
                    <para>HostA exports NOVA-INST-DIR/instances; HostB and HostC mount it.</para>
</listitem>
</itemizedlist>
</para>
<para><emphasis role="bold">Pre-requisite configurations</emphasis></para>
<para>
<orderedlist>
<listitem>
                    <para>Configure /etc/hosts. Make sure the 3 hosts can do name resolution
                        with each other. Pinging each other is a good way to test.</para>
<programlisting><![CDATA[
# ping HostA
# ping HostB
# ping HostC
]]></programlisting>
</listitem>
<listitem>
<para>Configure NFS at HostA by adding below to /etc/exports</para>
                    <literallayout class="monospaced">NOVA-INST-DIR/instances HostA/255.255.0.0(rw,sync,fsid=0,no_root_squash)</literallayout>
                    <para> Change "255.255.0.0" to the appropriate netmask, which should include
                        HostB/HostC. Then restart the NFS server.</para>
<programlisting><![CDATA[
# /etc/init.d/nfs-kernel-server restart
# /etc/init.d/idmapd restart
]]></programlisting>
</listitem>
<listitem>
<para>Configure NFS at HostB and HostC by adding below to
/etc/fstab</para>
<literallayout class="monospaced">HostA:/ DIR nfs4 defaults 0 0</literallayout>
<para>Then mount, check exported directory can be mounted.</para>
<literallayout class="monospaced"># mount -a -v</literallayout>
<para>If fail, try this at any hosts.</para>
<literallayout class="monospaced"># iptables -F</literallayout>
                    <para>Also, check file/daemon permissions. We expect all nova daemons
                        to be running as root. </para>
<programlisting><![CDATA[
# ps -ef | grep nova
root 5948 5904 9 11:29 pts/4 00:00:00 python /opt/nova-2010.4//bin/nova-api
root 5952 5908 6 11:29 pts/5 00:00:00 python /opt/nova-2010.4//bin/nova-objectstore
... (snip)
]]></programlisting>
<para>"NOVA-INST-DIR/instances/" directory can be seen at HostA</para>
<programlisting><![CDATA[
# ls -ld NOVA-INST-DIR/instances/
drwxr-xr-x 2 root root 4096 2010-12-07 14:34 nova-install-dir/instances/
]]></programlisting>
<para>Same check at HostB and HostC</para>
<programlisting><![CDATA[
# ls -ld NOVA-INST-DIR/instances/
drwxr-xr-x 2 root root 4096 2010-12-07 14:34 nova-install-dir/instances/
# df -k
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/sda1 921514972 4180880 870523828 1% /
none 16498340 1228 16497112 1% /dev
none 16502856 0 16502856 0% /dev/shm
none 16502856 368 16502488 1% /var/run
none 16502856 0 16502856 0% /var/lock
none 16502856 0 16502856 0% /lib/init/rw
HostA: 921515008 101921792 772783104 12% /opt ( <--- this line is important.)
]]></programlisting>
</listitem>
<listitem>
                    <para>Libvirt configurations. Modify /etc/libvirt/libvirtd.conf:</para>
<programlisting><![CDATA[
before : #listen_tls = 0
after : listen_tls = 0
before : #listen_tcp = 1
after : listen_tcp = 1
add: auth_tcp = "none"
]]></programlisting>
<para>Modify /etc/init/libvirt-bin.conf</para>
<programlisting><![CDATA[
before : exec /usr/sbin/libvirtd -d
after : exec /usr/sbin/libvirtd -d -l
]]></programlisting>
<para>Modify /etc/default/libvirt-bin</para>
<programlisting><![CDATA[
before :libvirtd_opts=" -d"
after :libvirtd_opts=" -d -l"
]]></programlisting>
<para>then, restart libvirt. Make sure libvirt is restarted.</para>
<programlisting><![CDATA[
# stop libvirt-bin && start libvirt-bin
# ps -ef | grep libvirt
root 1145 1 0 Nov27 ? 00:00:03 /usr/sbin/libvirtd -d -l
]]></programlisting>
</listitem>
<listitem>
                    <para>Flag configuration. Usually, you do not have to configure
                        any flags. The chart below is only for advanced usage.</para>
</listitem>
</orderedlist>
</para>
<table rules="all">
<caption>Description of nova.conf flags for live migration</caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr><td>--live_migration_retry_count</td>
<td>default: 30</td>
<td>Retry count needed in live_migration. Sleep 1sec for each retry</td>
</tr>
<tr>
<td>--live_migration_uri</td>
<td>default: 'qemu+tcp://%s/system'</td>
<td>Define protocol used by live_migration feature. If you would like to use qemu+ssh, change this as described at http://libvirt.org/. </td>
</tr>
<tr>
<td>--live_migration_bandwidth</td>
<td>default: 0</td>
<td>Define bandwidth used by live migration. </td>
</tr>
<tr>
<td>--live_migration_flag</td>
<td>default: 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER'</td>
<td>Define libvirt flag for live migration. </td>
</tr>
</tbody>
</table>
</section>
<section><?dbhtml filename="configuring-database-connections.html" ?>
<title>Configuring Database Connections </title>
<para>You can configure OpenStack Compute to use any SQLAlchemy-compatible database. The database name is 'nova' and entries to it are mostly written by the nova-scheduler service, although all the services need to be able to update entries in the database. Use these settings to configure the connection string for the nova database.</para>
<table rules="all">
<caption>Description of nova.conf flags for database access </caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>--sql_connection</td>
<td>default: 'sqlite:///$state_path/nova.sqlite'</td>
<td>IP address; Location of OpenStack Compute SQL database</td>
</tr>
<tr>
<td>--sql_idle_timeout</td>
<td>default: '3600'</td>
<td/>
</tr>
<tr>
<td>--sql_max_retries</td>
<td>default: '12'</td>
<td>Integer value; Number of attempts on the SQL connection</td>
</tr>
<tr>
<td>--sql_retry_interval</td>
<td>default: '10'</td>
<td>Integer value; Retry interval for SQL connections</td>
</tr>
<tr>
<td>--db_backend</td>
<td>default: 'sqlalchemy'</td>
<td>The backend selected for the database connection</td>
</tr>
<tr>
<td>--db_driver</td>
<td>default: 'nova.db.api'</td>
                    <td>The driver to use for database access</td>
</tr>
</tbody>
</table>
</section>
<section><?dbhtml filename="configuring-compute-messaging.html" ?>
<title>Configuring the Compute Messaging System</title>
<para>OpenStack Compute uses an open standard for messaging middleware known as AMQP.
RabbitMQ enables this messaging system so that nova- services can talk to each other.
You can configure the messaging communication for different installation scenarios as
well as tune RabbitMQ's retries and the size of the RPC thread pool. </para>
<table rules="all">
<caption>Description of nova.conf flags for Remote Procedure Calls and RabbitMQ Messaging </caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>--rabbit_host</td>
<td>default: 'localhost'</td>
<td>IP address; Location of RabbitMQ installation.</td>
</tr>
<tr>
<td>--rabbit_password</td>
<td>default: 'guest'</td>
<td>String value; Password for the RabbitMQ server.</td>
</tr>
<tr>
<td>--rabbit_port</td>
<td>default: '5672'</td>
<td>Integer value; Port where RabbitMQ server is running/listening.</td>
</tr>
<tr>
<td>--rabbit_userid</td>
<td>default: 'guest'</td>
<td>String value; User ID used for Rabbit connections.</td>
</tr>
<tr>
<td>--rabbit_virtual_host</td>
<td>default: '/'</td>
<td>Location of a virtual RabbitMQ installation.</td>
</tr>
</tbody>
</table>
<table rules="all">
<caption>Description of nova.conf flags for Tuning RabbitMQ Messaging </caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead><tbody>
<tr>
<td>--rabbit_max_retries</td>
<td>default: '12'</td>
<td>Integer value; RabbitMQ connection attempts.</td>
</tr>
<tr>
<td>--rabbit-retry-interval</td>
<td>default: '10'</td>
<td>Integer value: RabbitMQ connection retry interval.</td>
</tr>
<tr>
<td>--rpc_thread_pool_size</td>
<td>default: '1024'</td>
<td>Integer value: Size of Remote Procedure Call thread pool.</td>
</tr>
</tbody></table>
<table rules="all">
<caption>Description of nova.conf flags for Customizing Exchange or Topic Names </caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead><tbody>
<tr>
<td>--control_exchange</td>
<td>default:nova</td>
<td>String value; Name of the main exchange to connect to</td>
</tr>
<tr>
<td>--ajax_console_proxy_topic</td>
<td>default: 'ajax_proxy'</td>
<td>String value; Topic that the ajax proxy nodes listen on</td>
</tr>
<tr>
<td>--console_topic</td>
<td>default: 'console'</td>
<td>String value; The topic console proxy nodes listen on</td>
</tr>
<tr>
<td>--network_topic</td>
<td>default: 'network'</td>
<td>String value; The topic network nodes listen on.</td>
</tr>
<tr>
<td>--scheduler_topic</td>
<td>default: 'scheduler'</td>
<td>String value; The topic scheduler nodes listen on.</td>
</tr>
<tr>
<td>--volume_topic</td>
<td>default: 'volume'</td>
<td>String value; Name of the topic that volume nodes listen on</td>
</tr>
</tbody></table>
</section>
<section><?dbhtml filename="configuring-authentication-authorization.html" ?>
<title>Configuring Authentication and Authorization </title>
<para>OpenStack Compute uses an implementation of an authentication system structured like
having an Active Directory or other federated LDAP user store that backends to an
identity manager or other SAML Policy Controller that then maps to groups. You can also
customize roles for projects. Credentials for API calls are stored in the project zip
            file. Certificate authority is also customized in nova.conf. </para>
<table rules="all">
<caption>Description of nova.conf flag for Authentication</caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead><tbody>
<tr>
<td>--auth_driver</td>
<td>default:'nova.auth.dbdriver.DbDriver'</td>
<td><para>String value; Name of the driver for authentication</para><itemizedlist>
<listitem>
<para>nova.auth.dbdriver.DbDriver - Default setting.</para>
</listitem>
<listitem>
<para>nova.auth.ldapdriver.FakeLdapDriver - create a replacement
for this driver supporting other backends by creating another
class that exposes the same public methods</para>
<para>.</para>
</listitem>
</itemizedlist></td>
</tr>
</tbody>
</table>
<table rules="all">
<caption>Description of nova.conf flags for customizing roles</caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead><tbody>
<tr>
<td>--allowed_roles</td>
<td>default: 'cloudadmin,itsec,sysadmin,netadmin,developer') </td>
<td>Comma separated list; Allowed roles for project</td>
</tr>
<tr>
<td>--global_roles</td>
<td>default: 'cloudadmin,itsec') </td>
<td>Comma separated list; Roles that apply to all projects</td>
</tr>
<tr>
<td>--superuser_roles</td>
<td>default: 'cloudadmin') </td>
<td>Comma separated list; Roles that ignore authorization checking completely</td>
</tr>
</tbody>
</table>
<table rules="all">
<caption>Description of nova.conf flags for credentials</caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead><tbody>
<tr>
<td>--credentials_template</td>
<td>default: '/Users/termie/p/nova/nova/auth/novarc.template') </td>
<td>Directory; Template for creating users' RC file</td>
</tr>
<tr>
<td>--credential_rc_file</td>
<td>default: '%src') </td>
<td>File name; File name of rc in credentials zip</td>
</tr>
<tr>
<td>--credential_cert_file</td>
<td>default: 'cert.pem') </td>
<td>File name; File name of certificate in credentials zip</td>
</tr>
<tr>
<td>--credential_key_file</td>
<td>default: 'pk.pem') </td>
                    <td>File name; File name of the private key in credentials zip</td>
</tr>
<tr>
<td>--vpn_client_template</td>
<td>default: 'nova/cloudpipe/client/ovpn.template') </td>
<td>Directory; Refers to where the template lives for creating users vpn file</td>
</tr>
<tr>
<td>--credential_vpn_file</td>
<td>default: 'nova-vpn.conf') </td>
                    <td>File name; File name of the vpn configuration in credentials zip</td>
</tr>
</tbody></table>
<table rules="all">
<caption>Description of nova.conf flags for CA (Certificate Authority)</caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead><tbody>
<tr>
<td>--keys_path</td>
<td>default: '$state_path/keys') </td>
<td>Directory; Where Nova keeps the keys</td>
</tr>
<tr>
<td>--ca_file</td>
<td>default: 'cacert.pem') </td>
<td>File name; File name of root CA</td>
</tr>
<tr>
<td>--crl_file</td>
<td>default: 'crl.pem') </td>
<td>File name; File name of Certificate Revocation List</td>
</tr>
<tr>
<td>--key_file</td>
<td>default: 'private/cakey.pem') </td>
<td>File name; File name of private key</td>
</tr>
<tr>
<td>--use_project_ca</td>
<td>default: 'false') </td>
<td>True or false; Indicates whether to use a CA for each project; false means CA is not used for each project</td>
</tr>
<tr>
<td>--project_cert_subject</td>
<td>default: '/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=proje ct-ca-%s-%s') </td>
<td>String; Subject for certificate for projects, %s for project, timestamp </td>
</tr>
<tr>
<td>--user_cert_subject</td>
<td>default: '/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=%s-%s-%s') </td>
<td>String; Subject for certificate for users, %s for project, users, timestamp </td>
</tr>
<tr>
<td>--vpn_cert_subject</td>
<td>default: '/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=project-vpn-%s-%s') </td>
<td>String; Subject for certificate for vpns, %s for project, timestamp </td>
</tr>
</tbody></table>
</section>
</chapter>

View File

@ -0,0 +1,141 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"><?dbhtml filename="ch_hypervisors.html" ?>
<title>Hypervisors</title>
<para>This section assumes you have a working installation of OpenStack Compute and want to
select a particular hypervisor or run with multiple hypervisors. Before you try to get a VM
running within OpenStack Compute, be sure you have installed a hypervisor and used the
hypervisor's documentation to run a test VM and get it working.</para>
<section><?dbhtml filename="selecting-a-hypervisor.html" ?>
<title>Selecting a Hypervisor</title>
<para>OpenStack Compute supports many hypervisors, an array of which must provide a bit of
difficulty in selecting a hypervisor unless you are already familiar with one. You
cannot configure more than one virtualization type on the compute nodes in the Cactus
release, so the hypervisor selection is for the entire installation. These links provide
additional information for choosing a hypervisor. </para>
<para>Here is a list of the supported hypervisors with links to a relevant web site for
configuration and use:</para>
<itemizedlist>
<listitem><para><link xlink:href="https://www.microsoft.com/windowsserver2008/en/us/hyperv-main.aspx">Hyper-V
2008</link> - Use to run Windows-based virtual machines, specifically
Windows 2008 R2 Datacenter or Enterprise Edition. You must install and run
nova-compute on Windows servers that run the Windows-based virtual
machines.</para></listitem>
<listitem><para><link xlink:href="http://www.linux-kvm.org/page/Main_Page">KVM</link> - Kernel-based Virtual
Machine. The virtual disk formats that it supports it inherits from QEMU since
it uses a modified QEMU program to launch the virtual machine. The supported
formats include raw images, the qcow2, and VMware formats. </para></listitem>
<listitem><para><link xlink:href="http://lxc.sourceforge.net/">LXC</link> - Linux Containers (through
libvirt), use to run Linux-based virtual machines.</para></listitem>
<listitem><para><link xlink:href="http://wiki.qemu.org/Manual">QEMU</link> - Quick EMUlator, generally only
used for development purposes.</para></listitem>
<listitem><para><link xlink:href="http://user-mode-linux.sourceforge.net/">UML</link> - User Mode Linux,
generally only used for development purposes. </para></listitem>
<listitem><para><link xlink:href="http://www.vmware.com/products/vsphere-hypervisor/support.html">VMWare
ESX/ESXi</link> 4.1 update 1, runs VMWare-based Linux and Windows images
through a connection with the ESX server.</para></listitem>
<listitem><para><link xlink:href="http://www.xen.org/support/documentation.html">Xen</link> - XenServer 5.5,
Xen Cloud Platform (XCP), use to run Linux or Windows virtual machines. You must
install the nova-compute service on DomU. </para></listitem></itemizedlist>
</section>
<section><?dbhtml filename="hypervisor-configuration-basics.html" ?><title>Hypervisor Configuration Basics</title>
<para>The node where the nova-compute service is installed and running is the machine that
runs all the virtual machines, referred to as the compute node in this guide. </para>
<para>By default, the selected hypervisor is KVM. To change to another hypervisor, change
the --libvirt_type flag in nova.conf and restart the nova-compute service. </para>
<para>Here are the nova.conf flags that are used to configure the compute node.</para>
<table rules="all">
<caption>Description of nova.conf flags for the compute node</caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr><td>--connection_type</td>
<td>default: 'libvirt'</td>
<td>libvirt, xenapi, or fake; Value that indicates the virtualization
connection type</td></tr>
<tr>
<td>--compute_manager</td>
<td>default: 'nova.compute.manager.ComputeManager'</td>
<td>String value; Manager to use for nova-compute</td>
</tr>
<tr>
<td>--compute_driver</td>
<td>default: 'nova.virt.connection.get_connection'</td>
<td>String value; Driver to use for controlling virtualization</td>
</tr>
<tr>
<td>--images_path</td>
<td>default: '$state_path/images'</td>
<td>Directory; Location where decrypted images are stored on disk (when not
using Glance)</td>
</tr>
<tr>
<td>--instances_path</td>
<td>default: '$state_path/instances'</td>
<td>Directory; Location where instances are stored on disk (when not using
Glance)</td>
</tr>
<tr>
<td>--libvirt_type</td>
<td>default: 'kvm'</td>
<td>String; Libvirt domain type (valid options are: kvm, qemu, uml, xen) </td>
</tr>
<tr>
<td>--allow_project_net_traffic</td>
<td>default: 'true'</td>
<td>True or false; Indicates whether to allow in-project network traffic </td>
</tr>
<tr>
<td>--firewall_driver</td>
<td>default: 'nova.virt.libvirt_conn.IptablesFirewallDriver'</td>
<td>String; Firewall driver for instances, defaults to iptables</td>
</tr>
<tr>
<td>--injected_network_template</td>
<td>default: '/Users/termie/p/nova/nova/virt/interfaces.template'</td>
<td>Directory and file name; Template file for injected network
information</td>
</tr>
<tr>
<td>--libvirt_uri</td>
<td>default: empty string</td>
<td>String; Override the default libvirt URI (which is dependent on libvirt_type)</td>
</tr>
<tr>
<td>--libvirt_xml_template</td>
<td>default: '/Users/termie/p/nova/nova/virt/libvirt.xml.template'</td>
<td>Directory and file name; Libvirt XML template</td>
</tr>
<tr>
<td>--use_cow_images</td>
<td>default: 'true'</td>
<td>True or false; Indicates whether to use cow images</td>
</tr>
<tr>
<td>--rescue_image_id</td>
<td>default: 'ami-rescue'</td>
<td>String; AMI image to use for rescue</td>
</tr>
<tr>
<td>--rescue_kernel_id</td>
<td>default: 'aki-rescue'</td>
<td>String; AKI image to use for rescue</td>
</tr>
<tr>
<td>--rescue_ramdisk_id</td>
<td>default: 'ari-rescue'</td>
<td>String; ARI image to use for rescue</td>
</tr>
</tbody></table>
</section>
</chapter>

View File

@ -0,0 +1,712 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
<?dbhtml filename="ch_installing-openstack-compute.html" ?>
<title>Installing OpenStack Compute</title>
<para>The OpenStack system has several key projects that are separate installations but can
work together depending on your cloud needs: OpenStack Compute, OpenStack Object Storage,
and OpenStack Image Service. You can install any of these projects separately and then
configure them either as standalone or connected entities.</para>
<section><?dbhtml filename="compute-system-requirements.html" ?>
<title>System Requirements</title>
<para><emphasis role="bold">Hardware</emphasis>: OpenStack components are intended to
run on standard hardware. Recommended hardware configurations for a minimum production
deployment are as follows for the cloud controller nodes and compute nodes.</para>
<table rules="all">
<caption>Hardware Recommendations </caption>
<col width="20%"/>
<col width="23%"/>
<col width="57%"/>
<thead>
<tr>
<td>Server</td>
<td>Recommended Hardware</td>
<td>Notes</td>
</tr>
</thead>
<tbody>
<tr>
<td>Cloud Controller node (runs network, volume, API, scheduler and image
services) </td>
<td>
<para>Processor: 64-bit x86</para>
<para>Memory: 12 GB RAM </para>
<para>Disk space: 30 GB (SATA or SAS or SSD) </para>
<para>Volume storage: two disks with 2 TB (SATA) for volumes attached to the
compute nodes </para>
<para>Network: one 1 GB Network Interface Card (NIC)</para>
</td>
<td>
<para>Two NICS are recommended but not required. A quad core server with 12
GB RAM would be more than sufficient for a cloud controller node.</para>
<para>32-bit processors will work for the cloud controller node. </para>
</td>
</tr>
<tr>
<td>Compute nodes (runs virtual instances)</td>
<td>
<para>Processor: 64-bit x86</para>
<para>Memory: 32 GB RAM</para>
<para>Disk space: 30 GB (SATA)</para>
<para>Network: two 1 GB NICs</para>
</td>
<td>
<para>Note that you cannot run 64-bit VM instances on a 32-bit compute node.
A 64-bit compute node can run either 32- or 64-bit VMs, however.</para>
<para>With 2 GB RAM you can run one m1.small instance on a node or three
m1.tiny instances without memory swapping, so 2 GB RAM would be a
minimum for a test-environment compute node. As an example, Rackspace
Cloud Builders use 96 GB RAM for compute nodes in OpenStack
deployments.</para>
<para>Specifically for virtualization on certain hypervisors on the node or
nodes running nova-compute, you need a x86 machine with an AMD processor
with SVM extensions (also called AMD-V) or an Intel processor with VT
(virtualization technology) extensions. </para>
<para>For Xen-based hypervisors, the Xen wiki contains a list of compatible
processors on the <link
xlink:href="http://wiki.xensource.com/xenwiki/HVM_Compatible_Processors"
>HVM Compatible Processors</link> page. For XenServer-compatible
Intel processors, refer to the <link
xlink:href="http://ark.intel.com/VTList.aspx">Intel® Virtualization
Technology List</link>. </para>
<para>For LXC, the VT extensions are not required.</para>
</td>
</tr>
</tbody>
</table>
<para>
<emphasis role="bold">Operating System</emphasis>: OpenStack currently has
packages for the following distributions: Ubuntu, RHEL, SUSE, Debian, and Fedora. These
packages are maintained by community members, refer to <link
xlink:href="http://wiki.openstack.org/Packaging"
>http://wiki.openstack.org/Packaging</link> for additional links. </para>
<para><emphasis role="bold">Networking</emphasis>: 1000 Mbps are suggested. For
OpenStack Compute, networking is configured on multi-node installations between the
physical machines on a single subnet. For networking between virtual machine instances,
three network options are available: flat, DHCP, and VLAN. Two NICs (Network Interface
Cards) are recommended on the server running nova-network. </para>
<para><emphasis role="bold">Database</emphasis>: For OpenStack Compute, you need access
to either a PostgreSQL or MySQL database, or you can install it as part of the OpenStack
Compute installation process.</para>
<para><emphasis role="bold">Permissions</emphasis>: You can install OpenStack Compute
either as root or as a user with sudo permissions if you configure the sudoers file to
enable all the permissions. </para>
</section><section><?dbhtml filename="example-installation-architecture.html" ?>
<title>Example Installation Architectures</title>
<para>OpenStack Compute uses a shared-nothing, messaging-based architecture. While very
flexible, the fact that you can install each nova- service on an independent server
means there are many possible methods for installing OpenStack Compute. The only
co-dependency between possible multi-node installations is that the Dashboard must be
installed nova-api server. Here are the types of installation architectures:</para>
<itemizedlist>
<listitem>
<para xmlns="http://docbook.org/ns/docbook">Single node: Only one server
runs all nova- services and also drives all the virtual instances. Use this
configuration only for trying out OpenStack Compute, or for development
purposes.</para></listitem>
<listitem><para>Two nodes: A cloud controller node runs the nova- services except for nova-compute, and a
compute node runs nova-compute. A client computer is likely needed to bundle
images and interfacing to the servers, but a client is not required. Use this
configuration for proof of concepts or development environments. </para></listitem>
<listitem><para xmlns="http://docbook.org/ns/docbook">Multiple nodes: You can add more compute nodes to the
two node installation by simply installing nova-compute on an additional server
and copying a nova.conf file to the added node. This would result in a multiple
node installation. You can also add a volume controller and a network controller
as additional nodes in a more complex multiple node installation. A minimum of
4 nodes is best for running multiple virtual instances that require a lot of
processing power.</para>
</listitem>
</itemizedlist>
<para>This is an illustration of one possible multiple server installation of OpenStack
Compute; virtual server networking in the cluster may vary.</para>
<para><inlinemediaobject>
<imageobject>
<imagedata scale="80" fileref="../figures/NOVA_install_arch.png"/></imageobject>
</inlinemediaobject></para>
<para>An alternative architecture would be to add more messaging servers if you notice a lot
of back up in the messaging queue causing performance problems. In that case you would
add an additional RabbitMQ server in addition to or instead of scaling up the database
server. Your installation can run any nova- service on any server as long as the
nova.conf is configured to point to the RabbitMQ server and the server can send messages
to the server.</para>
<para>Multiple installation architectures are possible, here is another example
illustration. </para>
<para><inlinemediaobject>
<imageobject>
<imagedata scale="40" fileref="../figures/NOVA_compute_nodes.png"/></imageobject>
</inlinemediaobject></para>
</section>
<section><?dbhtml filename="service-architecture.html" ?><title>Service Architecture</title>
<para>Because Compute has multiple services and many configurations are possible, here is a diagram showing the overall service architecture and communication systems between the services.</para>
<para><inlinemediaobject>
<imageobject>
<imagedata scale="80" fileref="../figures/NOVA_ARCH.png"/></imageobject>
</inlinemediaobject></para></section>
<section><?dbhtml filename="installing-openstack-compute-on-ubuntu.html" ?>
<title>Installing OpenStack Compute on Ubuntu </title>
<para>How you go about installing OpenStack Compute depends on your goals for the
installation. You can use an ISO image, you can use a scripted installation, and you can
manually install with a step-by-step installation.</para>
<section><?dbhtml filename="iso-ubuntu-installation.html" ?>
<title>ISO Distribution Installation</title>
<para>You can download and use an ISO image that is based on a Ubuntu Linux Server 10.04
LTS distribution containing only the components needed to run OpenStack Compute. See
<link xlink:href="http://sourceforge.net/projects/stackops/files/"
>http://sourceforge.net/projects/stackops/files/</link> for download files and
information, license information, and a README file. For documentation on the
StackOps distro, see <link xlink:href="http://docs.stackops.org">http://docs.stackops.org</link>. For free support, go to
<link xlink:href="http://getsatisfaction.com/stackops">http://getsatisfaction.com/stackops</link>.</para></section>
<section><?dbhtml filename="scripted-ubuntu-installation.html" ?>
<title>Scripted Installation</title>
<para>You can download a script from GitHub at <link
xlink:href="https://github.com/elasticdog/OpenStack-NOVA-Installer-Script/raw/master/nova-install"
>https://github.com/elasticdog/OpenStack-NOVA-Installer-Script/raw/master/nova-install</link>.</para>
<para>Copy the file to the servers where you want to install OpenStack Compute services
- with multiple servers, you could install a cloud controller node and multiple
compute nodes. The compute nodes manage the virtual machines through the
nova-compute service. The cloud controller node contains all other nova-
services.</para>
<para>Ensure you can execute the script by modifying the permissions on the script
file.</para>
<literallayout class="monospaced">wget --no-check-certificate https://github.com/elasticdog/OpenStack-NOVA-Installer-Script/raw/master/nova-install
sudo chmod 755 nova-install</literallayout><para>You
must run the script with root permissions. </para>
<literallayout class="monospaced">sudo bash nova-install -t cloud</literallayout>
<para>The way this script is designed, you can have multiple servers for the cloud
controller, the messaging service, and the database server, or run it all on one
server. The -t or -type parameter has two options: <code>nova-install -t
cloud</code> installs the cloud controller and <code>nova-install -t
compute</code> installs a compute node for an existing cloud controller.</para>
<para>These are the parameters you enter using the script:</para>
<para>
<itemizedlist>
<listitem>
<para>Enter the Cloud Controller Host IP address.</para>
</listitem>
<listitem>
<para>Enter the S3 IP, or use the default address as the current server's IP
address.</para>
</listitem>
<listitem>
<para>Enter the RabbitMQ Host IP. Again, you can use the default to install
it to the local server. RabbitMQ will be installed. </para>
</listitem>
<listitem>
<para>Enter the MySQL host IP address.</para>
</listitem>
<listitem>
<para>Enter the MySQL root password and verify it.</para>
</listitem>
<listitem>
<para>Enter a network range for all projects in CIDR format.</para>
</listitem>
</itemizedlist>
</para>
<para>The script uses all these values entered for the configuration information to
create the nova.conf configuration file. The script also walks you through creating
a user and project. Enter a user name and project name when prompted. After the script is finished, you also need to create the project zip file. Credentials are generated after you create the project zip file with <code>nova-manage project zipfile projname username</code></para>
<para>After configuring OpenStack Compute and creating a project zip file using the nova-manage project create command, be sure to unizp the project zip file and then source the novarc
credential file that you extracted. </para>
<literallayout class="monospaced">source /root/creds/novarc </literallayout>
<para>Now all the necessary nova services are started up and you can begin to issue
nova-manage commands. If you configured it to all run from one server, you're done.
If you have a second server that you intend to use as a compute node (a node that
does not contain the database), install the nova services on the second node using
the -t compute parameters using the same nova-install script.</para>
<para>To run from two or more servers, copy the nova.conf from the cloud controller node to the compute node. </para>
</section>
<section><?dbhtml filename="manual-ubuntu-installation.html" ?>
<title>Manual Installation</title>
<para>The manual installation involves installing from packages on Ubuntu 10.04 or 10.10
as a user with root permission. Depending on your environment, you may need to
prefix these commands with sudo.</para>
<para>This installation process walks through installing a cloud controller node and a
compute node. The cloud controller node contains all the nova- services including
the API server and the database server. The compute node needs to run only the
nova-compute service. You only need one nova-network service running in a multi-node
install. You cannot install nova-objectstore on a different machine from
nova-compute (production-style deployments will use a Glance server for virtual
images).</para>
<section><?dbhtml filename="installing-the-cloud-controller.html" ?>
<title>Installing the Cloud Controller</title>
<para>First, set up pre-requisites to use the Nova PPA (Personal Packages Archive)
provided through https://launchpad.net/~nova-core/+archive/trunk. The
python-software-properties package is a pre-requisite for setting up the nova
package repository. You can also use the release package by adding the
ppa:nova-core/release repository.</para>
<literallayout class="monospaced">sudo apt-get install python-software-properties</literallayout>
<literallayout class="monospaced">sudo add-apt-repository ppa:nova-core/trunk</literallayout>
<para>Run update.</para>
<literallayout class="monospaced">sudo apt-get update</literallayout>
<para>Install the messaging queue server, RabbitMQ.</para>
<literallayout class="monospaced">sudo apt-get install -y rabbitmq-server</literallayout>
<para>Now, install the Python dependencies. </para>
<literallayout class="monospaced">sudo apt-get install -y python-greenlet python-mysqldb </literallayout>
<para>Install the required nova- packages, and dependencies should be automatically
installed.</para>
<literallayout class="monospaced">sudo apt-get install -y nova-common nova-doc python-nova nova-api
nova-network nova-objectstore nova-scheduler nova-compute</literallayout>
<para>Install the supplemental tools such as euca2ools and unzip.</para>
<literallayout class="monospaced">sudo apt-get install -y euca2ools unzip</literallayout>
<section><?dbhtml filename="setting-up-sql-database-mysql.html" ?>
<title>Setting up the SQL Database (MySQL) on the Cloud Controller</title>
<para>You must use a SQLAlchemy-compatible database, such as MySQL or
PostgreSQL. This example shows MySQL. </para>
<para>First you can set environments with a "pre-seed" line to bypass all
the installation prompts, running this as root: </para>
<para>
<literallayout class="monospaced">bash
MYSQL_PASS=nova
NOVA_PASS=notnova
cat &lt;&lt;MYSQL_PRESEED | debconf-set-selections
mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
mysql-server-5.1 mysql-server/start_on_boot boolean true
MYSQL_PRESEED</literallayout>
</para>
<para>Next, install MySQL with: <code>sudo apt-get install -y
mysql-server</code>
</para>
<para>Edit /etc/mysql/my.cnf to change bind-address from localhost
(127.0.0.1) to any (0.0.0.0) and restart the mysql service: </para>
<para>
<literallayout class="monospaced">sudo sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
sudo service mysql restart</literallayout></para>
<para>To configure the MySQL database, create the nova database: </para>
<literallayout class="monospaced">sudo mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'</literallayout>
<para>Update the DB to give user nova@% full control of the nova
database:</para>
<para>
<literallayout class="monospaced">sudo mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO
'nova'@'%' WITH GRANT OPTION;"</literallayout>
</para>
<para>Set MySQL password for 'nova'@'%':</para>
<para>
<literallayout class="monospaced">sudo mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'nova'@'%' =
PASSWORD('$NOVA_PASS');"</literallayout>
</para>
</section>
</section>
<section><?dbhtml filename="installing-the-compute-node.html" ?>
<title>Installing the Compute Node</title>
<para>There are many different ways to perform a multinode install of Compute. In
this case, you can install all the nova- packages and dependencies as you did
for the Cloud Controller node, or just install nova-network and nova-compute.
Your installation can run any nova- services anywhere, so long as the service
can access nova.conf so it knows where the rabbitmq server is installed.</para>
<para>The Compute Node is where you configure the Compute network, the networking
between your instances. There are three options: flat, flatDHCP, and
VLAN.</para>
<para>If you use FlatManager as your network manager, there are some additional
networking changes to ensure connectivity between your nodes and VMs. If you
chose VlanManager or FlatDHCP, you may skip this section because they are set up
for you automatically. </para>
<para>Compute defaults to a bridge device named br100. This needs to be created
and somehow integrated into your network. To keep things as simple as possible,
have all the VM guests on the same network as the VM hosts (the compute nodes).
To do so, set the compute nodes external IP address to be on the bridge and add
eth0 to that bridge. To do this, edit your network interfaces configuration to
look like the following example: </para>
<para>
<literallayout class="monospaced">
&lt; begin /etc/network/interfaces >
# The loopback network interface
auto lo
iface lo inet loopback
# Networking for OpenStack Compute
auto br100
iface br100 inet dhcp
bridge_ports eth0
bridge_stp off
bridge_maxwait 0
bridge_fd 0
&lt; end /etc/network/interfaces >
</literallayout>
</para>
<para>Next, restart networking to apply the changes: </para>
<literallayout class="monospaced">sudo /etc/init.d/networking restart</literallayout>
<para>If you use flat networking, you must manually insert the IP address into the
'fixed_ips' table in the nova database. Also ensure that the database lists the
bridge name correctly that matches the network configuration you are working
within. Flat networking should insert this automatically but you may need to
check it.</para>
<para>Because you may need to query the database from the Compute node and learn
more information about instances, euca2ools and mysql-client packages should be
installed on any additional Compute nodes.</para>
</section>
<section><?dbhtml filename="restart-nova-services.html" ?>
<title>Restart All Relevant Services on the Compute Node</title>
<para>On both nodes, restart all six services in total, just to cover the entire
spectrum: </para>
<para>
<literallayout class="monospaced">restart libvirt-bin; restart nova-network; restart nova-compute;
restart nova-api; restart nova-objectstore; restart nova-scheduler</literallayout>
</para>
</section>
</section>
</section>
<section><?dbhtml filename="installing-openstack-compute-on-rhel6.html" ?>
<title>Installing OpenStack Compute on Red Hat Enterprise Linux 6 </title>
<para>This section documents a multi-node installation using RHEL 6. RPM repos for the Bexar
release, the Cactus release, and also per-commit trunk builds for OpenStack Nova are
available at <link xlink:href="http://yum.griddynamics.net"
>http://yum.griddynamics.net</link>. </para>
<para>Known limitations for RHEL version 6 installations: </para>
<itemizedlist><listitem><para>iSCSI LUN not supported due to tgtadm vs ietadm differences</para></listitem>
<listitem><para>Only KVM hypervisor has been tested with this installation</para></listitem></itemizedlist>
<para>To install Nova on RHEL v.6 you need access to two repositories, one available on the
yum.griddynamics.net website and the RHEL DVD image connected as repo. </para>
<para>First, install RHEL 6.0, preferrably with a minimal set of packages.</para>
<para>Disable SELinux in /etc/sysconfig/selinux and then reboot. </para>
<para>Connect the RHEL 3. 6.0 x86_64 DVD as a repository in YUM. </para>
<literallayout class="monospaced">sudo mount /dev/cdrom /mnt/cdrom
cat /etc/yum.repos.d/rhel.repo
[rhel]
name=RHEL 6.0
baseurl=file:///mnt/cdrom/Server
enabled=1
gpgcheck=0</literallayout>
<para>Download and install repo config and key.</para>
<literallayout class="monospaced">wget http://yum.griddynamics.net/openstack-repo-2011.1-2.noarch.rpm
sudo rpm -i openstack-repo-2011.1-2.noarch.rpm</literallayout>
<para>Install the libvirt package (these instructions are tested only on KVM). </para>
<literallayout class="monospaced">sudo yum install libvirt
sudo chkconfig libvirtd on
sudo service libvirtd start</literallayout>
<para>Repeat the basic installation steps to put the pre-requisites on all cloud controller and compute nodes. Nova has many different possible configurations. You can install Nova services on separate servers as needed but these are the basic pre-reqs.</para>
<para>These are the basic packages to install for a cloud controller node:</para>
<literallayout class="monospaced">sudo yum install euca2ools openstack-nova-{api,compute,network,objectstore,scheduler,volume} openstack-nova-cc-config openstack-glance</literallayout>
<para>These are the basic packages to install compute nodes. Repeat for each compute node (the node that runs the VMs) that you want to install.</para>
<literallayout class="monospaced">sudo yum install openstack-nova-compute openstack-nova-compute-config</literallayout>
<para>On the cloud controller node, create a MySQL database named nova. </para>
<literallayout class="monospaced">sudo service mysqld start
sudo chkconfig mysqld on
sudo service rabbitmq-server start
sudo chkconfig rabbitmq-server on
mysqladmin -uroot password nova</literallayout>
<para>You can use this script to create the database. </para>
<literallayout class="monospaced">#!/bin/bash
DB_NAME=nova
DB_USER=nova
DB_PASS=nova
PWD=nova
CC_HOST="A.B.C.D" # IPv4 address
HOSTS='node1 node2 node3' # compute nodes list
mysqladmin -uroot -p$PWD -f drop nova
mysqladmin -uroot -p$PWD create nova
for h in $HOSTS localhost; do
echo "GRANT ALL PRIVILEGES ON $DB_NAME.* TO '$DB_USER'@'$h' IDENTIFIED BY '$DB_PASS';" | mysql -uroot -p$DB_PASS mysql
done
echo "GRANT ALL PRIVILEGES ON $DB_NAME.* TO $DB_USER IDENTIFIED BY '$DB_PASS';" | mysql -uroot -p$DB_PASS mysql
echo "GRANT ALL PRIVILEGES ON $DB_NAME.* TO root IDENTIFIED BY '$DB_PASS';" | mysql -uroot -p$DB_PASS mysql </literallayout>
<para>Now, ensure the database version matches the version of nova that you are installing:</para>
<literallayout class="monospaced">nova-manage db sync</literallayout>
<para>On each node, set up the configuration file in /etc/nova/nova.conf.</para>
<para>Start the Nova services after configuring and you then are running an OpenStack
cloud!</para>
<literallayout class="monospaced">for n in api compute network objectstore scheduler volume; do sudo service openstack-nova-$n start; done
sudo service openstack-glance start
for n in node1 node2 node3; do ssh $n sudo service openstack-nova-compute start; done</literallayout>
</section>
<section>
<?dbhtml filename="configuring-openstack-compute-basics.html" ?>
<title>Post-Installation Configuration for OpenStack Compute</title>
<para>Configuring your Compute installation involves nova-manage commands plus editing the
nova.conf file to ensure the correct flags are set. This section contains the basics for
a simple multi-node installation, but Compute can be configured many ways. You can find
networking options and hypervisor options described in separate chapters, and you will
read about additional configuration information in a separate chapter as well.</para>
<section><?dbhtml filename="setting-flags-in-nova-conf-file.html" ?>
<title>Setting Flags in the nova.conf File</title>
<para>The configuration file nova.conf is installed in /etc/nova by default. You only
need to do these steps when installing manually, the scripted installation above
does this configuration during the installation. A default set of options are
already configured in nova.conf when you install manually. The defaults are as
follows:</para>
<literallayout class="monospaced">--daemonize=1
--dhcpbridge_flagfile=/etc/nova/nova.conf
--dhcpbridge=/usr/bin/nova-dhcpbridge
--logdir=/var/log/nova
--state_path=/var/lib/nova </literallayout>
<para>Starting with the default file, you must define the following required items in
/etc/nova/nova.conf. The flag variables are described below. You can place
comments in the nova.conf file by entering a new line with a # sign at the beginning of the line. To see a listing of all possible flag settings, see
the output of running /bin/nova-api --help.</para>
<table rules="all">
<caption>Description of nova.conf flags (not comprehensive)</caption>
<thead>
<tr>
<td>Flag</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>--sql_connection</td>
<td>IP address; Location of OpenStack Compute SQL database</td>
</tr>
<tr>
<td>--s3_host</td>
<td>IP address; Location where OpenStack Compute is hosting the objectstore
service, which will contain the virtual machine images and buckets</td>
</tr>
<tr>
<td>--rabbit_host</td>
<td>IP address; Location of OpenStack Compute SQL database</td>
</tr>
<tr>
<td>--ec2_api</td>
<td>IP address; Location where the nova-api service runs</td>
</tr>
<tr>
<td>--verbose</td>
<td>Set to 1 to turn on; Optional but helpful during initial setup</td>
</tr>
<tr>
<td>--ec2_url</td>
<td>HTTP URL; Location to interface nova-api. Example:
http://184.106.239.134:8773/services/Cloud</td>
</tr>
<tr>
<td>--network_manager</td>
<td>
<para>Configures how your controller will communicate with additional
OpenStack Compute nodes and virtual machines. Options: </para>
<itemizedlist>
<listitem>
<para>nova.network.manager.FlatManager</para>
<para>Simple, non-VLAN networking</para>
</listitem>
<listitem>
<para>nova.network.manager.FlatDHCPManager</para>
<para>Flat networking with DHCP</para>
</listitem>
<listitem>
<para>nova.network.manager.VlanManager</para>
<para>VLAN networking with DHCP; This is the Default if no
network manager is defined here in nova.conf. </para>
</listitem>
</itemizedlist>
</td>
</tr>
<tr>
<td>--fixed_range</td>
<td>IP address/range; Network prefix for the IP network that all the
projects for future VM guests reside on. Example: 192.168.0.0/12</td>
</tr>
<tr>
<td>--network_size</td>
<td>Number value; Number of addresses in each private subnet.</td>
</tr>
</tbody>
</table>
<para>Here is a simple example nova.conf file for a small private cloud, with all the
cloud controller services, database server, and messaging server on the same
server.</para>
<literallayout class="monospaced">--dhcpbridge_flagfile=/etc/nova/nova.conf
--dhcpbridge=/usr/bin/nova-dhcpbridge
--logdir=/var/log/nova
--state_path=/var/lib/nova
--verbose
--s3_host=184.106.239.134
--rabbit_host=184.106.239.134
--ec2_api=184.106.239.134
--ec2_url=http://184.106.239.134:8773/services/Cloud
--fixed_range=192.168.0.0/16
--network_size=8
--routing_source_ip=184.106.239.134
--sql_connection=mysql://nova:notnova@184.106.239.134/nova </literallayout>
<para>Create a “nova” group, so you can set permissions on the configuration file: </para>
<literallayout class="monospaced">sudo addgroup nova</literallayout>
<para>The nova.conf file should have its owner set to root:nova, and mode set to 0640,
since the file contains your MySQL server's username and password. </para>
<literallayout class="monospaced">chown -R root:nova /etc/nova
chmod 640 /etc/nova/nova.conf</literallayout>
</section><section><?dbhtml filename="setting-up-openstack-compute-environment-on-the-compute-node.html" ?>
<title>Setting Up OpenStack Compute Environment on the Compute Node</title>
<para>These are the commands you run to ensure the database schema is current, and
then set up a user and project: </para>
<para>
<literallayout class="monospaced">/usr/bin/nova-manage db sync
/usr/bin/nova-manage user admin &lt;user_name>
/usr/bin/nova-manage project create &lt;project_name> &lt;user_name>
/usr/bin/nova-manage network create &lt;project-network> &lt;number-of-networks-in-project> &lt;addresses-in-each-network></literallayout></para>
<para>Here is an example of what this looks like with real values entered: </para>
<literallayout class="monospaced">/usr/bin/nova-manage db sync
/usr/bin/nova-manage user admin dub
/usr/bin/nova-manage project create dubproject dub
/usr/bin/nova-manage network create 192.168.0.0/24 1 256 </literallayout>
<para>For this example, the number of IPs is /24 since that falls inside the /16
range that was set in --fixed_range in nova.conf. Currently, there can only be
one network, and this set up would use the max IPs available in a /24. You can
choose values that let you use any valid amount that you would like. </para>
<para>The nova-manage service assumes that the first IP address is your network
(like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the
broadcast is the very last IP in the range you defined (192.168.0.255). If this is
not the case you will need to manually edit the sql db networks table. </para>
<para>When you run the <code>nova-manage network create</code> command, entries are made
in the networks and fixed_ips table. However, one of the networks listed in the
networks table needs to be marked as bridge in order for the code to know that a
bridge exists. The network in the Nova networks table is marked as bridged
automatically for Flat Manager.</para>
</section>
<section><?dbhtml filename="creating-certifications.html" ?>
<title>Creating Credentials</title>
<para>Generate the credentials as a zip file. These are the certificates you will use to
launch instances, bundle images, and all the other assorted API functions. </para>
<para>
<literallayout class="monospaced">mkdir -p /root/creds
/usr/bin/python /usr/bin/nova-manage project zipfile $NOVA_PROJECT $NOVA_PROJECT_USER /root/creds/novacreds.zip</literallayout>
</para>
<para>If you are using one of the Flat modes for networking, you may see a Warning
message "No vpn data for project &lt;project_name>" which you can safely
ignore.</para>
<para>Unzip them in your home directory, and add them to your environment. </para>
<literallayout class="monospaced">unzip /root/creds/novacreds.zip -d /root/creds/
cat /root/creds/novarc >> ~/.bashrc
source ~/.bashrc </literallayout>
</section>
<section><?dbhtml filename="enabling-access-to-vms-on-the-compute-node.html" ?>
<title>Enabling Access to VMs on the Compute Node</title>
<para>One of the most commonly missed configuration areas is not allowing the proper
access to VMs. Use the euca-authorize command to enable access. Below, you
will find the commands to allow ping and ssh to your VMs: </para>
<literallayout class="monospaced">euca-authorize -P icmp -t -1:-1 default
euca-authorize -P tcp -p 22 default</literallayout>
<para>Another
common issue is you cannot ping or SSH your instances after issuing the
euca-authorize commands. Something to look at is the number of dnsmasq
processes that are running. If you have a running instance, check to see that
TWO dnsmasq processes are running. If not, perform the following:</para>
<literallayout class="monospaced">killall dnsmasq
service nova-network restart</literallayout>
</section>
<section><?dbhtml filename="configuring-multiple-compute-nodes.html" ?>
<title>Configuring Multiple Compute Nodes</title><para>If your goal is to split your VM load across more than one server, you can connect an
additional nova-compute node to a cloud controller node. This configuring can be
reproduced on multiple compute servers to start building a true multi-node OpenStack
Compute cluster. </para><para>To build out and scale the Compute platform, you spread out services amongst many servers.
While there are additional ways to accomplish the build-out, this section describes
adding compute nodes, and the service we are scaling out is called
'nova-compute.'</para>
<para>With the Bexar release we have two configuration files: nova-api.conf and nova.conf. For a multi-node install you only make changes to nova.conf and copy it to additional compute nodes. Ensure each nova.conf file points to the correct IP addresses for the respective services. Customize the nova.conf example below to match your environment. The CC_ADDR is the Cloud Controller IP Address.
</para>
<literallayout class="monospaced">
--dhcpbridge_flagfile=/etc/nova/nova.conf
--dhcpbridge=/usr/bin/nova-dhcpbridge
--logdir=/var/log/nova
--state_path=/var/lib/nova
--verbose
--sql_connection=mysql://root:nova@CC_ADDR/nova
--s3_host=CC_ADDR
--rabbit_host=CC_ADDR
--ec2_api=CC_ADDR
--ec2_url=http://CC_ADDR:8773/services/Cloud
--network_manager=nova.network.manager.FlatManager
--fixed_range=network/CIDR
--network_size=number of addresses</literallayout><para>By default, Nova sets 'br100' as the bridge device, and this is what needs to be done next. Edit /etc/network/interfaces with the following template, updated with your IP information. </para>
<literallayout class="monospaced">
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
auto br100
iface br100 inet static
bridge_ports eth0
bridge_stp off
bridge_maxwait 0
bridge_fd 0
address xxx.xxx.xxx.xxx
netmask xxx.xxx.xxx.xxx
network xxx.xxx.xxx.xxx
broadcast xxx.xxx.xxx.xxx
gateway xxx.xxx.xxx.xxx
# dns-* options are implemented by the resolvconf package, if installed
dns-nameservers xxx.xxx.xxx.xxx</literallayout>
<para>Restart networking:</para>
<literallayout class="monospaced">/etc/init.d/networking restart</literallayout>
<para>With nova.conf updated and networking set, configuration is nearly complete. First, lets bounce the relevant services to take the latest updates:</para>
<literallayout class="monospaced">restart libvirt-bin; service nova-compute restart</literallayout>
<para>To avoid issues with KVM and permissions with Nova, run the following commands to ensure we have VM's that are running optimally:</para>
<literallayout class="monospaced">chgrp kvm /dev/kvm
chmod g+rwx /dev/kvm</literallayout>
<para>If you want to use the 10.04 Ubuntu Enterprise Cloud images that are readily available at http://uec-images.ubuntu.com/releases/10.04/release/, you may run into delays with booting. Any server that does not have nova-api running on it needs this iptables entry so that UEC images can get metadata info. On compute nodes, configure the iptables with this next step:</para>
<literallayout class="monospaced"> # iptables -t nat -A PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $NOVA_API_IP:8773</literallayout>
<para>Lastly, confirm that your compute node is talking to your cloud controller. From the cloud controller, run this database query:</para>
<literallayout class="monospaced">mysql -u$MYSQL_USER -p$MYSQL_PASS nova -e 'select * from services;'</literallayout>
<para>In return, you should see something similar to this:</para>
<literallayout class="monospaced"> +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
| created_at | updated_at | deleted_at | deleted | id | host | binary | topic | report_count | disabled | availability_zone |
+---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
| 2011-01-28 22:52:46 | 2011-02-03 06:55:48 | NULL | 0 | 1 | osdemo02 | nova-network | network | 46064 | 0 | nova |
| 2011-01-28 22:52:48 | 2011-02-03 06:55:57 | NULL | 0 | 2 | osdemo02 | nova-compute | compute | 46056 | 0 | nova |
| 2011-01-28 22:52:52 | 2011-02-03 06:55:50 | NULL | 0 | 3 | osdemo02 | nova-scheduler | scheduler | 46065 | 0 | nova |
| 2011-01-29 23:49:29 | 2011-02-03 06:54:26 | NULL | 0 | 4 | osdemo01 | nova-compute | compute | 37050 | 0 | nova |
| 2011-01-30 23:42:24 | 2011-02-03 06:55:44 | NULL | 0 | 9 | osdemo04 | nova-compute | compute | 28484 | 0 | nova |
| 2011-01-30 21:27:28 | 2011-02-03 06:54:23 | NULL | 0 | 8 | osdemo05 | nova-compute | compute | 29284 | 0 | nova |
+---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+</literallayout>
<para>You can see that 'osdemo0{1,2,4,5} are all running 'nova-compute.' When you start spinning up instances, they will allocate on any node that is running nova-compute from this list.</para>
</section>
<section>
<?dbhtml filename="determining-version-of-compute.html" ?>
<title>Determining the Version of Compute</title>
<para>In the Diablo release, you can find the version of the installation by using the
nova-manage command:</para>
<literallayout class="monospaced">nova-manage version list</literallayout>
</section>
<section><?dbhtml filename="migrating-from-bexar-to-cactus.html" ?><title>Migrating from Bexar to Cactus</title>
<para>If you have an installation already installed and running, to migrate to
Cactus you must update the installation first, then your database, then perhaps
your images if you were already running images on Bexar in the nova-objectstore.
If you were running images through Glance, your images should work automatically
after an upgrade. Here are the overall steps. </para>
<para>If your installation already pointed to ppa:nova-core/release, the release
package has been updated from Bexar to Cactus so you can simply run: </para>
<literallayout class="monospaced">apt-get update
apt-get upgrade</literallayout>
<para>Next, update the database schema. </para><literallayout class="monospaced">nova-manage db sync</literallayout>
<para>Restart all the nova- services. </para>
<para>Make sure that you can launch images. You can convert images that were previously stored in the nova object store using this command: </para>
<literallayout class="monospaced">nova-manage image convert /var/lib/nova/images</literallayout>
</section>
</section>
</chapter>

View File

@ -0,0 +1,587 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
<?dbhtml filename="ch_networking.html" ?>
<title>Networking</title>
<para>By understanding the available networking configuration options you can design the best
configuration for your OpenStack Compute instances.</para>
<section><?dbhtml filename="networking-options.html" ?>
<title>Networking Options</title>
<para>This section offers a brief overview of each concept in networking for Compute. </para>
<para>In Compute, users organize their cloud resources in projects. A Compute project
consists of a number of VM instances created by a user. For each VM instance, Compute
assigns to it a private IP address. (Currently, Nova only supports Linux bridge
networking that allows the virtual interfaces to connect to the outside network through
the physical interface.)</para>
<para>The Network Controller provides virtual networks to enable compute servers to interact
with each other and with the public network.</para>
<para>Currently, Nova supports three kinds of networks, implemented in three “Network
Manager” types respectively: Flat Network Manager, Flat DHCP Network Manager, and VLAN
Network Manager. The three kinds of networks can co-exist in a cloud system. However,
since you can't yet select the type of network for a given project, you cannot configure
more than one type of network in a given Compute installation.</para>
<para>Nova has a concept of Fixed IPs and Floating IPs. Fixed IPs are assigned to an
instance on creation and stay the same until the instance is explicitly terminated.
Floating IPs are IP addresses that can be dynamically associated with an instance. This
address can be disassociated and associated with another instance at any time. A user
can reserve a floating IP for their project. </para>
<para>In Flat Mode, a network administrator specifies a subnet. The IP addresses for VM
instances are grabbed from the subnet, and then injected into the image on launch. Each
instance receives a fixed IP address from the pool of available addresses. A network
administrator must configure the Linux networking bridge (named br100) both on the
network controller hosting the network and on the cloud controllers hosting the
instances. All instances of the system are attached to the same bridge, configured
manually by the network administrator.</para>
<para>
<note>
<para>The configuration injection currently only works on Linux-style systems that
keep networking configuration in /etc/network/interfaces.</para>
</note>
</para>
<para>In Flat DHCP Mode, you start a DHCP server to pass out IP addresses to VM instances
from the specified subnet in addition to manually configuring the networking bridge. IP
addresses for VM instances are grabbed from a subnet specified by the network
administrator. Like Flat Mode, all instances are attached to a single bridge on the
compute node. In addition a DHCP server is running to configure instances. In this mode,
Compute does a bit more configuration in that it attempts to bridge into an ethernet
device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this
bridge. Instances receive their fixed IPs by doing a dhcpdiscover. </para>
<para>In both flat modes, the network nodes do not act as a default gateway. Instances are
given public IP addresses. Compute nodes have iptables/ebtables entries created per
project and instance to protect against IP/MAC address spoofing and ARP poisoning. </para>
<para>VLAN Network Mode is the default mode for OpenStack Compute. In this mode, Compute
creates a VLAN and bridge for each project. For multiple machine installation, the VLAN
Network Mode requires a switch that supports VLAN tagging (IEEE 802.1Q). The project gets
a range of private IPs that are only accessible from inside the VLAN. In order for a
user to access the instances in their project, a special VPN instance (code named
cloudpipe) needs to be created. Compute generates a certificate and key for the user to
access the VPN and starts the VPN automatically. It provides a private network segment
for each project's instances that can be accessed via a dedicated VPN connection from
the Internet. In this mode, each project gets its own VLAN, Linux networking bridge, and
subnet. The subnets are specified by the network administrator, and are assigned
dynamically to a project when required. A DHCP Server is started for each VLAN to pass
out IP addresses to VM instances from the subnet assigned to the project. All instances
belonging to one project are bridged into the same VLAN for that project. OpenStack
Compute creates the Linux networking bridges and VLANs when required.</para></section>
<section><?dbhtml filename="cloudpipe-per-project-vpns.html" ?>
<title>Cloudpipe — Per Project VPNs</title>
<para>
Cloudpipe is a method for connecting end users to their project
instances in VLAN networking mode.
</para>
<para>
The support code for cloudpipe implements admin commands (via
nova-manage) to automatically create a vm for a project that
allows users to vpn into the private network of their project.
Access to this vpn is provided through a public port on the
network host for the project. This allows users to have free
access to the virtual machines in their project without exposing
those machines to the public internet.
</para>
<para>
The cloudpipe image is basically just a Linux instance with
openvpn installed. It needs a simple script to grab user data from
the metadata server, b64 decode it into a zip file, and run the
autorun.sh script from inside the zip. The autorun script will
configure and run openvpn to run using the data from nova.
</para>
<para>
It is also useful to have a cron script that will periodically
redownload the metadata and copy the new crl. This will keep
revoked users from connecting and will disconnect any users that
are connected with revoked certificates when their connection is
renegotiated (every hour).
</para>
<section><?dbhtml filename="creating-a-cloudpipe-image.html" ?>
<title>Creating a Cloudpipe Image</title>
<para>
Making a cloudpipe image is relatively easy.
</para>
<itemizedlist><listitem><para>
Install openvpn on a base ubuntu image.
</para></listitem>
<listitem><para>Set up a
server.conf.template in /etc/openvpn/</para></listitem>
<listitem><para>
Set up.sh in /etc/openvpn/
</para></listitem>
<listitem><para>
Set down.sh in /etc/openvpn/
</para></listitem>
<listitem><para>
Download and run the payload on boot from /etc/rc.local
</para></listitem>
<listitem><para>
Set up /etc/network/interfaces
</para></listitem>
<listitem><para>
Register the image and set the image id in your flagfile:
</para>
<literallayout class="monospaced">
--vpn_image_id=ami-xxxxxxxx
</literallayout></listitem>
<listitem><para>
You should set a few other flags to make vpns work properly:
</para>
<literallayout class="monospaced">
--use_project_ca
--cnt_vpn_clients=5
</literallayout></listitem></itemizedlist>
<para>
When you use nova-manage to launch a cloudpipe for a user, it goes
through the following process:
</para>
<orderedlist>
<listitem>
<para>
creates a keypair called &lt;project_id&gt;-vpn and saves it
in the keys directory
</para>
</listitem>
<listitem>
<para>
creates a security group &lt;project_id&gt;-vpn and opens up
1194 and icmp
</para>
</listitem>
<listitem>
<para>
creates a cert and private key for the vpn instance and saves
it in the CA/projects/&lt;project_id&gt;/ directory
</para>
</listitem>
<listitem>
<para>
zips up the info and puts it b64 encoded as user data
</para>
</listitem>
<listitem>
<para>
launches an m1.tiny instance with the above settings using the
flag-specified vpn image
</para>
</listitem>
</orderedlist>
</section>
<section><?dbhtml filename="vpn-access.html" ?>
<title>VPN Access</title>
<para>
In vlan networking mode, the second IP in each private network is
reserved for the cloudpipe instance. This gives a consistent IP to
the instance so that nova-network can create forwarding rules for
access from the outside world. The network for each project is
given a specific high-numbered port on the public IP of the
network host. This port is automatically forwarded to 1194 on the
vpn instance.
</para>
<para>
If specific high numbered ports do not work for your users, you
can always allocate and associate a public IP to the instance, and
then change the vpn_public_ip and vpn_public_port in the database.
(This will be turned into a nova-manage command or a flag soon.)
</para>
</section>
<section><?dbhtml filename="certificates-and-revocation.html" ?>
<title>Certificates and Revocation</title>
<para>If the use_project_ca flag is set (required for cloudpipes to work securely),
then each project has its own ca. This ca is used to sign the certificate for the
vpn, and is also passed to the user for bundling images. When a certificate is
revoked using nova-manage, a new Certificate Revocation List (crl) is generated. As
long as cloudpipe has an updated crl, it will block revoked users from connecting to
the vpn. </para>
<para>
The userdata for cloudpipe isn't currently updated when certs are
revoked, so it is necessary to restart the cloudpipe instance if a
user's credentials are revoked.
</para>
</section>
<section><?dbhtml filename="restarting-and-logging-into-cloudpipe-vpn.html" ?>
<title>Restarting and Logging into the Cloudpipe VPN</title>
<para>You can reboot a cloudpipe vpn through the api if something goes wrong (using
euca-reboot-instances for example), but if you generate a new crl, you will have to
terminate it and start it again using nova-manage vpn run. The cloudpipe instance
always gets the first ip in the subnet and it can take up to 10 minutes for the ip
to be recovered. If you try to start the new vpn instance too soon, the instance
will fail to start because of a NoMoreAddresses error. If you can't wait 10 minutes,
you can manually update the ip with something like the following (use the right ip
for the project): </para>
<literallayout class="monospaced">
euca-terminate-instances &lt;instance_id&gt;
mysql nova -e "update fixed_ips set allocated=0, leased=0, instance_id=NULL where fixed_ip='10.0.0.2'"
</literallayout>
<para>You also will need to terminate the dnsmasq running for the user (make sure you use the right pid file):</para>
<literallayout class="monospaced">sudo kill `cat /var/lib/nova/br100.pid`</literallayout>
<para>Now you should be able to re-run the vpn:</para>
<literallayout class="monospaced">nova-manage vpn run &lt;project_id&gt;</literallayout>
<para>The keypair that was used to launch the cloudpipe instance should be in the keys/&lt;project_id&gt; folder. You can use this key to log into the cloudpipe instance for debugging purposes.</para>
</section>
</section>
<section>
<?dbhtml filename="configuring-networking-on-the-compute-node.html" ?>
<title>Configuring Networking on the Compute Node</title>
<para>To configure the Compute node's networking for the VM images, the overall steps are:</para>
<orderedlist>
<listitem>
<para>Set the --network_manager flag in nova.conf.</para>
</listitem><listitem>
<para>Use the <code>nova-manage network create networkname CIDR n n</code>
command to create the subnet that the VMs reside on.</para>
</listitem>
<listitem>
<para>Integrate the bridge with your network. </para>
</listitem>
</orderedlist>
<para>By default, Compute uses the VLAN Network Mode. You choose the networking mode for your
virtual instances in the nova.conf file. Here are the three possible options: </para>
<itemizedlist>
<listitem>
<para>--network_manager = nova.network.manager.FlatManager</para>
<para>Simple, non-VLAN networking</para>
</listitem>
<listitem>
<para>--network_manager = nova.network.manager.FlatDHCPManager</para>
<para>Flat networking with DHCP</para>
</listitem>
<listitem>
<para>--network_manager = nova.network.manager.VlanManager</para>
<para>VLAN networking with DHCP. This is the Default if no network manager is
defined in nova.conf. </para>
</listitem>
</itemizedlist>
<para>Also, when you issue the nova-manage network create command, it uses the settings from
the nova.conf flag file. Use the "nova-manage network create public 192.168.0.0/24 1
255" command to create the subnet that your VMs will run on. You specify public or
private after the create command.</para><section><?dbhtml filename="configuring-flat-networking.html" ?><title>Configuring Flat Networking</title>
<para>FlatNetworking uses ethernet adapters configured as bridges to allow network
traffic to transit between all the various nodes. This setup can be done with a
single adapter on the physical host, or multiple. This option does not require a
switch that does VLAN tagging as VLAN networking does, and is a common development
installation or proof of concept setup. When you choose Flat networking, Nova does
not manage networking at all. Instead, IP addresses are injected into the instance
via the file system (or passed in via a guest agent). Metadata forwarding must be
configured manually on the gateway if it is required within your network. </para>
<para>To configure flat networking, ensure that your nova.conf file contains the
line:</para>
<para>
<literallayout>--network_manager = nova.network.manager.FlatManager</literallayout>
</para>
<para>Compute defaults to a bridge device named br100 which is stored in the Nova
database, so you can change the name of the bridge device by modifying the entry in
the database. Consult the diagrams for additional configuration options.</para>
<para>In any set up with FlatNetworking (either Flat or FlatDHCP), the host with nova-network on it is responsible for forwarding traffic from the private network configured with the --fixed_range= directive in nova.conf. This host needs to have br100 configured and talking to any other nodes that are hosting VMs. With either of the Flat Networking options, the default gateway for the virtual machines is set to the host which is running nova-network. </para>
<para>Set the compute node's external IP address to be on the bridge and add eth0 to
that bridge. To do this, edit your network interfaces configuration to look like the
following example: </para>
<para>
<literallayout class="monospaced">&lt; begin /etc/network/interfaces >
# The loopback network interface
auto lo
iface lo inet loopback
# Networking for OpenStack Compute
auto br100
iface br100 inet dhcp
bridge_ports eth0
bridge_stp off
bridge_maxwait 0
bridge_fd 0
&lt; end /etc/network/interfaces > </literallayout>
</para>
<para>Next, restart networking to apply the changes: <code>sudo /etc/init.d/networking
restart</code></para>
<para>For an all-in-one development setup, this diagram represents the network
setup.</para>
<para><figure><title>Flat network, all-in-one server installation </title><mediaobject>
<imageobject>
<imagedata scale="80" fileref="../figures/FlatNetworkSingleInterfaceAllInOne.png"/>
</imageobject>
</mediaobject></figure></para>
<para>For multiple compute nodes with a single network adapter, which you can use for
smoke testing or a proof of concept, this diagram represents the network
setup.</para>
<figure>
<title>Flat network, single interface, multiple servers</title>
<mediaobject>
<imageobject>
<imagedata scale="80" fileref="../figures/FlatNetworkSingleInterface.png"/>
</imageobject>
</mediaobject>
</figure>
<para>For multiple compute nodes with multiple network adapters, this diagram
represents the network setup. You may want to use this setup for separate admin and
data traffic.</para>
<figure>
<title>Flat network, multiple interfaces, multiple servers</title>
<mediaobject>
<imageobject>
<imagedata scale="80" fileref="../figures/FlatNetworkMultInterface.png"/>
</imageobject>
</mediaobject>
</figure>
</section>
<section>
<?dbhtml filename="configuring-flat-dhcp-networking.html" ?>
<title>Configuring Flat DHCP Networking</title><para>With Flat DHCP, the host running nova-network acts as the gateway to the virtual nodes. You
can run one nova-network per cluster. Set the flag --network_host on the nova.conf
stored on the nova-compute node to tell it which host the nova-network is running on
so it can communicate with nova-network. The nova-network service will track leases
and releases in the database so it knows if a VM instance has stopped properly
configuring via DHCP. Lastly, it sets up iptables rules to allow the VMs to
communicate with the outside world and contact a special metadata server to retrieve
information from the cloud.</para>
<para>Compute hosts in the FlatDHCP model are responsible for bringing up a matching
bridge and bridging the VM tap devices into the same ethernet device that the
network host is on. The compute hosts do not need an IP address on the VM network,
because the bridging puts the VMs and the network host on the same logical network.
When a VM boots, the VM sends out DHCP packets, and the DHCP server on the network
host responds with their assigned IP address.</para>
<para>Visually, the setup looks like the diagram below:</para>
<figure>
<title>Flat DHCP network, multiple interfaces, multiple servers</title>
<mediaobject>
<imageobject>
<imagedata scale="80" fileref="../figures/flatdchp-net.jpg"/>
</imageobject>
</mediaobject>
</figure>
<para>FlatDHCP doesn't create VLANs, it creates a bridge. This bridge works just fine on
a single host, but when there are multiple hosts, traffic needs a way to get out of
the bridge onto a physical interface. Be careful when setting up --flat_interface,
if you specify an interface that already has an IP it will break and if this is the
interface you are connecting through with SSH, you cannot fix it unless you have
ipmi/console access. In FlatDHCP mode, the setting for --network_size should be
number of IPs in the entire fixed range. If you are doing a /12 in CIDR notation,
then this number would be 2^20 or 1,048,576 IP addresses. That said, it will take a
very long time for you to create your initial network, as an entry for each IP will
be created in the database. </para>
<para>If you have an unused interface on your hosts that has connectivity with no IP
address, you can simply tell FlatDHCP to bridge into the interface by specifying
--flat_interface=&lt;interface> in your flagfile. The network host will
automatically add the gateway ip to this bridge. You can also add the interface to
br100 manually and not set flat_interface. If this is the case for you, edit your
nova.conf file to contain the following lines: </para>
<para>
<literallayout>--dhcpbridge_flagfile=/etc/nova/nova.conf
--dhcpbridge=/usr/bin/nova-dhcpbridge
--network_manager=nova.network.manager.FlatDHCPManager
--flat_network_dhcp_start=10.0.0.2
--flat_interface=eth2
--flat_injected=False
--public_interface=eth0</literallayout>
</para>
<para>Integrate your network interfaces to match this configuration.</para></section>
<section><?dbhtml filename="outbound-traffic-flow-with-any-flat-networking.html" ?><title>Outbound Traffic Flow with Any Flat Networking</title><para>In any setup with FlatNetworking, the host with nova-network on it is responsible for forwarding traffic from the private network configured with the --fixed_range=... directive in nova.conf. This host needs to have br100 configured and talking to any other nodes that are hosting VMs. With either of the Flat Networking options, the default gateway for the virtual machines is set to the host which is running nova-network.</para>
<para>When a virtual machine sends traffic out to the public networks, it sends it first to its default gateway, which is where nova-network is configured. </para>
<figure>
<title>Single adaptor hosts, first route</title>
<mediaobject>
<imageobject>
<imagedata scale="80" fileref="../figures/SingleInterfaceOutbound_1.png"/>
</imageobject>
</mediaobject>
</figure>
<para>Next, the host on which nova-network is configured acts as a router and forwards the traffic out to the Internet.</para>
<figure>
<title>Single adaptor hosts, second route</title>
<mediaobject>
<imageobject>
<imagedata scale="80" fileref="../figures/SingleInterfaceOutbound_2.png"/>
</imageobject>
</mediaobject>
</figure>
<warning><para>If you're using a single interface, then that interface (often eth0) needs to be set into promiscuous mode for the forwarding to happen correctly. This does not appear to be needed if you're running with physical hosts that have and use two interfaces.</para></warning>
</section>
<section>
<?dbhtml filename="configuring-vlan-networking.html" ?>
<title>Configuring VLAN Networking</title>
<para>In some networking environments, you may have a large IP space which is cut up
into smaller subnets. The smaller subnets are then trunked together at the switch
level (dividing layer 3 by layer 2) so that all machines in the larger IP space can
communicate. The purpose of this is generally to control the size of broadcast
domains.</para>
<para>Using projects as a way to logically separate each VLAN, we can setup our cloud
in this environment. Please note that you must have IP forwarding enabled for this
network mode to work.</para>
<para>Obtain the parameters for each network. You may need to ask a network administrator for this information, including netmask, broadcast, gateway, ethernet device and VLAN ID.</para> <para>You need to have networking hardware that supports VLAN tagging.</para>
<para>Please note that currently eth0 is hardcoded as the vlan_interface in the default flags. If you need to attach your bridges to a device other than eth0, you will need to add following flag to /etc/nova/nova.conf:</para>
<literallayout>--vlan_interface=eth1</literallayout>
<para>In VLAN mode, the setting for --network_size is the number of IPs per project as
opposed to the FlatDHCP mode where --network_size indicates number of IPs in the
entire fixed range. For VLAN, the settings in nova.conf that affect networking are
also --fixed_range, where the space is divided up into subnets of
--network_size.</para>
<para>VLAN is the default networking mode for Compute, so if you have no
--network_manager entry in your nova.conf file, you are set up for VLAN. To set your nova.conf file to VLAN, use this flag in /etc/nova/nova.conf:</para>
<literallayout>--network_manager=nova.network.manager.VlanManager</literallayout>
<para>For the purposes of this example walk-through, we will use the following settings. These are intentionally complex in an attempt to cover most situations:</para>
<itemizedlist>
<listitem><para>VLANs: 171, 172, 173 and
174</para></listitem>
<listitem><para>IP Blocks: 10.1.171.0/24,
10.1.172.0/24, 10.1.173.0/24 and 10.1.174.0/24</para></listitem>
<listitem><para>Each VLAN maps to its corresponding /24 (171 = 10.1.171.0/24, etc)</para></listitem>
<listitem><para>Each VLAN will get its own
bridge device, which is in the format br_$VLANID</para></listitem>
<listitem><para>Each /24 has an upstream
default gateway on .1</para></listitem>
<listitem><para>The first 6 IPs in each /24
are reserved</para></listitem>
</itemizedlist>
<para>First, create the networks that Compute can pull from using nova-manage commands:</para>
<literallayout class="monospaced">nova-manage --flagfile=/etc/nova/nova.conf network create private 10.1.171.0/24 1 256
nova-manage --flagfile=/etc/nova/nova.conf network create private 10.1.172.0/24 1 256
nova-manage --flagfile=/etc/nova/nova.conf network create private 10.1.173.0/24 1 256
nova-manage --flagfile=/etc/nova/nova.conf network create private 10.1.174.0/24 1 256</literallayout>
<para>Log in to the nova database to determine the network ID assigned to each VLAN:</para>
<literallayout class="monospaced">select id,cidr from networks;</literallayout>
<para>Update the DB to match your network settings. The following script will generate SQL based on the predetermined settings for this example. <emphasis>You will need to modify this database update to fit your environment.</emphasis></para>
<literallayout class="monospaced">
if [ -z $1 ]; then
echo "You need to specify the vlan to modify"
fi
if [ -z $2 ]; then
echo "You need to specify a network id number (check the DB for the network you want to update)"
fi
VLAN=$1
ID=$2
cat &gt; vlan.sql &lt;&lt; __EOF_
update networks set vlan = '$VLAN' where id = $ID;
update networks set bridge = 'br_$VLAN' where id = $ID;
update networks set gateway = '10.1.$VLAN.7' where id = $ID;
update networks set dhcp_start = '10.1.$VLAN.8' where id = $ID;
update fixed_ips set reserved = 1 where address in ('10.1.$VLAN.1','10.1.$VLAN.2','10.1.$VLAN.3','10.1.$VLAN.4','10.1.$VLAN.5','10.1.$VLAN.6','10.1.$VLAN.7');
__EOF_</literallayout>
<para>After verifying that the above SQL will work for your environment, run it against the nova database, once for every VLAN you have in the environment.</para>
<para>Next, create a project manager for the Compute project:</para>
<literallayout class="monospaced">nova-manage --flagfile=/etc/nova/nova.conf user admin $username</literallayout>
<para>Then create a project and assign that user as the admin user:</para>
<literallayout class="monospaced">nova-manage --flagfile=/etc/nova/nova.conf project create $projectname $username</literallayout>
<para>Finally, get the credentials for the user just created, which also assigns
one of the networks to this project:</para>
<literallayout class="monospaced">nova-manage --flagfile=/etc/nova/nova.conf project zipfile $projectname $username</literallayout>
<para>When you start nova-network, the bridge devices and associated VLAN tags will be created. When you create a new VM you must determine (either manually or programmatically) which VLAN it should be a part of, and start the VM in the corresponding project.</para>
<para>In certain cases, the network manager may not properly tear down bridges and VLANs when it is stopped. If you attempt to restart the network manager and it does not start, check the logs for errors indicating that a bridge device already exists. If this is the case, you will likely need to tear down the bridge and VLAN devices manually.</para>
<literallayout class="monospaced">vconfig rem vlanNNN
ifconfig br_NNN down
brctl delbr br_NNN</literallayout>
<para>Also, if users need to access the instances in their project across a VPN, a special VPN instance (code
named cloudpipe) needs to be created. You can create the cloudpipe instance. The
image is basically just a Linux instance with openvpn installed. It needs a simple
script to grab user data from the metadata server, b64 decode it into a zip file,
and run the autorun.sh script from inside the zip. The autorun script should
configure and run openvpn to run using the data from Compute. </para>
<para>For certificate management, it is also useful to have a cron script that will
periodically download the metadata and copy the new Certificate Revocation List
(CRL). This will keep revoked users from connecting and disconnects any users that
are connected with revoked certificates when their connection is re-negotiated
(every hour). You set the --use_project_ca flag in nova.conf for cloudpipes to work
securely so that each project has its own Certificate Authority (CA).</para></section>
<section>
<?dbhtml filename="enabling-ping-and-ssh-on-vms.html" ?>
<title>Enabling Ping and SSH on VMs</title>
<para>Be sure you enable access to your VMs by using the euca-authorize command. Below,
you will find the commands to allow ping and ssh to your VMs: </para>
<para><literallayout>euca-authorize -P icmp -t -1:-1 default
euca-authorize -P tcp -p 22 default</literallayout>If
you still cannot ping or SSH your instances after issuing the euca-authorize commands,
look at the number of dnsmasq processes that are running. If you have a running
instance, check to see that TWO dnsmasq processes are running. If not, perform the
following: <code>killall dnsmasq; service nova-network restart</code></para></section><section><?dbhtml filename="allocating-associating-ip-addresses.html" ?><title>Allocating and Associating IP Addresses with Instances</title><para>You can use Euca2ools commands to manage floating IP addresses used with Flat DHCP or VLAN
networking. </para>
<para>To assign a reserved IP address to your project, removing it from the pool of
available floating IP addresses, use <code>euca-allocate-address</code>. It'll
return an IP address, assign it to the project you own, and remove it from the pool
of available floating IP addresses. </para>
<para>To associate the floating IP to your instance, use <code>euca-associate-address -i
[instance_id] [floating_ip]</code>.</para>
<para>When you want to return the floating IP to the pool, first use
euca-disassociate-address [floating_ip] to disassociate the IP address from your
instance, then use euca-deallocate-address [floating_ip] to return the IP to the
pool of IPs for someone else to grab.</para>
<para>There are nova-manage commands that also help you manage the floating IPs.</para>
<para>nova-manage floating list - This command lists the floating IP addresses in the
pool. </para>
<para>nova-manage floating create [hostname] [cidr] - This command creates specific
floating IPs for a specific network host and either a single address or a subnet. </para>
<para>nova-manage floating destroy [hostname] [cidr] - This command removes floating IP
addresses using the same parameters as the create command.</para></section><section><?dbhtml filename="associating-public-ip.html" ?><title>Associating a Public IP Address</title>
<para>OpenStack Compute uses NAT for public IPs. If you plan to use public IP
addresses for your virtual instances, you must configure --public_interface=vlan100
in the nova.conf file so that Nova knows where to bind public IP addresses. Restart
nova-network if you change nova.conf while the service is running. Also, ensure you
have opened port 22 for the nova network.</para>
<para>You must add the IP address or block of public ip addresses to the floating IP
list using the <code>nova-manage floating create</code> command. When you start a
new virtual instance, associate one of the public addresses to the new instance
using the euca-associate-address command.</para>
<para>These are the basic overall steps and checkpoints. </para>
<para>First, set up the public address.</para>
<literallayout class="monospaced">nova-manage floating create my-hostname 68.99.26.170/31
euca-allocate-address 68.99.26.170
euca-associate-address -i i-1 68.99.26.170</literallayout>
<para>Make sure the security groups are open.</para>
<literallayout class="monospaced">root@my-hostname:~# euca-describe-groups
GROUP admin-project default default
PERMISSION admin-project default ALLOWS icmp -1 -1
FROM CIDR 0.0.0.0/0
PERMISSION admin-project default ALLOWS tcp 22 22
FROM CIDR 0.0.0.0/0</literallayout>
<para>Ensure the NAT rules have been added to iptables.</para>
<literallayout class="monospaced">-A nova-network-OUTPUT -d 68.99.26.170/32 -j DNAT --to-destination 10.0.0.3
-A nova-network-PREROUTING -d 68.99.26.170/32 -j DNAT --to-destination 10.0.0.3
-A nova-network-floating-snat -s 10.0.0.3/32 -j SNAT --to-source 68.99.26.170</literallayout>
<para>Check that the public address, in this example 68.99.26.170, has been added to your public interface. You
should see the address in the listing when you enter "ip addr" at the command prompt.</para>
<literallayout class="monospaced">2: eth0: &lt;BROADCAST,MULTICAST,UP,LOWER_UP&gt; mtu 1500 qdisc mq state UP qlen 1000
link/ether xx:xx:xx:17:4b:c2 brd ff:ff:ff:ff:ff:ff
inet 13.22.194.80/24 brd 13.22.194.255 scope global eth0
inet 68.99.26.170/32 scope global eth0
inet6 fe80::82b:2bf:fe1:4b2/64 scope link
valid_lft forever preferred_lft forever</literallayout>
<para>Note that you cannot SSH to an instance with a public IP from within the same
server as the routing configuration won't allow it. </para>
</section></section>
<section><?dbhtml filename="removing-network-from-project.html" ?><title>Removing a Network from a Project</title><para>You will find that you cannot remove a network that has already been associated to a project by simply deleting it. You can disassociate the project from the network with a scrub command and the project name as the final parameter:
</para><literallayout class="monospaced">nova-manage project scrub projectname</literallayout></section></chapter>

View File

@ -0,0 +1,267 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
<?dbhtml filename="ch_openstack-compute-tutorials.html" ?>
<title>OpenStack Compute Tutorials</title>
<para>We want OpenStack to make sense, and sometimes the best way to make sense of the cloud is to try out some basic ideas with cloud computing. Flexible, elastic, and scalable are a few attributes of cloud computing, so these tutorials show various ways to use virtual computing or web-based storage with OpenStack components.</para>
<section>
<?dbhtml filename="running-elastic-web-app-cloud.html" ?>
<title>Running Your First Elastic Web Application on the Cloud</title>
<para>In this OpenStack Compute tutorial, we'll walk through the creation of an elastic,
scalable cloud running a WordPress installation on a few virtual machines.</para>
<para>The tutorial assumes you have OpenStack Compute already installed on Ubuntu 10.04. You
can tell OpenStack Compute is installed by running "sudo nova-manage service list" to
ensure it is installed and the necessary services are running and ready. You should see
a set of nova- services in a response, and they should have a sideways smiley face in
each row, indicating they're running. You should run the tutorial as a root user or a
user with sudo access.</para>
<para>If you haven't installed OpenStack Compute yet, you can use an ISO image that is based
on a Ubuntu Linux Server 10.04 LTS distribution containing only the components needed to
run OpenStack Compute. See <link
xlink:href="http://sourceforge.net/projects/stackops/files/"
>http://sourceforge.net/projects/stackops/files/</link> for download files and
information, license information, and a README file to get started.</para>
<para>We'll go through this tutorial in parts:</para>
<itemizedlist>
<listitem><para>Setting up a user, project, and network for this cloud.</para></listitem>
<listitem><para>Getting images for your application servers.</para></listitem>
<listitem><para>On the instances you spin up, installing Wordpress and its dependencies, the Memcached plugin, and multiple memcache servers.</para></listitem>
</itemizedlist>
<section><?dbhtml filename="part-i-setting-up-cloud-infrastructure.html" ?>
<title>Part I: Setting Up the Cloud Infrastructure</title>
<para>In this part, we'll get the networking layer set up based on what we think most
networks would work like. We'll also create a user and a project to house our cloud
and its network. Onward, brave cloud pioneers! </para>
<simplesect>
<title>Configuring the network</title>
<para>Ideally on large OpenStack Compute deployments, each project is in a protected
network segment. Our project in this case is a LAMP stack running Wordpress with
the Memcached plugin for added database efficiency. So we need a public IP
address for the Wordpress server but we can use flat networking for this. Here's
how you set those network settings. </para>
<para>Usually networking is set in nova.conf, but VLAN-based networking with DHCP is
the default setting when no network manager is defined in nova.conf. To check
this network setting, open your nova.conf, typically in /etc/nova/nova.conf and
look for -network_manager. The possible options are:</para>
<itemizedlist>
<listitem>
<para>-network_manager=nova.network.manager.FlatManager for a simple,
no-VLAN networking type, </para>
</listitem>
<listitem>
<para>-network_manager=nova.network.manager.FlatDHCPManager for flat
networking with a built-in DHCP server, </para>
</listitem>
<listitem>
<para>-network_manager= nova.network.manager.VlanManager, which is the most
tested in production but requires network hardware with VLAN
tagging.</para>
</listitem>
</itemizedlist>
<para>Here is an example nova.conf for a single node installation of OpenStack
Compute.</para>
<literallayout class="monospaced"># Sets the network type
--network_manager=nova.network.manager.FlatManager
# Sets whether to use IPV6 addresses
--use_ipv6=false
# DHCP bridge information
--dhcpbridge_flagfile=/etc/nova/nova.conf
--dhcpbridge=nova-dhcpbridge
--logdir=/var/log/nova
# Top-level directory for maintaining nova's state
--state_path=/var/lib/nova
# These indicate where nova-api services are installed
--s3_host=184.106.239.134
--rabbit_host=184.106.239.134
--ec2_api=184.106.239.134
--ec2_url=http://184.106.239.134:8773/services/Cloud
# Block of IP addresses that are fixed IPs
--fixed_range=192.168.0.0/12
# Number of addresses in each private subnet
--network_size=24
# FlatDHCP bridges to this interface if set, be very careful setting it on an interface that does not already have an IP associated with it
--flat_interface=eth0
# Public IP of the server running nova-network, when instances without a floating IP hit the internet, traffic is snatted to this IP
--routing_source_ip=184.106.239.134
# Not required, but handy for debugging
--verbose
# Tells nova where to connect for database
--sql_connection=mysql://nova:notnova@184.106.239.134/nova</literallayout>
<para>Now that we know the networking configuration, let's set up the network for
our project. With Flat DHCP, the host running nova-network acts as the gateway
to the virtual nodes, so ideally this will have a public IP address for our
tutorial. Be careful when setting up --flat_interface in nova.conf, if you
specify an interface that already has an IP it will break and if this is the
interface you are connecting through with SSH, you cannot fix it unless you have
ipmi/console access. </para>
<para>For this tutorial, we set a 24 value for network_size, the number of addresses
in each private subnet, since that falls inside the /12 CIDR-notated range
that's set in fixed-range in nova.conf. We probably won't use that many at
first, but it's good to have the room to scale.</para>
<para>Currently, there can only be one network set in nova.conf. When you issue the
nova-manage network create command, it uses the settings in the nova.conf flag
file. From the --fixed_range setting, iptables are set. Those iptables are
regenerated each time the nova-network service restarts, also. </para>
<note>
<para>The nova-manage service assumes that the first IP address is your network
(like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that
the broadcast is the very last IP in the range you defined (192.168.0.255).
If this is not the case you will need to manually edit the sql db networks
table, but that scenario shouldn't happen for this tutorial.</para>
</note>
<para>Run this command as root or sudo:</para>
<literallayout class="monospaced"> nova-manage network create public 192.168.3.0/12 1 256</literallayout>
<para>On running this command, entries are made in the networks and fixed_ips
table in the nova database. However, one of the networks listed in the
networks table needs to be marked as bridge in order for the code to know that
a bridge exists. The Network is marked as bridged automatically based on the
type of network manager selected. </para>
<para>Next you want to integrate this network bridge, named br100, into your
network. A bridge connects two Ethernet segments together.</para>
</simplesect>
<simplesect>
<title>Ensure the Database is Up-to-date</title>
<para>The first command you run using nova-manage is one called db sync, which
ensures that your database is updated. You must run this as root.</para>
<literallayout class="monospaced">nova-manage db sync</literallayout>
</simplesect>
<simplesect>
<title>Creating a user</title>
<para>OpenStack Compute can run many projects for many users, so for our tutorial
we'll create a user and project just for this scenario. </para>
<para>We control the actions a user can take through roles, such as admin for
Administrator who has complete system access, itsec for IT Security, netadmin
for Network Administrator, and so on.</para>
<para>In addition to these roles controlling access to the Eucalyptus API,
credentials are supplied and bundled by OpenStack compute in a zip file when you
create a project. The user accessing the cloud infrastructure through ec2
commands are given an access and secret key through the project itself. Let's
create a user that has the access we want for this project.</para>
<para>To add an admin user named cloudypants, use:</para>
<literallayout class="monospaced">nova-manage user admin cloudypants</literallayout>
</simplesect>
<simplesect>
<title>Creating a project and related credentials</title>
<para>Next we'll create the project, which in turn gives you certifications in a zip
file.</para>
<para>Enter this command to create a project named wpscales as the admin user,
cloudypants, that you created above.</para>
<literallayout class="monospaced">nova-manage project create wpscales cloudypants</literallayout>
<para>Great, now you have a project that is set apart from the rest of the clouds
you might control with OpenStack Compute. Now you need to give the user some
credentials so they can run commands for the instances with in that project's
cloud. </para>
<para>These are the certs you will use to launch instances, bundle images, and all
the other assorted API and command-line functions.</para>
<para>First, we'll create a directory that'll house these credentials, in this case
in the root directory. You need to sudo here or save this to your own directory
with 'mkdir -p ~/creds' so that the credentials match the user and are stored in
their home.</para>
<literallayout class="monospaced">mkdir -p /root/creds</literallayout>
<para>Now, run nova-manage to create a zip file for your project called wpscales
with the user cloudypants (the admin user we created previously). </para>
<literallayout class="monospaced">sudo nova-manage project zipfile wpscales cloudypants /root/creds/novacreds.zip</literallayout>
<para>Next, you can unzip novacreds.zip in your home directory, and add these
credentials to your environment. </para>
<literallayout class="monospaced">unzip /root/creds/novacreds.zip -d /root/creds/</literallayout>
<para>Sending that information and sourcing it as part of your .bashrc file
remembers those credentials for next time.</para>
<literallayout class="monospaced">cat /root/creds/novarc >> ~/.bashrc
source ~/.bashrc</literallayout>
<para>Okay, you've created the basic scaffolding for your cloud so that you can get
some images and run instances. Onward to Part II!</para>
</simplesect>
</section>
<section>
<?dbhtml filename="part-ii-getting-virtual-machines.html" ?>
<title>Part II: Getting Virtual Machines to Run the Virtual Servers</title>
<para>Understanding what you can do with cloud computing means you should have a grasp
on the concept of virtualization. With virtualization, you can run operating systems
and applications on virtual machines instead of physical computers. To use a virtual
machine, you must have an image that contains all the information about which
operating system to run, the user login and password, files stored on the system,
and so on.</para>
<para>For this tutorial, we've created an image that you can download that allows the
networking you need to run web applications and so forth. In order to use it with
the OpenStack Compute cloud, you download the image, then use uec-publish-tarball to
publish it. </para>
<para>Here are the commands to get your virtual image. Be aware that the download of the
compressed file may take a few minutes.</para>
<literallayout class="monospaced">image="ubuntu1010-UEC-localuser-image.tar.gz"
wget http://c0179148.cdn1.cloudfiles.rackspacecloud.com/
ubuntu1010-UEC-localuser-image.tar.gz
uec-publish-tarball $image wpbucket x86_64</literallayout>
<para>What you'll get in return from this command is three references: emi, eri and eki.
These are acronyms - emi stands for eucalyptus machine image, eri stands for
eucalyptus ramdisk image, and eki stands for eucalyptus kernel image. Amazon has
similar references for their images - ami, ari, and aki.</para>
<para>You need to use the emi value when you run the instance. These look something like
“ami-zqkyh9th” - basically a unique identifier.</para>
<para>Okay, now that you have your image and it's published, realize that it has to be
decompressed before you can launch an image from it. We can check what state an
image is in using the 'euca-describe-instances' command. Basically, run:</para>
<literallayout class="monospaced">euca-describe-instances</literallayout>
<para>and look for the state in the text that returns. You can also use
euca-describe-images to ensure the image is untarred. Wait until the state shows
"available" so that you know the instance is ready to roll.</para>
</section>
<section>
<?dbhtml filename="installing-needed-software-for-web-scale.html" ?>
<title>Part III: Installing the Needed Software for the Web-Scale Scenario</title>
<para>Once that state is "available" you can enter this command, which will use your
credentials to start up the instance with the identifier you got by publishing the
image.</para>
<literallayout class="monospaced">emi=ami-zqkyh9th
euca-run-instances $emi -k mykey -t m1.tiny</literallayout>
<para>Now you can look at the state of the running instances by using
euca-describe-instances again. The instance will go from “launching” to “running” in
a short time, and you should be able to connect via SSH. Look at the IP addresses so
that you can connect to the instance once it starts running.</para>
<para>Basically launch a terminal window from any computer, and enter: </para>
<literallayout class="monospaced">ssh -i mykey ubuntu@10.127.35.119</literallayout>
<para>On this particular image, the 'ubuntu' user has been set up as part of the sudoers
group, so you can escalate to 'root' via the following command:</para>
<literallayout class="monospaced">sudo -i</literallayout>
<simplesect>
<title>On the first VM, install WordPress</title>
<para>Now, you can install WordPress. Create and then switch to a blog
directory:</para>
<literallayout class="monospaced">mkdir blog
cd blog</literallayout>
<para>Download WordPress directly to you by using wget:</para>
<literallayout class="monospaced">wget http://wordpress.org/latest.tar.gz </literallayout>
<para>Then unzip the package using: </para>
<literallayout class="monospaced">tar -xzvf latest.tar.gz</literallayout>
<para>The WordPress package will extract into a folder called wordpress in the same
directory that you downloaded latest.tar.gz. </para>
<para>Next, enter "exit" and disconnect from this SSH session.</para>
</simplesect>
<simplesect>
<title>On a second VM, install MySQL</title>
<para>Next, SSH into another virtual machine and install MySQL and use these
instructions to install the WordPress database using the MySQL Client from a
command line: <link
xlink:href="http://codex.wordpress.org/Installing_WordPress#Using_the_MySQL_Client"
>Using the MySQL Client - Wordpress Codex</link>.</para>
</simplesect>
<simplesect><title>On a third VM, install Memcache</title><para>Memcache makes Wordpress database reads and writes more efficient, so your virtual servers
can go to work for you in a scalable manner. SSH to a third virtual machine and
install Memcache:</para>
<para>
<literallayout class="monospaced">apt-get install memcached</literallayout>
</para></simplesect><simplesect><title>Configure the Wordpress Memcache plugin</title><para>From a web browser, point to the IP address of your Wordpress server. Download and install the Memcache Plugin. Enter the IP address of your Memcache server.</para></simplesect>
</section><section>
<?dbhtml filename="running-a-blog-in-the-cloud.html" ?>
<title>Running a Blog in the Cloud</title><para>That's it! You're now running your blog on a cloud server in OpenStack Compute, and you've scaled it horizontally using additional virtual images to run the database and Memcache. Now if your blog gets a big boost of comments, you'll be ready for the extra reads-and-writes to the database. </para></section>
</section>
</chapter>

Binary file not shown.

After

Width:  |  Height:  |  Size: 56 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 50 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 61 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 66 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 53 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 56 KiB

View File

@ -0,0 +1,66 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
<title>Configuring OpenStack with Hyper-V</title>
<para>You can run OpenStack Compute with Hyper-V as the hypervisor to run Windows 2008 R2
Datacenter or Enterprise virtual instances. </para>
<section>
<title>Requirements</title>
<para>Supported Operating System: Windows 2008 R2 Datacenter or Enterprise (Note that licensing is limited on Enterprise.)</para>
<para>On the Compute node that is running Windows, install the following
dependencies.</para>
<itemizedlist><listitem><para>Python 2.6 (32 bit): <link xlink:href="http://www.python.org/download/releases/2.6.6/"
>http://www.python.org/download/releases/2.6.6/</link></para></listitem>
<listitem><para>Microsoft Visual C++ 2008 Redistributable Package: <link xlink:href="http://www.microsoft.com/downloads/en/details.aspx?familyid=9B2DA534-3E03-4391-8A4D-074B9F2BC1BF">http://www.microsoft.com/downloads/en/details.aspx?familyid=9B2DA534-3E03-4391-8A4D-074B9F2BC1BF</link></para></listitem>
<listitem><para>easy_install: <link xlink:href="http://pypi.python.org/pypi/setuptools#files">http://pypi.python.org/pypi/setuptools#files</link></para></listitem>
<listitem><para>Pywin32 214: <link
                    xlink:href="http://sourceforge.net/projects/pywin32/files/pywin32/Build%20214/pywin32-214.win32-py2.6.exe/download"
                    >http://sourceforge.net/projects/pywin32/files/pywin32/Build%20214/pywin32-214.win32-py2.6.exe/download</link></para></listitem>
<listitem><para>Swig 2.0.1:<link xlink:href="http://sourceforge.net/projects/swig/"> http://sourceforge.net/projects/swig/</link></para></listitem>
<listitem><para>M2Crypto 0.19.1 <link xlink:href="http://chandlerproject.org/pub/Projects/MeTooCrypto/M2Crypto-0.19.1.win32-py2.6.exe">http://chandlerproject.org/pub/Projects/MeTooCrypto/M2Crypto-0.19.1.win32-py2.6.exe</link></para></listitem>
<listitem><para>MySQL-python 1.2.2: <link xlink:href="http://www.codegood.com/archives/4">http://www.codegood.com/archives/4</link></para></listitem>
</itemizedlist>
<para>Use easy_install to install the following additional requirements:</para>
<itemizedlist>
<listitem><para>pip</para></listitem>
<listitem><para>netaddr</para></listitem>
<listitem><para>paramiko</para></listitem>
<listitem><para>WMI 1.4.7</para></listitem>
<listitem><para>IPy 0.72</para></listitem>
<listitem><para>Markdown 2.0.3</para></listitem>
<listitem><para>SQLAlchemy 0.6.5</para></listitem>
<listitem><para>Twisted 10.2.0</para></listitem>
<listitem><para>amqplib 0.6.1</para></listitem>
<listitem><para>anyjson 0.3</para></listitem>
<listitem><para>boto 1.9b</para></listitem>
<listitem><para>carrot 0.10.7</para></listitem>
<listitem><para>eventlet 0.9.13</para></listitem>
<listitem><para>greenlet 0.3.1</para></listitem>
<listitem><para>mox 0.5.3</para></listitem>
<listitem><para>python-gflags 1.4</para></listitem>
<listitem><para>tornado 1.1</para></listitem>
<listitem><para>zope.interface 3.6.1</para></listitem></itemizedlist>
</section>
<section><title>Installation Architecture</title><para>With Hyper-V integration, the nova-compute service runs on a Windows server and the remaining nova- services run on Linux servers.</para></section>
<section><title>Configuring OpenStack Compute (Nova) to use Hyper-V</title><para>Configure Windows Server for the Compute Node </para>
<para>Install Dependencies </para>
<para>Setting up the Nova Environment </para>
<para>Configure nova.conf flags for Hyper-V</para></section>
<section><title>Running Nova with Hyper-V</title><para>Managing instances running Hyper-V</para></section>
<section><title>Preparing Images for use with Hyper-V</title>
<para>Install a new virtual machine on Hyper-V to create a VHD file using <link xlink:href="http://technet.microsoft.com/en-us/library/cc732470%28WS.10%29.aspx#BKMK_step3">instructions from Microsoft</link>. </para>
<para>The VHD file is usually saved in C:\Users\Public\Documents\Hyper-V\Virtual hard disks.
If using a local copy (when use_s3=False in your nova.conf), copy this to
C:\Users\Public\Documents\Hyper-V\Virtual hard disks\images\&lt;image name&gt;\image.
Note this is "image" not "image.vhd" - no file extension. The VHD file needs to be
renamed to just "image" in the directory with the name of the image. </para></section>
</chapter>

View File

@ -0,0 +1,122 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
<?dbhtml filename="ch_installing-openstack-imaging-service.html" ?>
<title>Installing and Configuring OpenStack Image Service</title>
<para>The OpenStack system has several key projects that are separate installations but can work
together depending on your cloud needs: OpenStack Compute, OpenStack Object Storage, and an
OpenStack Image Service with a project name of Glance. You can install any of these
projects separately and then configure them either as standalone or connected
entities.</para>
<section>
<?dbhtml filename="glance-system-requirements.html" ?>
<title>System Requirements for OpenStack Image Service (Glance)</title>
<para><emphasis role="bold">Hardware</emphasis>: OpenStack components are intended to run on
standard hardware.</para>
<para><emphasis role="bold">Operating System</emphasis>: The OpenStack Image Service
itself currently runs on Ubuntu but the images it stores may contain different operating
systems.</para>
<para><emphasis role="bold">Networking</emphasis>: 1000 Mbps are suggested. </para>
<para><emphasis role="bold">Database</emphasis>: Any SQLAlchemy-compatible database, such as
MySQL, Oracle, PostgreSQL, or SQLite. The reference registry server implementation that
ships with OpenStack Image Service uses a SQL database to store information about an
image, and publishes this information via an HTTP/REST-like interface.</para>
<para><emphasis role="bold">Permissions</emphasis>: You can install OpenStack imaging
service either as root or as a user with sudo permissions if you configure the sudoers
file to enable all the permissions. </para>
</section>
<section>
<?dbhtml filename="installing-openstack-imaging-service-on-ubuntu.html" ?>
<title>Installing OpenStack Image Service on Ubuntu </title><para>The installation of the Image Service itself is separate from the storage of the virtual images to be retrieved. </para>
<section>
<title>Example Installation Architecture</title>
<para>These installation instructions have you set up the services on a single node, so the API server and registry services are on the same server. The images themselves can be stored either in OpenStack Object Storage, Amazon's S3 infrastructure, in a filesystem, or if you want read-only access, on a web server to be served via HTTP.</para></section>
<section>
<?dbhtml filename="installing-glance.html" ?>
<title>Installing OpenStack Image Service (Glance) </title>
<para>First, add the Glance PPA to your sources.list. </para>
<para>
<literallayout class="monospaced">sudo add-apt-repository ppa:glance-core/trunk </literallayout></para>
<para>Run update. </para>
<para><literallayout class="monospaced">sudo apt-get update</literallayout></para>
<para>Now, install the Glance server. </para>
<para>
<literallayout class="monospaced">sudo apt-get install glance </literallayout></para>
<para>All dependencies should be automatically installed.</para>
<para>Refer to the <link xlink:href="http://glance.openstack.org/installing.html">Glance
developer documentation site to install from a Bazaar branch</link>. </para>
</section>
</section><section>
<?dbhtml filename="configuring-and-controlling-openstack-imaging-servers.html" ?>
<title>Configuring and Controlling Glance Servers</title>
<para>You start Glance either by calling the server program, glance-api, or using the server daemon wrapper program named glance-control.</para> <para>Glance ships with an etc/ directory that contains sample paste.deploy configuration files that you can copy to a standard configuration directory and adapt for your own uses.</para>
<para>If you do not specify a configuration file on the command line when starting the glance-api server, Glance attempts to locate a glance.conf configuration file in one of the following directories, and uses the first config file it finds in this order:</para>
<orderedlist>
<listitem><para>.</para></listitem>
<listitem><para>~/.glance</para></listitem>
<listitem><para>~/</para></listitem>
<listitem><para>/etc/glance/</para></listitem>
<listitem><para>/etc</para></listitem></orderedlist>
<para>If Glance doesn't find a configuration file in one of these locations, you see an error: <code>ERROR: Unable to locate any configuration file. Cannot load application glance-api</code>.</para>
<simplesect><title>Manually starting the server</title>
<para>To manually start the glance-api server, use a command like the following: </para>
<literallayout class="monospaced">sudo glance-api etc/glance.conf.sample --debug</literallayout>
<para>Supply the configuration file as the first argument (etc/glance.conf.sample in the above example) and then any common options you want to use. In the above example, the --debug option shows some of the debugging output that the server shows when starting up. Call the server program with --help to see all available options you can specify on the command line.</para>
<para>Note that the server does not daemonize itself when run manually from the terminal. You can force the server to daemonize using the standard shell backgrounding indicator, <code>&amp;</code>. However, for most use cases, we recommend using the glance-control server daemon wrapper for daemonizing. See below for more details on daemonization with glance-control.</para></simplesect>
<simplesect><title>Starting the server with the glance-control wrapper script</title>
<para>The second way to start up a Glance server is to use the glance-control program. glance-control is a wrapper script that allows the user to start, stop, restart, and reload the other Glance server programs in a fashion that is more conducive to automation and scripting.</para>
<para>Servers started via the glance-control program are always daemonized, meaning that the server program process runs in the background.</para>
<para>To start a Glance server with glance-control, simply call glance-control with a server and the word “start”, followed by any command-line options you wish to provide. Start the server with glance-control in the following way:</para>
<literallayout class="monospaced"> sudo glance-control {SERVER} start [CONFPATH]</literallayout>
<para> Here is an example that shows how to start the glance-registry server with the glance-control wrapper script.</para>
<literallayout class="monospaced">sudo glance-control registry start etc/glance.conf.sample
Starting glance-registry with /home/jpipes/repos/glance/trunk/etc/glance.conf.sample</literallayout>
<para>To start all the Glance servers (currently the glance-api and glance-registry programs) at once, you can specify “all” for the {SERVER}.</para>
</simplesect>
<simplesect><title>Stopping a Glance server</title><para>You can use Ctrl-C to stop a Glance server if it was started manually. </para>
<para>If you started the Glance server using the glance-control program, you can use the glance-control program to stop it. Simply do the following:</para>
<literallayout class="monospaced">sudo glance-control {SERVER} stop</literallayout>
<para> as this example shows:
</para>
<literallayout class="monospaced">sudo glance-control registry stop
Stopping glance-registry pid: 17602 signal: 15
</literallayout>
</simplesect>
<simplesect><title>Restarting a Glance server</title>
<para>
You can restart a server with the glance-control program, as demonstrated here:
</para>
<literallayout class ="monospaced">
sudo glance-control registry restart etc/glance.conf.sample
Stopping glance-registry pid: 17611 signal: 15
Starting glance-registry with /home/jpipes/repos/glance/trunk/etc/glance.conf.sample</literallayout>
</simplesect>
</section>
</chapter>

View File

@ -0,0 +1,805 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
<?dbhtml filename="ch_introduction-to-openstack-imaging-service.html" ?>
<title>OpenStack Image Service</title>
<para>You can use OpenStack Image Service for discovering, registering, and retrieving virtual machine images. The service includes a RESTful API that allows users to query VM image metadata and retrieve the actual image with HTTP requests, or you can use a client class in your Python code to accomplish the same tasks.
</para><para>
VM images made available through OpenStack Image Service can be stored in a variety of locations from simple file systems to object-storage systems like the OpenStack Object Storage project, or even use S3 storage either on its own or through an OpenStack Object Storage S3 interface.</para>
<section>
<?dbhtml filename="overview-of-architecture.html" ?>
<title>Overview of Architecture</title>
<para>There are two main parts to the Image Services architecture:</para>
<itemizedlist><listitem><para>API server</para></listitem>
<listitem><para>Registry server(s)</para>
</listitem>
</itemizedlist>
<para>OpenStack Image Service is designed to be as adaptable as possible for various back-end storage and registry database solutions. There is a main API server (the ``glance-api`` program) that serves as the communications hub between various client programs, the registry of image metadata, and the storage systems that actually contain the virtual machine image data.</para>
</section>
<section>
<?dbhtml filename="openstack-imaging-service-api-server.html" ?>
<title>OpenStack Image Service API Server</title>
<para>The API server is the main interface for OpenStack Image Service. It routes requests from clients to registries of image metadata and to its backend stores, which are the mechanisms by which OpenStack Image Service actually saves incoming virtual machine images.</para>
<para>The backend stores that OpenStack Image Service can work with are as follows:</para>
<itemizedlist><listitem><para>OpenStack Object Storage - OpenStack Object Storage is the highly-available object storage project in OpenStack.</para></listitem>
<listitem><para>Filesystem - The default backend that OpenStack Image Service uses to store virtual machine images is the filesystem backend. This simple backend writes image files to the local filesystem.</para></listitem>
<listitem><para>S3 - This backend allows OpenStack Image Service to store virtual machine images in Amazon's S3 service.</para></listitem>
<listitem><para>HTTP - OpenStack Image Service can read virtual machine images that are available via HTTP somewhere on the Internet. This store is read-only.</para></listitem></itemizedlist>
</section>
<section>
<?dbhtml filename="openstack-imaging-service-registry-servers.html" ?>
<title>OpenStack Image Service Registry Servers</title>
<para>OpenStack Image Service registry servers are servers that conform to the OpenStack Image Service Registry API. OpenStack Image Service ships with a reference implementation of a registry server that complies with this API (bin/OpenStack Image Service-registry).</para></section>
<section>
<?dbhtml filename="installing-openstack-imaging-service.html" ?>
<title>Installing and Configuring OpenStack Image Service</title>
<para>The OpenStack system has several key projects that are separate installations but can work
together depending on your cloud needs: OpenStack Compute, OpenStack Object Storage, and an
OpenStack Image Service with a project name of Glance. You can install any of these
projects separately and then configure them either as standalone or connected
entities.</para>
<section>
<?dbhtml filename="glance-system-requirements.html" ?>
<title>System Requirements for OpenStack Image Service (Glance)</title>
<para><emphasis role="bold">Hardware</emphasis>: OpenStack components are intended to run on
standard hardware.</para>
<para><emphasis role="bold">Operating System</emphasis>: The OpenStack Image Service
itself currently runs on Ubuntu but the images it stores may contain different operating
systems.</para>
<para><emphasis role="bold">Networking</emphasis>: 1000 Mbps are suggested. </para>
<para><emphasis role="bold">Database</emphasis>: Any SQLAlchemy-compatible database, such as
MySQL, Oracle, PostgreSQL, or SQLite. The reference registry server implementation that
ships with OpenStack Image Service uses a SQL database to store information about an
image, and publishes this information via an HTTP/REST-like interface.</para>
<para><emphasis role="bold">Permissions</emphasis>: You can install OpenStack imaging
service either as root or as a user with sudo permissions if you configure the sudoers
file to enable all the permissions. </para>
</section>
<section>
<?dbhtml filename="installing-openstack-imaging-service-on-ubuntu.html" ?>
<title>Installing OpenStack Image Service on Ubuntu </title><para>The installation of the Image Service itself is separate from the storage of the virtual images to be retrieved. </para>
<section><?dbhtml filename="example-installation-architecture-glance.html" ?>
<title>Example Installation Architecture</title>
<para>These installation instructions have you set up the services on a single node, so the API server and registry services are on the same server. The images themselves can be stored either in OpenStack Object Storage, Amazon's S3 infrastructure, in a filesystem, or if you want read-only access, on a web server to be served via HTTP.</para></section>
<section>
<?dbhtml filename="installing-glance.html" ?>
<title>Installing OpenStack Image Service (Glance) </title>
<para>First, add the Glance PPA to your sources.list. </para>
<para>
<literallayout class="monospaced">sudo add-apt-repository ppa:glance-core/trunk </literallayout></para>
<para>Run update. </para>
<para><literallayout class="monospaced">sudo apt-get update</literallayout></para>
<para>Now, install the Glance server. </para>
<para>
<literallayout class="monospaced">sudo apt-get install glance </literallayout></para>
<para>All dependencies should be automatically installed.</para>
<para>Refer to the <link xlink:href="http://glance.openstack.org/installing.html">Glance
developer documentation site to install from a Bazaar branch</link>. </para>
</section>
</section><section>
<?dbhtml filename="configuring-and-controlling-openstack-imaging-servers.html" ?>
<title>Configuring and Controlling Glance Servers</title>
<para>You start Glance either by calling the server program, glance-api, or using the server daemon wrapper program named glance-control.</para> <para>Glance ships with an etc/ directory that contains sample paste.deploy configuration files that you can copy to a standard configuration directory and adapt for your own uses.</para>
<para>If you do not specify a configuration file on the command line when starting the glance-api server, Glance attempts to locate a glance.conf configuration file in one of the following directories, and uses the first config file it finds in this order:</para>
<orderedlist>
<listitem><para>.</para></listitem>
<listitem><para>~/.glance</para></listitem>
<listitem><para>~/</para></listitem>
<listitem><para>/etc/glance/</para></listitem>
<listitem><para>/etc</para></listitem></orderedlist>
<para>If Glance doesn't find a configuration file in one of these locations, you see an error: <code>ERROR: Unable to locate any configuration file. Cannot load application glance-api</code>.</para>
<simplesect><title>Manually starting the server</title>
<para>To manually start the glance-api server, use a command like the following: </para>
<literallayout class="monospaced">sudo glance-api etc/glance.conf.sample --debug</literallayout>
<para>Supply the configuration file as the first argument (etc/glance.conf.sample in the above example) and then any common options you want to use. In the above example, the --debug option shows some of the debugging output that the server shows when starting up. Call the server program with --help to see all available options you can specify on the command line.</para>
<para>Note that the server does not daemonize itself when run manually from the terminal. You can force the server to daemonize using the standard shell backgrounding indicator, <code>&amp;</code>. However, for most use cases, we recommend using the glance-control server daemon wrapper for daemonizing. See below for more details on daemonization with glance-control.</para></simplesect>
<simplesect><title>Starting the server with the glance-control wrapper script</title>
<para>The second way to start up a Glance server is to use the glance-control program. glance-control is a wrapper script that allows the user to start, stop, restart, and reload the other Glance server programs in a fashion that is more conducive to automation and scripting.</para>
<para>Servers started via the glance-control program are always daemonized, meaning that the server program process runs in the background.</para>
<para>To start a Glance server with glance-control, simply call glance-control with a server and the word “start”, followed by any command-line options you wish to provide. Start the server with glance-control in the following way:</para>
<literallayout class="monospaced"> sudo glance-control {SERVER} start [CONFPATH]</literallayout>
<para> Here is an example that shows how to start the glance-registry server with the glance-control wrapper script.</para>
<literallayout class="monospaced">sudo glance-control registry start etc/glance.conf.sample
Starting glance-registry with /home/jpipes/repos/glance/trunk/etc/glance.conf.sample</literallayout>
<para>To start all the Glance servers (currently the glance-api and glance-registry programs) at once, you can specify “all” for the {SERVER}.</para>
</simplesect>
<simplesect><title>Stopping a Glance server</title><para>You can use Ctrl-C to stop a Glance server if it was started manually. </para>
<para>If you started the Glance server using the glance-control program, you can use the glance-control program to stop it. Simply do the following:</para>
<literallayout class="monospaced">sudo glance-control {SERVER} stop</literallayout>
<para> as this example shows:
</para>
<literallayout class="monospaced">sudo glance-control registry stop
Stopping glance-registry pid: 17602 signal: 15
</literallayout>
</simplesect>
<simplesect><title>Restarting a Glance server</title>
<para>
You can restart a server with the glance-control program, as demonstrated here:
</para>
<literallayout class ="monospaced">
sudo glance-control registry restart etc/glance.conf.sample
Stopping glance-registry pid: 17611 signal: 15
Starting glance-registry with /home/jpipes/repos/glance/trunk/etc/glance.conf.sample</literallayout>
</simplesect>
</section>
<section><?dbhtml filename="configuring-compute-to-use-glance.html" ?><title>Configuring Compute to use Glance</title>
<para>Once Glance is installed and the server is running, you should edit your nova.conf file to add or edit the following flags:</para>
<literallayout class="monospaced">
--glance_api_servers=GLANCE_SERVER_IP
--image_service=nova.image.glance.GlanceImageService</literallayout>
<para>Where the GLANCE_SERVER_IP is the IP address of the server running the glance-api service.</para></section>
</section>
<section><?dbhtml filename="configuring-logging-for-glance.html" ?><title>Configuring Logging for Glance</title>
<para>There are a number of configuration options in Glance that control how Glance servers log messages. The configuration options are specified in the glance.conf configuration file.</para>
<table rules="all">
<caption>Description of glance.conf flags for Glance logging</caption>
<thead>
<tr>
<td>Flag</td>
<td>Default</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>--log-config=PATH </td>
<td>default: none</td>
<td>Path name to a configuration file to use for configuring logging: Specified on the command line only. </td>
</tr>
<tr>
<td>--log-format </td>
<td>default: %(asctime)s %(levelname)8s [%(name)s] %(message)s</td>
<td>Format of log records: Because of a bug in the PasteDeploy package, this
option is only available on the command line. See the <link
xlink:href="http://docs.python.org/library/logging.html">Python logging
module documentation</link> for more information about the options in
the format string.</td>
</tr>
<tr>
<td>--log_file </td>
<td>default: none</td>
<td>Path name: The filepath of the file to use for logging messages from Glance's servers. Without this setting, the default is to output messages to stdout, so if you are running Glance servers in a daemon mode (using glance-control) you should make sure that the log_file option is set appropriately.</td>
</tr>
<tr>
<td>--log_dir </td>
<td>default: none</td>
<td>Path name: The filepath of the directory to use for log files. If not specified (the default) the log_file is used as an absolute filepath.</td>
</tr>
<tr>
<td>--log_date_format </td>
<td>default: %Y-%m-%d %H:%M:%S</td>
<td>Python logging module formats: The format string for timestamps in the log
output. See the <link
xlink:href="http://docs.python.org/library/logging.html">Python logging
module documentation</link> for more information on setting this format
string.</td>
</tr>
</tbody>
</table>
</section>
<section><?dbhtml filename="openstack-imaging-service-glance-rest-api.html" ?>
<info><title>The Glance REST API</title></info>
<para>
Glance has a RESTful API that exposes both metadata about registered
virtual machine images and the image data itself.
</para>
<para>
A host that runs the <literal>bin/glance-api</literal> service is
said to be a <emphasis>Glance API Server</emphasis>.
</para>
<para>
Assume there is a Glance API server running at the URL
<literal>http://glance.example.com</literal>.
</para>
<para>
Let's walk through how a user might request information from this
server.
</para>
<section xml:id="requesting-a-list-of-public-vm-images"><?dbhtml filename="requesting-vm-list.html" ?><info><title>Requesting a List of Public VM Images</title></info>
<para>
We want to see a list of available virtual machine images that the
Glance server knows about.
</para>
<para>
We issue a <literal>GET</literal> request to
<literal>http://glance.example.com/images/</literal> to retrieve
this list of available <emphasis>public</emphasis> images. The
data is returned as a JSON-encoded mapping in the following
format:
</para>
<screen>
{'images': [
{'uri': 'http://glance.example.com/images/1',
'name': 'Ubuntu 10.04 Plain',
'disk_format': 'vhd',
'container_format': 'ovf',
'size': '5368709120'}
...]}
</screen>
<note><para>
All images returned from the above `GET` request are *public* images
</para></note>
</section>
<section xml:id="requesting-detailed-metadata-on-public-vm-images"><?dbhtml filename="requesting-vm-metadata.html" ?><info><title>Requesting Detailed Metadata on Public VM Images</title></info>
<para>
We want to see more detailed information on available virtual
machine images that the Glance server knows about.
</para>
<para>
We issue a <literal>GET</literal> request to
<literal>http://glance.example.com/images/detail</literal> to
retrieve this list of available <emphasis>public</emphasis>
images. The data is returned as a JSON-encoded mapping in the
following format:
</para>
<screen>
{'images': [
{'uri': 'http://glance.example.com/images/1',
'name': 'Ubuntu 10.04 Plain 5GB',
'disk_format': 'vhd',
'container_format': 'ovf',
'size': '5368709120',
'checksum': 'c2e5db72bd7fd153f53ede5da5a06de3',
'location': 'swift://account:key/container/image.tar.gz.0',
'created_at': '2010-02-03 09:34:01',
'updated_at': '2010-02-03 09:34:01',
'deleted_at': '',
'status': 'active',
'is_public': True,
'properties': {'distro': 'Ubuntu 10.04 LTS'}},
...]}
</screen>
<note><para>
All images returned from the above `GET` request are *public* images.
</para><para>
All timestamps returned are in UTC.</para>
<para>The `updated_at` timestamp is the timestamp when an image's metadata
was last updated, not its image data, as all image data is immutable
once stored in Glance.</para>
<para>The `properties` field is a mapping of free-form key/value pairs that
have been saved with the image metadata.</para>
<para>The `checksum` field is an MD5 checksum of the image file data.
</para></note>
</section>
<section xml:id="filtering-images-returned-via-get-images-and-get-imagesdetail"><info><title>Filtering Images Returned via <literal>GET /images</literal>
and <literal>GET /images/detail</literal></title></info>
<para>
Both the <literal>GET /images</literal> and
<literal>GET /images/detail</literal> requests take query
parameters that serve to filter the returned list of images. The
following list details these query parameters.
</para>
<itemizedlist>
<listitem>
<para>
<literal>name=NAME</literal>
</para>
<para>
Filters images having a <literal>name</literal> attribute
matching <literal>NAME</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>container_format=FORMAT</literal>
</para>
<para>
Filters images having a <literal>container_format</literal>
attribute matching <literal>FORMAT</literal>
</para>
<para>
For more information, see About Disk and Container
Formats.
</para>
</listitem>
<listitem>
<para>
<literal>disk_format=FORMAT</literal>
</para>
<para>
Filters images having a <literal>disk_format</literal>
attribute matching <literal>FORMAT</literal>
</para>
<para>
For more information, see About Disk and Container
Formats.
</para>
</listitem>
<listitem>
<para>
<literal>status=STATUS</literal>
</para>
<para>
Filters images having a <literal>status</literal> attribute
matching <literal>STATUS</literal>
</para>
<para>
For more information, see :doc:`About Image Statuses
&lt;statuses&gt;`
</para>
</listitem>
<listitem>
<para>
<literal>size_min=BYTES</literal>
</para>
<para>
Filters images having a <literal>size</literal> attribute
greater than or equal to <literal>BYTES</literal>
</para>
</listitem>
<listitem>
<para>
<literal>size_max=BYTES</literal>
</para>
<para>
Filters images having a <literal>size</literal> attribute less
than or equal to <literal>BYTES</literal>
</para>
</listitem>
</itemizedlist>
<para>
These two resources also accept sort parameters:
</para>
<itemizedlist>
<listitem>
<para>
<literal>sort_key=KEY</literal>
</para>
<para>
Results will be ordered by the specified image attribute
<literal>KEY</literal>. Accepted values include
<literal>id</literal>, <literal>name</literal>,
<literal>status</literal>, <literal>disk_format</literal>,
<literal>container_format</literal>, <literal>size</literal>,
<literal>created_at</literal> (default) and
<literal>updated_at</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>sort_dir=DIR</literal>
</para>
<para>
Results will be sorted in the direction
<literal>DIR</literal>. Accepted values are
<literal>asc</literal> for ascending or
<literal>desc</literal> (default) for descending.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="requesting-detailed-metadata-on-a-specific-image"><?dbhtml filename="requesting-metadata-specific-image.html" ?><info><title>Requesting Detailed Metadata on a Specific Image</title></info>
<para>
We want to see detailed information for a specific virtual machine
image that the Glance server knows about.
</para>
<para>
We have queried the Glance server for a list of public images and
the data returned includes the `uri` field for each available
image. This `uri` field value contains the exact location needed
to get the metadata for a specific image.
</para>
<para>
Continuing the example from above, in order to get metadata about
the first public image returned, we can issue a
<literal>HEAD</literal> request to the Glance server for the
image's URI.
</para>
<para>
We issue a <literal>HEAD</literal> request to
<literal>http://glance.example.com/images/1</literal> to retrieve
complete metadata for that image. The metadata is returned as a
set of HTTP headers that begin with the prefix
<literal>x-image-meta-</literal>. The following shows an example
of the HTTP headers returned from the above
<literal>HEAD</literal> request:
</para>
<screen>
x-image-meta-uri http://glance.example.com/images/1
x-image-meta-name Ubuntu 10.04 Plain 5GB
x-image-meta-disk-format vhd
x-image-meta-container-format ovf
x-image-meta-size 5368709120
x-image-meta-checksum c2e5db72bd7fd153f53ede5da5a06de3
x-image-meta-location swift://account:key/container/image.tar.gz.0
x-image-meta-created_at 2010-02-03 09:34:01
x-image-meta-updated_at 2010-02-03 09:34:01
x-image-meta-deleted_at
x-image-meta-status available
x-image-meta-is-public True
x-image-meta-property-distro Ubuntu 10.04 LTS
</screen>
<note><para>
All timestamps returned are in UTC.
</para>
<para>The `x-image-meta-updated_at` timestamp is the timestamp when an
image's metadata was last updated, not its image data, as all
image data is immutable once stored in Glance.</para>
<para>There may be multiple headers that begin with the prefix
`x-image-meta-property-`. These headers are free-form key/value pairs
that have been saved with the image metadata. The key is the string
after `x-image-meta-property-` and the value is the value of the header.</para>
<para>The response's `ETag` header will always be equal to the
`x-image-meta-checksum` value.</para>
</note>
</section>
<section xml:id="retrieving-a-virtual-machine-image"><?dbhtml filename="retrieving-vm-image.html" ?><info><title>Retrieving a Virtual Machine Image</title></info>
<para>
We want to retrieve the actual raw data for a specific virtual
machine image that the Glance server knows about.
</para>
<para>
We have queried the Glance server for a list of public images and
the data returned includes the `uri` field for each available
image. This `uri` field value contains the exact location needed
to get the metadata for a specific image.
</para>
<para>
Continuing the example from above, in order to retrieve the
first public image returned, we can issue a
<literal>GET</literal> request to the Glance server for the
image's URI.
</para>
<para>
We issue a <literal>GET</literal> request to
<literal>http://glance.example.com/images/1</literal> to retrieve
metadata for that image as well as the image itself encoded into
the response body.
</para>
<para>
The metadata is returned as a set of HTTP headers that begin with
the prefix <literal>x-image-meta-</literal>. The following shows
an example of the HTTP headers returned from the above
<literal>GET</literal> request:
</para>
<screen>
x-image-meta-uri http://glance.example.com/images/1
x-image-meta-name Ubuntu 10.04 Plain 5GB
x-image-meta-disk-format vhd
x-image-meta-container-format ovf
x-image-meta-size 5368709120
x-image-meta-checksum c2e5db72bd7fd153f53ede5da5a06de3
x-image-meta-location swift://account:key/container/image.tar.gz.0
x-image-meta-created_at 2010-02-03 09:34:01
x-image-meta-updated_at 2010-02-03 09:34:01
x-image-meta-deleted_at
x-image-meta-status available
x-image-meta-is-public True
x-image-meta-property-distro Ubuntu 10.04 LTS
</screen>
<note><para>
All timestamps returned are in UTC.</para>
<para> The `x-image-meta-updated_at` timestamp is the timestamp when an
image's metadata was last updated, not its image data, as all
image data is immutable once stored in Glance.</para>
<para>There may be multiple headers that begin with the prefix
`x-image-meta-property-`. These headers are free-form key/value pairs
that have been saved with the image metadata. The key is the string
after `x-image-meta-property-` and the value is the value of the header.</para>
<para>The response's `Content-Length` header shall be equal to the value of
the `x-image-meta-size` header.</para>
<para>The response's `ETag` header will always be equal to the
`x-image-meta-checksum` value.</para>
<para>The image data itself will be the body of the HTTP response returned
from the request, which will have content-type of
`application/octet-stream`.</para>
</note>
</section>
<section xml:id="adding-a-new-virtual-machine-image"><?dbhtml filename="adding-vm-image.html" ?><info><title>Adding a New Virtual Machine Image</title></info>
<para>
We have created a new virtual machine image in some way (created a
"golden image" or snapshotted/backed up an existing
image) and we wish to do two things:
</para>
<itemizedlist>
<listitem>
<para>
Store the disk image data in Glance
</para>
</listitem>
<listitem>
<para>
Store metadata about this image in Glance
</para>
</listitem>
</itemizedlist>
<para>
We can do the above two activities in a single call to the Glance
API. Assuming, like in the examples above, that a Glance API
server is running at <literal>glance.example.com</literal>, we
issue a <literal>POST</literal> request to add an image to Glance:
</para>
<screen>
POST http://glance.example.com/images/
</screen>
<para>
The metadata about the image is sent to Glance in HTTP headers.
The body of the HTTP request to the Glance API will be the
MIME-encoded disk image data.
</para>
<section xml:id="adding-image-metadata-in-http-headers"><?dbhtml filename="adding-image-metadata-http-headers.html" ?><info><title>Adding Image Metadata in HTTP Headers</title></info>
<para>
Glance will view as image metadata any HTTP header that it
receives in a <literal>POST</literal> request where the header
key is prefixed with the strings
<literal>x-image-meta-</literal> and
<literal>x-image-meta-property-</literal>.
</para>
<para>
The list of metadata headers that Glance accepts are listed
below.
</para>
<itemizedlist>
<listitem>
<para>
<literal>x-image-meta-name</literal>
</para>
<para>
This header is required. Its value should be the name of the
image.
</para>
<para>
Note that the name of an image <emphasis>is not unique to a
Glance node</emphasis>. It would be an unrealistic
expectation of users to know all the unique names of all
other users' images.
</para>
</listitem>
<listitem>
<para>
<literal>x-image-meta-id</literal>
</para>
<para>
This header is optional.
</para>
<para>
When present, Glance will use the supplied identifier for
the image. If the identifier already exists in that Glance
node, then a <emphasis role="strong">409 Conflict</emphasis>
will be returned by Glance.
</para>
<para>
When this header is <emphasis>not</emphasis> present, Glance
will generate an identifier for the image and return this
identifier in the response (see below)
</para>
</listitem>
<listitem>
<para>
<literal>x-image-meta-store</literal>
</para>
<para>
This header is optional. Valid values are one of
<literal>file</literal>, <literal>s3</literal>, or
<literal>swift</literal>
</para>
<para>
When present, Glance will attempt to store the disk image
data in the backing store indicated by the value of the
header. If the Glance node does not support the backing
store, Glance will return a <emphasis role="strong">400 Bad
Request</emphasis>.
</para>
<para>
When not present, Glance will store the disk image data in
the backing store that is marked default. See the
configuration option <literal>default_store</literal> for
more information.
</para>
</listitem>
<listitem>
<para>
<literal>x-image-meta-disk-format</literal>
</para>
<para>
This header is optional. Valid values are one of
<literal>aki</literal>, <literal>ari</literal>,
<literal>ami</literal>, <literal>raw</literal>,
<literal>iso</literal>, <literal>vhd</literal>,
<literal>vdi</literal>, <literal>qcow2</literal>, or
<literal>vmdk</literal>.
</para>
<para>
For more information, see the About Disk and Container
Formats section.
</para>
</listitem>
<listitem>
<para>
<literal>x-image-meta-container-format</literal>
</para>
<para>
This header is optional. Valid values are one of
<literal>aki</literal>, <literal>ari</literal>,
<literal>ami</literal>, <literal>bare</literal>, or
<literal>ovf</literal>.
</para>
<para>
For more information, see the About Disk and Container
Formats section.
</para>
</listitem>
<listitem>
<para>
<literal>x-image-meta-size</literal>
</para>
<para>
This header is optional.
</para>
<para>
When present, Glance assumes that the expected size of the
request body will be the value of this header. If the length
in bytes of the request body <emphasis>does not
match</emphasis> the value of this header, Glance will
return a <emphasis role="strong">400 Bad Request</emphasis>.
</para>
<para>
When not present, Glance will calculate the image's size
based on the size of the request body.
</para>
</listitem>
<listitem>
<para>
<literal>x-image-meta-checksum</literal>
</para>
<para>
This header is optional. When present it shall be the
expected <emphasis role="strong">MD5</emphasis> checksum of
the image file data.
</para>
<para>
When present, Glance will verify the checksum generated from
the backend store when storing your image against this value
and return a <emphasis role="strong">400 Bad
Request</emphasis> if the values do not match.
</para>
</listitem>
<listitem>
<para>
<literal>x-image-meta-is-public</literal>
</para>
<para>
This header is optional.
</para>
<para>
When Glance finds the string "true"
(case-insensitive), the image is marked as a public image,
meaning that any user may view its metadata and may read the
disk image from Glance.
</para>
<para>
When not present, the image is assumed to be <emphasis>not
public</emphasis> and specific to a user.
</para>
</listitem>
<listitem>
<para>
<literal>x-image-meta-property-*</literal>
</para>
<para>
When Glance receives any HTTP header whose key begins with
the string prefix <literal>x-image-meta-property-</literal>,
Glance adds the key and value to a set of custom, free-form
image properties stored with the image. The key is the
lower-cased string following the prefix
<literal>x-image-meta-property-</literal> with dashes and
punctuation replaced with underscores.
</para>
<para>
For example, if the following HTTP header were sent:
</para>
<screen>
x-image-meta-property-distro Ubuntu 10.10
</screen>
<para>
Then a key/value pair of "distro"/"Ubuntu
10.10" will be stored with the image in Glance.
</para>
<para>
There is no limit on the number of free-form key/value
attributes that can be attached to the image. However, keep
in mind that the 8K limit on the size of all HTTP headers
sent in a request will effectively limit the number of image
properties.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="updating-an-image"><?dbhtml filename="updating-vm-image.html" ?><info><title>Updating an Image</title></info>
<para>
Glance will view as image metadata any HTTP header that it
receives in a <literal>PUT</literal> request where the header
key is prefixed with the strings
<literal>x-image-meta-</literal> and
<literal>x-image-meta-property-</literal>.
</para>
<para>
If an image was previously reserved, and thus is in the
<literal>queued</literal> state, then image data can be added by
including it as the request body. If the image already has data
associated with it (e.g. not in the <literal>queued</literal>
state), then including a request body will result in a
<emphasis role="strong">409 Conflict</emphasis> exception.
</para>
<para>
On success, the <literal>PUT</literal> request will return the
image metadata encoded as HTTP headers.
</para>
<para>
For more information about image statuses, see the Image
Statuses section.
</para>
</section>
</section>
</section>
</chapter>

View File

@ -0,0 +1,222 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
<?dbhtml filename="ch_openstack-interfaces.html" ?>
<title>OpenStack Interfaces</title>
<para>OpenStack has components that provide a view of the OpenStack installation such as a Django-built website that serves as a dashboard and the ability to connect to running instances using a VNC connection via a VNC Proxy.</para>
<section><?dbhtml filename="about-the-dashboard.html" ?><title>About the Dashboard</title><para>You can use a dashboard interface with an OpenStack Compute installation with a web-based
console provided by the Openstack-Dashboard project. It provides a reference implementation of a Django site that provides web-based interactions with the OpenStack Compute cloud controller. For more information about the Openstack-Dashboard project,
please visit: <link xlink:href="http://launchpad.net/openstack-dashboard"
>http://launchpad.net/openstack-dashboard</link>. These instructions are for a test deployment of an OpenStack Dashboard. They configure your dashboard to use the default Django server. To create a more robust, production-ready installation, you would configure this with an Apache web server. </para>
<section>
<?dbhtml filename="dashboard-system-requirements.html" ?>
<title>System Requirements for the Dashboard</title>
<para>You should have a running OpenStack Compute installation with the Keystone module enabled. </para>
<para>The dashboard needs to be installed on the node that can contact the Keystone service.</para>
<para>You should know the URL of your Keystone endpoint. </para>
<para>You must know the credentials of a valid Keystone tenant.</para>
<para>Python 2.6 is required, and these instructions have been tested with Ubuntu 10.10.</para>
</section>
<section><?dbhtml filename="installing-openstack-dashboard.html" ?>
<title>Installing the OpenStack Dashboard</title>
<para>Here are the overall steps for building the dashboard.</para>
<orderedlist>
<listitem><para>Get the source for the openstack-dashboard project.</para></listitem>
<listitem><para>Build and configure the openstack-dashboard.</para></listitem>
<listitem>
<para>Run the server that starts the dashboard.</para>
</listitem>
</orderedlist>
<para>Before you begin, you must have git installed. It's straightforward to install
it with <code>sudo apt-get install git-core</code>. </para>
<para>Create a source directory to house the project:</para>
<literallayout class="monospaced">mkdir src
cd src </literallayout>
<para>Next, get the openstack-dashboard project, which provides all the look and feel for the OpenStack Dashboard.</para>
<literallayout class="monospaced">
git clone https://github.com/4P/openstack-dashboard
</literallayout>
<para>You should now have a directory called openstack-dashboard, which contains the OpenStack Dashboard application.</para>
<section><?dbhtml filename="build-and-configure-openstack-dashboard.html" ?>
<title>Build and Configure Openstack-Dashboard</title>
<para>Now you can configure the dashboard application. The first step in configuring the application
is to create your local_settings.py file. An example is provided that you can copy
to local_settings.py and then modify for your environment.
</para>
<para>
<literallayout class="monospaced">
cd openstack-dashboard/openstack-dashboard/local
cp local_settings.py.example local_settings.py
vi local_settings.py
</literallayout>
</para>
<para>In the new copy of the local_settings.py file, change these important options:</para>
<itemizedlist>
<listitem>
<para>OPENSTACK_ADMIN_TOKEN : Token for Keystone endpoint.</para>
</listitem>
<listitem>
<para>OPENSTACK_KEYSTONE_URL : URL for the Keystone endpoint.</para>
</listitem>
<listitem>
<para>SWIFT_ENABLED : Flag to enable/disable swift support from the dashboard. </para>
</listitem>
</itemizedlist>
<para>Now install the openstack-dashboard environment. This installs all the dependencies for
openstack-dashboard. If you don't already have easy_install installed,
use sudo apt-get install python-setuptools.
</para>
<para>
<literallayout class="monospaced">
sudo apt-get install -y python-setuptools
sudo easy_install virtualenv
python tools/install_venv.py
</literallayout>
</para>
<para>This step takes some time since it downloads a number of dependencies.</para>
</section>
<section>
<?dbhtml filename="run-the-server.html" ?>
<title>Run the Server</title>
<para>Now run the server on a high port value so that you can validate the installation.</para><para><literallayout class="monospaced">tools/with_venv.sh dashboard/manage.py runserver 0.0.0.0:8000</literallayout></para><para>Make sure that your firewall isn't blocking TCP/8000 and just point your browser at this server on port 8000. If you are running the server on the same machine as your browser, this would be "http://localhost:8000". </para>
<mediaobject>
<imageobject role="fo">
<imagedata fileref="figures/dashboard-overview.png"
format="PNG" scale="60"/>
</imageobject>
<imageobject role="html">
<imagedata fileref="../figures/dashboard-overview.png"
format="PNG" />
</imageobject>
</mediaobject></section></section></section>
<section xml:id="getting-started-with-the-vnc-proxy"><info><title>Getting Started with the VNC Proxy</title></info>
<para>
The VNC Proxy is an OpenStack component that allows users of Nova to
access their instances through a websocket enabled browser (like
Google Chrome 4.0). See <link xlink:href="http://caniuse.com/#search=websocket">http://caniuse.com/#search=websocket</link> for a reference list of supported web browsers.</para>
<para>
A VNC Connection works like so:
</para>
<itemizedlist>
<listitem>
<para>
User connects over an API and gets a URL like
http://ip:port/?token=xyz
</para>
</listitem>
<listitem>
<para>
User pastes URL in browser
</para>
</listitem>
<listitem>
<para>
Browser connects to VNC Proxy though a websocket enabled client
like noVNC
</para>
</listitem>
<listitem>
<para>
VNC Proxy authorizes the user's token, maps the token to a host and
port of an instance's VNC server
</para>
</listitem>
<listitem>
<para>
VNC Proxy initiates connection to VNC server, and continues
proxying until the session ends
</para>
</listitem>
</itemizedlist>
<section xml:id="configuring-the-vnc-proxy"><info><title>Configuring the VNC Proxy</title></info>
<para>The nova-vncproxy requires a websocket enabled html client to work properly. At this time,
the only tested client is a slightly modified fork of noVNC, which you can find at <link
xmlns:xlink="http://www.w3.org/1999/xlink"
xlink:href="http://github.com/openstack/noVNC.git"
>http://github.com/openstack/noVNC.git</link>
</para>
<para>The noVNC tool must be in the location specified by --vncproxy_wwwroot, which defaults to
/var/lib/nova/noVNC. nova-vncproxy will fail to launch until this code is properly installed. </para>
<para>
By default, nova-vncproxy binds 0.0.0.0:6080. This can be
configured with:
</para>
<itemizedlist>
<listitem>
<para>
--vncproxy_port=[port]
</para>
</listitem>
<listitem>
<para>
--vncproxy_host=[host]
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="enabling-vnc-consoles-in-nova"><info><title>Enabling VNC Consoles in Nova</title></info>
<para>
At the moment, VNC support is supported only when using libvirt.
To enable VNC Console, configure the following flags in the nova.conf file:
</para>
<itemizedlist>
<listitem>
<para>
--vnc_console_proxy_url=http://[proxy_host]:[proxy_port] -
proxy_port defaults to 6080. This URL must point to
nova-vncproxy
</para>
</listitem>
<listitem>
<para>
--vnc_enabled=[True|False] - defaults to True. If this flag is
not set your instances will launch without VNC support.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="getting-an-instances-vnc-console"><info><title>Getting an Instance's VNC Console</title></info>
<para>
You can access an instance's VNC Console URL in the following
methods:
</para>
<itemizedlist>
<listitem>
<para>
Using the direct api: eg: 'stack --user=admin --project=admin
compute get_vnc_console instance_id=1'
</para>
</listitem>
<listitem>
<para>
Support for Dashboard, and the OpenStack API will be
forthcoming
</para>
</listitem>
</itemizedlist><para>
At the moment, VNC Consoles are only supported through the web
browser, but more general VNC support is in the works.
</para>
</section>
</section>
</chapter>

View File

@ -0,0 +1,56 @@
<?xml version="1.0" encoding="UTF-8"?>
<book xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns:html="http://www.w3.org/1999/xhtml"
version="5.0"
>
<title>OpenStack Compute Administration Manual</title>
<info>
<author>
<personname>
<firstname/>
<surname/>
</personname>
<affiliation>
<orgname>OpenStack</orgname>
</affiliation>
</author>
<copyright>
<year>2010</year>
<year>2011</year>
<holder>OpenStack LLC</holder>
</copyright>
<releaseinfo>trunk</releaseinfo>
<productname>OpenStack Compute</productname>
<pubdate>2011-08-19</pubdate>
<legalnotice role="apache2">
<annotation>
<remark>Copyright details are filled in by the template.</remark>
</annotation>
</legalnotice>
<legalnotice role="cc-by-sa">
<annotation>
<remark>Remaining licensing details are filled in by the template.</remark>
</annotation>
</legalnotice>
<abstract>
<para>OpenStack™ Compute offers open source software for cloud administration and
management for any organization. This manual provides guidance for installing,
managing, and understanding the software that runs OpenStack Compute. </para>
</abstract>
</info>
<!-- Chapters are referred from the book file through these include statements. You can add additional chapters using these types of statements. -->
<xi:include href="../common/getstart.xml"/>
<xi:include href="aboutcompute.xml"/>
<xi:include href="computeinstall.xml"/>
<xi:include href="computeconfigure.xml"/>
<xi:include href="computehypervisors.xml"/>
<xi:include href="computeautomation.xml"/>
<xi:include href="computenetworking.xml"/>
<xi:include href="computeadmin.xml"/>
<xi:include href="interfaces.xml"/>
<xi:include href="computetutorials.xml"/>
<xi:include href="../common/support.xml"/>
</book>

View File

@ -0,0 +1,159 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="quick-guide-to-getting-started-with-keystone"><info><title>Quick Guide to Getting Started with Keystone</title></info>
<para>
First, you will need to install keystone, if you haven't done so
already. Refer to Installing for more information.</para>
<section xml:id="dependencies"><info><title>Dependencies</title></info>
<para>Once Keystone is installed you need to initialize the database. You can do so with the keystone-manage command line utility. The keystone-manage utility helps with managing and configuring a Keystone installation. You configure the keystone-manage utility itself with a SQL Alchemy connection configuration via a parameter passed to the utility:</para>
<para>--sql_connection=CONN_STRING</para>
<para>Where the CONN_STRING is a proper SQLAlchemy connection string as described in
http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html?highlight=engine#sqlalchemy.create_engine.</para>
<para>One important use of keystone-manage is to setup the database. To do so, run:</para>
<screen>
keystone-manage db_sync</screen>
</section>
<section xml:id="creating-your-first-global-admin"><info><title>Creating your first global admin and tenant admin</title></info>
<para>
Change directory to your Keystone install path.
</para>
<orderedlist numeration="arabic">
<listitem>
<para>
Run the following to create the first tenant:
</para>
<screen>
$&gt; bin/keystone-manage tenant add "MyTenant"
</screen>
</listitem>
<listitem>
<para>
Run the following to create the first tenant admin:
</para>
<screen>
$&gt; bin/keystone-manage user add MyAdmin P@ssw0rd MyTenant
</screen>
</listitem>
</orderedlist>
<note><para>
Some reserved roles are defined (and can be modified) through the keystone.conf in the /etc folder.</para>
</note>
<orderedlist numeration="arabic">
<listitem override="3">
<para>
Associate your tenant admin with the Admin role:
</para>
<screen>
$&gt; bin/keystone-manage role grant Admin MyAdmin
</screen>
</listitem>
</orderedlist>
</section>
<section xml:id="curl-examples"><info><title>Curl examples</title></info>
<para>All examples assume default port usage (5001) and use the example admin account created above.</para>
<para><emphasis>Admin Initial GET</emphasis></para>
<para>Retrieves version, full API url, pdf doc link, and wadl link:</para>
<screen>$> curl http://0.0.0.0:5001</screen>
<para>or:</para>
<screen>$> curl http://0.0.0.0:5001/v2.0/</screen>
<para><emphasis>Retrieve token:</emphasis></para>
<para>To retrieve the token and expiration date for a user:</para>
<screen>$> curl -d '{"passwordCredentials":{"username": "MyAdmin", "password": "P@ssw0rd"}}' -H "Content-type: application/json" http://localhost:5001/v2.0/tokens</screen>
<para>This will return something like:</para>
<screen>$> {"auth": {"token": {"expires": "2011-08-10T17:45:22.838440", "id": "0eed0ced-4667-4221-a0b2-24c91f242b0b"}}}</screen>
<note><para>Save the “id” value as you'll be using it in the calls below.</para></note>
<para><emphasis>To retrieve a list of tenants:</emphasis></para>
<para>Run:</para>
<screen>$> curl -H "X-Auth-Token:999888777666" http://localhost:5001/v2.0/tenants</screen>
<para>This will return something like:</para>
<screen>$> {"tenants": {"values": [{"enabled": 1, "id": "MyTenant", "description": null}], "links": []}}</screen>
<para><emphasis>Retrieve a list of users:</emphasis></para>
<para> Run:</para>
<screen>$> curl -H "X-Auth-Token:999888777666" http://localhost:5001/v2.0/users</screen>
<para>This will return something like:</para>
<screen>$> {"users": {"values": [{"email": null, "enabled": true, "id": "MyAdmin", "tenantId": "MyTenant"}], "links": []}}</screen>
<para><emphasis>Retrieve information about the token:</emphasis></para>
<para>Run:</para>
<screen>$> curl -H "X-Auth-Token:999888777666" http://localhost:5001/v2.0/tokens/0eed0ced-4667-4221-a0b2-24c91f242b0b</screen>
<para> This will return something like:</para>
<screen>$> {"auth": {"token": {"expires": "2011-08-11T04:26:58.145171", "id": "0eed0ced-4667-4221-a0b2-24c91f242b0b"}, "user": {"username": "MyAdmin", "roleRefs": [{"roleId": "Admin", "id": 1}], "tenantId": "MyTenant"}}}</screen>
<para><emphasis> Revoking a token:</emphasis></para>
<para>Run:</para>
<screen>$> curl -X DELETE -H "X-Auth-Token:999888777666" http://localhost:5001/tokens/0eed0ced-4667-4221-a0b2-24c91f242b0b</screen>
<para><emphasis>Creating a tenant:</emphasis></para>
<para>Run:</para>
<screen> $> curl -H "X-Auth-Token:999888777666" -H "Content-type: application/json" -d '{"tenant":{"id":"MyTenant2", "description":"My 2nd Tenant", "enabled":true}}' http://localhost:5001/tenants</screen>
<para> This will return something like:</para>
<screen>$> {"tenant": {"enabled": true, "id": "MyTenant2", "description": "My 2nd Tenant"}}</screen>
<para><emphasis>Verifying the tenant:</emphasis></para>
<para>Run:</para>
<screen>$> curl -H "X-Auth-Token:999888777666" http://localhost:5001/v2.0/tenants/MyTenant2</screen>
<para>This will return something like:</para>
<screen>$> {"tenant": {"enabled": 1, "id": "MyTenant2", "description": "My 2nd Tenant"}}</screen>
<para><emphasis>Updating the tenant:</emphasis></para>
<para>Run:</para>
<screen>$> curl -X PUT -H "X-Auth-Token:999888777666" -H "Content-type: application/json" -d '{"tenant":{"description":"My NEW 2nd Tenant"}}' http://localhost:5001/v2.0/tenants/MyTenant2
</screen>
<para>This will return something like:
</para>
<screen>$> {"tenant": {"enabled": true, "id": "MyTenant2", "description": "My NEW 2nd Tenant"}}
</screen>
<para><emphasis>Deleting the tenant:</emphasis></para>
<para>Run:</para>
<screen>$> curl -X DELETE -H "X-Auth-Token:999888777666" http://localhost:5001/v2.0/tenants/MyTenant2</screen>
</section>
</chapter>

View File

@ -0,0 +1,99 @@
<?xml version="1.0" encoding="UTF-8"?>
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
xml:id="Identity-Service-Concepts-e1362">
<title>Identity Service Concepts</title>
<para>
The Keystone Identity Service has several key concepts which are
important to understand:
</para>
<variablelist>
<varlistentry>
<term>User</term>
<listitem><para>A digital representation of a person, system, or service who uses OpenStack cloud services.
Keystone authentication services will validate that incoming requests are being made by the user
who claims to be making the call. Users have a login and may be assigned tokens to access
resources. Users may be directly assigned to a particular tenant and
behave as if they are contained in that tenant.</para></listitem>
</varlistentry>
<varlistentry>
<term>Credentials</term>
<listitem><para>
Data that belongs to, is owned by, and generally only known by a user that the user can present
to prove they are who they are (since nobody else should know that data).
</para><para>Examples are:
<itemizedlist>
<listitem><para>a matching username and password</para></listitem>
<listitem><para>a matching username and API key</para></listitem>
<listitem><para>yourself and a driver's license with a picture of you</para></listitem>
<listitem><para>a token that was issued to you that nobody else knows of</para></listitem>
</itemizedlist>
</para></listitem>
</varlistentry>
<varlistentry>
<term>Authentication</term>
<listitem><para>
In the context of Keystone, authentication is the act of confirming the identity of a
user or the truth of a claim.
Keystone will confirm that incoming requests are being made by the user
who claims to be making the call by validating a set of claims that the user is making.
These claims are initially in the form of a set of credentials (username &amp; password,
or username and API key). After initial confirmation, Keystone will issue the user a token
which the user can then provide to demonstrate that their identity has been authenticated
when making subsequent requests.
</para></listitem>
</varlistentry>
<varlistentry>
<term>Token</term>
<listitem><para>
A token is an arbitrary bit of text that is used to access
resources. Each token has a scope which describes which
resources are accessible with it. A token may be
revoked at anytime and is valid for a finite duration.
</para>
<para>
While Keystone supports token-based authentication in this release,
the intention is for it to support additional protocols in the
future. The intent is for it to be an integration service foremost, and not
aspire to be a full-fledged identity store and management solution.
</para></listitem>
</varlistentry>
<varlistentry>
<term>Tenant</term>
<listitem><para>
A container used to group or isolate resources and/or identity
objects. Depending on the service operator, a tenant may map to a customer,
account, organization, or project.
</para></listitem>
</varlistentry>
<varlistentry>
<term>Service</term>
<listitem><para>
An OpenStack service, such as Compute (Nova), Object Storage (Swift), or Image Service (Glance). A service provides
one or more endpoints through which users can access resources and perform
(presumably useful) operations.
</para></listitem>
</varlistentry>
<varlistentry>
<term>Endpoint</term>
<listitem> <para>
A network-accessible address, usually described by URL, where a service may be accessed. If using an extension for templates, you can create an endpoint template, which represents the templates of all the consumable services that are available across the regions.
</para></listitem>
</varlistentry>
<varlistentry>
<term>Role</term>
<listitem><para> A personality that a user assumes when performing a specific set of operations.
A role includes a set of rights and privileges. A user assuming that role inherits
those rights and privileges.
</para><para>
In Keystone, a token that is issued to a user includes the list of roles that user
can assume. Services that are being called by that user determine how they interpret the set
of roles a user has and which operations or resources each role grants access to.
</para></listitem>
</varlistentry>
</variablelist>
</chapter>

View File

@ -0,0 +1,138 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="installing-keystone"><info><title>Installing Keystone</title></info>
<para>You can install the Identity service from packages or from source.</para>
<section xml:id="installing-from-packages"><info><title>Installing from packages</title></info>
<para>
To install the latest version of Keystone from the Github
repositories, follow these instructions.
</para>
<section xml:id="debianubuntu"><info><title>Debian/Ubuntu</title></info>
<orderedlist numeration="arabic">
<listitem>
<para>
Add the Keystone PPA to your sources.list:
</para>
<para>
::
</para>
<para>
$&gt; sudo add-apt-repository ppa:keystone-core/trunk $&gt;
sudo apt-get update
</para>
</listitem>
<listitem>
<para>
Install Keystone:
</para>
<para>
::
</para>
<para>
$&gt; sudo apt-get install keystone
</para>
</listitem>
</orderedlist>
</section>
</section>
<section xml:id="installing-from-source-tarballs"><info><title>Installing from source tarballs</title></info>
<para>
To install the latest version of Keystone from the Launchpad
Bazaar repositories, follow these instructions.
</para>
<orderedlist numeration="arabic">
<listitem>
<para>
Grab the source tarball from
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="https://github.com/openstack/keystone">Github</link>
</para>
</listitem>
<listitem>
<para>
Untar the source tarball:
</para>
<para>
::
</para>
<para>
$&gt; tar -xzf &lt;FILE&gt;
</para>
</listitem>
<listitem>
<para>
Change into the package directory and build/install:
</para>
<para>
::
</para>
<para>
$&gt; cd keystone-&lt;RELEASE&gt; $&gt; sudo python setup.py
install
</para>
</listitem>
</orderedlist>
</section>
<section xml:id="installing-from-a-github-branch"><info><title>Installing from a Github Branch</title></info>
<para>
To install the latest version of Keystone from the Github
repositories, see the following instructions.
</para>
<section xml:id="debianubuntu-1"><info><title>Debian/Ubuntu</title></info>
<orderedlist numeration="arabic">
<listitem>
<para>
Install Git and build dependencies:
</para>
<para>
::
</para>
<para>
$&gt; sudo apt-get install git python-eventlet python-routes
python-greenlet swift $&gt; sudo apt-get install
python-argparse python-sqlalchemy python-wsgiref
python-pastedeploy
</para>
</listitem>
</orderedlist>
<note>
<para>
If you want to build the Keystone documentation locally, you will also want
to install the python-sphinx package.
</para>
</note>
<orderedlist numeration="arabic">
<listitem>
<para>
Branch Keystone's trunk branch:: (see
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://wiki.openstack.org/GerritWorkflow">http://wiki.openstack.org/GerritWorkflow</link>
to get the project initially setup):
</para>
<para>
::
</para>
<para>
$&gt; git checkout master $&gt; git pull origin master
</para>
</listitem>
<listitem>
<para>
Install Keystone:
</para>
<para>
::
</para>
<para>
$&gt; sudo python setup.py install
</para>
</listitem>
</orderedlist>
</section>
</section>
</chapter>

View File

@ -0,0 +1,45 @@
<?xml version="1.0" encoding="UTF-8"?>
<book xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns:html="http://www.w3.org/1999/xhtml"
version="5.0"
>
<title>OpenStack Identity Starter Guide</title>
<info>
<author>
<personname>
<firstname/>
<surname/>
</personname>
<affiliation>
<orgname>OpenStack</orgname>
</affiliation>
</author>
<copyright>
<year>2010</year>
<year>2011</year>
<holder>OpenStack LLC</holder>
</copyright>
<releaseinfo>trunk</releaseinfo>
<productname>OpenStack Identity Service</productname>
<pubdate>2011-09-19</pubdate>
<legalnotice role="apache2">
<annotation>
<remark>Copyright details are filled in by the template.</remark>
</annotation>
</legalnotice>
<abstract>
<para>OpenStack™ Identity Service offers open source software for identity management
for cloud users and administrators. This manual provides guidance for installing,
managing, and understanding the software that runs OpenStack Identity Service.
</para>
</abstract>
</info>
<!-- Chapters are referred from the book file through these include statements. You can add additional chapters using these types of statements. -->
<xi:include href="gettingstartedkeystone.xml"/>
<xi:include href="installingkeystone.xml"/>
<xi:include href="identity-concepts.xml"/>
</book>

View File

@ -0,0 +1,21 @@
<?xml version="1.0"?>
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0" xml:id="glance-architecture"><info><title>Glance Architecture</title></info><para>Glance is designed to be as adaptable as possible for various back-end storage and registry database solutions. There is a main Glance API server (the glance-api program) that serves as the communications hub between various client programs, the registry of image metadata, and the storage systems that actually contain the virtual machine image data.</para>
<para>From a bird's-eye perspective, one can visualize the Glance architectural model like so:</para>
<para><inlinemediaobject>
<imageobject>
<imagedata scale="80" fileref="../figures/glancearch.png"/></imageobject>
</inlinemediaobject></para>
<section><title>What is a Registry Server? </title>
<para>A registry server is any service that publishes image metadata that conforms to the
Glance Registry REST-ful API. Glance comes with a reference implementation of a registry
server called glance-registry, but this is only a reference implementation that uses a
SQL database for its metadata storage.</para></section>
<section><title>What is a Store?</title><para>A store is a Python class that inherits from glance.store.Backend and conforms to that class API for reading, writing, and deleting virtual machine image data.
Glance currently ships with stores for S3, Swift, a simple filesystem store, and a read-only HTTP(S) store.
Implementors are encouraged to create stores for other backends, including other distributed storage systems like Sheepdog or Ceph.</para></section>
</chapter>

View File

@ -0,0 +1,41 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="glance-authentication-with-keystone"><info><title>Glance Authentication With Keystone</title></info>
<para>
Glance may optionally be integrated with Keystone. Setting this up
is relatively straightforward: the Keystone distribution includes
the requisite middleware and examples of appropriately modified
<literal>glance-api.conf</literal> and
<literal>glance-registry.conf</literal> configuration files in the
<literal>examples/paste</literal> directory. Once you have installed
Keystone and edited your configuration files, newly created images
will have their `owner` attribute set to the tenant of the
authenticated users, and the `is_public` attribute will cause access
to those images for which it is `false` to be restricted to only the
owner.
</para>
<note>
<para>The exception is those images for which `owner` is set to `null`,
which may only be done by those users having the ``Admin`` role.
These images may still be accessed by the public, but will not
appear in the list of public images. This allows the Glance
Registry owner to publish images for beta testing without allowing
those images to show up in lists, potentially confusing users.</para>
</note>
<section xml:id="sharing-images-with-others"><info><title>Sharing Images With Others</title></info>
<para>
It is possible to allow a private image to be shared with one or
more alternate tenants. This is done through image
<emphasis>memberships</emphasis>, which are available via the
`members` resource of images. (For more details, see
:ref:`glanceapi`.) Essentially, a membership is an association
between an image and a tenant which has permission to access that
image. These membership associations may also have a `can_share`
attribute, which, if set to `true`, delegates the authority to
share an image to the named tenant.
</para>
</section>
</chapter>

View File

@ -0,0 +1,670 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="using-glance-programmatically-with-glances-client"><info><title>Using Glance Programmatically with Glance's Client</title></info>
<para>
While it is perfectly acceptable to issue HTTP requests directly to
Glance via its RESTful API, sometimes it is better to be able to
access and modify image resources via a client class that removes
some of the complexity and tedium of dealing with raw HTTP requests.
</para>
<para>
Glance includes a client class for just this purpose. You can
retrieve metadata about an image, change metadata about an image,
remove images, and of course retrieve an image itself via this
client class.
</para>
<para>
Below are some examples of using Glance's Client class. We assume
that there is a Glance server running at the address
`glance.example.com` on port `9292`.
</para>
<section xml:id="requesting-a-list-of-public-vm-images"><info><title>Requesting a List of Public VM Images</title></info>
<para>
We want to see a list of available virtual machine images that the
Glance server knows about.
</para>
<para>
Using Glance's Client, we can do this using the following code
</para>
<para>
..code-block:: python
</para>
<para>
from glance.client import Client
</para>
<para>
c = Client("glance.example.com", 9292)
</para>
<para>
print c.get_images()
</para>
</section>
<section xml:id="requesting-detailed-metadata-on-public-vm-images"><info><title>Requesting Detailed Metadata on Public VM Images</title></info>
<para>
We want to see more detailed information on available virtual
machine images that the Glance server knows about.
</para>
<para>
Using Glance's Client, we can do this using the following code
</para>
<para>
..code-block:: python
</para>
<para>
from glance.client import Client
</para>
<para>
c = Client("glance.example.com", 9292)
</para>
<para>
print c.get_images_detailed()
</para>
</section>
<section xml:id="filtering-images-returned-via-get_images-and-get_images_detailed"><info><title>Filtering Images Returned via <literal>get_images()</literal>
and <literal>get_images_detailed()</literal></title></info>
<para>
Both the <literal>get_images()</literal> and
<literal>get_images_detailed()</literal> methods take query
parameters that serve to filter the returned list of images.
</para>
<para>
When calling, simply pass an optional dictionary to the method
containing the filters by which you wish to limit results, with
the filter keys being one or more of the below:
</para>
<itemizedlist>
<listitem>
<para>
<literal>name: NAME</literal>
</para>
<para>
Filters images having a <literal>name</literal> attribute
matching <literal>NAME</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>container_format: FORMAT</literal>
</para>
<para>
Filters images having a <literal>container_format</literal>
attribute matching <literal>FORMAT</literal>
</para>
</listitem>
<listitem>
<para>
<literal>disk_format: FORMAT</literal>
</para>
<para>
Filters images having a <literal>disk_format</literal>
attribute matching <literal>FORMAT</literal>
</para>
</listitem>
<listitem>
<para>
<literal>status: STATUS</literal>
</para>
<para>
Filters images having a <literal>status</literal> attribute
matching <literal>STATUS</literal>
</para>
</listitem>
<listitem>
<para>
<literal>size_min: BYTES</literal>
</para>
<para>
Filters images having a <literal>size</literal> attribute
greater than or equal to <literal>BYTES</literal>
</para>
</listitem>
<listitem>
<para>
<literal>size_max: BYTES</literal>
</para>
<para>
Filters images having a <literal>size</literal> attribute less
than or equal to <literal>BYTES</literal>
</para>
</listitem>
</itemizedlist>
<para>
Here's a quick example that will return all images less than or
equal to 5G in size and in the `saving` status.
</para>
<para>
from glance.client import Client
</para>
<para>
c = Client("glance.example.com", 9292)
</para>
<para>
filters = {'status': 'saving', 'size_max': (5 * 1024 * 1024 *
1024)} print c.get_images_detailed(filters=filters)
</para>
</section>
<section xml:id="sorting-images-returned-via-get_images-and-get_images_detailed"><info><title>Sorting Images Returned via <literal>get_images()</literal>
and <literal>get_images_detailed()</literal></title></info>
<para>
Two parameters are available to sort the list of images returned
by these methods.
</para>
<itemizedlist>
<listitem>
<para>
<literal>sort_key: KEY</literal>
</para>
<para>
Images can be ordered by the image attribute
<literal>KEY</literal>. Acceptable values:
<literal>id</literal>, <literal>name</literal>,
<literal>status</literal>,
<literal>container_format</literal>,
<literal>disk_format</literal>, <literal>created_at</literal>
(default) and <literal>updated_at</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>sort_dir: DIR</literal>
</para>
<para>
The direction of the sort may be defined by
<literal>DIR</literal>. Accepted values:
<literal>asc</literal> for ascending or
<literal>desc</literal> (default) for descending.
</para>
</listitem>
</itemizedlist>
<para>
The following example will return a list of images sorted
alphabetically by name in ascending order.
</para>
<para>
..code-block:: python
</para>
<para>
from glance.client import Client
</para>
<para>
c = Client("glance.example.com", 9292)
</para>
<para>
print c.get_images(sort_key='name', sort_dir='asc')
</para>
</section>
<section xml:id="requesting-detailed-metadata-on-a-specific-image"><info><title>Requesting Detailed Metadata on a Specific Image</title></info>
<para>
We want to see detailed information for a specific virtual machine
image that the Glance server knows about.
</para>
<para>
We have queried the Glance server for a list of public images and
the data returned includes the `uri` field for each available
image. This `uri` field value contains the exact location needed
to get the metadata for a specific image.
</para>
<para>
Continuing the example from above, in order to get metadata about
the first public image returned, we can use the following code
</para>
<para>
..code-block:: python
</para>
<para>
from glance.client import Client
</para>
<para>
c = Client("glance.example.com", 9292)
</para>
<para>
print
c.get_image_meta("<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://glance.example.com/images/1">http://glance.example.com/images/1</link>")
</para>
</section>
<section xml:id="retrieving-a-virtual-machine-image"><info><title>Retrieving a Virtual Machine Image</title></info>
<para>
We want to retrieve the actual raw data for a specific virtual
machine image that the Glance server knows about.
</para>
<para>
We have queried the Glance server for a list of public images and
the data returned includes the `uri` field for each available
image. This `uri` field value contains the exact location needed
to get the metadata for a specific image.
</para>
<para>
Continuing the example from above, in order to get both the
metadata about the first public image returned and its image data,
we can use the following code
</para>
<para>
..code-block:: python
</para>
<para>
from glance.client import Client
</para>
<para>
c = Client("glance.example.com", 9292)
</para>
<para>
meta, image_file =
c.get_image("<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://glance.example.com/images/1">http://glance.example.com/images/1</link>")
</para>
<para>
print meta
</para>
<para>
f = open('some_local_file', 'wb') for chunk in
image_file: f.write(chunk) f.close()
</para>
<note>
<para>The return from Client.get_image() is a tuple of (`metadata`, `file`)
where `metadata` is a mapping of metadata about the image and `file` is a
generator that yields chunks of image data.</para>
</note>
</section>
<section xml:id="adding-a-new-virtual-machine-image"><info><title>Adding a New Virtual Machine Image</title></info>
<para>
We have created a new virtual machine image in some way (created a
"golden image" or snapshotted/backed up an existing
image) and we wish to do two things:
</para>
<itemizedlist>
<listitem>
<para>
Store the disk image data in Glance
</para>
</listitem>
<listitem>
<para>
Store metadata about this image in Glance
</para>
</listitem>
</itemizedlist>
<para>
We can do the above two activities in a single call to the Glance
client. Assuming, like in the examples above, that a Glance API
server is running at `glance.example.com`, we issue a call to
`glance.client.Client.add_image`.
</para>
<para>
The method signature is as follows:
</para>
<screen>
glance.client.Client.add_image(image_meta, image_data=None)
</screen>
<para>
The `image_meta` argument is a mapping containing various image
metadata. The `image_data` argument is the disk image data and is
an optional argument.
</para>
<para>
The list of metadata that `image_meta` can contain are listed
below.
</para>
<itemizedlist>
<listitem>
<para>
`name`
</para>
<para>
This key/value is required. Its value should be the name of
the image.
</para>
<para>
Note that the name of an image <emphasis>is not unique to a
Glance node</emphasis>. It would be an unrealistic expectation
of users to know all the unique names of all other user's
images.
</para>
</listitem>
<listitem>
<para>
`id`
</para>
<para>
This key/value is optional.
</para>
<para>
When present, Glance will use the supplied identifier for the
image. If the identifier already exists in that Glance node,
then a `glance.common.exception.Duplicate` will be raised.
</para>
<para>
When this key/value is <emphasis>not</emphasis> present,
Glance will generate an identifier for the image and return
this identifier in the response (see below)
</para>
</listitem>
<listitem>
<para>
`store`
</para>
<para>
This key/value is optional. Valid values are one of `file`,
`s3` or `swift`
</para>
<para>
When present, Glance will attempt to store the disk image data
in the backing store indicated by the value. If the Glance
node does not support the backing store, Glance will raise a
`glance.common.exception.BadRequest`
</para>
<para>
When not present, Glance will store the disk image data in the
backing store that is marked default. See the configuration
option `default_store` for more information.
</para>
</listitem>
<listitem>
<para>
`type`
</para>
<para>
This key/value is required. Valid values are one of `kernel`,
`machine`, `raw`, or `ramdisk`.
</para>
</listitem>
<listitem>
<para>
`size`
</para>
<para>
This key/value is optional.
</para>
<para>
When present, Glance assumes that the expected size of the
request body will be the value. If the length in bytes of the
request body <emphasis>does not match</emphasis> the value,
Glance will raise a `glance.common.exception.BadRequest`
</para>
<para>
When not present, Glance will calculate the image's size based
on the size of the request body.
</para>
</listitem>
<listitem>
<para>
`is_public`
</para>
<para>
This key/value is optional.
</para>
<para>
When present, Glance converts the value to a boolean value, so
"on, 1, true" are all true values. When true, the
image is marked as a public image, meaning that any user may
view its metadata and may read the disk image from Glance.
</para>
<para>
When not present, the image is assumed to be <emphasis>not
public</emphasis> and specific to a user.
</para>
</listitem>
<listitem>
<para>
`properties`
</para>
<para>
This key/value is optional.
</para>
<para>
When present, the value is assumed to be a mapping of
free-form key/value attributes to store with the image.
</para>
<para>
For example, if the following is the value of the `properties`
key in the `image_meta` argument:
</para>
<screen>
{'distro': 'Ubuntu 10.10'}
</screen>
<para>
Then a key/value pair of "distro"/"Ubuntu
10.10" will be stored with the image in Glance.
</para>
<para>
There is no limit on the number of free-form key/value
attributes that can be attached to the image with
`properties`. However, keep in mind that there is an 8K limit
on the size of all HTTP headers sent in a request and this
number will effectively limit the number of image properties.
</para>
<para>
If the `image_data` argument is omitted, Glance will add the
`image_meta` mapping to its registries and return the
newly-registered image metadata, including the new image's
identifier. The `status` of the image will be set to the value
`queued`.
</para>
</listitem>
</itemizedlist>
<para>
As a complete example, the following code would add a new machine
image to Glance
</para>
<para>
from glance.client import Client
</para>
<para>
c = Client("glance.example.com", 9292)
</para>
<variablelist>
<varlistentry>
<term>
meta = {'name': 'Ubuntu 10.10 5G',
</term>
<listitem>
<para>
'type': 'machine', 'is_public': True, 'properties':
{'distro': 'Ubuntu 10.10'}}
</para>
</listitem>
</varlistentry>
</variablelist>
<para>
new_meta = c.add_image(meta, open('/path/to/image.tar.gz'))
</para>
<para>
print 'Stored image. Got identifier: %s' % new_meta['id']
</para>
</section>
<section xml:id="requesting-image-memberships"><info><title>Requesting Image Memberships</title></info>
<para>
We want to see a list of the other system tenants that may access
a given virtual machine image that the Glance server knows about.
</para>
<para>
Continuing from the example above, in order to get the memberships
for the image with ID 1, we can use the following code
</para>
<para>
from glance.client import Client
</para>
<para>
c = Client("glance.example.com", 9292)
</para>
<para>
members = c.get_image_members(1)
</para>
<note>
<para>The return from Client.get_image_members() is a list of dictionaries. Each
dictionary has a `member_id` key, mapping to the tenant the image is shared
with, and a `can_share` key, mapping to a boolean value that identifies
whether the member can further share the image.</para>
</note>
</section>
<section xml:id="requesting-member-images"><info><title>Requesting Member Images</title></info>
<para>
We want to see a list of the virtual machine images a given system
tenant may access.
</para>
<para>
Continuing from the example above, in order to get the images
shared with 'tenant1', we can use the following code
</para>
<para>
from glance.client import Client
</para>
<para>
c = Client("glance.example.com", 9292)
</para>
<para>
images = c.get_member_images('tenant1')
</para>
<note>
<para>The return from Client.get_member_images() is a list of dictionaries. Each
dictionary has an `image_id` key, mapping to an image shared with the member,
and a `can_share` key, mapping to a boolean value that identifies whether
the member can further share the image.</para>
</note>
</section>
<section xml:id="adding-a-member-to-an-image"><info><title>Adding a Member To an Image</title></info>
<para>
We want to authorize a tenant to access a private image.
</para>
<para>
Continuing from the example above, in order to share the image
with ID 1 with 'tenant1', and to allow 'tenant2' to not only
access the image but to also share it with other tenants, we can
use the following code
</para>
<para>
from glance.client import Client
</para>
<para>
c = Client("glance.example.com", 9292)
</para>
<para>
c.add_member(1, 'tenant1') c.add_member(1, 'tenant2', True)
</para>
<para>
..note:
</para>
<screen>
The Client.add_member() function takes one optional argument, the `can_share`
value. If one is not provided and the membership already exists, its current
`can_share` setting is left alone. If the membership does not already exist,
then the `can_share` setting will default to `False`, and the membership will
be created. In all other cases, existing memberships will be modified to use
the specified `can_share` setting, and new memberships will be created with
it. The return value of Client.add_member() is not significant.
</screen>
</section>
<section xml:id="removing-a-member-from-an-image"><info><title>Removing a Member From an Image</title></info>
<para>
We want to revoke a tenant's authorization to access a private
image.
</para>
<para>
Continuing from the example above, in order to revoke the access
of 'tenant1' to the image with ID 1, we can use the following code
</para>
<para>
from glance.client import Client
</para>
<para>
c = Client("glance.example.com", 9292)
</para>
<para>
c.delete_member(1, 'tenant1')
</para>
<para>
..note:
</para>
<screen>
The return value of Client.delete_member() is not significant.
</screen>
</section>
<section xml:id="replacing-a-membership-list-for-an-image"><info><title>Replacing a Membership List For an Image</title></info>
<para>
All existing image memberships may be revoked and replaced in a
single operation.
</para>
<para>
Continuing from the example above, in order to replace the
membership list of the image with ID 1 with two entries--the first
allowing 'tenant1' to access the image, and the second allowing
'tenant2' to access and further share the image, we can use the
following code
</para>
<para>
from glance.client import Client
</para>
<para>
c = Client("glance.example.com", 9292)
</para>
<variablelist>
<varlistentry>
<term>
c.replace_members(1, {'member_id': 'tenant1', 'can_share':
False},
</term>
<listitem>
<para>
{'member_id': 'tenant2', 'can_share': True})
</para>
</listitem>
</varlistentry>
</variablelist>
<note>
<para>The first argument to Client.replace_members() is the opaque identifier of
the image; the remaining arguments are dictionaries with the keys
`member_id` (mapping to a tenant name) and `can_share`. Note that
`can_share` may be omitted, in which case any existing membership for the
specified member will be preserved through the replace operation.
</para>
<para>The return value of Client.replace_members() is not significant.</para>
</note>
</section>
</chapter>

View File

@ -0,0 +1,110 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<section xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="getting-involved"><info><title>Getting Involved</title></info>
<para>
The Glance community is a very friendly group and there are places
online to join in with the community. Feel free to ask questions.
This document points you to some of the places where you can
communicate with people.
</para>
<section xml:id="how-to-join-the-openstack-community"><info><title>How to Join the OpenStack Community</title></info>
<para>
Our community welcomes all people interested in open source cloud
computing, and there are no formal membership requirements. The
best way to join the community is to talk with others online or at
a meetup and offer contributions through Launchpad, the wiki, or
blogs. We welcome all types of contributions, from blueprint
designs to documentation to testing to deployment scripts.
</para>
</section>
<section xml:id="contributing-code"><info><title>Contributing Code</title></info>
<para>
To contribute code, sign up for a Launchpad account and sign a
contributor license agreement, available at
<uri xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://wiki.openstack.org/CLA">http://wiki.openstack.org/CLA</uri>. Once the CLA
is signed you can contribute code through the Bazaar version
control system which is related to your Launchpad account.
</para>
</section>
<section xml:id="openstack-on-freenode-irc-network"><info><title>#openstack on Freenode IRC Network</title></info>
<para>
There is a very active chat channel at
<uri xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="irc://freenode.net/#openstack">irc://freenode.net/#openstack</uri>. This is
usually the best place to ask questions and find your way around.
IRC stands for Internet Relay Chat and it is a way to chat online
in real time. You can also ask a question and come back to the log
files to read the answer later. Logs for the #openstack IRC
channel are stored at
<uri xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://eavesdrop.openstack.org/irclogs/">http://eavesdrop.openstack.org/irclogs/</uri>.
</para>
</section>
<section xml:id="openstack-wiki"><info><title>OpenStack Wiki</title></info>
<para>
The wiki is a living source of knowledge. It is edited by the
community, and has collections of links and other sources of
information. Typically the pages are a good place to write drafts
for specs or documentation, describe a blueprint, or collaborate
with others.
</para>
<para>
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://wiki.openstack.org/">OpenStack Wiki</link>
</para>
</section>
<section xml:id="glance-on-launchpad"><info><title>Glance on Launchpad</title></info>
<para>
Launchpad is a code hosting service that hosts the Glance source
code. From Launchpad you can report bugs, ask questions, and
register blueprints (feature requests).
</para>
<itemizedlist>
<listitem>
<para>
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://wiki.openstack.org/LifeWithBzrAndLaunchpad">Learn
about how to use bzr with launchpad</link>
</para>
</listitem>
<listitem>
<para>
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://launchpad.net/glance">Launchpad Glance
Page</link>
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="openstack-blog"><info><title>OpenStack Blog</title></info>
<para>
The OpenStack blog includes a weekly newsletter that aggregates
OpenStack news from around the internet, as well as providing
inside information on upcoming events and posts from OpenStack
contributors.
</para>
<para>
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://openstack.org/blog">OpenStack Blog</link>
</para>
<para>
See also: <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://planet.openstack.org/">Planet
OpenStack</link>, aggregating blogs about OpenStack from around
the internet into a single feed. If you'd like to contribute to
this blog aggregation with your blog posts, there are instructions
for <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://wiki.openstack.org/AddingYourBlog">adding
your blog</link>.
</para>
</section>
<section xml:id="twitter"><info><title>Twitter</title></info>
<para>
Because all the cool kids do it:
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://twitter.com/openstack">@openstack</link>. Also
follow the
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://search.twitter.com/search?q=%23openstack">#openstack</link>
tag for relevant tweets.
</para>
</section>
</section>

View File

@ -0,0 +1,744 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="configuring-glance"><info><title>Configuring Glance</title></info>
<para>
Glance has a number of options that you can use to configure the
Glance API server, the Glance Registry server, and the various
storage backends that Glance can use to store images.
</para>
<para>
Most configuration is done via configuration files, with the Glance
API server and Glance Registry server using separate configuration
files.
</para>
<para>
When starting up a Glance server, you can specify the configuration
file to use (see <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="controllingservers">the documentation
    on controlling Glance servers</link>). If you do
<emphasis role="strong">not</emphasis> specify a configuration file,
Glance will look in the following directories for a configuration
file, in order:
</para>
<itemizedlist>
<listitem>
<para>
<literal>~/.glance</literal>
</para>
</listitem>
<listitem>
<para>
<literal>~/</literal>
</para>
</listitem>
<listitem>
<para>
<literal>/etc/glance</literal>
</para>
</listitem>
<listitem>
<para>
<literal>/etc</literal>
</para>
</listitem>
</itemizedlist>
<para>
The Glance API server configuration file should be named
<literal>glance-api.conf</literal>. Similarly, the Glance Registry
server configuration file should be named
<literal>glance-registry.conf</literal>. If you installed Glance via
your operating system's package management system, it is likely that
you will have sample configuration files installed in
<literal>/etc/glance</literal>.
</para>
<para>
In addition to this documentation page, you can check the
<literal>etc/glance-api.conf</literal> and
<literal>etc/glance-registry.conf</literal> sample configuration
files distributed with Glance for example configuration files for
    each server application with detailed comments on what each option
does.
</para>
<section xml:id="common-configuration-options-in-glance"><info><title>Common Configuration Options in Glance</title></info>
<para>
Glance has a few command-line options that are common to all
Glance programs:
</para>
<itemizedlist>
<listitem>
<para>
<literal>--verbose</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>False</literal>
</para>
<para>
Can be specified on the command line and in configuration files.
</para>
<para>
Turns on the INFO level in logging and prints more verbose
command-line interface printouts.
</para>
<itemizedlist>
<listitem>
<para>
<literal>--debug</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>False</literal>
</para>
<para>
Can be specified on the command line and in configuration files.
</para>
<para>
Turns on the DEBUG level in logging.
</para>
<itemizedlist>
<listitem>
<para>
<literal>--config-file=PATH</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>None</literal>
</para>
<para>
Specified on the command line only.
</para>
<para>
Takes a path to a configuration file to use when running the
program. If this CLI option is not specified, then we check to see
if the first argument is a file. If it is, then we try to use that
as the configuration file. If there is no file or there were no
arguments, we search for a configuration file in the following
order:
</para>
<itemizedlist>
<listitem>
<para>
<literal>~/.glance</literal>
</para>
</listitem>
<listitem>
<para>
<literal>~/</literal>
</para>
</listitem>
<listitem>
<para>
<literal>/etc/glance</literal>
</para>
</listitem>
<listitem>
<para>
<literal>/etc</literal>
</para>
</listitem>
</itemizedlist>
<para>
The filename that is searched for depends on the server
application name. So, if you are starting up the API server,
<literal>glance-api.conf</literal> is searched for, otherwise
<literal>glance-registry.conf</literal>.
</para>
</section>
<section xml:id="configuring-logging-in-glance"><info><title>Configuring Logging in Glance</title></info>
<para>
There are a number of configuration options in Glance that control
how Glance servers log messages.
</para>
<itemizedlist>
<listitem>
<para>
<literal>--log-config=PATH</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>None</literal>
</para>
<para>
Specified on the command line only.
</para>
<para>
Takes a path to a configuration file to use for configuring
logging.
</para>
<section xml:id="logging-options-available-only-in-configuration-files"><info><title>Logging Options Available Only in Configuration
Files</title></info>
<para>
You will want to place the different logging options in the
<emphasis role="strong">[DEFAULT]</emphasis> section in your
application configuration file. As an example, you might do the
following for the API server, in a configuration file called
<literal>etc/glance-api.conf</literal>:
</para>
<screen>
[DEFAULT]
log_file = /var/log/glance/api.log
</screen>
<itemizedlist>
<listitem>
<para>
<literal>log_file</literal>
</para>
</listitem>
</itemizedlist>
<para>
The filepath of the file to use for logging messages from
Glance's servers. If missing, the default is to output messages
to <literal>stdout</literal>, so if you are running Glance
servers in a daemon mode (using
<literal>glance-control</literal>) you should make sure that the
<literal>log_file</literal> option is set appropriately.
</para>
<itemizedlist>
<listitem>
<para>
<literal>log_dir</literal>
</para>
</listitem>
</itemizedlist>
<para>
The filepath of the directory to use for log files. If not
specified (the default) the <literal>log_file</literal> is used
as an absolute filepath.
</para>
<itemizedlist>
<listitem>
<para>
<literal>log_date_format</literal>
</para>
</listitem>
</itemizedlist>
<para>
The format string for timestamps in the log output.
</para>
<para>
Defaults to <literal>%Y-%m-%d %H:%M:%S</literal>. See the
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://docs.python.org/library/logging.html">logging
module</link> documentation for more information on setting
this format string.
</para>
</section>
</section>
<section xml:id="configuring-glance-storage-backends"><info><title>Configuring Glance Storage Backends</title></info>
<para>
There are a number of configuration options in Glance that control
how Glance stores disk images. These configuration options are
specified in the <literal>glance-api.conf</literal> config file in
the section <literal>[DEFAULT]</literal>.
</para>
<itemizedlist>
<listitem>
<para>
<literal>default_store=STORE</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>file</literal>
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
Sets the storage backend to use by default when storing images in
    Glance. The available options are
    <literal>file</literal>, <literal>swift</literal>, and
    <literal>s3</literal>.
</para>
<section xml:id="configuring-the-filesystem-storage-backend"><info><title>Configuring the Filesystem Storage Backend</title></info>
<itemizedlist>
<listitem>
<para>
<literal>filesystem_store_datadir=PATH</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>/var/lib/glance/images/</literal>
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
        <emphasis>This option is specific to the filesystem storage backend.</emphasis>
</para>
<para>
      Sets the path where the filesystem storage backend writes disk
images. Note that the filesystem storage backend will attempt to
create this directory if it does not exist. Ensure that the user
that <literal>glance-api</literal> runs under has write
permissions to this directory.
</para>
</section>
<section xml:id="configuring-the-swift-storage-backend"><info><title>Configuring the Swift Storage Backend</title></info>
<itemizedlist>
<listitem>
<para>
<literal>swift_store_auth_address=URL</literal>
</para>
</listitem>
</itemizedlist>
<para>
Required when using the Swift storage backend.
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
        <emphasis>This option is specific to the Swift storage backend.</emphasis>
</para>
<para>
Sets the authentication URL supplied to Swift when making calls
to its storage system. For more information about the Swift
authentication system, please see the
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://swift.openstack.org/overview_auth.html">Swift
auth</link> documentation and the
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://docs.openstack.org/openstack-object-storage/admin/content/ch02s02.html">overview
of Swift authentication</link>.
</para>
<itemizedlist>
<listitem>
<para>
<literal>swift_store_user=USER</literal>
</para>
</listitem>
</itemizedlist>
<para>
Required when using the Swift storage backend.
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
        <emphasis>This option is specific to the Swift storage backend.</emphasis>
</para>
<para>
Sets the user to authenticate against the
<literal>swift_store_auth_address</literal> with.
</para>
<itemizedlist>
<listitem>
<para>
<literal>swift_store_key=KEY</literal>
</para>
</listitem>
</itemizedlist>
<para>
Required when using the Swift storage backend.
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
        <emphasis>This option is specific to the Swift storage backend.</emphasis>
</para>
<para>
Sets the authentication key to authenticate against the
<literal>swift_store_auth_address</literal> with for the user
<literal>swift_store_user</literal>.
</para>
<itemizedlist>
<listitem>
<para>
<literal>swift_store_container=CONTAINER</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>glance</literal>
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
        <emphasis>This option is specific to the Swift storage backend.</emphasis>
</para>
<para>
Sets the name of the container to use for Glance images in
Swift.
</para>
<itemizedlist>
<listitem>
<para>
<literal>swift_store_create_container_on_put</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>False</literal>
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
        <emphasis>This option is specific to the Swift storage backend.</emphasis>
</para>
<para>
If true, Glance will attempt to create the container
<literal>swift_store_container</literal> if it does not exist.
</para>
<itemizedlist>
<listitem>
<para>
<literal>swift_store_large_object_size=SIZE_IN_MB</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>5120</literal>
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
        <emphasis>This option is specific to the Swift storage backend.</emphasis>
</para>
<para>
      The size, in MB, at which Glance starts chunking image files
      and doing a large object manifest in Swift. By default, this is
      the maximum object size in Swift, which is 5GB.
</para>
<itemizedlist>
<listitem>
<para>
<literal>swift_store_large_object_chunk_size=SIZE_IN_MB</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>200</literal>
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
        <emphasis>This option is specific to the Swift storage backend.</emphasis>
</para>
<para>
      When doing a large object manifest, the size, in MB, of the
      chunks Glance writes to Swift. This amount of data is written to
      a temporary disk buffer during the process of chunking the image
      file, and the default is 200MB.
</para>
</section>
<section xml:id="configuring-the-s3-storage-backend"><info><title>Configuring the S3 Storage Backend</title></info>
<itemizedlist>
<listitem>
<para>
<literal>s3_store_host=URL</literal>
</para>
</listitem>
</itemizedlist>
<para>
Required when using the S3 storage backend.
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
        <emphasis>This option is specific to the S3 storage backend.</emphasis>
</para>
<para>
      Default: <literal>s3.amazonaws.com</literal>
</para>
<para>
Sets the main service URL supplied to S3 when making calls to
its storage system. For more information about the S3
authentication system, please see the
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://aws.amazon.com/documentation/s3/">S3
documentation</link>
</para>
<itemizedlist>
<listitem>
<para>
<literal>s3_store_access_key=ACCESS_KEY</literal>
</para>
</listitem>
</itemizedlist>
<para>
Required when using the S3 storage backend.
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
        <emphasis>This option is specific to the S3 storage backend.</emphasis>
</para>
<para>
Sets the access key to authenticate against the
<literal>s3_store_host</literal> with.
</para>
<para>
You should set this to your 20-character Amazon AWS access key.
</para>
<itemizedlist>
<listitem>
<para>
<literal>s3_store_secret_key=SECRET_KEY</literal>
</para>
</listitem>
</itemizedlist>
<para>
Required when using the S3 storage backend.
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
        <emphasis>This option is specific to the S3 storage backend.</emphasis>
</para>
<para>
Sets the secret key to authenticate against the
<literal>s3_store_host</literal> with for the access key
<literal>s3_store_access_key</literal>.
</para>
<para>
You should set this to your 40-character Amazon AWS secret key.
</para>
<itemizedlist>
<listitem>
<para>
<literal>s3_store_bucket=BUCKET</literal>
</para>
</listitem>
</itemizedlist>
<para>
Required when using the S3 storage backend.
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
        <emphasis>This option is specific to the S3 storage backend.</emphasis>
</para>
<para>
Sets the name of the bucket to use for Glance images in S3.
</para>
<para>
Note that the namespace for S3 buckets is
<emphasis role="strong">global</emphasis>, and therefore you
      must use a name for the bucket that is unique. It is recommended
      that you use your AWS access key,
      <emphasis role="strong">lowercased</emphasis>, with
      "glance" appended.
</para>
<para>
For instance if your Amazon AWS access key is:
</para>
<para>
<literal>ABCDEFGHIJKLMNOPQRST</literal>
</para>
<para>
then make your bucket value be:
</para>
<para>
<literal>abcdefghijklmnopqrstglance</literal>
</para>
<itemizedlist>
<listitem>
<para>
<literal>s3_store_create_bucket_on_put</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>False</literal>
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
        <emphasis>This option is specific to the S3 storage backend.</emphasis>
</para>
<para>
If true, Glance will attempt to create the bucket
<literal>s3_store_bucket</literal> if it does not exist.
</para>
</section>
</section>
<section xml:id="configuring-the-glance-registry"><info><title>Configuring the Glance Registry</title></info>
<para>
Glance ships with a default, reference implementation registry
server. There are a number of configuration options in Glance that
control how this registry server operates. These configuration
options are specified in the
<literal>glance-registry.conf</literal> config file in the section
<literal>[DEFAULT]</literal>.
</para>
<itemizedlist>
<listitem>
<para>
<literal>sql_connection=CONNECTION_STRING</literal>
(<literal>--sql-connection</literal> when specified on command
line)
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>None</literal>
</para>
<para>
Can be specified in configuration files. Can also be specified on
the command-line for the <literal>glance-manage</literal> program.
</para>
<para>
Sets the SQLAlchemy connection string to use when connecting to
the registry database. Please see the documentation for
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html">SQLAlchemy
connection strings</link> online.
</para>
<itemizedlist>
<listitem>
<para>
        <literal>sql_timeout=SECONDS</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>3600</literal>
</para>
<para>
Can only be specified in configuration files.
</para>
<para>
Sets the number of seconds after which SQLAlchemy should reconnect
to the datastore if no activity has been made on the connection.
</para>
</section>
<section xml:id="configuring-notifications"><info><title>Configuring Notifications</title></info>
<para>
Glance can optionally generate notifications to be logged or sent
to a RabbitMQ queue. The configuration options are specified in
the <literal>glance-api.conf</literal> config file in the section
<literal>[DEFAULT]</literal>.
</para>
<itemizedlist>
<listitem>
<para>
<literal>notifier_strategy</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>noop</literal>
</para>
<para>
Sets the strategy used for notifications. Options are
<literal>logging</literal>, <literal>rabbit</literal> and
<literal>noop</literal>.
</para>
<itemizedlist>
<listitem>
<para>
<literal>rabbit_host</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>localhost</literal>
</para>
<para>
Host to connect to when using <literal>rabbit</literal> strategy.
</para>
<itemizedlist>
<listitem>
<para>
<literal>rabbit_port</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>5672</literal>
</para>
<para>
Port to connect to when using <literal>rabbit</literal> strategy.
</para>
<itemizedlist>
<listitem>
<para>
<literal>rabbit_use_ssl</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>false</literal>
</para>
<para>
Boolean to use SSL for connecting when using
<literal>rabbit</literal> strategy.
</para>
<itemizedlist>
<listitem>
<para>
<literal>rabbit_userid</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>guest</literal>
</para>
<para>
Userid to use for connection when using <literal>rabbit</literal>
strategy.
</para>
<itemizedlist>
<listitem>
<para>
<literal>rabbit_password</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>guest</literal>
</para>
<para>
Password to use for connection when using
<literal>rabbit</literal> strategy.
</para>
<itemizedlist>
<listitem>
<para>
<literal>rabbit_virtual_host</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>/</literal>
</para>
<para>
Virtual host to use for connection when using
<literal>rabbit</literal> strategy.
</para>
<itemizedlist>
<listitem>
<para>
<literal>rabbit_notification_topic</literal>
</para>
</listitem>
</itemizedlist>
<para>
Optional. Default: <literal>glance_notifications</literal>
</para>
<para>
Topic to use for connection when using <literal>rabbit</literal>
strategy.
</para>
</section>
</chapter>

View File

@ -0,0 +1,277 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="controlling-glance-servers"><info><title>Controlling Glance Servers</title></info>
<para>
This section describes the ways to start, stop, and reload Glance's
server programs.
</para>
<section xml:id="starting-a-server"><info><title>Starting a server</title></info>
<para>
There are two ways to start a Glance server (either the API server
or the reference implementation registry server that ships with
Glance):
</para>
<itemizedlist>
<listitem>
<para>
Manually calling the server program
</para>
</listitem>
<listitem>
<para>
Using the <literal>glance-control</literal> server daemon
wrapper program
</para>
</listitem>
</itemizedlist>
<para>
We recommend using the second way.
</para>
<section xml:id="manually-starting-the-server"><info><title>Manually starting the server</title></info>
<para>
The first is by directly calling the server program, passing in
command-line options and a single argument for a
<literal>paste.deploy</literal> configuration file to use when
configuring the server application.
</para>
<note>
        <para>Glance ships with an <literal>etc/</literal> directory that contains sample <literal>paste.deploy</literal>
          configuration files that you can copy to a standard configuration directory and
          adapt for your own uses. Specifically, <literal>bind_host</literal> must be set properly.</para>
</note>
<para>
        If you do <emphasis role="strong">not</emphasis> specify a configuration file on the command
line, Glance will do its best to locate a configuration file in
one of the following directories, stopping at the first config
file it finds:
</para>
<itemizedlist>
<listitem>
<para>
<literal>$CWD</literal>
</para>
</listitem>
<listitem>
<para>
<literal>~/.glance</literal>
</para>
</listitem>
<listitem>
<para>
<literal>~/</literal>
</para>
</listitem>
<listitem>
<para>
<literal>/etc/glance</literal>
</para>
</listitem>
<listitem>
<para>
<literal>/etc</literal>
</para>
</listitem>
</itemizedlist>
<para>
The filename that is searched for depends on the server
application name. So, if you are starting up the API server,
<literal>glance-api.conf</literal> is searched for, otherwise
<literal>glance-registry.conf</literal>.
</para>
<para>
If no configuration file is found, you will see an error, like:
</para>
<screen>
$&gt; glance-api
ERROR: Unable to locate any configuration file. Cannot load application glance-api
</screen>
<para>
Here is an example showing how you can manually start the
<literal>glance-api</literal> server and
        <literal>glance-registry</literal> in a shell:
</para>
<screen>
$ sudo glance-api glance-api.conf --debug &amp;
jsuh@mc-ats1:~$ 2011-04-13 14:50:12 DEBUG [glance-api] ********************************************************************************
2011-04-13 14:50:12 DEBUG [glance-api] Configuration options gathered from config file:
2011-04-13 14:50:12 DEBUG [glance-api] /home/jsuh/glance-api.conf
2011-04-13 14:50:12 DEBUG [glance-api] ================================================
2011-04-13 14:50:12 DEBUG [glance-api] bind_host 65.114.169.29
2011-04-13 14:50:12 DEBUG [glance-api] bind_port 9292
2011-04-13 14:50:12 DEBUG [glance-api] debug True
2011-04-13 14:50:12 DEBUG [glance-api] default_store file
2011-04-13 14:50:12 DEBUG [glance-api] filesystem_store_datadir /home/jsuh/images/
2011-04-13 14:50:12 DEBUG [glance-api] registry_host 65.114.169.29
2011-04-13 14:50:12 DEBUG [glance-api] registry_port 9191
2011-04-13 14:50:12 DEBUG [glance-api] verbose False
2011-04-13 14:50:12 DEBUG [glance-api] ********************************************************************************
2011-04-13 14:50:12 DEBUG [routes.middleware] Initialized with method overriding = True, and path info altering = True
2011-04-13 14:50:12 DEBUG [eventlet.wsgi.server] (21354) wsgi starting up on http://65.114.169.29:9292/
$ sudo glance-registry glance-registry.conf &amp;
jsuh@mc-ats1:~$ 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] PRAGMA table_info("images")
2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] ()
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Col ('cid', 'name', 'type', 'notnull', 'dflt_value', 'pk')
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (0, u'created_at', u'DATETIME', 1, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (1, u'updated_at', u'DATETIME', 0, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (2, u'deleted_at', u'DATETIME', 0, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (3, u'deleted', u'BOOLEAN', 1, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (4, u'id', u'INTEGER', 1, None, 1)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (5, u'name', u'VARCHAR(255)', 0, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (6, u'disk_format', u'VARCHAR(20)', 0, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (7, u'container_format', u'VARCHAR(20)', 0, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (8, u'size', u'INTEGER', 0, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (9, u'status', u'VARCHAR(30)', 1, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (10, u'is_public', u'BOOLEAN', 1, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (11, u'location', u'TEXT', 0, None, 0)
2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] PRAGMA table_info("image_properties")
2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] ()
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Col ('cid', 'name', 'type', 'notnull', 'dflt_value', 'pk')
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (0, u'created_at', u'DATETIME', 1, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (1, u'updated_at', u'DATETIME', 0, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (2, u'deleted_at', u'DATETIME', 0, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (3, u'deleted', u'BOOLEAN', 1, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (4, u'id', u'INTEGER', 1, None, 1)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (5, u'image_id', u'INTEGER', 1, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (6, u'key', u'VARCHAR(255)', 1, None, 0)
2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (7, u'value', u'TEXT', 0, None, 0)
$ ps aux | grep glance
root 20009 0.7 0.1 12744 9148 pts/1 S 12:47 0:00 /usr/bin/python /usr/bin/glance-api glance-api.conf --debug
root 20012 2.0 0.1 25188 13356 pts/1 S 12:47 0:00 /usr/bin/python /usr/bin/glance-registry glance-registry.conf
jsuh 20017 0.0 0.0 3368 744 pts/1 S+ 12:47 0:00 grep glance
</screen>
<para>
Simply supply the configuration file as the first argument (the
<literal>etc/glance-api.conf</literal> and
<literal>etc/glance-registry.conf</literal> sample configuration
files were used in the above example) and then any common
        options you want to use (<literal>--debug</literal> was used
        above to show some of the debugging output that the server shows
        when starting up). Call the server program with
        <literal>--help</literal> to see all available options you can
        specify on the command line.
</para>
<para>
For more information on configuring the server via the
<literal>paste.deploy</literal> configuration files, see the
section entitled Configuring Glance servers.
</para>
<para>
        Note that the server <emphasis>daemonizes</emphasis> itself by using the standard
shell backgrounding indicator, <literal>&amp;</literal>, in the
previous example. For most use cases, we recommend using the
<literal>glance-control</literal> server daemon wrapper for
daemonizing. See below for more details on daemonization with
<literal>glance-control</literal>.
</para>
</section>
<section xml:id="using-the-glance-control-program-to-start-the-server"><info><title>Using the <literal>glance-control</literal> program to
start the server</title></info>
<para>
The second way to start up a Glance server is to use the
<literal>glance-control</literal> program.
<literal>glance-control</literal> is a wrapper script that
allows the user to start, stop, restart, and reload the other
Glance server programs in a fashion that is more conducive to
automation and scripting.
</para>
<para>
Servers started via the <literal>glance-control</literal>
        program are always <emphasis>daemonized</emphasis>, meaning that the server program
process runs in the background.
</para>
<para>
To start a Glance server with <literal>glance-control</literal>,
simply call <literal>glance-control</literal> with a server and
the word "start", followed by any command-line options
you wish to provide. Start the server with
<literal>glance-control</literal> in the following way:
</para>
<screen>
$&gt; sudo glance-control &lt;SERVER&gt; start [CONFPATH]
</screen>
      <note>
        <para>
          You must use the <literal>sudo</literal> program to run
          <literal>glance-control</literal> currently, as the pid files
          for the server programs are written to
          <literal>/var/run/glance/</literal>.
        </para>
      </note>
<para>
Here is an example that shows how to start the
<literal>glance-registry</literal> server with the
<literal>glance-control</literal> wrapper script.
</para>
      <screen>
$ sudo glance-control api start glance-api.conf
Starting glance-api with /home/jsuh/glance.conf
$ sudo glance-control registry start glance-registry.conf
Starting glance-registry with /home/jsuh/glance.conf
$ ps aux | grep glance
root     20038  4.0  0.1  12728  9116 ?     Ss   12:51   0:00 /usr/bin/python /usr/bin/glance-api /home/jsuh/glance-api.conf
root     20039  6.0  0.1  25188 13356 ?     Ss   12:51   0:00 /usr/bin/python /usr/bin/glance-registry /home/jsuh/glance-registry.conf
jsuh     20042  0.0  0.0   3368   744 pts/1 S+   12:51   0:00 grep glance
      </screen>
<para>
The same <literal>paste.deploy</literal> configuration files are
used by <literal>glance-control</literal> to start the Glance
server programs, and you can specify (as the example above
shows) a configuration file when starting the server.
</para>
</section>
</section>
<section xml:id="stopping-a-server"><info><title>Stopping a server</title></info>
<para>
If you started a Glance server manually and did not use the
<literal>&amp;</literal> backgrounding function, simply send a
terminate signal to the server process by typing
<literal>Ctrl-C</literal>
</para>
<para>
If you started the Glance server using the
<literal>glance-control</literal> program, you can use the
<literal>glance-control</literal> program to stop it. Simply do
the following:
</para>
<screen>
$&gt; sudo glance-control &lt;SERVER&gt; stop
</screen>
<para>
as this example shows:
</para>
<screen>
$&gt; sudo glance-control registry stop
Stopping glance-registry pid: 17602 signal: 15
</screen>
</section>
<section xml:id="restarting-a-server"><info><title>Restarting a server</title></info>
<para>
You can restart a server with the
<literal>glance-control</literal> program, as demonstrated here:
</para>
<screen>
$&gt; sudo glance-control registry restart etc/glance-registry.conf
Stopping glance-registry pid: 17611 signal: 15
Starting glance-registry with /home/jpipes/repos/glance/trunk/etc/glance-registry.conf
</screen>
</section>
</chapter>

Binary file not shown.

After

Width:  |  Height:  |  Size: 40 KiB

View File

@ -0,0 +1,171 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="disk-and-container-formats"><info><title>Disk and Container Formats</title></info>
<para>
When adding an image to Glance, you may specify what the virtual
machine image's <emphasis>disk format</emphasis> and
<emphasis>container format</emphasis> are.
</para>
<para>
This document explains exactly what these formats are.
</para>
<section xml:id="disk-format"><info><title>Disk Format</title></info>
<para>
The disk format of a virtual machine image is the format of the
underlying disk image. Virtual appliance vendors have different
formats for laying out the information contained in a virtual
machine disk image.
</para>
<para>
You can set your image's disk format to one of the following:
</para>
<itemizedlist>
<listitem>
<para>
<emphasis role="strong">raw</emphasis>
</para>
<para>
This is an unstructured disk image format
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">vhd</emphasis>
</para>
<para>
This is the VHD disk format, a common disk format used by
virtual machine monitors from VMWare, Xen, Microsoft,
VirtualBox, and others
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">vmdk</emphasis>
</para>
<para>
Another common disk format supported by many common virtual
machine monitors
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">vdi</emphasis>
</para>
<para>
A disk format supported by VirtualBox virtual machine monitor
and the QEMU emulator
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">iso</emphasis>
</para>
<para>
An archive format for the data contents of an optical disc
(e.g. CDROM).
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">qcow2</emphasis>
</para>
<para>
A disk format supported by the QEMU emulator that can expand
dynamically and supports Copy on Write
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">aki</emphasis>
</para>
<para>
This indicates what is stored in Glance is an Amazon kernel
image
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">ari</emphasis>
</para>
<para>
This indicates what is stored in Glance is an Amazon ramdisk
image
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">ami</emphasis>
</para>
<para>
This indicates what is stored in Glance is an Amazon machine
image
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="container-format"><info><title>Container Format</title></info>
<para>
The container format refers to whether the virtual machine image
is in a file format that also contains metadata about the actual
virtual machine.
</para>
<para>
There are two main types of container formats: OVF and Amazon's
AMI. In addition, a virtual machine image may have no container
format at all --basically, it's just a blob of unstructured
data...
</para>
<para>
You can set your image's container format to one of the following:
</para>
<itemizedlist>
<listitem>
<para>
<emphasis role="strong">ovf</emphasis>
</para>
<para>
This is the OVF container format
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">bare</emphasis>
</para>
<para>
This indicates there is no container or metadata envelope for
the image
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">aki</emphasis>
</para>
<para>
This indicates what is stored in Glance is an Amazon kernel
image
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">ari</emphasis>
</para>
<para>
This indicates what is stored in Glance is an Amazon ramdisk
image
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">ami</emphasis>
</para>
<para>
This indicates what is stored in Glance is an Amazon machine
image
</para>
</listitem>
</itemizedlist>
</section>
</chapter>

View File

@ -0,0 +1,129 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="quick-guide-to-getting-started-with-glance"><info><title>Quick Guide to Getting Started with Glance</title></info>
<para>
Glance is a server that provides the following services:
</para>
<itemizedlist>
<listitem>
<para>
Ability to store and retrieve virtual machine images
</para>
</listitem>
<listitem>
<para>
Ability to store and retrieve metadata about these virtual
machine images
</para>
</listitem>
<listitem>
<para>
FUTURE: Convert a virtual machine image from one format to
another
</para>
</listitem>
<listitem>
<para>
FUTURE: Help caching proxies such as Varnish or Squid cache
machine images
</para>
</listitem>
</itemizedlist>
<para>
Communication with Glance occurs via a REST-like HTTP interface.
</para>
<para>
However, Glance includes a Client class that
makes working with Glance easy and straightforward.
</para>
<para>
As of the Cactus release, there are also command-line tools for
interacting with Glance.
</para>
<section xml:id="overview-of-glance-architecture"><info><title>Overview of Glance Architecture</title></info>
<para>
There are two main parts to Glance's architecture:
</para>
<itemizedlist>
<listitem>
<para>
Glance API server
</para>
</listitem>
<listitem>
<para>
Glance Registry server(s)
</para>
</listitem>
</itemizedlist>
<section xml:id="glance-api-server"><info><title>Glance API Server</title></info>
<para>
The API server is the main interface for Glance. It routes
requests from clients to registries of image metadata and to its
<emphasis role="strong">backend stores</emphasis>, which are the
mechanisms by which Glance actually saves incoming virtual
machine images.
</para>
<para>
The backend stores that Glance can work with are as follows:
</para>
<itemizedlist>
<listitem>
<para>
<emphasis role="strong">Swift</emphasis>
</para>
<para>
Swift is the highly-available object storage project in
OpenStack. More information can be found about Swift
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://swift.openstack.org">here</link>.
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">Filesystem</emphasis>
</para>
<para>
The default backend that Glance uses to store virtual
machine images is the filesystem backend. This simple
backend writes image files to the local filesystem.
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">S3</emphasis>
</para>
<para>
This backend allows Glance to store virtual machine images
in Amazon's S3 service.
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">HTTP</emphasis>
</para>
<para>
Glance can read virtual machine images that are available
via HTTP somewhere on the Internet. This store is
<emphasis role="strong">readonly</emphasis>
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="glance-registry-servers"><info><title>Glance Registry Servers</title></info>
<para>
Glance registry servers are servers that conform to the Glance
Registry API. Glance ships with a reference implementation of a
registry server that complies with this API
(<literal>glance-registry</literal>).
</para>
<para>
For more details on Glance's architecture see the Architecture section. For more information on what a Glance
registry server is, see the Registries section.
</para>
</section>
</section>
</chapter>

View File

@ -0,0 +1,613 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="using-the-glance-cli-tool"><info><title>Using the Glance CLI Tool</title></info>
<para>
Glance ships with a command-line tool for querying and managing
Glance. It has a fairly simple but powerful interface of the form:
</para>
<screen>
Usage: glance &lt;command&gt; [options] [args]
</screen>
<para>
Where <literal>&lt;command&gt;</literal> is one of the following:
</para>
<itemizedlist>
<listitem>
<para>
<literal>help</literal>
</para>
<para>
Show detailed help information about a specific command
</para>
</listitem>
<listitem>
<para>
<literal>add</literal>
</para>
<para>
Adds an image to Glance
</para>
</listitem>
<listitem>
<para>
<literal>update</literal>
</para>
<para>
Updates an image's stored metadata in Glance
</para>
</listitem>
<listitem>
<para>
<literal>delete</literal>
</para>
<para>
Deletes an image and its metadata from Glance
</para>
</listitem>
<listitem>
<para>
<literal>index</literal>
</para>
<para>
Lists brief information about <emphasis>public</emphasis> images
that Glance knows about
</para>
</listitem>
<listitem>
<para>
<literal>details</literal>
</para>
<para>
Lists detailed information about <emphasis>public</emphasis>
images that Glance knows about
</para>
</listitem>
<listitem>
<para>
<literal>show</literal>
</para>
<para>
Lists detailed information about a specific image
</para>
</listitem>
<listitem>
<para>
<literal>clear</literal>
</para>
<para>
Destroys all <emphasis role="strong">public</emphasis> images
and their associated metadata
</para>
</listitem>
</itemizedlist>
<para>
This document describes how to use the <literal>glance</literal>
tool for each of the above commands.
</para>
<section xml:id="the-help-command"><info><title>The <literal>help</literal> command</title></info>
<para>
Issuing the <literal>help</literal> command with a
<literal>&lt;COMMAND&gt;</literal> argument shows detailed help
about a specific command. Running <literal>glance</literal>
without any arguments shows a brief help message, like so:
</para>
<screen>
$&gt; glance
Usage: glance &lt;command&gt; [options] [args]
Commands:
help &lt;command&gt; Output help for one of the commands below
add Adds a new image to Glance
update Updates an image's metadata in Glance
delete Deletes an image from Glance
index Return brief information about images in Glance
details Return detailed information about images in
Glance
show Show detailed information about an image in
Glance
clear Removes all images and metadata from Glance
Options:
--version show program's version number and exit
-h, --help show this help message and exit
-v, --verbose Print more verbose output
-H ADDRESS, --host=ADDRESS
Address of Glance API host. Default: example.com
-p PORT, --port=PORT Port the Glance API host listens on. Default: 9292
--limit=LIMIT Page size to use while requesting image metadata
--marker=MARKER Image index after which to begin pagination
--sort_key=KEY Sort results by this image attribute.
--sort_dir=[desc|asc]
Sort results in this direction.
-f, --force Prevent select actions from requesting user
confirmation
--dry-run Don't actually execute the command, just print output
showing what WOULD happen.
</screen>
<para>
With a <literal>&lt;COMMAND&gt;</literal> argument, more
information on the command is shown, like so:
</para>
<screen>
$&gt; glance help update
glance update [options] &lt;ID&gt; &lt;field1=value1 field2=value2 ...&gt;
Updates an image's metadata in Glance. Specify metadata fields as arguments.
All field/value pairs are converted into a mapping that is passed
to Glance that represents the metadata for an image.
Field names that can be specified:
name A name for the image.
is_public If specified, interpreted as a boolean value
and sets or unsets the image's availability to the public.
disk_format Format of the disk image
container_format Format of the container
All other field names are considered to be custom properties so be careful
to spell field names correctly. :)
</screen>
</section>
<section xml:id="the-add-command"><info><title>The <literal>add</literal> command</title></info>
<para>
The <literal>add</literal> command is used to do both of the
following:
</para>
<itemizedlist>
<listitem>
<para>
Store virtual machine image data and metadata about that image
in Glance
</para>
</listitem>
<listitem>
<para>
Let Glance know about an existing virtual machine image that
may be stored somewhere else
</para>
</listitem>
</itemizedlist>
<para>
We cover both use cases below.
</para>
<section xml:id="important-information-about-uploading-images"><info><title>Important Information about Uploading Images</title></info>
<para>
Before we go over the commands for adding an image to Glance, it
is important to understand that Glance
<emphasis role="strong">does not currently inspect</emphasis>
the image files you add to it. In other words,
<emphasis role="strong">Glance only understands what you tell
it, via attributes and custom properties</emphasis>.
</para>
<para>
If the file extension of the file you upload to Glance ends in
'.vhd', Glance <emphasis role="strong">does not</emphasis> know
that the image you are uploading has a disk format of
<literal>vhd</literal>. You have to
<emphasis role="strong">tell</emphasis> Glance that the image
you are uploading has a disk format by using the
<literal>disk_format=vhd</literal> on the command line (see more
below).
</para>
<para>
By the same token, Glance does not currently allow you to upload
"multi-part" disk images at once.
<emphasis role="strong">The common operation of bundling a
kernel image and ramdisk image into a machine image is not done
automagically by Glance.</emphasis>
</para>
</section>
<section xml:id="store-virtual-machine-image-data-and-metadata"><info><title>Store virtual machine image data and metadata</title></info>
<para>
When adding an actual virtual machine image to Glance, you use
the <literal>add</literal> command. You will pass metadata about
the VM image on the command line, and you will use a standard
shell redirect to stream the image data file to
<literal>glance</literal>.
</para>
<para>
Let's walk through a simple example. Suppose we have a virtual
disk image stored on our local filesystem that we wish to
"upload" to Glance. This image is stored on our local
filesystem in <literal>/tmp/images/myimage.iso</literal>.
</para>
<para>
We'd also like to tell Glance that this image should be called
"My Image", and that the image should be public --
anyone should be able to fetch it.
</para>
<para>
Here is how we'd upload this image to Glance. Change example ip
number to your server ip number.:
</para>
<screen>
$&gt; glance add name="My Image" is_public=true &lt; /tmp/images/myimage.iso --host=65.114.169.29
</screen>
<para>
If Glance was able to successfully upload and store your VM
image data and metadata attributes, you would see something like
this:
</para>
<screen>
$&gt; glance add name="My Image" is_public=true &lt; /tmp/images/myimage.iso --host=65.114.169.29
Added new image with ID: 2
</screen>
<para>
You can use the <literal>--verbose</literal> (or
<literal>-v</literal>) command-line option to print some more
information about the metadata that was saved with the image:
</para>
<screen>
$&gt; glance --verbose add name="My Image" is_public=true &lt; /tmp/images/myimage.iso --host=65.114.169.29
Added new image with ID: 4
Returned the following metadata for the new image:
container_format =&gt; ovf
created_at =&gt; 2011-02-22T19:20:53.298556
deleted =&gt; False
deleted_at =&gt; None
disk_format =&gt; raw
id =&gt; 4
is_public =&gt; True
location =&gt; file:///tmp/images/4
name =&gt; My Image
properties =&gt; {}
size =&gt; 58520278
status =&gt; active
updated_at =&gt; None
Completed in 0.6141 sec.
</screen>
<para>
If you are unsure about what will be added, you can use the
<literal>--dry-run</literal> command-line option, which will
simply show you what <emphasis>would</emphasis> have happened:
</para>
<screen>
$&gt; glance --dry-run add name="Foo" distro="Ubuntu" is_publi=True &lt; /tmp/images/myimage.iso --host=65.114.169.29
Dry run. We would have done the following:
Add new image with metadata:
container_format =&gt; ovf
disk_format =&gt; raw
is_public =&gt; False
name =&gt; Foo
properties =&gt; {'is_publi': 'True', 'distro': 'Ubuntu'}
</screen>
<para>
This is useful for detecting problems and for seeing what the
default field values supplied by <literal>glance</literal> are.
For instance, there was a typo in the command above (the
<literal>is_public</literal> field was incorrectly spelled
<literal>is_publi</literal> which resulted in the image having
an <literal>is_publi</literal> custom property added to the
image and the <emphasis>real</emphasis>
<literal>is_public</literal> field value being `False` (the
default) and not `True`...
</para>
</section>
<section xml:id="register-a-virtual-machine-image-in-another-location"><info><title>Register a virtual machine image in another
location</title></info>
<para>
Sometimes, you already have stored the virtual machine image in
some non-Glance location -- perhaps even a location you have no
write access to -- and you want to tell Glance where this
virtual machine image is located and some metadata about it. The
<literal>add</literal> command can do this for you.
</para>
<para>
When registering an image in this way, the only difference is
that you do not use a shell redirect to stream a virtual machine
image file into Glance, but instead, you tell Glance where to
find the existing virtual machine image by setting the
<literal>location</literal> field. Below is an example of doing
this.
</para>
<para>
Let's assume that there is a virtual machine image located at
the URL
<literal>http://example.com/images/myimage.vhd</literal>. We can
register this image with Glance using the following:
</para>
<screen>
$&gt; glance --verbose add name="Some web image" disk_format=vhd container_format=ovf\
location="http://example.com/images/myimage.vhd"
Added new image with ID: 1
Returned the following metadata for the new image:
container_format =&gt; ovf
created_at =&gt; 2011-02-23T00:42:04.688890
deleted =&gt; False
deleted_at =&gt; None
disk_format =&gt; vhd
id =&gt; 1
is_public =&gt; True
location =&gt; http://example.com/images/myimage.vhd
name =&gt; Some web image
properties =&gt; {}
size =&gt; 0
status =&gt; active
updated_at =&gt; None
Completed in 0.0356 sec.
</screen>
</section>
</section>
<section xml:id="the-update-command"><info><title>The <literal>update</literal> command</title></info>
<para>
After uploading/adding a virtual machine image to Glance, it is
not possible to modify the actual virtual machine image -- images
are read-only after all --however, it <emphasis>is</emphasis>
possible to update any metadata about the image after you add it
to Glance.
</para>
<para>
The <literal>update</literal> command allows you to update the
metadata fields of a stored image. You use this command like so:
</para>
<screen>
glance update &lt;ID&gt; [field1=value1 field2=value2 ...]
</screen>
<para>
Let's say we have an image with identifier 5 that we wish to
change the is_public attribute of the image from False to True.
The following would accomplish this:
</para>
<screen>
$&gt; glance update 5 is_public=true --host=65.114.169.29
Updated image 5
</screen>
<para>
Using the <literal>--verbose</literal> flag will show you all the
updated data about the image:
</para>
<screen>
$&gt; glance --verbose update 5 is_public=true --host=65.114.169.29
Updated image 5
Updated image metadata for image 5:
URI: http://example.com/images/5
Id: 5
Public? Yes
Name: My Image
Size: 58520278
Location: file:///tmp/images/5
Disk format: raw
Container format: ovf
Completed in 0.0596 sec.
</screen>
</section>
<section xml:id="the-delete-command"><info><title>The <literal>delete</literal> command</title></info>
<para>
You can delete an image by using the <literal>delete</literal>
command, shown below:
</para>
<screen>
$&gt; glance --verbose delete 5 --host=65.114.169.29
Deleted image 5
</screen>
</section>
<section xml:id="the-index-command"><info><title>The <literal>index</literal> command</title></info>
<para>
The <literal>index</literal> command displays brief information
about the <emphasis>public</emphasis> images available in Glance,
as shown below:
</para>
<screen>
$&gt; glance index --host=65.114.169.29
ID Name Disk Format Container Format Size
---------------- ------------------------------ -------------------- -------------------- --------------
1 Ubuntu 10.10 vhd ovf 58520278
2 Ubuntu 10.04 ami ami 58520278
3 Fedora 9 vdi bare 3040
4 Vanilla Linux 2.6.22 qcow2 bare 0
</screen>
<para>
Image metadata such as 'name', 'disk_format', 'container_format'
and 'status' may be used to filter the results of an index or
details command. These commands also accept 'size_min' and
'size_max' as lower and upper bounds of the image metadata 'size.'
Any unrecognized fields are handled as custom image properties.
</para>
<para>
The 'limit' and 'marker' options are used by the index and details
commands to control pagination. The 'marker' indicates the last
record that was seen by the user. The page of results returned
will begin after the provided image ID. The 'limit' param
indicates the page size. Each request to the api will be
restricted to returning a maximum number of results. Without the
'force' option, the user will be prompted before each page of
results is fetched from the API.
</para>
<para>
Results from index and details commands may be ordered using the
'sort_key' and 'sort_dir' options. Any image attribute may be used
for 'sort_key', while only 'asc' or 'desc' are allowed for
'sort_dir'.
</para>
</section>
<section xml:id="the-details-command"><info><title>The <literal>details</literal> command</title></info>
<para>
The <literal>details</literal> command displays detailed
information about the <emphasis>public</emphasis> images available
in Glance, as shown below:
</para>
<screen>
$&gt; glance details --host=65.114.169.29
================================================================================
URI: http://example.com/images/1
Id: 1
Public? Yes
Name: Ubuntu 10.10
Status: active
Size: 58520278
Location: file:///tmp/images/1
Disk format: vhd
Container format: ovf
Property 'distro_version': 10.10
Property 'distro': Ubuntu
================================================================================
URI: http://example.com/images/2
Id: 2
Public? Yes
Name: Ubuntu 10.04
Status: active
Size: 58520278
Location: file:///tmp/images/2
Disk format: ami
Container format: ami
Property 'distro_version': 10.04
Property 'distro': Ubuntu
================================================================================
URI: http://example.com/images/3
Id: 3
Public? Yes
Name: Fedora 9
Status: active
Size: 3040
Location: file:///tmp/images/3
Disk format: vdi
Container format: bare
Property 'distro_version': 9
Property 'distro': Fedora
================================================================================
URI: http://example.com/images/4
Id: 4
Public? Yes
Name: Vanilla Linux 2.6.22
Status: active
Size: 0
Location: http://example.com/images/vanilla.iso
Disk format: qcow2
Container format: bare
================================================================================
</screen>
</section>
<section xml:id="the-show-command"><info><title>The <literal>show</literal> command</title></info>
<para>
The <literal>show</literal> command displays detailed information
about a specific image, specified with
<literal>&lt;ID&gt;</literal>, as shown below:
</para>
<screen>
$&gt; glance show 3 --host=65.114.169.29
URI: http://example.com/images/3
Id: 3
Public? Yes
Name: Fedora 9
Status: active
Size: 3040
Location: file:///tmp/images/3
Disk format: vdi
Container format: bare
Property 'distro_version': 9
Property 'distro': Fedora
</screen>
</section>
<section xml:id="the-clear-command"><info><title>The <literal>clear</literal> command</title></info>
<para>
The <literal>clear</literal> command is an administrative command
that deletes <emphasis role="strong">ALL</emphasis> images and all
image metadata. Passing the <literal>--verbose</literal> command
will print brief information about all the images that were
deleted, as shown below:
</para>
<screen>
$&gt; glance --verbose clear --host=65.114.169.29
Deleting image 1 "Some web image" ... done
Deleting image 2 "Some other web image" ... done
Completed in 0.0328 sec.
</screen>
</section>
<section xml:id="the-image-members-command"><info><title>The <literal>image-members</literal> Command</title></info>
<para>
The <literal>image-members</literal> command displays the list of
members with which a specific image, specified with
<literal>&lt;ID&gt;</literal>, is shared, as shown below:
</para>
<screen>
$&gt; glance image-members 3 --host=65.114.169.29
tenant1
tenant2 *
(*: Can share image)
</screen>
</section>
<section xml:id="the-member-images-command"><info><title>The <literal>member-images</literal> Command</title></info>
<para>
The <literal>member-images</literal> command displays the list of
images which are shared with a specific member, specified with
<literal>&lt;MEMBER&gt;</literal>, as shown below:
</para>
<screen>
$&gt; glance member-images tenant1 --host=65.114.169.29
1
2 *
(*: Can share image)
</screen>
</section>
<section xml:id="the-member-add-command"><info><title>The <literal>member-add</literal> Command</title></info>
<para>
The <literal>member-add</literal> command grants a member,
specified with <literal>&lt;MEMBER&gt;</literal>, access to a
private image, specified with <literal>&lt;ID&gt;</literal>. The
<literal>--can-share</literal> flag can be given to allow the
member to share the image, as shown below:
</para>
<screen>
$&gt; glance member-add 1 tenant1 --host=65.114.169.29
$&gt; glance member-add 1 tenant2 --can-share --host=65.114.169.29
</screen>
</section>
<section xml:id="the-member-delete-command"><info><title>The <literal>member-delete</literal> Command</title></info>
<para>
The <literal>member-delete</literal> command revokes the access of
a member, specified with <literal>&lt;MEMBER&gt;</literal>, to a
private image, specified with <literal>&lt;ID&gt;</literal>, as
shown below:
</para>
<screen>
$&gt; glance member-delete 1 tenant1
$&gt; glance member-delete 1 tenant2
</screen>
</section>
<section xml:id="the-members-replace-command"><info><title>The <literal>members-replace</literal> Command</title></info>
<para>
The <literal>members-replace</literal> command revokes all
existing memberships on a private image, specified with
<literal>&lt;ID&gt;</literal>, and replaces them with a membership
for one member, specified with <literal>&lt;MEMBER&gt;</literal>.
The <literal>--can-share</literal> flag can be given to allow the
member to share the image, as shown below:
</para>
<screen>
$&gt; glance members-replace 1 tenant1 --can-share --host=65.114.169.29
</screen>
<para>
The command is given in plural form to make it clear that all
existing memberships are affected by the command.
</para>
</section>
</chapter>

View File

@ -0,0 +1,18 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="image-identifiers"><info><title>Image Identifiers</title></info>
<para>
Images are uniquely identified by way of a URI that matches the
following signature:
</para>
<screen>
&lt;Glance Server Location&gt;/images/&lt;ID&gt;
</screen>
<para>
where `&lt;Glance Server Location&gt;` is the resource location of
the Glance service that knows about an image, and `&lt;ID&gt;` is
the image's identifier that is <emphasis>unique to that Glance
server</emphasis>.
</para>
</chapter>

View File

@ -0,0 +1,109 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="installing-glance"><info><title>Installing Glance</title></info>
<section xml:id="installing-from-packages"><info><title>Installing from packages</title></info>
<para>
To install the latest version of Glance from packages, follow
these instructions.
</para>
<section xml:id="debianubuntu"><info><title>Debian/Ubuntu</title></info>
<orderedlist numeration="arabic">
<listitem>
<para>
Add the Glance PPA to your sources.lst:
</para>
<para>
$&gt; sudo add-apt-repository ppa:glance-core/trunk $&gt;
sudo apt-get update
</para>
</listitem>
<listitem>
<para>
Install Glance:
</para>
<para>
$&gt; sudo apt-get install glance
</para>
</listitem>
</orderedlist>
</section>
</section>
<section xml:id="installing-from-source-tarballs"><info><title>Installing from source tarballs</title></info>
<para>
To install the latest version of Glance from a source tarball,
follow these instructions.
</para>
<orderedlist numeration="arabic">
<listitem>
<para>
Grab the source tarball from
<link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://launchpad.net/glance/+download">Launchpad</link>
</para>
</listitem>
<listitem>
<para>
Untar the source tarball:
</para>
<para> $&gt; tar -xzf &lt;FILE&gt; </para>
</listitem>
<listitem>
<para>
Change into the package directory and build/install:
</para>
<para>
$&gt; cd glance-&lt;RELEASE&gt; $&gt; sudo python setup.py
install
</para>
</listitem>
</orderedlist>
</section>
<section xml:id="installing-from-a-bazaar-branch"><info><title>Installing from a Bazaar Branch</title></info>
<para>
To install the latest version of Glance from the Launchpad Bazaar
repositories, follow these instructions.
</para>
<section xml:id="debianubuntu-1"><info><title>Debian/Ubuntu</title></info>
<orderedlist numeration="arabic">
<listitem>
<para>
Install Bazaar and build dependencies:
</para>
<para>
$&gt; sudo apt-get install bzr python-eventlet python-routes
python-greenlet swift $&gt; sudo apt-get install
python-argparse python-sqlalchemy python-wsgiref
python-pastedeploy
</para>
</listitem>
</orderedlist>
<note>
<para>If you want to build the Glance documentation locally, you will also want
to install the python-sphinx package.</para>
</note>
<orderedlist numeration="arabic">
<listitem>
<para>
Branch Glance's trunk branch:
</para>
<para>
$&gt; bzr branch lp:glance
</para>
</listitem>
<listitem>
<para>
Install Glance:
</para>
<para>
$&gt; sudo python setup.py install
</para>
</listitem>
</orderedlist>
</section>
</section>
</chapter>

View File

@ -0,0 +1,131 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="notifications"><info><title>Notifications</title></info>
<para>
Notifications can be generated for each upload, update or delete
image event. These can be used for auditing, troubleshooting, etc.
</para>
<section xml:id="strategies"><info><title>Strategies</title></info>
<itemizedlist>
<listitem>
<para>
logging
</para>
<para>
This strategy uses the standard Python logging infrastructure
with the notifications ending up in the file specified by the
log_file configuration directive.
</para>
</listitem>
<listitem>
<para>
rabbit
</para>
<para>
This strategy sends notifications to a rabbitmq queue. This
can then be processed by other services or applications.
</para>
</listitem>
<listitem>
<para>
noop
</para>
<para>
This strategy produces no notifications. It is the default
strategy.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="content"><info><title>Content</title></info>
<para>
Every message contains a handful of attributes.
</para>
<itemizedlist>
<listitem>
<para>
message_id
</para>
<para>
UUID identifying the message.
</para>
</listitem>
<listitem>
<para>
publisher_id
</para>
<para>
The hostname of the glance instance that generated the
message.
</para>
</listitem>
<listitem>
<para>
event_type
</para>
<para>
Event that generated the message.
</para>
</listitem>
<listitem>
<para>
priority
</para>
<para>
One of WARN, INFO or ERROR.
</para>
</listitem>
<listitem>
<para>
timestamp
</para>
<para>
UTC timestamp of when event was generated.
</para>
</listitem>
<listitem>
<para>
payload
</para>
<para>
Data specific to the event type.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="payload"><info><title>Payload</title></info>
<para>
WARN and ERROR events contain a text message in the payload.
</para>
<itemizedlist>
<listitem>
<para>
image.upload
</para>
<para>
For INFO events, it is the image metadata.
</para>
</listitem>
<listitem>
<para>
image.update
</para>
<para>
For INFO events, it is the image metadata.
</para>
</listitem>
<listitem>
<para>
image.delete
</para>
<para>
For INFO events, it is the image id.
</para>
</listitem>
</itemizedlist>
</section>
</chapter>

View File

@ -0,0 +1,50 @@
<?xml version="1.0" encoding="UTF-8"?>
<book xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns:html="http://www.w3.org/1999/xhtml"
version="5.0"
xml:id="openstack-image-service-admin-manual"
>
<title>OpenStack Image Service Admin Manual</title>
<info>
<author>
<personname>
<firstname/>
<surname/>
</personname>
<affiliation>
<orgname>OpenStack</orgname>
</affiliation>
</author>
<copyright>
<year>2010</year>
<year>2011</year>
<holder>OpenStack LLC</holder>
</copyright>
<releaseinfo>trunk</releaseinfo>
<productname>OpenStack Image Service</productname>
<pubdate>2011-09-19</pubdate>
<legalnotice role="apache2">
<annotation>
<remark>Copyright details are filled in by the template.</remark>
</annotation>
</legalnotice>
<abstract>
<para>OpenStack™ Image Service offers a service for discovering, registering, and retrieving virtual machine images. Code-named Glance, it has a RESTful API that allows querying of VM image metadata as well as retrieval of the actual image. This manual provides guidance for installing, managing, and understanding the software that runs OpenStack Image Service. </para>
</abstract>
</info>
<!-- Chapters are referred from the book file through these include statements. You can add additional chapters using these types of statements. -->
<xi:include href="gettingstarted.xml"/>
<xi:include href="installing.xml"/>
<xi:include href="identifiers.xml"/>
<xi:include href="registries.xml"/>
<xi:include href="statuses.xml"/>
<xi:include href="formats.xml"/>
<xi:include href="controllingservers.xml"/>
<xi:include href="configuring.xml"/>
<xi:include href="glance.xml"/>
<xi:include href="client.xml"/>
<xi:include href="authentication.xml"/>
</book>

View File

@ -0,0 +1,145 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.openstack.docs</groupId>
<artifactId>openstack-guide</artifactId>
<version>1.0.0-SNAPSHOT</version>
<packaging>jar</packaging>
<name>OpenStack Guides</name>
<!-- ################################################ -->
<!-- USE "mvn clean generate-sources" to run this POM -->
<!-- ################################################ -->
<profiles>
<profile>
<id>Rackspace Research Repositories</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<repositories>
<repository>
<id>rackspace-research</id>
<name>Rackspace Research Repository</name>
<url>http://maven.research.rackspacecloud.com/content/groups/public/</url>
</repository>
</repositories>
<pluginRepositories>
<pluginRepository>
<id>rackspace-research</id>
<name>Rackspace Research Repository</name>
<url>http://maven.research.rackspacecloud.com/content/groups/public/</url>
</pluginRepository>
</pluginRepositories>
</profile>
</profiles>
<build>
<resources>
<resource>
<directory>target/docbkx/pdf</directory>
<excludes>
<exclude>**/*.fo</exclude>
</excludes>
</resource>
</resources>
<plugins>
<plugin>
<groupId>com.rackspace.cloud.api</groupId>
<artifactId>clouddocs-maven-plugin</artifactId>
<version>1.0.5-SNAPSHOT</version>
<executions>
<execution>
<id>goal1</id>
<goals>
<goal>generate-pdf</goal>
</goals>
<phase>generate-sources</phase>
<configuration>
<highlightSource>false</highlightSource>
<!-- The following elements sets the autonumbering of sections in output for chapter numbers but no numbered sections-->
<sectionAutolabel>0</sectionAutolabel>
<sectionLabelIncludesComponentLabel>0</sectionLabelIncludesComponentLabel>
</configuration>
</execution>
<execution>
<id>goal2</id>
<goals>
<goal>generate-webhelp</goal>
</goals>
<phase>generate-sources</phase>
<configuration>
<!-- These parameters only apply to webhelp -->
<enableDisqus>1</enableDisqus>
<disqusShortname>openstackdocs</disqusShortname>
<enableGoogleAnalytics>1</enableGoogleAnalytics>
<googleAnalyticsId>UA-17511903-6</googleAnalyticsId>
<generateToc>
appendix toc,title
article/appendix nop
article toc,title
book title,figure,table,example,equation
chapter toc,title
part toc,title
preface toc,title
qandadiv toc
qandaset toc
reference toc,title
set toc,title
</generateToc>
<!-- The following elements sets the autonumbering of sections in output for chapter numbers but no numbered sections-->
<sectionAutolabel>0</sectionAutolabel>
<sectionLabelIncludesComponentLabel>0</sectionLabelIncludesComponentLabel>
<postProcess>
<!-- Copies the figures to the correct location for webhelp -->
<copy todir="${basedir}/target/docbkx/webhelp/openstack-image-service-admin/os-image-adminguide/figures">
<fileset dir="${basedir}/figures">
<include name="**/*.png" />
</fileset>
</copy>
<!-- Copies webhelp (HTML output) to desired URL location on docs.openstack.org -->
<copy
todir="${basedir}/target/docbkx/webhelp/trunk/openstack-image-service/admin">
<fileset
dir="${basedir}/target/docbkx/webhelp/openstack-image-service-admin/os-image-adminguide/">
<include name="**/*" />
</fileset>
</copy>
<!--Moves PDFs to the needed placement -->
<move failonerror="false"
file="${basedir}/target/docbkx/pdf/openstack-image-service-admin/os-image-adminguide.pdf"
tofile="${basedir}/target/docbkx/webhelp/trunk/openstack-image-service/admin/os-image-adminguide-trunk.pdf"/>
<!--Deletes leftover unneeded directories -->
<delete dir="${basedir}/target/docbkx/webhelp/openstack-image-service-admin"/>
</postProcess>
</configuration>
</execution>
</executions>
<configuration>
<!-- These parameters apply to pdf and webhelp -->
<xincludeSupported>true</xincludeSupported>
<sourceDirectory>.</sourceDirectory>
<includes>
os-image-adminguide.xml
</includes>
<profileSecurity>reviewer</profileSecurity>
<branding>openstack</branding>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,216 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="image-registries"><info><title>Image Registries</title></info>
<para>
Image metadata made available through Glance can be stored in image
`registries`. Image registries are any web service that adheres to
the Glance REST-like API for image metadata.
</para>
<para>
Glance comes with a server program
<literal>glance-registry</literal> that acts as a reference
implementation of a Glance Registry.
</para>
<para>
Please see the section about Controlling Servers for more information on starting up the
Glance registry server that ships with Glance.
</para>
<section xml:id="glance-registry-api"><info><title>Glance Registry API</title></info>
<para>
Any web service that publishes an API that conforms to the
following REST-like API specification can be used by Glance as a
registry.
</para>
<section xml:id="api-in-summary"><info><title>API in Summary</title></info>
<para>
The following is a brief description of the Glance API:
</para>
<screen>
GET /images Return brief information about public images
GET /images/detail Return detailed information about public images
GET /images/&lt;ID&gt; Return metadata about an image in HTTP headers
POST /images Register metadata about a new image
PUT /images/&lt;ID&gt; Update metadata about an existing image
DELETE /images/&lt;ID&gt; Remove an image's metadata from the registry
</screen>
</section>
</section>
<section xml:id="filtering-images-returned-via-get-images-and-get-imagesdetail"><info><title>Filtering Images Returned via <literal>GET /images</literal>
and <literal>GET /images/detail</literal></title></info>
<para>
Both the <literal>GET /images</literal> and
<literal>GET /images/detail</literal> requests take query
parameters that serve to filter the returned list of images. The
following list details these query parameters.
</para>
<itemizedlist>
<listitem>
<para>
<literal>name=NAME</literal>
</para>
<para>
Filters images having a <literal>name</literal> attribute
matching <literal>NAME</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>container_format=FORMAT</literal>
</para>
<para>
Filters images having a <literal>container_format</literal>
attribute matching <literal>FORMAT</literal>
</para>
</listitem>
<listitem>
<para>
<literal>disk_format=FORMAT</literal>
</para>
<para>
Filters images having a <literal>disk_format</literal>
attribute matching <literal>FORMAT</literal>
</para>
</listitem>
<listitem>
<para>
<literal>status=STATUS</literal>
</para>
<para>
Filters images having a <literal>status</literal> attribute
matching <literal>STATUS</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>size_min=BYTES</literal>
</para>
<para>
Filters images having a <literal>size</literal> attribute
greater than or equal to <literal>BYTES</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>size_max=BYTES</literal>
</para>
<para>
Filters images having a <literal>size</literal> attribute less
than or equal to <literal>BYTES</literal>.
</para>
</listitem>
</itemizedlist>
<para>
These two resources also accept sort parameters:
</para>
<itemizedlist>
<listitem>
<para>
<literal>sort_key=KEY</literal>
</para>
<para>
Results will be ordered by the specified image attribute
<literal>KEY</literal>. Accepted values include
<literal>id</literal>, <literal>name</literal>,
<literal>status</literal>, <literal>disk_format</literal>,
<literal>container_format</literal>, <literal>size</literal>,
<literal>created_at</literal> (default) and
<literal>updated_at</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>sort_dir=DIR</literal>
</para>
<para>
Results will be sorted in the direction
<literal>DIR</literal>. Accepted values are
<literal>asc</literal> for ascending or
<literal>desc</literal> (default) for descending.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="post-images"><info><title><literal>POST /images</literal></title></info>
<para>
The body of the request will be a JSON-encoded set of data about
the image to add to the registry. It will be in the following
format:
</para>
<screen>
{'image':
{'id': &lt;ID&gt;|None,
'name': &lt;NAME&gt;,
'status': &lt;STATUS&gt;,
'disk_format': &lt;DISK_FORMAT&gt;,
'container_format': &lt;CONTAINER_FORMAT&gt;,
'properties': [ ... ]
}
}
</screen>
<para>
The request shall validate the following conditions and return a
<literal>400 Bad request</literal> when any of the conditions are
not met:
</para>
<itemizedlist>
<listitem>
<para>
<literal>status</literal> must be non-empty, and must be one
of <emphasis role="strong">active</emphasis>,
<emphasis role="strong">saving</emphasis>,
<emphasis role="strong">queued</emphasis>, or
<emphasis role="strong">killed</emphasis>
</para>
</listitem>
<listitem>
<para>
<literal>disk_format</literal> must be non-empty, and must be
one of <emphasis role="strong">ari</emphasis>,
<emphasis role="strong">aki</emphasis>,
<emphasis role="strong">ami</emphasis>,
<emphasis role="strong">raw</emphasis>,
<emphasis role="strong">iso</emphasis>,
<emphasis role="strong">vhd</emphasis>,
<emphasis role="strong">vdi</emphasis>,
<emphasis role="strong">qcow2</emphasis>, or
<emphasis role="strong">vmdk</emphasis>
</para>
</listitem>
<listitem>
<para>
<literal>container_format</literal> must be non-empty, and
must be one of <emphasis role="strong">ari</emphasis>,
<emphasis role="strong">aki</emphasis>,
<emphasis role="strong">ami</emphasis>,
<emphasis role="strong">bare</emphasis>, or
<emphasis role="strong">ovf</emphasis>
</para>
</listitem>
<listitem>
<para>
If <literal>disk_format</literal> <emphasis>or</emphasis>
<literal>container_format</literal> is
<emphasis role="strong">ari</emphasis>,
<emphasis role="strong">aki</emphasis>,
<emphasis role="strong">ami</emphasis>, then
<emphasis>both</emphasis> <literal>disk_format</literal> and
<literal>container_format</literal> must be the same.
</para>
</listitem>
</itemizedlist>
<section xml:id="examples"><info><title>Examples</title></info>
<para>
TODO: Complete examples for Glance registry API
</para>
</section>
</section>
</chapter>

View File

@ -0,0 +1,67 @@
<?xml version="1.0"?>
<!-- Converted by db4-upgrade version 1.0 -->
<chapter xmlns="http://docbook.org/ns/docbook" version="5.0-extension RaxBook-1.0" xml:id="image-statuses"><info><title>Image Statuses</title></info>
<para>
Images in Glance can be in one of the following statuses:
</para>
<itemizedlist>
<listitem>
<para>
<literal>queued</literal>
</para>
<para>
The image identifier has been reserved for an image in the
Glance registry. No image data has been uploaded to Glance.
</para>
</listitem>
<listitem>
<para>
<literal>saving</literal>
</para>
<para>
Denotes that an image's raw data is currently being uploaded to
Glance. When an image is registered with a call to `POST
/images` and there is an `x-image-meta-location` header present,
that image will never be in the `saving` status (as the image
data is already available in some other location).
</para>
</listitem>
<listitem>
<para>
<literal>active</literal>
</para>
<para>
Denotes an image that is fully available in Glance.
</para>
</listitem>
<listitem>
<para>
<literal>killed</literal>
</para>
<para>
Denotes that an error occurred during the uploading of an
image's data, and that the image is not readable.
</para>
</listitem>
<listitem>
<para>
<literal>deleted</literal>
</para>
<para>
Glance has retained the information about the image, but it is
no longer available to use. An image in this state will be
removed automatically at a later date.
</para>
</listitem>
<listitem>
<para>
<literal>pending_delete</literal>
</para>
<para>
This is similar to `deleted`; however, Glance has not yet
removed the image data. An image in this state is recoverable.
</para>
</listitem>
</itemizedlist>
</chapter>

View File

@ -0,0 +1,21 @@
This typeface software ("SOFTWARE") is the property of FontSite Inc. Its
use by you is covered under the terms of an End-User License Agreement
("EULA"). By exercising your rights to make and use copies of this SOFTWARE,
you agree to be bound by the terms of this EULA. If you do not agree to the
terms of this EULA, you may not use the SOFTWARE.
This SOFTWARE is a valuable asset of FontSite Inc. which is protected by
copyright laws and international copyright treaties, as well as other
intellectual property laws and treaties. The typeface software is licensed,
not sold.
This EULA grants you the following rights:
You may install and use an unlimited number of copies of this SOFTWARE.
You may reproduce and distribute an unlimited number of copies of this
SOFTWARE, provided that each copy shall be a true and complete copy,
including all copyright and trademark notices, electronic documentation
(user guide in PDF format, etc.), and shall be accompanied by a copy of this
EULA. Copies of the SOFTWARE may not be distributed for profit either on a
standalone basis or included as part of your own product.

View File

@ -0,0 +1,35 @@
<?xml version="1.0" encoding="UTF-8"?>
<fop version="1.0">
<!-- Base URL for resolving relative URLs -->
<base>.</base>
<!-- Source resolution in dpi (dots/pixels per inch) for determining the size of pixels in SVG and bitmap images, default: 72dpi -->
<source-resolution>90</source-resolution>
<!-- Target resolution in dpi (dots/pixels per inch) for specifying the target resolution for generated bitmaps, default: 72dpi -->
<target-resolution>90</target-resolution>
<!-- Default page-height and page-width, in case
value is specified as auto -->
<default-page-settings height="11in" width="8.26in"/>
<renderers>
<renderer mime="application/pdf">
<filterList>
<!-- provides compression using zlib flate (default is on) -->
<value>flate</value>
<!-- encodes binary data into printable ascii characters (default off)
This provides about a 4:5 expansion of data size -->
<!-- <value>ascii-85</value> -->
<!-- encodes binary data with hex representation (default off)
This filter is not recommended as it doubles the data size -->
<!-- <value>ascii-hex</value> -->
</filterList>
<fonts>
<directory recursive="true">$fontPath$</directory>
</fonts>
</renderer>
</renderers>
</fop>

View File

@ -0,0 +1,15 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 12.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 51448) -->
<!DOCTYPE svg [
<!ENTITY ns_svg "http://www.w3.org/2000/svg">
<!ENTITY ns_xlink "http://www.w3.org/1999/xlink">
]>
<svg version="1.0" id="Layer_1" xmlns="&ns_svg;" xmlns:xlink="&ns_xlink;" width="33" height="33" viewBox="0 0 33 33"
style="overflow:visible;enable-background:new 0 0 33 33;" xml:space="preserve">
<circle style="stroke:#000000;" cx="16.5" cy="16.5" r="16"/>
<g>
<g style="enable-background:new ;">
<path style="fill:#FFFFFF;" d="M10.428,10.411h0.56c3.78,0,4.788-1.96,4.872-3.444h3.22v19.88h-3.92V13.154h-4.732V10.411z"/>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 703 B

View File

@ -0,0 +1,18 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 12.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 51448) -->
<!DOCTYPE svg [
<!ENTITY ns_svg "http://www.w3.org/2000/svg">
<!ENTITY ns_xlink "http://www.w3.org/1999/xlink">
]>
<svg version="1.0" id="Layer_1" xmlns="&ns_svg;" xmlns:xlink="&ns_xlink;" width="33" height="33" viewBox="0 0 33 33"
style="overflow:visible;enable-background:new 0 0 33 33;" xml:space="preserve">
<circle style="stroke:#000000;" cx="16.5" cy="16.5" r="16"/>
<g>
<g style="enable-background:new ;">
<path style="fill:#FFFFFF;" d="M3.815,10.758h0.48c3.24,0,4.104-1.681,4.176-2.952h2.76v17.04h-3.36V13.11H3.815V10.758z"/>
<path style="fill:#FFFFFF;" d="M22.175,7.806c4.009,0,5.904,2.76,5.904,8.736c0,5.975-1.896,8.76-5.904,8.76
c-4.008,0-5.904-2.785-5.904-8.76C16.271,10.566,18.167,7.806,22.175,7.806z M22.175,22.613c1.921,0,2.448-1.68,2.448-6.071
c0-4.393-0.527-6.049-2.448-6.049c-1.92,0-2.448,1.656-2.448,6.049C19.727,20.934,20.255,22.613,22.175,22.613z"/>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 1.0 KiB

View File

@ -0,0 +1,16 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 12.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 51448) -->
<!DOCTYPE svg [
<!ENTITY ns_svg "http://www.w3.org/2000/svg">
<!ENTITY ns_xlink "http://www.w3.org/1999/xlink">
]>
<svg version="1.0" id="Layer_1" xmlns="&ns_svg;" xmlns:xlink="&ns_xlink;" width="33" height="33" viewBox="0 0 33 33"
style="overflow:visible;enable-background:new 0 0 33 33;" xml:space="preserve">
<circle style="stroke:#000000;" cx="16.5" cy="16.5" r="16"/>
<g>
<g style="enable-background:new ;">
<path style="fill:#FFFFFF;" d="M5.209,10.412h0.48c3.24,0,4.104-1.681,4.176-2.952h2.76V24.5h-3.36V12.764H5.209V10.412z"/>
<path style="fill:#FFFFFF;" d="M18.553,10.412h0.48c3.24,0,4.104-1.681,4.176-2.952h2.76V24.5h-3.359V12.764h-4.056V10.412z"/>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 827 B

View File

@ -0,0 +1,18 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 12.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 51448) -->
<!DOCTYPE svg [
<!ENTITY ns_svg "http://www.w3.org/2000/svg">
<!ENTITY ns_xlink "http://www.w3.org/1999/xlink">
]>
<svg version="1.0" id="Layer_1" xmlns="&ns_svg;" xmlns:xlink="&ns_xlink;" width="33" height="33" viewBox="0 0 33 33"
style="overflow:visible;enable-background:new 0 0 33 33;" xml:space="preserve">
<circle style="stroke:#000000;" cx="16.5" cy="16.5" r="16"/>
<g>
<g style="enable-background:new ;">
<path style="fill:#FFFFFF;" d="M4.813,10.412h0.48c3.24,0,4.104-1.681,4.176-2.952h2.76V24.5h-3.36V12.764H4.813V10.412z"/>
<path style="fill:#FFFFFF;" d="M17.316,13.484c0-5.545,4.056-6.024,5.568-6.024c3.265,0,5.856,1.92,5.856,5.376
c0,2.928-1.896,4.416-3.553,5.544c-2.256,1.584-3.432,2.353-3.815,3.145h7.392V24.5h-11.64c0.12-1.992,0.264-4.08,3.96-6.768
c3.072-2.232,4.296-3.097,4.296-5.017c0-1.128-0.72-2.424-2.353-2.424c-2.352,0-2.423,1.944-2.447,3.192H17.316z"/>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 1.0 KiB

View File

@ -0,0 +1,20 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 12.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 51448) -->
<!DOCTYPE svg [
<!ENTITY ns_svg "http://www.w3.org/2000/svg">
<!ENTITY ns_xlink "http://www.w3.org/1999/xlink">
]>
<svg version="1.0" id="Layer_1" xmlns="&ns_svg;" xmlns:xlink="&ns_xlink;" width="33" height="33" viewBox="0 0 33 33"
style="overflow:visible;enable-background:new 0 0 33 33;" xml:space="preserve">
<circle style="stroke:#000000;" cx="16.5" cy="16.5" r="16"/>
<g>
<g style="enable-background:new ;">
<path style="fill:#FFFFFF;" d="M3.813,10.412h0.48c3.24,0,4.104-1.681,4.176-2.952h2.76V24.5h-3.36V12.764H3.813V10.412z"/>
<path style="fill:#FFFFFF;" d="M20.611,14.636h0.529c1.008,0,2.855-0.096,2.855-2.304c0-0.624-0.288-2.185-2.137-2.185
c-2.303,0-2.303,2.185-2.303,2.784h-3.12c0-3.191,1.8-5.472,5.64-5.472c2.279,0,5.279,1.152,5.279,4.752
c0,1.728-1.08,2.808-2.039,3.24V15.5c0.6,0.168,2.568,1.056,2.568,3.96c0,3.216-2.377,5.496-5.809,5.496
c-1.607,0-5.928-0.36-5.928-5.688h3.288l-0.024,0.024c0,0.912,0.24,2.976,2.496,2.976c1.344,0,2.52-0.911,2.52-2.808
c0-2.328-2.256-2.424-3.816-2.424V14.636z"/>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 1.2 KiB

View File

@ -0,0 +1,17 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 12.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 51448) -->
<!DOCTYPE svg [
<!ENTITY ns_svg "http://www.w3.org/2000/svg">
<!ENTITY ns_xlink "http://www.w3.org/1999/xlink">
]>
<svg version="1.0" id="Layer_1" xmlns="&ns_svg;" xmlns:xlink="&ns_xlink;" width="33" height="33" viewBox="0 0 33 33"
style="overflow:visible;enable-background:new 0 0 33 33;" xml:space="preserve">
<circle style="stroke:#000000;" cx="16.5" cy="16.5" r="16"/>
<g>
<g style="enable-background:new ;">
<path style="fill:#FFFFFF;" d="M4.146,10.412h0.48c3.24,0,4.104-1.681,4.176-2.952h2.76V24.5h-3.36V12.764H4.146V10.412z"/>
<path style="fill:#FFFFFF;" d="M28.457,20.732h-1.896V24.5h-3.36v-3.768h-6.72v-2.904L22.746,7.46h3.815v10.656h1.896V20.732z
M23.201,18.116c0-4.128,0.072-6.792,0.072-7.32h-0.048l-4.272,7.32H23.201z"/>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 906 B

View File

@ -0,0 +1,19 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 12.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 51448) -->
<!DOCTYPE svg [
<!ENTITY ns_svg "http://www.w3.org/2000/svg">
<!ENTITY ns_xlink "http://www.w3.org/1999/xlink">
]>
<svg version="1.0" id="Layer_1" xmlns="&ns_svg;" xmlns:xlink="&ns_xlink;" width="33" height="33" viewBox="0 0 33 33"
style="overflow:visible;enable-background:new 0 0 33 33;" xml:space="preserve">
<circle style="stroke:#000000;" cx="16.5" cy="16.5" r="16"/>
<g>
<g style="enable-background:new ;">
<path style="fill:#FFFFFF;" d="M3.479,11.079h0.48c3.24,0,4.104-1.681,4.176-2.952h2.76v17.04h-3.36V13.43H3.479V11.079z"/>
<path style="fill:#FFFFFF;" d="M19.342,14.943c0.625-0.433,1.392-0.937,3.048-0.937c2.279,0,5.16,1.584,5.16,5.496
c0,2.328-1.176,6.121-6.192,6.121c-2.664,0-5.376-1.584-5.544-5.016h3.36c0.144,1.391,0.888,2.326,2.376,2.326
c1.607,0,2.544-1.367,2.544-3.191c0-1.512-0.72-3.047-2.496-3.047c-0.456,0-1.608,0.023-2.256,1.223l-3-0.143l1.176-9.361h9.36
v2.832h-6.937L19.342,14.943z"/>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 1.1 KiB

Some files were not shown because too many files have changed in this diff Show More