diff --git a/doc-tools-check-languages.conf b/doc-tools-check-languages.conf index 59fc1861fc..852ecb2983 100644 --- a/doc-tools-check-languages.conf +++ b/doc-tools-check-languages.conf @@ -8,7 +8,7 @@ declare -A BOOKS=( ["de"]="install-guide" ["fr"]="install-guide" ["id"]="image-guide install-guide" - ["ja"]="ha-guide image-guide install-guide ops-guide" + ["ja"]="ha-guide image-guide install-guide" ["ko_KR"]="install-guide" ["ru"]="install-guide" ["tr_TR"]="image-guide install-guide arch-design" @@ -47,7 +47,6 @@ declare -A SPECIAL_BOOKS=( ["image-guide"]="RST" ["install-guide"]="RST" ["networking-guide"]="RST" - ["ops-guide"]="RST" # Do not translate for now, we need to fix our scripts first to # generate the content properly. ["install-guide-debconf"]="skip" diff --git a/doc/common/app-support.rst b/doc/common/app-support.rst index dc58f8f3af..61492745b2 100644 --- a/doc/common/app-support.rst +++ b/doc/common/app-support.rst @@ -50,8 +50,6 @@ The following books explain how to configure and run an OpenStack cloud: * `Configuration Reference `_ -* `Operations Guide `_ - * `Networking Guide `_ * `High Availability Guide `_ diff --git a/doc/ops-guide/setup.cfg b/doc/ops-guide/setup.cfg deleted file mode 100644 index 6747b30b6c..0000000000 --- a/doc/ops-guide/setup.cfg +++ /dev/null @@ -1,27 +0,0 @@ -[metadata] -name = openstackopsguide -summary = OpenStack Operations Guide -author = OpenStack -author-email = openstack-docs@lists.openstack.org -home-page = https://docs.openstack.org/ -classifier = -Environment :: OpenStack -Intended Audience :: Information Technology -Intended Audience :: System Administrators -License :: OSI Approved :: Apache Software License -Operating System :: POSIX :: Linux -Topic :: Documentation - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[files] - -[build_sphinx] -warning-is-error = 1 -build-dir = build -source-dir = source - -[wheel] -universal = 1 diff --git a/doc/ops-guide/setup.py b/doc/ops-guide/setup.py deleted file mode 100644 index 736375744d..0000000000 --- a/doc/ops-guide/setup.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. 
-# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/doc/ops-guide/source/acknowledgements.rst b/doc/ops-guide/source/acknowledgements.rst deleted file mode 100644 index ad027b7809..0000000000 --- a/doc/ops-guide/source/acknowledgements.rst +++ /dev/null @@ -1,51 +0,0 @@ -================ -Acknowledgements -================ - -The OpenStack Foundation supported the creation of this book with plane -tickets to Austin, lodging (including one adventurous evening without -power after a windstorm), and delicious food. For about USD $10,000, we -could collaborate intensively for a week in the same room at the -Rackspace Austin office. The authors are all members of the OpenStack -Foundation, which you can join. Go to the `Foundation web -site `_. - -We want to acknowledge our excellent host Rackers at Rackspace in -Austin: - -- Emma Richards of Rackspace Guest Relations took excellent care of our - lunch orders and even set aside a pile of sticky notes that had - fallen off the walls. - -- Betsy Hagemeier, a Fanatical Executive Assistant, took care of a room - reshuffle and helped us settle in for the week. - -- The Real Estate team at Rackspace in Austin, also known as "The - Victors," were super responsive. - -- Adam Powell in Racker IT supplied us with bandwidth each day and - second monitors for those of us needing more screens. - -- On Wednesday night we had a fun happy hour with the Austin OpenStack - Meetup group and Racker Katie Schmidt took great care of our group. - -We also had some excellent input from outside of the room: - -- Tim Bell from CERN gave us feedback on the outline before we started - and reviewed it mid-week. - -- Sébastien Han has written excellent blogs and generously gave his - permission for re-use. - -- Oisin Feeley read it, made some edits, and provided emailed feedback - right when we asked. - -Inside the book sprint room with us each day was our book sprint -facilitator Adam Hyde. Without his tireless support and encouragement, -we would have thought a book of this scope was impossible in five days. -Adam has proven the book sprint method effectively again and again. He -creates both tools and faith in collaborative authoring at -`www.booksprints.net `_. - -We couldn't have pulled it off without so much supportive help and -encouragement. diff --git a/doc/ops-guide/source/app-crypt.rst b/doc/ops-guide/source/app-crypt.rst deleted file mode 100644 index 35480419d1..0000000000 --- a/doc/ops-guide/source/app-crypt.rst +++ /dev/null @@ -1,536 +0,0 @@ -================================= -Tales From the Cryp^H^H^H^H Cloud -================================= - -Herein lies a selection of tales from OpenStack cloud operators. Read, -and learn from their wisdom. - -Double VLAN -~~~~~~~~~~~ - -I was on-site in Kelowna, British Columbia, Canada setting up a new -OpenStack cloud. The deployment was fully automated: Cobbler deployed -the OS on the bare metal, bootstrapped it, and Puppet took over from -there. I had run the deployment scenario so many times in practice and -took for granted that everything was working. - -On my last day in Kelowna, I was in a conference call from my hotel. In -the background, I was fooling around on the new cloud. I launched an -instance and logged in. Everything looked fine. Out of boredom, I ran -:command:`ps aux` and all of the sudden the instance locked up. 
-
-Thinking it was just a one-off issue, I terminated the instance and
-launched a new one. By then, the conference call ended and I was off to
-the data center.
-
-At the data center, I was finishing up some tasks and remembered the
-lock-up. I logged into the new instance and ran :command:`ps aux` again.
-It worked. Phew. I decided to run it one more time. It locked up.
-
-After reproducing the problem several times, I came to the unfortunate
-conclusion that this cloud did indeed have a problem. Even worse, my
-time was up in Kelowna and I had to return to Calgary.
-
-Where do you even begin troubleshooting something like this? An instance
-that just randomly locks up when a command is issued. Is it the image?
-Nope—it happens on all images. Is it the compute node? Nope—all nodes.
-Is the instance locked up? No! New SSH connections work just fine!
-
-We reached out for help. A networking engineer suggested it was an MTU
-issue. Great! MTU! Something to go on! What's MTU and why would it cause
-a problem?
-
-MTU is the maximum transmission unit. It specifies the maximum number of
-bytes that the interface accepts for each packet. If two interfaces have
-two different MTUs, bytes might get chopped off and weird things
-happen—such as random session lockups.
-
-.. note::
-
-   Not all packets have a size of 1500. Running the :command:`ls` command over
-   SSH might only create a single packet of less than 1500 bytes.
-   However, running a command with heavy output, such as :command:`ps aux`,
-   requires several packets of 1500 bytes.
-
-OK, so where is the MTU issue coming from? Why haven't we seen this in
-any other deployment? What's new in this situation? Well, new data
-center, new uplink, new switches, new model of switches, new servers,
-first time using this model of servers… so, basically everything was
-new. Wonderful. We toyed around with raising the MTU in various areas:
-the switches, the NICs on the compute nodes, the virtual NICs in the
-instances; we even had the data center raise the MTU for our uplink
-interface. Some changes worked, some didn't. This line of
-troubleshooting didn't feel right, though. We shouldn't have to be
-changing the MTU in these areas.
-
-As a last resort, our network admin (Alvaro) and I sat down with
-four terminal windows, a pencil, and a piece of paper. In one window, we
-ran ping. In the second window, we ran ``tcpdump`` on the cloud
-controller. In the third, ``tcpdump`` on the compute node. And the fourth
-had ``tcpdump`` on the instance. For background, this cloud was a
-multi-node, non-multi-host setup.
-
-One cloud controller acted as a gateway to all compute nodes.
-VlanManager was used for the network config. This means that the cloud
-controller and all compute nodes had a different VLAN for each OpenStack
-project. We used the ``-s`` option of ``ping`` to change the packet
-size. We watched as sometimes packets would fully return, sometimes they'd
-only make it out and never back in, and sometimes the packets would stop at a
-random point. We changed ``tcpdump`` to start displaying the hex dump of
-the packet. We pinged between every combination of outside, controller,
-compute, and instance.
-
-Finally, Alvaro noticed something. When a packet from the outside hits
-the cloud controller, it should not be configured with a VLAN. We
-verified this as true. When the packet went from the cloud controller to
-the compute node, it should only have a VLAN if it was destined for an
-instance. This was still true.
When the ping reply was sent from the -instance, it should be in a VLAN. True. When it came back to the cloud -controller and on its way out to the Internet, it should no longer have -a VLAN. False. Uh oh. It looked as though the VLAN part of the packet -was not being removed. - -That made no sense. - -While bouncing this idea around in our heads, I was randomly typing -commands on the compute node: - -.. code-block:: console - - $ ip a - … - 10: vlan100@vlan20: mtu 1500 qdisc noqueue master br100 state UP - … - -"Hey Alvaro, can you run a VLAN on top of a VLAN?" - -"If you did, you'd add an extra 4 bytes to the packet…" - -Then it all made sense… - -.. code-block:: console - - $ grep vlan_interface /etc/nova/nova.conf - vlan_interface=vlan20 - -In ``nova.conf``, ``vlan_interface`` specifies what interface OpenStack -should attach all VLANs to. The correct setting should have been: - -.. code-block:: ini - - vlan_interface=bond0 - -As this would be the server's bonded NIC. - -vlan20 is the VLAN that the data center gave us for outgoing Internet -access. It's a correct VLAN and is also attached to bond0. - -By mistake, I configured OpenStack to attach all tenant VLANs to vlan20 -instead of bond0 thereby stacking one VLAN on top of another. This added -an extra 4 bytes to each packet and caused a packet of 1504 bytes to be -sent out which would cause problems when it arrived at an interface that -only accepted 1500. - -As soon as this setting was fixed, everything worked. - -"The Issue" -~~~~~~~~~~~ - -At the end of August 2012, a post-secondary school in Alberta, Canada -migrated its infrastructure to an OpenStack cloud. As luck would have -it, within the first day or two of it running, one of their servers just -disappeared from the network. Blip. Gone. - -After restarting the instance, everything was back up and running. We -reviewed the logs and saw that at some point, network communication -stopped and then everything went idle. We chalked this up to a random -occurrence. - -A few nights later, it happened again. - -We reviewed both sets of logs. The one thing that stood out the most was -DHCP. At the time, OpenStack, by default, set DHCP leases for one minute -(it's now two minutes). This means that every instance contacts the -cloud controller (DHCP server) to renew its fixed IP. For some reason, -this instance could not renew its IP. We correlated the instance's logs -with the logs on the cloud controller and put together a conversation: - -#. Instance tries to renew IP. - -#. Cloud controller receives the renewal request and sends a response. - -#. Instance "ignores" the response and re-sends the renewal request. - -#. Cloud controller receives the second request and sends a new - response. - -#. Instance begins sending a renewal request to ``255.255.255.255`` - since it hasn't heard back from the cloud controller. - -#. The cloud controller receives the ``255.255.255.255`` request and - sends a third response. - -#. The instance finally gives up. - -With this information in hand, we were sure that the problem had to do -with DHCP. We thought that for some reason, the instance wasn't getting -a new IP address and with no IP, it shut itself off from the network. - -A quick Google search turned up this: `DHCP lease errors in VLAN -mode `_ -which further supported our DHCP theory. - -An initial idea was to just increase the lease time. If the instance -only renewed once every week, the chances of this problem happening -would be tremendously smaller than every minute. 
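-
-For reference, the lease length is a single ``nova-network`` flag. A
-sketch of the kind of change we were considering, assuming a deployment
-of that era (``dhcp_lease_time`` is the real option; the one-week value
-is purely illustrative):
-
-.. code-block:: ini
-
-   # /etc/nova/nova.conf on the network host
-   # lease time, in seconds, that dnsmasq hands out to instances
-   dhcp_lease_time=604800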
-This didn't solve the problem, though. It was just covering the problem
-up.
-
-We decided to have ``tcpdump`` run on this instance and see if we could
-catch it in action again. Sure enough, we did.
-
-The ``tcpdump`` looked very, very weird. In short, it looked as though
-network communication stopped before the instance tried to renew its IP.
-Since there is so much DHCP chatter from a one-minute lease, it's very
-hard to confirm it, but even with only milliseconds difference between
-packets, if one packet arrives first, it arrived first, and if that
-packet reported network issues, then it had to have happened before
-DHCP.
-
-Additionally, the instance in question was responsible for a very, very
-large backup job each night. While "The Issue" (as we were now calling
-it) didn't happen exactly when the backup happened, it was close enough
-(a few hours) that we couldn't ignore it.
-
-Further days went by and we caught The Issue in action more and more. We
-found that dhclient was not running after The Issue happened. Now we were
-back to thinking it was a DHCP issue. Running ``/etc/init.d/networking
-restart`` brought everything back up and running.
-
-Ever have one of those days where all of a sudden you get the Google
-results you were looking for? Well, that's what happened here. I was
-looking for information on dhclient and why it dies when it can't renew
-its lease, and all of a sudden I found a bunch of OpenStack and dnsmasq
-discussions that were identical to the problem we were seeing!
-
-`Problem with Heavy Network IO and
-Dnsmasq `_.
-
-`instances losing IP address while running, due to No
-DHCPOFFER `_.
-
-Seriously, Google.
-
-This bug report was the key to everything: `KVM images lose connectivity
-with bridged
-network `_.
-
-It was funny to read the report. It was full of people who had some
-strange network problem but didn't quite explain it in the same way.
-
-So it was a qemu/kvm bug.
-
-Around the same time as finding the bug report, a co-worker was able to
-successfully reproduce The Issue! How? He used ``iperf`` to spew a ton
-of bandwidth at an instance. Within 30 minutes, the instance just
-disappeared from the network.
-
-Armed with a patched qemu and a way to reproduce, we set out to see if
-we'd finally solved The Issue. After 48 hours straight of hammering the
-instance with bandwidth, we were confident. The rest is history. You can
-search the bug report for "joe" to find my comments and actual tests.
-
-Disappearing Images
-~~~~~~~~~~~~~~~~~~~
-
-At the end of 2012, Cybera (a nonprofit with a mandate to oversee the
-development of cyberinfrastructure in Alberta, Canada) deployed an
-updated OpenStack cloud for their `DAIR
-project `_. A few days into
-production, a compute node locked up. Upon rebooting the node, I checked
-to see what instances were hosted on that node so I could boot them on
-behalf of the customer. Luckily, only one instance.
-
-The :command:`nova reboot` command wasn't working, so I used :command:`virsh`,
-but it immediately came back with an error saying it was unable to find the
-backing disk. In this case, the backing disk is the Glance image that is
-copied to ``/var/lib/nova/instances/_base`` when the image is used for
-the first time. Why couldn't it find it? I checked the directory and
-sure enough it was gone.
-
-I reviewed the ``nova`` database and saw the instance's entry in the
-``nova.instances`` table. The image that the instance was using matched
-what virsh was reporting, so no inconsistency there.
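-
-.. note::
-
-   For anyone chasing a similar error: the backing file that libvirt
-   complains about can be read straight off the instance's disk image.
-   A quick sketch—the instance path here is hypothetical, and the hash
-   is abbreviated:
-
-   .. code-block:: console
-
-      # qemu-img info /var/lib/nova/instances/instance-00000001/disk
-      image: /var/lib/nova/instances/instance-00000001/disk
-      file format: qcow2
-      backing file: /var/lib/nova/instances/_base/7b4783508212...
-
-   If the path shown next to ``backing file`` no longer exists under
-   ``_base``, you will see exactly the "unable to find the backing
-   disk" error described here.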
- -I checked Glance and noticed that this image was a snapshot that the -user created. At least that was good news—this user would have been the -only user affected. - -Finally, I checked StackTach and reviewed the user's events. They had -created and deleted several snapshots—most likely experimenting. -Although the timestamps didn't match up, my conclusion was that they -launched their instance and then deleted the snapshot and it was somehow -removed from ``/var/lib/nova/instances/_base``. None of that made sense, -but it was the best I could come up with. - -It turns out the reason that this compute node locked up was a hardware -issue. We removed it from the DAIR cloud and called Dell to have it -serviced. Dell arrived and began working. Somehow or another (or a fat -finger), a different compute node was bumped and rebooted. Great. - -When this node fully booted, I ran through the same scenario of seeing -what instances were running so I could turn them back on. There were a -total of four. Three booted and one gave an error. It was the same error -as before: unable to find the backing disk. Seriously, what? - -Again, it turns out that the image was a snapshot. The three other -instances that successfully started were standard cloud images. Was it a -problem with snapshots? That didn't make sense. - -A note about DAIR's architecture: ``/var/lib/nova/instances`` is a -shared NFS mount. This means that all compute nodes have access to it, -which includes the ``_base`` directory. Another centralized area is -``/var/log/rsyslog`` on the cloud controller. This directory collects -all OpenStack logs from all compute nodes. I wondered if there were any -entries for the file that :command:`virsh` is reporting: - -.. code-block:: console - - dair-ua-c03/nova.log:Dec 19 12:10:59 dair-ua-c03 - 2012-12-19 12:10:59 INFO nova.virt.libvirt.imagecache - [-] Removing base file: - /var/lib/nova/instances/_base/7b4783508212f5d242cbf9ff56fb8d33b4ce6166_10 - -Ah-hah! So OpenStack was deleting it. But why? - -A feature was introduced in Essex to periodically check and see if there -were any ``_base`` files not in use. If there were, OpenStack Compute -would delete them. This idea sounds innocent enough and has some good -qualities to it. But how did this feature end up turned on? It was -disabled by default in Essex. As it should be. It was `decided to be -turned on in Folsom `_. -I cannot emphasize enough that: - -*Actions which delete things should not be enabled by default.* - -Disk space is cheap these days. Data recovery is not. - -Secondly, DAIR's shared ``/var/lib/nova/instances`` directory -contributed to the problem. Since all compute nodes have access to this -directory, all compute nodes periodically review the \_base directory. -If there is only one instance using an image, and the node that the -instance is on is down for a few minutes, it won't be able to mark the -image as still in use. Therefore, the image seems like it's not in use -and is deleted. When the compute node comes back online, the instance -hosted on that node is unable to start. - -The Valentine's Day Compute Node Massacre -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Although the title of this story is much more dramatic than the actual -event, I don't think, or hope, that I'll have the opportunity to use -"Valentine's Day Massacre" again in a title. - -This past Valentine's Day, I received an alert that a compute node was -no longer available in the cloud—meaning, - -.. 
code-block:: console
-
-   $ openstack compute service list
-
-showed this particular node in a down state.
-
-I logged into the cloud controller and was able to both ``ping`` and SSH
-into the problematic compute node, which seemed very odd. Usually if I
-receive this type of alert, the compute node has totally locked up and
-would be inaccessible.
-
-After a few minutes of troubleshooting, I saw the following details:
-
-- A user recently tried launching a CentOS instance on that node
-
-- This user was the only user on the node (new node)
-
-- The load shot up to 8 right before I received the alert
-
-- The bonded 10gb network device (bond0) was in a DOWN state
-
-- The 1gb NIC was still alive and active
-
-I looked at the status of both NICs in the bonded pair and saw that
-neither was able to communicate with the switch port. Seeing as how each
-NIC in the bond is connected to a separate switch, I thought that the
-chance of a switch port dying on each switch at the same time was quite
-improbable. I concluded that the 10gb dual-port NIC had died and needed
-to be replaced. I created a ticket for the hardware support department at
-the data center where the node was hosted. I felt lucky that this was a
-new node and no one else was hosted on it yet.
-
-An hour later I received the same alert, but for another compute node.
-Crap. OK, now there's definitely a problem going on. Just like the
-original node, I was able to log in by SSH. The bond0 NIC was DOWN but
-the 1gb NIC was active.
-
-And the best part: the same user had just tried creating a CentOS
-instance. What?
-
-I was totally confused at this point, so I texted our network admin to
-see if he was available to help. He logged in to both switches and
-immediately saw the problem: the switches detected spanning tree packets
-coming from the two compute nodes and immediately shut the ports down to
-prevent spanning tree loops:
-
-.. code-block:: console
-
-   Feb 15 01:40:18 SW-1 Stp: %SPANTREE-4-BLOCK_BPDUGUARD: Received BPDU packet on Port-Channel35 with BPDU guard enabled. Disabling interface. (source mac fa:16:3e:24:e7:22)
-   Feb 15 01:40:18 SW-1 Ebra: %ETH-4-ERRDISABLE: bpduguard error detected on Port-Channel35.
-   Feb 15 01:40:18 SW-1 Mlag: %MLAG-4-INTF_INACTIVE_LOCAL: Local interface Port-Channel35 is link down. MLAG 35 is inactive.
-   Feb 15 01:40:18 SW-1 Ebra: %LINEPROTO-5-UPDOWN: Line protocol on Interface Port-Channel35 (Server35), changed state to down
-   Feb 15 01:40:19 SW-1 Stp: %SPANTREE-6-INTERFACE_DEL: Interface Port-Channel35 has been removed from instance MST0
-   Feb 15 01:40:19 SW-1 Ebra: %LINEPROTO-5-UPDOWN: Line protocol on Interface Ethernet35 (Server35), changed state to down
-
-He re-enabled the switch ports and the two compute nodes immediately
-came back to life.
-
-Unfortunately, this story has an open ending... we're still looking into
-why the CentOS image was sending out spanning tree packets. Further,
-we're researching a proper way to mitigate this from happening.
-It's a bigger issue than one might think. While it's extremely important
-for switches to prevent spanning tree loops, it's very problematic to
-have an entire compute node be cut from the network when this happens.
-If a compute node is hosting 100 instances and one of them sends a
-spanning tree packet, that instance has effectively DDOS'd the other 99
-instances.
-
-This is an ongoing and hot topic in networking circles—especially with
-the rise of virtualization and virtual switches.
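-
-.. note::
-
-   One way to catch a misbehaving instance in the act is to capture
-   BPDUs on the compute node itself; spanning tree frames are sent to
-   the well-known multicast MAC address ``01:80:c2:00:00:00``. A
-   hedged sketch—the interface name is an assumption:
-
-   .. code-block:: console
-
-      # tcpdump -n -e -i bond0 ether dst 01:80:c2:00:00:00
-
-   The source MAC of any frame captured (note the ``fa:16:3e`` OUI
-   used for OpenStack virtual NICs in the switch log above) points
-   back at the offending instance.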
-
-Down the Rabbit Hole
-~~~~~~~~~~~~~~~~~~~~
-
-Users being able to retrieve console logs from running instances is a
-boon for support—many times they can figure out what's going on inside
-their instance and fix it themselves without bothering you.
-Unfortunately, sometimes overzealous logging of failures can cause
-problems of its own.
-
-A report came in: VMs were launching slowly, or not at all. Cue the
-standard checks—nothing in Nagios, but there was a spike in network
-traffic towards the current master of our RabbitMQ cluster. Investigation
-started, but soon the other parts of the queue cluster were leaking
-memory like a sieve. Then the alert came in—the master Rabbit server
-went down and connections failed over to the slave.
-
-At that time, our control services were hosted by another team and we
-didn't have much debugging information to determine what was going on
-with the master, and we could not reboot it. That team noted that it
-failed without alert, but managed to reboot it. After an hour, the
-cluster had returned to its normal state and we went home for the day.
-
-Continuing the diagnosis the next morning was kick-started by another
-identical failure. We quickly got the message queue running again, and
-tried to work out why Rabbit was suffering from so much network traffic.
-Enabling debug logging on nova-api quickly brought understanding. A
-``tail -f /var/log/nova/nova-api.log`` was scrolling by faster
-than we'd ever seen before. CTRL+C on that and we could plainly see the
-contents of a system log spewing failures over and over again—a system
-log from one of our users' instances.
-
-After finding the instance ID we headed over to
-``/var/lib/nova/instances`` to find the ``console.log``:
-
-.. code-block:: console
-
-   adm@cc12:/var/lib/nova/instances/instance-00000e05# wc -l console.log
-   92890453 console.log
-   adm@cc12:/var/lib/nova/instances/instance-00000e05# ls -sh console.log
-   5.5G console.log
-
-Sure enough, the user had been periodically refreshing the console log
-page on the dashboard and the 5.5 GB file was traversing the Rabbit
-cluster to get to the dashboard.
-
-We called them and asked them to stop for a while, and they were happy
-to abandon the horribly broken VM. After that, we started monitoring the
-size of console logs.
-
-To this day, `the issue `__
-doesn't have a permanent resolution, but we look forward to the discussion
-at the next summit.
-
-Havana Haunted by the Dead
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Felix Lee of Academia Sinica Grid Computing Centre in Taiwan contributed
-this story.
-
-I just upgraded OpenStack from Grizzly to Havana 2013.2-2 using the RDO
-repository and everything was running pretty well—except the EC2 API.
-
-I noticed that the API would suffer from a heavy load and respond slowly
-to particular EC2 requests such as ``RunInstances``.
-
-Output from ``/var/log/nova/nova-api.log`` on :term:`Havana`:
-
-.. code-block:: console
-
-   2014-01-10 09:11:45.072 129745 INFO nova.ec2.wsgi.server
-   [req-84d16d16-3808-426b-b7af-3b90a11b83b0
-   0c6e7dba03c24c6a9bce299747499e8a 7052bd6714e7460caeb16242e68124f9]
-   117.103.103.29 "GET
-   /services/Cloud?AWSAccessKeyId=[something]&Action=RunInstances&ClientToken=[something]&ImageId=ami-00000001&InstanceInitiatedShutdownBehavior=terminate...
-   HTTP/1.1" status: 200 len: 1109 time: 138.5970151
-
-This request took over two minutes to process, but executed quickly on
-another co-existing Grizzly deployment using the same hardware and
-system configuration.
- -Output from ``/var/log/nova/nova-api.log`` on :term:`Grizzly`: - -.. code-block:: console - - 2014-01-08 11:15:15.704 INFO nova.ec2.wsgi.server - [req-ccac9790-3357-4aa8-84bd-cdaab1aa394e - ebbd729575cb404081a45c9ada0849b7 8175953c209044358ab5e0ec19d52c37] - 117.103.103.29 "GET - /services/Cloud?AWSAccessKeyId=[something]&Action=RunInstances&ClientToken=[something]&ImageId=ami-00000007&InstanceInitiatedShutdownBehavior=terminate... - HTTP/1.1" status: 200 len: 931 time: 3.9426181 - -While monitoring system resources, I noticed a significant increase in -memory consumption while the EC2 API processed this request. I thought -it wasn't handling memory properly—possibly not releasing memory. If the -API received several of these requests, memory consumption quickly grew -until the system ran out of RAM and began using swap. Each node has 48 -GB of RAM and the "nova-api" process would consume all of it within -minutes. Once this happened, the entire system would become unusably -slow until I restarted the nova-api service. - -So, I found myself wondering what changed in the EC2 API on Havana that -might cause this to happen. Was it a bug or a normal behavior that I now -need to work around? - -After digging into the nova (OpenStack Compute) code, I noticed two -areas in ``api/ec2/cloud.py`` potentially impacting my system: - -.. code-block:: python - - instances = self.compute_api.get_all(context, - search_opts=search_opts, - sort_dir='asc') - - sys_metas = self.compute_api.get_all_system_metadata( - context, search_filts=[{'key': ['EC2_client_token']}, - {'value': [client_token]}]) - -Since my database contained many records—over 1 million metadata records -and over 300,000 instance records in "deleted" or "errored" states—each -search took a long time. I decided to clean up the database by first -archiving a copy for backup and then performing some deletions using the -MySQL client. For example, I ran the following SQL command to remove -rows of instances deleted for over a year: - -.. code-block:: console - - mysql> delete from nova.instances where deleted=1 and terminated_at < (NOW() - INTERVAL 1 YEAR); - -Performance increased greatly after deleting the old records and my new -deployment continues to behave well. 
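-
-.. note::
-
-   If you attempt similar surgery, archive before you delete. A
-   minimal sketch of the "archiving a copy for backup" step mentioned
-   above, in plain SQL (the archive table name is whatever you
-   choose):
-
-   .. code-block:: console
-
-      mysql> create table nova.instances_archive like nova.instances;
-      mysql> insert into nova.instances_archive
-          ->     select * from nova.instances
-          ->     where deleted=1 and terminated_at < (NOW() - INTERVAL 1 YEAR);
-
-   Only after verifying the copy should the ``delete`` shown above be
-   run against the live table.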
diff --git a/doc/ops-guide/source/app-resources.rst b/doc/ops-guide/source/app-resources.rst deleted file mode 100644 index 1c998987a7..0000000000 --- a/doc/ops-guide/source/app-resources.rst +++ /dev/null @@ -1,62 +0,0 @@ -========= -Resources -========= - -OpenStack -~~~~~~~~~ - -- `OpenStack Installation Tutorial for openSUSE and SUSE Linux Enterprise - Server `_ - -- `OpenStack Installation Tutorial for Red Hat Enterprise Linux and CentOS - `_ - -- `OpenStack Installation Tutorial for Ubuntu - Server `_ - -- `OpenStack Administrator Guide `_ - -- `OpenStack Cloud Computing Cookbook (Packt - Publishing) `_ - -Cloud (General) -~~~~~~~~~~~~~~~ - -- `The NIST Definition of Cloud - Computing `_ - -Python -~~~~~~ - -- `Dive Into Python (Apress) `_ - -Networking -~~~~~~~~~~ - -- `TCP/IP Illustrated, Volume 1: The Protocols, 2/E - (Pearson) `_ - -- `The TCP/IP Guide (No Starch - Press) `_ - -- `A tcpdump Tutorial and - Primer `_ - -Systems Administration -~~~~~~~~~~~~~~~~~~~~~~ - -- `UNIX and Linux Systems Administration Handbook (Prentice - Hall) `_ - -Virtualization -~~~~~~~~~~~~~~ - -- `The Book of Xen (No Starch - Press) `_ - -Configuration Management -~~~~~~~~~~~~~~~~~~~~~~~~ - -- `Puppet Labs Documentation `_ - -- `Pro Puppet (Apress) `_ diff --git a/doc/ops-guide/source/app-roadmaps.rst b/doc/ops-guide/source/app-roadmaps.rst deleted file mode 100644 index 48d28e574f..0000000000 --- a/doc/ops-guide/source/app-roadmaps.rst +++ /dev/null @@ -1,435 +0,0 @@ -===================== -Working with Roadmaps -===================== - -The good news: OpenStack has unprecedented transparency when it comes to -providing information about what's coming up. The bad news: each release -moves very quickly. The purpose of this appendix is to highlight some of -the useful pages to track, and take an educated guess at what is coming -up in the next release and perhaps further afield. - -OpenStack follows a six month release cycle, typically releasing in -April/May and October/November each year. At the start of each cycle, -the community gathers in a single location for a design summit. At the -summit, the features for the coming releases are discussed, prioritized, -and planned. The below figure shows an example release cycle, with dates -showing milestone releases, code freeze, and string freeze dates, along -with an example of when the summit occurs. Milestones are interim releases -within the cycle that are available as packages for download and -testing. Code freeze is putting a stop to adding new features to the -release. String freeze is putting a stop to changing any strings within -the source code. - -.. image:: figures/osog_ac01.png - :width: 100% - - -Information Available to You -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are several good sources of information available that you can use -to track your OpenStack development desires. - -Release notes are maintained on the OpenStack wiki, and also shown here: - -.. 
list-table::
-   :widths: 25 25 25 25
-   :header-rows: 1
-
-   * - Series
-     - Status
-     - Releases
-     - Date
-   * - Liberty
-     - `Under Development `_
-     - 2015.2
-     - Oct, 2015
-   * - Kilo
-     - `Current stable release, security-supported `_
-     - `2015.1 `_
-     - Apr 30, 2015
-   * - Juno
-     - `Security-supported `_
-     - `2014.2 `_
-     - Oct 16, 2014
-   * - Icehouse
-     - `End-of-life `_
-     - `2014.1 `_
-     - Apr 17, 2014
-   * -
-     -
-     - `2014.1.1 `_
-     - Jun 9, 2014
-   * -
-     -
-     - `2014.1.2 `_
-     - Aug 8, 2014
-   * -
-     -
-     - `2014.1.3 `_
-     - Oct 2, 2014
-   * - Havana
-     - End-of-life
-     - `2013.2 `_
-     - Oct 17, 2013
-   * -
-     -
-     - `2013.2.1 `_
-     - Dec 16, 2013
-   * -
-     -
-     - `2013.2.2 `_
-     - Feb 13, 2014
-   * -
-     -
-     - `2013.2.3 `_
-     - Apr 3, 2014
-   * -
-     -
-     - `2013.2.4 `_
-     - Sep 22, 2014
-   * - Grizzly
-     - End-of-life
-     - `2013.1 `_
-     - Apr 4, 2013
-   * -
-     -
-     - `2013.1.1 `_
-     - May 9, 2013
-   * -
-     -
-     - `2013.1.2 `_
-     - Jun 6, 2013
-   * -
-     -
-     - `2013.1.3 `_
-     - Aug 8, 2013
-   * -
-     -
-     - `2013.1.4 `_
-     - Oct 17, 2013
-   * -
-     -
-     - `2013.1.5 `_
-     - Mar 20, 2015
-   * - Folsom
-     - End-of-life
-     - `2012.2 `_
-     - Sep 27, 2012
-   * -
-     -
-     - `2012.2.1 `_
-     - Nov 29, 2012
-   * -
-     -
-     - `2012.2.2 `_
-     - Dec 13, 2012
-   * -
-     -
-     - `2012.2.3 `_
-     - Jan 31, 2013
-   * -
-     -
-     - `2012.2.4 `_
-     - Apr 11, 2013
-   * - Essex
-     - End-of-life
-     - `2012.1 `_
-     - Apr 5, 2012
-   * -
-     -
-     - `2012.1.1 `_
-     - Jun 22, 2012
-   * -
-     -
-     - `2012.1.2 `_
-     - Aug 10, 2012
-   * -
-     -
-     - `2012.1.3 `_
-     - Oct 12, 2012
-   * - Diablo
-     - Deprecated
-     - `2011.3 `_
-     - Sep 22, 2011
-   * -
-     -
-     - `2011.3.1 `_
-     - Jan 19, 2012
-   * - Cactus
-     - Deprecated
-     - `2011.2 `_
-     - Apr 15, 2011
-   * - Bexar
-     - Deprecated
-     - `2011.1 `_
-     - Feb 3, 2011
-   * - Austin
-     - Deprecated
-     - `2010.1 `_
-     - Oct 21, 2010
-
-Here are some other resources:
-
-- `A breakdown of current features under development, with their target
-  milestone `_
-
-- `A list of all features, including those not yet under
-  development `_
-
-- `Rough-draft design discussions ("etherpads") from the last design
-  summit `_
-
-- `List of individual code changes under
-  review `_
-
-Influencing the Roadmap
-~~~~~~~~~~~~~~~~~~~~~~~
-
-OpenStack truly welcomes your ideas (and contributions) and highly
-values feedback from real-world users of the software. By learning a
-little about the process that drives feature development, you can
-participate and perhaps get the additions you desire.
-
-Feature requests typically start their life in Etherpad, a collaborative
-editing tool, which is used to take coordinating notes at a design
-summit session specific to the feature. This then leads to the creation
-of a blueprint on the Launchpad site for the particular project, which
-is used to describe the feature more formally. Blueprints are then
-approved by project team members, and development can begin.
-
-Therefore, the fastest way to get your feature request up for
-consideration is to create an Etherpad with your ideas and propose a
-session to the design summit. If the design summit has already passed,
-you may also create a blueprint directly. Read this `blog post about how
-to work with blueprints
-`_ from
-the perspective of Victoria Martínez, a developer intern.
-
-The roadmap for the next release as it is developed can be seen at
-`Releases `_.
-
-To determine the potential features going into future releases, or to
-look at features implemented previously, take a look at the existing
-blueprints such as `OpenStack Compute (nova)
-Blueprints `_, `OpenStack
-Identity (keystone)
-Blueprints `_, and release
-notes.
-
-Aside from the direct-to-blueprint pathway, there is another very
-well-regarded mechanism to influence the development roadmap:
-the user survey. Found at `OpenStack User Survey
-`_,
-it allows you to provide details of your deployments and needs, anonymously by
-default. Each cycle, the user committee analyzes the results and produces a
-report, including providing specific information to the technical
-committee and project team leads.
-
-Aspects to Watch
-~~~~~~~~~~~~~~~~
-
-You want to keep an eye on the areas improving within OpenStack. The
-best way to "watch" roadmaps for each project is to look at the
-blueprints that are being approved for work on milestone releases. You
-can also learn from PTL webinars that follow the OpenStack summits twice
-a year.
-
-Driver Quality Improvements
----------------------------
-
-A major quality push has occurred across drivers and plug-ins in Block
-Storage, Compute, and Networking. Particularly, developers of Compute
-and Networking drivers that require proprietary or hardware products are
-now required to provide an automated external testing system for use
-during the development process.
-
-Easier Upgrades
----------------
-
-One of the most requested features since OpenStack began (for components
-other than Object Storage, which tends to "just work"): easier upgrades.
-In all recent releases, internal messaging communication is versioned,
-meaning services can theoretically drop back to backward-compatible
-behavior. This allows you to run later versions of some components,
-while keeping older versions of others.
-
-In addition, database migrations are now tested with the Turbo Hipster
-tool. This tool tests database migration performance on copies of
-real-world user databases.
-
-These changes have facilitated the first proper OpenStack upgrade guide,
-found in :doc:`ops-upgrades`, and will continue to improve in the next
-release.
-
-Deprecation of Nova Network
----------------------------
-
-With the introduction of the full software-defined networking stack
-provided by OpenStack Networking (neutron) in the Folsom release,
-development effort on the initial networking code that remains part of
-the Compute component has gradually lessened. While many still use
-``nova-network`` in production, there has been a long-term plan to
-remove the code in favor of the more flexible and full-featured
-OpenStack Networking.
-
-An attempt was made to deprecate ``nova-network`` during the Havana
-release, which was aborted due to the lack of equivalent functionality
-(such as the FlatDHCP multi-host high-availability mode mentioned in
-this guide), lack of a migration path between versions, insufficient
-testing, and simplicity when used for the more straightforward use cases
-``nova-network`` traditionally supported. Though significant effort has
-been made to address these concerns, ``nova-network`` was not
-deprecated in the Juno release. In addition, to a limited degree,
-patches to ``nova-network`` have again begun to be accepted, such as
-adding a per-network settings feature and SR-IOV support in Juno.
-
-This leaves you with an important point of decision when designing your
-cloud.
-OpenStack Networking is robust enough to use with a small number
-of limitations (performance issues in some scenarios, only basic high
-availability of layer 3 systems) and provides many more features than
-``nova-network``. However, if you do not have the more complex use cases
-that can benefit from fuller software-defined networking capabilities,
-or are uncomfortable with the new concepts introduced, ``nova-network``
-may continue to be a viable option for the next 12 months.
-
-Similarly, if you have an existing cloud and are looking to upgrade from
-``nova-network`` to OpenStack Networking, you should have the option to
-delay the upgrade for this period of time. However, each release of
-OpenStack brings significant new innovation, and regardless of your use
-of networking methodology, it is likely best to begin planning for an
-upgrade within a reasonable timeframe of each release.
-
-As mentioned, there's currently no way to cleanly migrate from
-``nova-network`` to neutron. We recommend that you keep a migration in
-mind, and consider what that process might involve, for when a proper
-migration path is released.
-
-Distributed Virtual Router
---------------------------
-
-One of the long-time complaints surrounding OpenStack Networking was the
-lack of high availability for the layer 3 components. The Juno release
-introduced Distributed Virtual Router (DVR), which aims to solve this
-problem.
-
-Early indications are that it does do this well for a base set of
-scenarios, such as using the ML2 plug-in with Open vSwitch, one flat
-external network and VXLAN tenant networks. However, it does appear that
-there are problems with the use of VLANs, IPv6, Floating IPs, high
-north-south traffic scenarios, and large numbers of compute nodes. It is
-expected these will improve significantly with the next release, but bug
-reports on specific issues are highly desirable.
-
-Replacement of Open vSwitch Plug-in with Modular Layer 2
---------------------------------------------------------
-
-The Modular Layer 2 plug-in is a framework allowing OpenStack Networking
-to simultaneously utilize the variety of layer-2 networking technologies
-found in complex real-world data centers. It currently works with the
-existing Open vSwitch, Linux Bridge, and Hyper-V L2 agents and is
-intended to replace and deprecate the monolithic plug-ins associated
-with those L2 agents.
-
-New API Versions
-----------------
-
-The third version of the Compute API was broadly discussed and worked on
-during the Havana and Icehouse release cycles. Current discussions
-indicate that the V2 API will remain for many releases, and the next
-iteration of the API will be denoted v2.1 and have similar properties to
-the existing v2.0, rather than an entirely new v3 API. This is a great
-time to evaluate all APIs and provide comments while the next generation
-APIs are being defined. A new working group was formed specifically to
-`improve OpenStack APIs `_
-and create design guidelines, which you are welcome to join.
-
-OpenStack on OpenStack (TripleO)
---------------------------------
-
-This project continues to improve and you may consider using it for
-greenfield deployments, though according to the latest user survey
-results it has yet to see widespread uptake.
-
-Data processing service for OpenStack (sahara)
-----------------------------------------------
-
-In answer to much-requested big data capabilities, a dedicated team has
-been making solid progress on a Hadoop-as-a-Service project.
-
-Bare metal Deployment (ironic)
-------------------------------
-
-The bare metal deployment project has been widely lauded, and development
-continues. The Juno release brought the OpenStack bare metal driver into
-the Compute project, and the aim was to deprecate the existing
-bare-metal driver in Kilo. If you are a current user of the bare metal
-driver, a particular blueprint to follow is `Deprecate the bare metal
-driver
-`_.
-
-Database as a Service (trove)
------------------------------
-
-The OpenStack community has had a database-as-a-service tool in
-development for some time, and we saw the first integrated release of it
-in Icehouse. From its release it was able to deploy database servers out
-of the box in a highly available way, initially supporting only MySQL.
-Juno introduced support for Mongo (including clustering), PostgreSQL,
-and Couchbase, in addition to replication functionality for MySQL. In Kilo,
-more advanced clustering capability was delivered, in addition to better
-integration with other OpenStack components such as Networking.
-
-Message Service (zaqar)
------------------------
-
-A service to provide queues of messages and notifications was released.
-
-DNS service (designate)
------------------------
-
-A long-requested service to provide the ability to manipulate DNS
-entries associated with OpenStack resources has gathered a following.
-The designate project was also released.
-
-Scheduler Improvements
-----------------------
-
-Both Compute and Block Storage rely on schedulers to determine where to
-place virtual machines or volumes. In Havana, the Compute scheduler
-underwent significant improvement, while in Icehouse it was the
-scheduler in Block Storage that received a boost. Further down the
-track, an effort that started this cycle aims to create a holistic
-scheduler covering both. Some of the work that was done in Kilo can be
-found under the `Gantt
-project `_.
-
-Block Storage Improvements
---------------------------
-
-Block Storage is considered a stable project, with wide uptake and a
-long track record of quality drivers. The team has discussed many areas
-of work at the summits, including better error reporting, automated
-discovery, and thin provisioning features.
-
-Toward a Python SDK
--------------------
-
-Though many successfully use the various python-\*client code as an
-effective SDK for interacting with OpenStack, consistency between the
-projects and documentation availability waxes and wanes. To combat this,
-an `effort to improve the
-experience `_ has
-started. Cross-project development efforts in OpenStack have a checkered
-history, such as the `unified client
-project `_ having
-several false starts. However, the early signs for the SDK project are
-promising, and we expect to see results during the Juno cycle.
diff --git a/doc/ops-guide/source/app-usecases.rst b/doc/ops-guide/source/app-usecases.rst
deleted file mode 100644
index 595a8ea917..0000000000
--- a/doc/ops-guide/source/app-usecases.rst
+++ /dev/null
@@ -1,192 +0,0 @@
-=========
-Use Cases
-=========
-
-This appendix contains a small selection of use cases from the
-community, with more technical detail than usual. Further examples can
-be found on the `OpenStack website `_.
-
-NeCTAR
-~~~~~~
-
-Who uses it: researchers from the Australian publicly funded research
-sector. Use is across a wide variety of disciplines, with the purpose of
-instances ranging from running simple web servers to using hundreds of
-cores for high-throughput computing.
-
-Deployment
-----------
-
-Using OpenStack Compute cells, the NeCTAR Research Cloud spans eight
-sites with approximately 4,000 cores per site.
-
-Each site runs a different configuration, as a resource cell in an
-OpenStack Compute cells setup. Some sites span multiple data centers,
-some use off-compute-node storage with a shared file system, and some
-use on-compute-node storage with a non-shared file system. Each site
-deploys the Image service with an Object Storage back end. A central
-Identity, dashboard, and Compute API service are used. A login to the
-dashboard triggers a SAML login with Shibboleth, which creates an
-account in the Identity service with an SQL back end. An Object Storage
-Global Cluster is used across several sites.
-
-Compute nodes have 24 to 48 cores, with at least 4 GB of RAM per core
-and approximately 40 GB of ephemeral storage per core.
-
-All sites are based on Ubuntu 14.04, with KVM as the hypervisor. The
-OpenStack version in use is typically the current stable version, with 5
-to 10 percent back-ported code from trunk and modifications.
-
-Resources
----------
-
-- `OpenStack.org case
-  study `_
-
-- `NeCTAR-RC GitHub `_
-
-- `NeCTAR website `_
-
-MIT CSAIL
-~~~~~~~~~
-
-Who uses it: researchers from the MIT Computer Science and Artificial
-Intelligence Lab.
-
-Deployment
-----------
-
-The CSAIL cloud is currently 64 physical nodes with a total of 768
-physical cores and 3,456 GB of RAM. Persistent data storage is largely
-outside the cloud on NFS, with cloud resources focused on compute
-resources. There are more than 130 users in more than 40 projects,
-typically running 2,000–2,500 vCPUs in 300 to 400 instances.
-
-We initially deployed on Ubuntu 12.04 with the Essex release of
-OpenStack using FlatDHCP multi-host networking.
-
-The software stack is still Ubuntu 12.04 LTS, but now with OpenStack
-Havana from the Ubuntu Cloud Archive. KVM is the hypervisor, deployed
-using `FAI `_ and Puppet for configuration
-management. The FAI and Puppet combination is used lab-wide, not only
-for OpenStack. There is a single cloud controller node, which also acts
-as network controller, with the remainder of the server hardware
-dedicated to compute nodes.
-
-Host aggregates and instance-type extra specs are used to provide two
-different resource allocation ratios. The default resource allocation
-ratios we use are 4:1 CPU and 1.5:1 RAM. Compute-intensive workloads use
-instance types that require non-oversubscribed hosts where ``cpu_ratio``
-and ``ram_ratio`` are both set to 1.0 (a sketch of how such a scheme can
-be wired up appears in the note below). Since we have hyper-threading
-enabled on our compute nodes, this provides one vCPU per CPU thread, or
-two vCPUs per physical core.
-
-With our upgrade to Grizzly in August 2013, we moved to OpenStack
-Networking, neutron (quantum at the time). Compute nodes have
-two gigabit network interfaces and a separate management card for IPMI
-management. One network interface is used for node-to-node
-communications. The other is used as a trunk port for OpenStack-managed
-VLANs. The controller node uses two bonded 10g network interfaces for
-its public IP communications. Big pipes are used here because images are
-served over this port, and it is also used to connect to iSCSI storage,
-back-ending the image storage and database. The controller node also has
-a gigabit interface that is used in trunk mode for OpenStack-managed
-VLAN traffic. This port handles traffic to the dhcp-agent and
-metadata-proxy.
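-
-.. note::
-
-   A rough sketch of how a two-tier overcommit scheme like this can be
-   wired up with host aggregates and flavor extra specs. This is an
-   illustration rather than CSAIL's actual tooling: it assumes the
-   ``AggregateInstanceExtraSpecsFilter`` scheduler filter is enabled,
-   and the aggregate, host, and flavor names are invented:
-
-   .. code-block:: console
-
-      $ nova aggregate-create dedicated-hosts
-      $ nova aggregate-set-metadata dedicated-hosts pinned=true
-      $ nova aggregate-add-host dedicated-hosts c01.example.com
-      $ nova flavor-key m1.dedicated set aggregate_instance_extra_specs:pinned=true
-
-   Instances booted with the ``m1.dedicated`` flavor then land only on
-   hosts in the non-oversubscribed aggregate.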
-
-We approximate the older ``nova-network`` multi-host HA setup by using
-"provider VLAN networks" that connect instances directly to existing
-publicly addressable networks and use existing physical routers as their
-default gateway. This means that if our network controller goes down,
-running instances still have their network available, and no single
-Linux host becomes a traffic bottleneck. We are able to do this because
-we have a sufficient supply of IPv4 addresses to cover all of our
-instances and thus don't need NAT and don't use floating IP addresses.
-We provide a single generic public network to all projects and
-additional existing VLANs on a project-by-project basis as needed.
-Individual projects are also allowed to create their own private GRE
-based networks.
-
-Resources
----------
-
-- `CSAIL homepage `_
-
-DAIR
-~~~~
-
-Who uses it: DAIR is an integrated virtual environment that leverages
-the CANARIE network to develop and test new information communication
-technology (ICT) and other digital technologies. It combines digital
-infrastructure such as advanced networking, cloud computing, and
-storage to create an environment for developing and testing innovative
-ICT applications, protocols, and services; performing at-scale
-experimentation for deployment; and facilitating a faster time to
-market.
-
-Deployment
-----------
-
-DAIR is hosted at two different data centers across Canada: one in
-Alberta and the other in Quebec. It consists of a cloud controller at
-each location, although one is designated the "master" controller that
-is in charge of central authentication and quotas. This is done through
-custom scripts and light modifications to OpenStack. DAIR is currently
-running Havana.
-
-For Object Storage, each region has a swift environment.
-
-A NetApp appliance is used in each region for both block storage and
-instance storage. There are future plans to move the instances off the
-NetApp appliance and onto a distributed file system such as :term:`Ceph` or
-GlusterFS.
-
-VlanManager is used extensively for network management. All servers have
-two bonded 10GbE NICs that are connected to two redundant switches. DAIR
-is set up to use single-node networking where the cloud controller is
-the gateway for all instances on all compute nodes. Internal OpenStack
-traffic (for example, storage traffic) does not go through the cloud
-controller.
-
-Resources
----------
-
-- `DAIR homepage `__
-
-CERN
-~~~~
-
-Who uses it: researchers at CERN (European Organization for Nuclear
-Research) conducting high-energy physics research.
-
-Deployment
-----------
-
-The environment is largely based on Scientific Linux 6, which is Red Hat
-compatible. We use KVM as our primary hypervisor, although tests are
-ongoing with Hyper-V on Windows Server 2008.
-
-We use the Puppet Labs OpenStack modules to configure Compute, Image
-service, Identity, and dashboard. Puppet is used widely for instance
-configuration, and Foreman is used as a GUI for reporting and instance
-provisioning.
-
-Users and groups are managed through Active Directory and imported into
-the Identity service using LDAP. CLIs are available for nova and
-Euca2ools.
-
-There are three clouds currently running at CERN, totaling about 4,700
-compute nodes, with approximately 120,000 cores. The CERN IT cloud aims
-to expand to 300,000 cores by 2015.
- -Resources ---------- - -- `OpenStack in Production: A tale of 3 OpenStack - Clouds `_ - -- `Review of CERN Data Centre - Infrastructure `_ - -- `CERN Cloud Infrastructure User - Guide `_ diff --git a/doc/ops-guide/source/appendix.rst b/doc/ops-guide/source/appendix.rst deleted file mode 100644 index dc27aa0f51..0000000000 --- a/doc/ops-guide/source/appendix.rst +++ /dev/null @@ -1,12 +0,0 @@ -Appendix -~~~~~~~~ - -.. toctree:: - :maxdepth: 1 - - app-usecases.rst - app-crypt.rst - app-roadmaps.rst - app-resources.rst - common/app-support.rst - common/glossary.rst diff --git a/doc/ops-guide/source/common b/doc/ops-guide/source/common deleted file mode 120000 index dc879abe93..0000000000 --- a/doc/ops-guide/source/common +++ /dev/null @@ -1 +0,0 @@ -../../common \ No newline at end of file diff --git a/doc/ops-guide/source/conf.py b/doc/ops-guide/source/conf.py deleted file mode 100644 index afc3df57f1..0000000000 --- a/doc/ops-guide/source/conf.py +++ /dev/null @@ -1,297 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -# import sys - -import openstackdocstheme - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = ['openstackdocstheme'] - -# Add any paths that contain templates here, relative to this directory. -# templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -repository_name = "openstack/openstack-manuals" -bug_project = 'openstack-manuals' -project = u'Operations Guide' -bug_tag = u'ops-guide' -copyright = u'2016-2017, OpenStack contributors' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '15.0' -# The full version, including alpha/beta/rc tags. -release = '15.0.0' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. 
-# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['common/cli*', 'common/nova*', - 'common/appendix.rst', - 'common/get-started*', 'common/dashboard*'] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [openstackdocstheme.get_html_theme_path()] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = [] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# So that we can enable "log-a-bug" links from each output HTML page, this -# variable must be set to a format that includes year, month, day, hours and -# minutes. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. 
-# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -html_use_index = False - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -html_show_sourcelink = False - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'ops-guide' - -# If true, publish source files -html_copy_source = False - -# -- Options for LaTeX output --------------------------------------------- -pdf_theme_path = openstackdocstheme.get_pdf_theme_path() -openstack_logo = openstackdocstheme.get_openstack_logo_path() - -latex_custom_template = r""" -\newcommand{\openstacklogo}{%s} -\usepackage{%s} -""" % (openstack_logo, pdf_theme_path) - -latex_engine = 'xelatex' - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - 'papersize': 'a4paper', - - # The font size ('10pt', '11pt' or '12pt'). - 'pointsize': '11pt', - - #Default figure align - 'figure_align': 'H', - - # Not to generate blank page after chapter - 'classoptions': ',openany', - - # Additional stuff for the LaTeX preamble. - 'preamble': latex_custom_template, -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'OpsGuide.tex', u'Operations Guide', - u'OpenStack contributors', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'opsguide', u'Operations Guide', - [u'OpenStack contributors'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'OpsGuide', u'Operations Guide', - u'OpenStack contributors', 'OpsGuide', - 'This book provides information about designing and operating ' - 'OpenStack clouds.', 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. 
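# A minimal sketch, assuming Sphinx and a TeX toolchain providing
# xelatex, of how the LaTeX options above were typically exercised
# (illustrative commands, not part of this file):
#
#   sphinx-build -b latex source build/latex
#   make -C build/latex
#
# Sphinx writes OpsGuide.tex plus a Makefile into build/latex; the
# generated Makefile drives the engine selected by ``latex_engine``.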
-# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/doc/ops-guide/source/figures/Check_mark_23x20_02.png b/doc/ops-guide/source/figures/Check_mark_23x20_02.png deleted file mode 100644 index e6e5d5a72b..0000000000 Binary files a/doc/ops-guide/source/figures/Check_mark_23x20_02.png and /dev/null differ diff --git a/doc/ops-guide/source/figures/Check_mark_23x20_02.svg b/doc/ops-guide/source/figures/Check_mark_23x20_02.svg deleted file mode 100644 index 3051a2f937..0000000000 --- a/doc/ops-guide/source/figures/Check_mark_23x20_02.svg +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - - - image/svg+xml - - - - - - - - diff --git a/doc/ops-guide/source/figures/create_project.png b/doc/ops-guide/source/figures/create_project.png deleted file mode 100644 index 8906bcac35..0000000000 Binary files a/doc/ops-guide/source/figures/create_project.png and /dev/null differ diff --git a/doc/ops-guide/source/figures/edit_project_member.png b/doc/ops-guide/source/figures/edit_project_member.png deleted file mode 100644 index 84d7408bac..0000000000 Binary files a/doc/ops-guide/source/figures/edit_project_member.png and /dev/null differ diff --git a/doc/ops-guide/source/figures/network_packet_ping.svg b/doc/ops-guide/source/figures/network_packet_ping.svg deleted file mode 100644 index f5dda8e250..0000000000 --- a/doc/ops-guide/source/figures/network_packet_ping.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -2013-03-02 18:48ZCanvas 1Layer 1Compute Node nbr100Internetinstanceeth0eth0vnet1L2 Switchgateway12345 diff --git a/doc/ops-guide/source/figures/neutron_packet_ping.svg b/doc/ops-guide/source/figures/neutron_packet_ping.svg deleted file mode 100644 index 898794fffb..0000000000 --- a/doc/ops-guide/source/figures/neutron_packet_ping.svg +++ /dev/null @@ -1,1734 +0,0 @@ - - - - - 2013-03-02 18:48Z - - - - image/svg+xml - - - - - - - - - - - - - - - - - - - - - - - - IP Link - Layer2 VLANTrunk - - Neutron Network Paths - - VLAN and GRE networks - - GRE networks - - VLAN networks - - - - - - Compute Node n - - - - br-int - - - - instance - - - - - eth0 - - - - - - tap - - - - - - - 1 - - - - - - - - - 2 - - - - - - - - - - 3 - - - - - br-tun - - - - - - 4b - - - - - - - 4a - - - - - patch-tun - - - - int-br-eth1 - - - - - - - eth1 - - - - - phy-br-eth1 - - - - - eth0 - - - - br-eth1 - - - - patch-int - - - - gre0 - - - - gre<N> - - - - - - - - - - - - - Network Node - - - - - dhcp-agent - - - - - - - 10 - - - - - - - 5b - - - - - - 5a - - - - - - - 8 - - - - - - br-eth1 - - - br-tun - - - - - eth1 - - - - - phy-br-eth1 - - - - patch-int - - - - gre<N> - - - - gre0 - - - - - eth2 - - - - - - - - qg-<n> - - - - - - eth0 - - - - - - - 9 - - - - - - - - - 6 - - - - br-int - - - - - - tap - - - - - - qr-<n> - - - - - phy-br-eth1 - - - - patch-tun - - - - br-ex - - - - netns qrouter-uuid - netns qdhcp-uuid - - - - - - - - - - - l3-agent - - - - - - - 7 - - - - - - - - Internet - - - - - - diff --git a/doc/ops-guide/source/figures/os-ref-arch.svg b/doc/ops-guide/source/figures/os-ref-arch.svg deleted file mode 100644 index 7fea7f198c..0000000000 --- a/doc/ops-guide/source/figures/os-ref-arch.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -2013-02-26 23:27ZCanvas 1Layer 
1Compute Node 2nova-computenova-api-metadatanova-vncconsolenova-networketh1eth0Compute Node 1HypervisorAPI for metadatanoVNCnova-networkInternetCloud Controller NodeDatabaseMessage QueueAPI servicesSchedulerIdentityImageBlock StorageDashboardConsole accesseth0eth1Management Network 192.168.1.0/24Public Network 203.0.113.0/24Flat Network 10.1.0.0/16eth1eth0Block Storage NodeSCSI target (tgt)eth1Ephemeral Storage NodeNFSeth1cinder-volume diff --git a/doc/ops-guide/source/figures/os_physical_network.svg b/doc/ops-guide/source/figures/os_physical_network.svg deleted file mode 100644 index d4d83fcb60..0000000000 --- a/doc/ops-guide/source/figures/os_physical_network.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -2013-02-27 18:33ZCanvas 1Layer 1Compute Node neth0eth1Management Network192.168.1.0/24Flat Network10.1.0.0/16Public Network203.0.113.0/24br100instance ninstance 2instance 1 diff --git a/doc/ops-guide/source/figures/osog_00in01.png b/doc/ops-guide/source/figures/osog_00in01.png deleted file mode 100644 index 1a7c150ccf..0000000000 Binary files a/doc/ops-guide/source/figures/osog_00in01.png and /dev/null differ diff --git a/doc/ops-guide/source/figures/osog_0201.png b/doc/ops-guide/source/figures/osog_0201.png deleted file mode 100644 index 794c327e40..0000000000 Binary files a/doc/ops-guide/source/figures/osog_0201.png and /dev/null differ diff --git a/doc/ops-guide/source/figures/osog_1201.png b/doc/ops-guide/source/figures/osog_1201.png deleted file mode 100644 index d0e3a3fd4e..0000000000 Binary files a/doc/ops-guide/source/figures/osog_1201.png and /dev/null differ diff --git a/doc/ops-guide/source/figures/osog_1202.png b/doc/ops-guide/source/figures/osog_1202.png deleted file mode 100644 index ce1e475e52..0000000000 Binary files a/doc/ops-guide/source/figures/osog_1202.png and /dev/null differ diff --git a/doc/ops-guide/source/figures/osog_ac01.png b/doc/ops-guide/source/figures/osog_ac01.png deleted file mode 100644 index 6caddef4a2..0000000000 Binary files a/doc/ops-guide/source/figures/osog_ac01.png and /dev/null differ diff --git a/doc/ops-guide/source/figures/provision-an-instance.graffle b/doc/ops-guide/source/figures/provision-an-instance.graffle deleted file mode 100644 index 62ea26a84d..0000000000 Binary files a/doc/ops-guide/source/figures/provision-an-instance.graffle and /dev/null differ diff --git a/doc/ops-guide/source/figures/provision-an-instance.png b/doc/ops-guide/source/figures/provision-an-instance.png deleted file mode 100644 index b5370526ab..0000000000 Binary files a/doc/ops-guide/source/figures/provision-an-instance.png and /dev/null differ diff --git a/doc/ops-guide/source/figures/provision-an-instance.svg b/doc/ops-guide/source/figures/provision-an-instance.svg deleted file mode 100644 index 47db5aa8a8..0000000000 --- a/doc/ops-guide/source/figures/provision-an-instance.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - Produced by OmniGraffle 6.0.5 2016-08-17 01:06ZCanvas 1Role: classHTTP REST169.254.169.254Return volume informationrpc.cast tolaunch instanceRead cluster stateSave instance stateRead filtering and weighinginformationSubscribe newinstance requestCreate entry for instanceClientkeystoneLDAPToken Storenova-apiDBnova-schedulernova-computenova-conductorglance-apicinder-volumecinder-schedulerneutron-serverneutron-DHCP-agentneutron-L2-agentdnsmasqlibvirtcinder-apivm-instanceneutron-metadata-proxynova-api-metadataAuthentication requestAuthorization tokenAuthorization Save tokenLaunchinstanceInstance requestcomplete[rpc.cast] to request new instanceMQSubscribe new 
instance requestrpc.call to Nova-conductorto fetch instance informationSubscribe new instance requestRead instance statePublish new instance stateSubscribe new instance request[REST] get image URLfrom glance image IDReturn image URL Allocate and configure the network for instance Get network infoRead request IPAllocate IPReplyRead request L2 configurationReply L2 configurationConfiguration L2Save instance network statePass network information [REST] Get volume dataValidate auth-token and permissionsUpdate authorization headers with roles and acl Start VMPort updaterpc.call to Nova-conductor to fetch the instance informationSubscribe new instance requestPublish new instance statePass volume informationGet cluster mapReturn mapMount volumeReturn metadataHTTP REST AddUUID intormation X-headersPoll instancestateReturn instancestateRead instancestateReturnstateReply IPRead IPValidate the auth-tokenValidate the auth-token diff --git a/doc/ops-guide/source/figures/releasecyclegrizzlydiagram.png b/doc/ops-guide/source/figures/releasecyclegrizzlydiagram.png deleted file mode 100644 index 26ae2250cf..0000000000 Binary files a/doc/ops-guide/source/figures/releasecyclegrizzlydiagram.png and /dev/null differ diff --git a/doc/ops-guide/source/index.rst b/doc/ops-guide/source/index.rst deleted file mode 100644 index dd5b3dba83..0000000000 --- a/doc/ops-guide/source/index.rst +++ /dev/null @@ -1,55 +0,0 @@ -========================== -OpenStack Operations Guide -========================== - -Abstract -~~~~~~~~ - -This guide provides information about operating OpenStack clouds. - -We recommend that you turn to the `Installation Tutorials and Guides -`_, -which contains a step-by-step guide on how to manually install the -OpenStack packages and dependencies on your cloud. - -While it is important for an operator to be familiar with the steps -involved in deploying OpenStack, we also strongly encourage you to -evaluate `OpenStack deployment tools -`_ -and configuration-management tools, such as :term:`Puppet` or -:term:`Chef`, which can help automate this deployment process. - -In this guide, we assume that you have successfully deployed an -OpenStack cloud and are able to perform basic operations -such as adding images, booting instances, and attaching volumes. - -As your focus turns to stable operations, we recommend that you do skim -this guide to get a sense of the content. Some of this content is useful -to read in advance so that you can put best practices into effect to -simplify your life in the long run. Other content is more useful as a -reference that you might turn to when an unexpected event occurs (such -as a power failure), or to troubleshoot a particular problem. - -Contents -~~~~~~~~ - -.. 
toctree:: - :maxdepth: 2 - - acknowledgements.rst - preface.rst - common/conventions.rst - ops-deployment-factors.rst - ops-planning.rst - ops-capacity-planning-scaling.rst - ops-lay-of-the-land.rst - ops-projects-users.rst - ops-user-facing-operations.rst - ops-maintenance.rst - ops-network-troubleshooting.rst - ops-logging-monitoring.rst - ops-backup-recovery.rst - ops-customize.rst - ops-advanced-configuration.rst - ops-upgrades.rst - appendix.rst diff --git a/doc/ops-guide/source/locale/ja/LC_MESSAGES/ops-guide.po b/doc/ops-guide/source/locale/ja/LC_MESSAGES/ops-guide.po deleted file mode 100644 index 7bde069712..0000000000 --- a/doc/ops-guide/source/locale/ja/LC_MESSAGES/ops-guide.po +++ /dev/null @@ -1,13128 +0,0 @@ -# Translators: -# Akihiro Motoki , 2013 -# Akira Yoshiyama , 2013 -# Andreas Jaeger , 2014-2015 -# Ying Chun Guo , 2013 -# doki701 , 2013 -# yfukuda , 2014 -# Masanori Itoh , 2013 -# Masanori Itoh , 2013 -# Masayuki Igawa , 2013 -# Masayuki Igawa , 2013 -# myamamot , 2014 -# *はたらくpokotan* <>, 2013 -# Tomoaki Nakajima <>, 2013 -# Yuki Shira , 2013 -# Shogo Sato , 2014 -# tsutomu.takekawa , 2013 -# Masanori Itoh , 2013 -# Toru Makabe , 2013 -# doki701 , 2013 -# Tom Fifield , 2014 -# Tomoyuki KATO , 2012-2015 -# Toru Makabe , 2013 -# tsutomu.takekawa , 2013 -# Ying Chun Guo , 2013 -# ykatabam , 2014 -# Yuki Shira , 2013 -# -# -# Akihiro Motoki , 2016. #zanata -# KATO Tomoyuki , 2016. #zanata -# Shu Muto , 2016. #zanata -# KATO Tomoyuki , 2017. #zanata -msgid "" -msgstr "" -"Project-Id-Version: Operations Guide 15.0\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2017-06-12 16:25+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2017-03-22 07:06+0000\n" -"Last-Translator: KATO Tomoyuki \n" -"Language: ja\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: Japanese\n" - -msgid "\"Hey Alvaro, can you run a VLAN on top of a VLAN?\"" -msgstr "「Alvaro、VLAN 上に VLAN って作れるのかい?」" - -msgid "\"If you did, you'd add an extra 4 bytes to the packet…\"" -msgstr "「もしやったら、パケットに余計に4バイト追加になるよ・・」" - -msgid "\"The Issue\"" -msgstr "「あの問題」" - -msgid "**Back matter:**" -msgstr "**後付:**" - -msgid "**Block Storage service (cinder)**" -msgstr "**Block Storage サービス (cinder)**" - -msgid "**Column**" -msgstr "**カラム**" - -msgid "**Compute nodes**" -msgstr "**コンピュートノード**" - -msgid "**Compute service (nova)**" -msgstr "**Compute サービス (nova)**" - -msgid "**Controller node**" -msgstr "**コントローラーノード**" - -msgid "**Create a port that can be reused**" -msgstr "**再利用できるポートの作成**" - -msgid "**Description**" -msgstr "**説明**" - -msgid "**Detach a port from an instance**" -msgstr "**インスタンスからポートの切断**" - -msgid "**Ensuring Snapshots of Linux Guests Are Consistent**" -msgstr "**Linux ゲストのスナップショットの整合性の保証**" - -msgid "**Ensuring Snapshots of Windows Guests Are Consistent**" -msgstr "**Windows ゲストのスナップショットの整合性の保証**" - -msgid "**Example of Complexity**" -msgstr "**複雑さの例**" - -msgid "**Example**" -msgstr "**例**" - -msgid "**Identity service (keystone)**" -msgstr "**Identity サービス (keystone)**" - -msgid "**Image service (glance)**" -msgstr "**Image サービス (glance)**" - -msgid "**Networking service (neutron)**" -msgstr "**Networking サービス (neutron)**" - -msgid "**Overhead**" -msgstr "**オーバーヘッド**" - -msgid "**Provision an instance**" -msgstr "**インスタンスの配備**" - -msgid "**Setting with openstack command**" -msgstr "**openstack コマンドを用いたセットアップ方法**" - -msgid "**Shared services**" -msgstr "**共有サービス**" - -msgid 
"**Storage nodes**" -msgstr "**ストレージノード**" - -msgid "" -"**To capture packets from the patch-tun internal interface on integration " -"bridge, br-int:**" -msgstr "" -"統合ブリッジ ``br-int`` の内部インターフェース ``patch-tun`` からのパケットを" -"キャプチャーする方法。" - -msgid "" -"**To create the middleware and plug it in through Paste configuration:**" -msgstr "**ミドルウェアを作成して Paste の環境設定を通して組み込むためには:**" - -msgid "**To create the scheduler and plug it in through configuration**" -msgstr "**スケジューラーを作成して、設定を通して組み込む方法**" - -msgid "" -"**To discover which internal VLAN tag is in use for a GRE tunnel by using " -"the ovs-ofctl command**" -msgstr "" -"**ovs-ofctlコマンドを使用することにより、GRE トンネル向けに使用されている内" -"部 VLAN タグを検索します。**" - -msgid "" -"**To discover which internal VLAN tag is in use for a given external VLAN by " -"using the ovs-ofctl command**" -msgstr "" -"**ovs-ofctl コマンドを使用することにより、外部 VLAN 向けに使用されている内部 " -"VLAN タグを検索します。**" - -msgid "**To perform a rollback**" -msgstr "**ロールバック方法**" - -msgid "**To update Block Storage quotas for a tenant (project)**" -msgstr "**プロジェクトの Block Storage クォータの更新方法**" - -msgid "**To update quota values for a tenant (project)**" -msgstr "**テナント (プロジェクト) のクォータ値の更新**" - -msgid "**To view Block Storage quotas for a tenant (project)**" -msgstr "**プロジェクトの Block Storage クォータの表示方法**" - -msgid "**To view and update default Block Storage quota values**" -msgstr "**Block Storage のデフォルトのクォータ値の表示と更新**" - -msgid "**To view and update default quota values**" -msgstr "**デフォルトのクォータ値の表示と更新**" - -msgid "**To view quota values for a tenant (project)**" -msgstr "**テナント (プロジェクト) のクォータ値の表示**" - -msgid "*Actions which delete things should not be enabled by default.*" -msgstr "*何かを削除する操作はデフォルトで有効化されるべきではない。*" - -msgid "/var/lib/nova/instances" -msgstr "/var/lib/nova/instances" - -msgid "0 GB" -msgstr "0 GB" - -msgid "1" -msgstr "1" - -msgid "1 GB" -msgstr "1 GB" - -msgid "10" -msgstr "10" - -msgid "10 GB" -msgstr "10 GB" - -msgid "100" -msgstr "100" - -msgid "15" -msgstr "15" - -msgid "16 GB" -msgstr "16 GB" - -msgid "160 GB" -msgstr "160 GB" - -msgid "2" -msgstr "2" - -msgid "2 GB" -msgstr "2 GB" - -msgid "20" -msgstr "20" - -msgid "20 GB" -msgstr "20 GB" - -msgid "200 physical cores." 
-msgstr "物理コア 200 個" - -msgid "2015.2" -msgstr "2015.2" - -msgid "21" -msgstr "21" - -msgid "22" -msgstr "22" - -msgid "3" -msgstr "3" - -msgid "4" -msgstr "4" - -msgid "4 GB" -msgstr "4 GB" - -msgid "40 GB" -msgstr "40 GB" - -msgid "5" -msgstr "5" - -msgid "512 MB" -msgstr "512 MB" - -msgid "8" -msgstr "8" - -msgid "8 GB" -msgstr "8 GB" - -msgid "80 GB" -msgstr "80 GB" - -msgid "98" -msgstr "98" - -msgid "99" -msgstr "99" - -msgid ":command:`cinder-manage`" -msgstr ":command:`cinder-manage`" - -msgid ":command:`euca-describe-availability-zones verbose`" -msgstr ":command:`euca-describe-availability-zones verbose`" - -msgid ":command:`glance-manage`" -msgstr ":command:`glance-manage`" - -msgid ":command:`keystone-manage`" -msgstr ":command:`keystone-manage`" - -msgid ":command:`nova-manage`" -msgstr ":command:`nova-manage`" - -msgid ":command:`openstack compute service list`" -msgstr ":command:`openstack compute service list`" - -msgid ":command:`openstack host list` (os-hosts)" -msgstr ":command:`openstack host list` (os-hosts)" - -msgid ":doc:`app-crypt`" -msgstr ":doc:`app-crypt`" - -msgid ":doc:`app-resources`" -msgstr ":doc:`app-resources`" - -msgid ":doc:`app-roadmaps`" -msgstr ":doc:`app-roadmaps`" - -msgid ":doc:`app-usecases`" -msgstr ":doc:`app-usecases`" - -msgid ":doc:`common/glossary`" -msgstr ":doc:`common/glossary`" - -msgid ":doc:`ops-advanced-configuration`" -msgstr ":doc:`ops-advanced-configuration`" - -msgid ":doc:`ops-backup-recovery`" -msgstr ":doc:`ops-backup-recovery`" - -msgid ":doc:`ops-customize`" -msgstr ":doc:`ops-customize`" - -msgid ":doc:`ops-lay-of-the-land`" -msgstr ":doc:`ops-lay-of-the-land`" - -msgid ":doc:`ops-logging-monitoring`" -msgstr ":doc:`ops-logging-monitoring`" - -msgid ":doc:`ops-maintenance`" -msgstr ":doc:`ops-maintenance`" - -msgid ":doc:`ops-network-troubleshooting`" -msgstr ":doc:`ops-network-troubleshooting`" - -msgid ":doc:`ops-projects-users`" -msgstr ":doc:`ops-projects-users`" - -msgid ":doc:`ops-upgrades`" -msgstr ":doc:`ops-upgrades`" - -msgid ":doc:`ops-user-facing-operations`" -msgstr ":doc:`ops-user-facing-operations`" - -msgid "" -":ref:`table_segregation_methods` provides a comparison view of each " -"segregation method currently provided by OpenStack Compute." -msgstr "" -":ref:`table_segregation_methods` では、OpenStack Compute が現在提供している各" -"分割メソッドの比較ビューを提供しています。" - -msgid "" -":term:`Availability zones ` and host aggregates, which " -"merely divide a single Compute deployment." -msgstr "" -":term:`アベイラビリティゾーン ` およびホストアグリゲート。" -"コンピュートのデプロイメントの分割のみを行います。" - -msgid "" -msgstr "<スナップショットされたインスタンスの UUID>" - -msgid "" -msgstr "<スナップショットされたインスタンスの元イメージの UUID>" - -msgid "" -"A DHCP problem might be caused by a misbehaving dnsmasq process. First, " -"debug by checking logs and then restart the dnsmasq processes only for that " -"project (tenant). In VLAN mode, there is a dnsmasq process for each tenant. " -"Once you have restarted targeted dnsmasq processes, the simplest way to rule " -"out dnsmasq causes is to kill all of the dnsmasq processes on the machine " -"and restart ``nova-network``. As a last resort, do this as root:" -msgstr "" -"DHCP の問題は dnsmasq の不具合が原因となりがちです。まず、ログを確認し、その" -"後該当するプロジェクト(テナント)の dnsmasq プロセスを再起動してください。 " -"VLAN モードにおいては、 dnsmasq プロセスはテナントごとに存在します。すでに該" -"当の dnsmasq プロセスを再起動しているのであれば、もっともシンプルな解決法は、" -"マシン上の全ての dnsmasq プロセスをkillし、 ``nova-network`` を再起動すること" -"です。最終手段として、root で以下を実行してください。" - -msgid "" -"A NetApp appliance is used in each region for both block storage and " -"instance storage. 
There are future plans to move the instances off the " -"NetApp appliance and onto a distributed file system such as :term:`Ceph` or " -"GlusterFS." -msgstr "" -"各リージョンでは、ブロックストレージとインスタンスストレージの両方でNetApp ア" -"プライアンスが使用されています。これらのインスタンスを NetApp アプライアンス" -"から :term:`Ceph` または GlusterFS といった分散ファイルシステム上に移動する計" -"画があります。" - -msgid "" -"A basic type of alert monitoring is to simply check and see whether a " -"required process is running. For example, ensure that the ``nova-api`` " -"service is running on the cloud controller:" -msgstr "" -"基本的なアラーム監視は、単に要求されたプロセスが稼働しているかどうかを確認す" -"ることです。 例えば、 ``nova-api`` サービスがクラウドコントローラーで稼働して" -"いるかどうかを確認します。" - -msgid "" -"A boolean to indicate whether the volume should be deleted when the instance " -"is terminated. True can be specified as ``True`` or ``1``. False can be " -"specified as ``False`` or ``0``." -msgstr "" -"インスタンスが終了したときに、ボリュームが削除されるかどうかを指示する論理値" -"です。真は ``True`` または ``1`` として指定できます。偽は ``False`` または " -"``0`` として指定できます。" - -msgid "" -"A brief overview of how to send REST API requests to endpoints for OpenStack " -"services" -msgstr "" -"OpenStack サービスのエンドポイントに REST API リクエストをどのように送信する" -"かについての概要が説明されています" - -msgid "" -"A cloud with multiple sites where you can schedule VMs \"anywhere\" or on a " -"particular site." -msgstr "" -"複数サイトで構成されるクラウドで、仮想マシンを「任意のサイト」または特定のサ" -"イトにスケジューリングしたい場合" - -msgid "" -"A cloud with multiple sites, where you schedule VMs to a particular site and " -"you want a shared infrastructure." -msgstr "" -"複数サイトで構成されるクラウドで、仮想マシンを特定のサイトに対してスケジュー" -"リングでき、かつ共有インフラを利用したい場合" - -msgid "" -"A collection of foreign keys are available to find relations to the " -"instance. The most useful of these — ``user_id`` and ``project_id`` are the " -"UUIDs of the user who launched the instance and the project it was launched " -"in." -msgstr "" -"外部キーはインスタンスの関連を見つけるために利用可能です。これらの中で最も有" -"用なものは、 ``user_id`` および ``project_id`` です。これらは、インスタンスを" -"起動したユーザー、およびそれが起動されたプロジェクトの UUID です。" - -msgid "" -"A common new-user issue with OpenStack is failing to set an appropriate " -"security group when launching an instance. As a result, the user is unable " -"to contact the instance on the network." -msgstr "" -"OpenStack の新しいユーザーがよく経験する問題が、インスタンスを起動するときに" -"適切なセキュリティグループを設定できず、その結果、ネットワーク経由でインスタ" -"ンスにアクセスできないというものです。" - -msgid "" -"A common scenario is to take down production management services in " -"preparation for an upgrade, completed part of the upgrade process, and " -"discovered one or more problems not encountered during testing. As a " -"consequence, you must roll back your environment to the original \"known good" -"\" state. You also made sure that you did not make any state changes after " -"attempting the upgrade process; no new instances, networks, storage volumes, " -"and so on. Any of these new resources will be in a frozen state after the " -"databases are restored from backup." -msgstr "" -"一般的なシナリオは、アップグレードの準備で本番の管理サービスを分解して、アッ" -"プグレード手順の一部分を完了して、テスト中には遭遇しなかった 1 つ以上の問題に" -"遭遇することです。環境を元の「万全な」状態にロールバックする必要があります。" -"続けて、アップグレードプロセスを試行した後、新しいインスタンス、ネットワー" -"ク、ストレージボリュームなど、何も状態を変更していないことを確実にしてくださ" -"い。これらの新しいリソースはすべて、データベースがバックアップからリストアさ" -"れた後、フリーズ状態になります。" - -msgid "" -"A common use of host aggregates is to provide information for use with the " -"``nova-scheduler``. For example, you might use a host aggregate to group a " -"set of hosts that share specific flavors or images." 
-msgstr "" -"ホストアグリゲートの一般的な用途は ``nova-scheduler`` で使用する情報を提供す" -"ることです。例えば、ホストアグリゲートを使って、特定のフレーバーやイメージを" -"共有するホストの集合を作成することができます。" - -msgid "" -"A common way of dealing with the recovery from a full system failure, such " -"as a power outage of a data center, is to assign each service a priority, " -"and restore in order. :ref:`table_example_priority` shows an example." -msgstr "" -"データセンターの電源障害など、完全なシステム障害からリカバリーする一般的な方" -"法は、各サービスに優先度を付け、順番に復旧していくことです。 :ref:" -"`table_example_priority` に例を示します。" - -msgid "A compute node" -msgstr "コンピュートノード" - -msgid "" -"A critical part of a cloud's scalability is the amount of effort that it " -"takes to run your cloud. To minimize the operational cost of running your " -"cloud, set up and use an automated deployment and configuration " -"infrastructure with a configuration management system, such as :term:" -"`Puppet` or :term:`Chef`. Combined, these systems greatly reduce manual " -"effort and the chance for operator error." -msgstr "" -"クラウドのスケーラビリティにおける重要な部分の一つは、クラウドを運用するのに" -"必要な労力にあります。クラウドの運用コストを最小化するために、 :term:" -"`Puppet` や :term:`Chef` などの設定管理システムを使用して、自動化されたデプロ" -"イメントおよび設定インフラストラクチャーを設定、使用してください。これらのシ" -"ステムを統合すると、工数やオペレーターのミスを大幅に減らすことができます。" - -msgid "" -"A descriptive name, such as xx.size\\_name, is conventional but not " -"required, though some third-party tools may rely on it." -msgstr "" -"慣習として xx.size\\_name などの内容を表す名前を使用しますが、必須ではありま" -"せん。いくつかのサードパーティツールはその名称に依存しているかもしれません。" - -msgid "" -"A device name where the volume is attached in the system at ``/dev/dev_name``" -msgstr "そのボリュームはシステムで ``/dev/dev_name`` に接続されます。" - -msgid "" -"A different API endpoint for every region. Each region has a full nova " -"installation." -msgstr "各リージョンは完全な nova インストール環境を持ちます。" - -msgid "" -"A feature was introduced in Essex to periodically check and see if there " -"were any ``_base`` files not in use. If there were, OpenStack Compute would " -"delete them. This idea sounds innocent enough and has some good qualities to " -"it. But how did this feature end up turned on? It was disabled by default in " -"Essex. As it should be. It was `decided to be turned on in Folsom `_. I cannot emphasize enough that:" -msgstr "" -"Essex で、 ``_base`` 下の任意のファイルが使用されていないかどうか定期的に" -"チェックして確認する機能が導入された。もしあれば、OpenStack Compute はその" -"ファイルを削除する。このアイデアは問題がないように見え、品質的にも良いよう" -"だった。しかし、この機能を有効にすると最終的にどうなるのか?Essex ではこの機" -"能がデフォルトで無効化されていた。そうあるべきであったからだ。これは、 " -"`Folsom で有効になることが決定された `_ 。私はそうあるべきとは思わない。何故なら" - -msgid "A few nights later, it happened again." -msgstr "数日後、それは再び起こった。" - -msgid "" -"A final example is if a user is hammering cloud resources repeatedly. " -"Contact the user and learn what he is trying to do. Maybe he doesn't " -"understand that what he's doing is inappropriate, or maybe there is an issue " -"with the resource he is trying to access that is causing his requests to " -"queue or lag." -msgstr "" -"最後の例は、ユーザーがクラウドのリソースに繰り返し悪影響を与える場合です。" -"ユーザーと連絡をとり、何をしようとしているのか理解します。ユーザー自身が実行" -"しようとしていることを正しく理解していない可能性があります。または、アクセス" -"しようとしているリソースに問題があり、リクエストがキューに入ったり遅れが発生" -"している場合もあります。" - -msgid "A full set of options can be found using:" -msgstr "すべてのオプションは、次のように確認できます。" - -msgid "" -"A list of terms used in this book is included, which is a subset of the " -"larger OpenStack glossary available online." -msgstr "" -"この本で使われている用語の一覧。オンライン上にある OpenStack 用語集のサブセッ" -"トです。" - -msgid "" -"A long requested service, to provide the ability to manipulate DNS entries " -"associated with OpenStack resources has gathered a following. 
The designate " -"project was also released." -msgstr "" -"長く要望されていたサービスです。配下を収集した OpenStack リソースを関連付けら" -"れた DNS エントリーを操作する機能を提供します。designate プロジェクトもリリー" -"スされました。" - -msgid "" -"A major quality push has occurred across drivers and plug-ins in Block " -"Storage, Compute, and Networking. Particularly, developers of Compute and " -"Networking drivers that require proprietary or hardware products are now " -"required to provide an automated external testing system for use during the " -"development process." -msgstr "" -"主要な品質は、Block Storage、Compute、Networking におけるドライバーやプラグイ" -"ンをまたがり発生しています。とくに、プロプライエタリーやハードウェア製品を必" -"要とする Compute と Networking のドライバー開発者は、開発プロセス中に使用する" -"ために、自動化された外部テストシステムを提供する必要があります。" - -msgid "" -"A much-requested answer to big data problems, a dedicated team has been " -"making solid progress on a Hadoop-as-a-Service project." -msgstr "" -"ビッグデータの問題に対する最も要望された回答です。専門チームが Hadoop-as-a-" -"Service プロジェクトに安定した進捗を実現しました。" - -msgid "" -"A note about DAIR's architecture: ``/var/lib/nova/instances`` is a shared " -"NFS mount. This means that all compute nodes have access to it, which " -"includes the ``_base`` directory. Another centralized area is ``/var/log/" -"rsyslog`` on the cloud controller. This directory collects all OpenStack " -"logs from all compute nodes. I wondered if there were any entries for the " -"file that :command:`virsh` is reporting:" -msgstr "" -"DAIR のアーキテクチャーは ``/var/lib/nova/instances`` が共有 NFS マウントであ" -"ることに注意したい。これは、全てのコンピュートノードがそのディレクトリにアク" -"セスし、その中に ``_base`` ディレクトリが含まれることを意味していた。その他の" -"集約化エリアはクラウドコントローラーの ``/var/log/rsyslog`` だ。このディレク" -"トリは全コンピュートノードの全ての OpenStack ログが収集されていた。私は、 :" -"command:`virsh` が報告したファイルに関するエントリがあるのだろうかと思った。" - -msgid "" -"A number of operating systems use rsyslog as the default logging service. " -"Since it is natively able to send logs to a remote location, you do not have " -"to install anything extra to enable this feature, just modify the " -"configuration file. In doing this, consider running your logging over a " -"management network or using an encrypted VPN to avoid interception." -msgstr "" -"多くのオペレーティングシステムは、rsyslog をデフォルトのロギングサービスとし" -"て利用します。rsyslog は、リモートにログを送信する機能を持っているので、何か" -"を追加でインストールする必要がなく、設定ファイルを変更するだけです。リモート" -"転送を実施する際は、盗聴を防ぐためにログが自身の管理ネットワーク上を通る、も" -"しくは暗号化VPNを利用することを考慮する必要があります。" - -msgid "" -"A number of time-related fields are useful for tracking when state changes " -"happened on an instance:" -msgstr "" -"多くの時刻関連のフィールドは、いつ状態の変化がインスタンスに起こったかを追跡" -"する際に役に立ちます:" - -msgid "" -"A quick Google search turned up this: `DHCP lease errors in VLAN mode " -"`_ which further " -"supported our DHCP theory." -msgstr "" -"ちょっと Google 検索した結果、`VLAN モードでの DHCPリースエラー `_ を見つけた。この情報はその後の" -"我々の DHCP 方針の支えになった。" - -msgid "" -"A quick way to check whether DNS is working is to resolve a hostname inside " -"your instance by using the :command:`host` command. If DNS is working, you " -"should see:" -msgstr "" -"DNS が正しくホスト名をインスタンス内から解決できているか確認する簡単な方法" -"は、 :command:`host` コマンドです。もし DNS が正しく動いていれば、以下メッ" -"セージが確認できます。" - -msgid "" -"A report came in: VMs were launching slowly, or not at all. Cue the standard " -"checks—nothing on the Nagios, but there was a spike in network towards the " -"current master of our RabbitMQ cluster. Investigation started, but soon the " -"other parts of the queue cluster were leaking memory like a sieve. Then the " -"alert came in—the master Rabbit server went down and connections failed over " -"to the slave." -msgstr "" -"報告が入った。VM の起動が遅いか、全く起動しない。標準のチェック項目は?" 
-"nagios 上は問題なかったが、RabbitMQ クラスタの現用系に向かうネットワークのみ" -"高負荷を示していた。捜査を開始したが、すぐに RabbitMQ クラスタの別の部分がざ" -"るのようにメモリリークを起こしていることを発見した。また警報か?RabbitMQ サー" -"バーの現用系はダウンしようとしていた。接続は待機系にフェイルオーバーした。" - -msgid "A service to provide queues of messages and notifications was released." -msgstr "メッセージと通知のキューを提供するサービスが提供されました。" - -msgid "A shell where you can get some work done" -msgstr "作業を行うためのシェル" - -msgid "" -"A similar pattern can be followed in other projects that use the driver " -"architecture. Simply create a module and class that conform to the driver " -"interface and plug it in through configuration. Your code runs when that " -"feature is used and can call out to other services as necessary. No project " -"core code is touched. Look for a \"driver\" value in the project's ``.conf`` " -"configuration files in ``/etc/`` to identify projects that use a " -"driver architecture." -msgstr "" -"ドライバ・アーキテクチャーを使う他のプロジェクトで、類似のパターンに従うこと" -"ができます。単純に、そのドライバーインタフェースに従うモジュールとクラスを作" -"成し、環境定義によって組み込んでください。あなたのコードはその機能が使われた" -"時に実行され、必要に応じて他のサービスを呼び出します。プロジェクトのコアコー" -"ドは一切修正しません。ドライバーアーキテクチャーを使っているプロジェクトを確" -"認するには、``/etc/`` に格納されている、プロジェクトの ``.conf`` 設" -"定ファイルの中で driver 変数を探してください。" - -msgid "" -"A single :term:`API endpoint` for compute, or you require a second level of " -"scheduling." -msgstr "" -"コンピュート資源に対する単一の :term:`API エンドポイント ` 、も" -"しくは2段階スケジューリングが必要な場合" - -msgid "A single-site cloud with equipment fed by separate power supplies." -msgstr "分離された電源供給ラインを持つ設備で構成される、単一サイトのクラウド。" - -msgid "" -"A snapshot captures the state of the file system, but not the state of the " -"memory. Therefore, to ensure your snapshot contains the data that you want, " -"before your snapshot you need to ensure that:" -msgstr "" -"スナップショットは、ファイルシステムの状態をキャプチャーしますが、メモリーの" -"状態をキャプチャーしません。そのため、スナップショットに期待するデータが含ま" -"れることを確実にするために、次のことを確実にする必要があります。" - -msgid "" -"A tangible example of this is the ``nova-compute`` process. In order to " -"manage the image cache with libvirt, ``nova-compute`` has a periodic process " -"that scans the contents of the image cache. Part of this scan is calculating " -"a checksum for each of the images and making sure that checksum matches what " -"``nova-compute`` expects it to be. However, images can be very large, and " -"these checksums can take a long time to generate. At one point, before it " -"was reported as a bug and fixed, ``nova-compute`` would block on this task " -"and stop responding to RPC requests. This was visible to users as failure of " -"operations such as spawning or deleting instances." -msgstr "" -"これの具体的な例が ``nova-compute`` プロセスです。libvirt でイメージキャッ" -"シュを管理するために、``nova-compute`` はイメージキャッシュの内容をスキャンす" -"る周期的なプロセスを用意します。このスキャンの中で、各イメージのチェックサム" -"を計算し、チェックサムが ``nova-compute`` が期待する値と一致することを確認し" -"ます。しかしながら、イメージは非常に大きく、チェックサムを生成するのに長い時" -"間がかかる場合があります。このことがバグとして報告され修正される前の時点で" -"は、``nova-compute`` はこのタスクで停止し RPC リクエストに対する応答を停止し" -"てしまっていました。この振る舞いは、インスタンスの起動や削除などの操作の失敗" -"としてユーザーに見えていました。" - -msgid "" -"A tool such as **collectd** can be used to store this information. While " -"collectd is out of the scope of this book, a good starting point would be to " -"use collectd to store the result as a COUNTER data type. More information " -"can be found in `collectd's documentation `_." 
-msgstr "" -"collectd のようなツールはこのような情報を保管することに利用できます。 " -"collectd はこの本のスコープから外れますが、 collectd で COUNTER データ形とし" -"て結果を保存するのがよい出発点になります。より詳しい情報は `collectd のドキュ" -"メント `_ を参照してくださ" -"い。" - -msgid "A typical user" -msgstr "一般的なユーザー" - -msgid "" -"A user might need a custom flavor that is uniquely tuned for a project she " -"is working on. For example, the user might require 128 GB of memory. If you " -"create a new flavor as described above, the user would have access to the " -"custom flavor, but so would all other tenants in your cloud. Sometimes this " -"sharing isn't desirable. In this scenario, allowing all users to have access " -"to a flavor with 128 GB of memory might cause your cloud to reach full " -"capacity very quickly. To prevent this, you can restrict access to the " -"custom flavor using the :command:`nova flavor-access-add` command:" -msgstr "" -"ユーザーが、取り組んでいるプロジェクト向けに独自にチューニングした、カスタム" -"フレーバーを必要とするかもしれません。例えば、ユーザーが 128 GB メモリーを必" -"要とするかもしれません。前に記載したとおり、新しいフレーバーを作成する場合、" -"ユーザーがカスタムフレーバーにアクセスできるでしょう。しかし、クラウドにある" -"他のすべてのクラウドもアクセスできます。ときどき、この共有は好ましくありませ" -"ん。この場合、すべてのユーザーが 128 GB メモリーのフレーバーにアクセスでき、" -"クラウドのリソースが非常に高速に容量不足になる可能性があります。これを防ぐた" -"めに、:command:`nova flavor-access-add` コマンドを使用して、カスタムフレー" -"バーへのアクセスを制限できます。" - -msgid "A user recently tried launching a CentOS instance on that node" -msgstr "" -"最近、あるユーザがそのノード上で CentOS のインスタンスを起動しようとした。" - -msgid "AMQP broker" -msgstr "AMQP ブローカー" - -msgid "Absolute limits" -msgstr "絶対制限" - -msgid "Abstract" -msgstr "概要" - -msgid "Account quotas" -msgstr "アカウントのクォータ" - -msgid "Acknowledgements" -msgstr "謝辞" - -msgid "Adam Hyde" -msgstr "Adam Hyde" - -msgid "" -"Adam Powell in Racker IT supplied us with bandwidth each day and second " -"monitors for those of us needing more screens." -msgstr "" -"Rackspace IT部門 の Adam Powell は、私たちに毎日のネットワーク帯域を提供して" -"くれました。また、より多くのスクリーンが必要となったため、セカンドモニタを提" -"供してくれました。" - -msgid "" -"Adam facilitated this book sprint. He also founded the book sprint " -"methodology and is the most experienced book-sprint facilitator around. See " -"`BookSprints `_ for more information. Adam " -"founded FLOSS Manuals—a community of some 3,000 individuals developing Free " -"Manuals about Free Software. He is also the founder and project manager for " -"Booktype, an open source project for writing, editing, and publishing books " -"online and in print." -msgstr "" -"Adam は今回の Book Sprint をリードしました。 Book Sprint メソッドを創設者でも" -"あり、一番経験豊富な Book Sprint のファシリテーターです。詳しい情報は " -"`BookSprints `_ を見てください。 3000人もの参加者" -"がいるフリーソフトウェアのフリーなマニュアルを作成するコミュニティである " -"FLOSS Manuals の創設者です。また、Booktype の創設者でプロジェクトマネージャー" -"です。 Booktype はオンラインで本の執筆、編集、出版を行うオープンソースプロ" -"ジェクトです。" - -msgid "" -"Add all raw disks to one large RAID array, either hardware or software " -"based. You can partition this large array with the boot, root, swap, and LVM " -"areas. This option is simple to implement and uses all partitions. However, " -"disk I/O might suffer." -msgstr "" -"すべてのローディスクを 1 つの大きな RAID 配列に追加します。ここでは、ソフト" -"ウェアベースでもハードウェアベースでも構いません。この大きなRAID 配列を " -"boot、root、swap、LVM 領域に分割します。この選択肢はシンプルですべてのパー" -"ティションを利用することができますが、I/O性能に悪影響がでる可能性があります。" - -msgid "Add device ``snooper0`` to bridge ``br-int``:" -msgstr "``snooper0`` デバイスを ``br-int`` ブリッジに追加します。" - -msgid "Add metadata to the container to allow the IP:" -msgstr "メタデータをコンテナーに追加して、IP を許可します。" - -msgid "Add the repository for the new release packages." 
-msgstr "新リリースのパッケージのリポジトリーを追加します。" - -msgid "Adding Custom Logging Statements" -msgstr "カスタムログの追加" - -msgid "Adding Images" -msgstr "イメージの追加" - -msgid "Adding Projects" -msgstr "プロジェクトの追加" - -msgid "Adding Signed Images" -msgstr "署名済みイメージの追加" - -msgid "Adding a Compute Node" -msgstr "コンピュートノードの追加" - -msgid "" -"Adding a new object storage node is different from adding compute or block " -"storage nodes. You still want to initially configure the server by using " -"your automated deployment and configuration-management systems. After that " -"is done, you need to add the local disks of the object storage node into the " -"object storage ring. The exact command to do this is the same command that " -"was used to add the initial disks to the ring. Simply rerun this command on " -"the object storage proxy server for all disks on the new object storage " -"node. Once this has been done, rebalance the ring and copy the resulting " -"ring files to the other storage nodes." -msgstr "" -"新しいオブジェクトストレージノードの追加は、コンピュートノードやブロックスト" -"レージノードの追加とは異なります。サーバーの設定は、これまで通り自動配備シス" -"テムと構成管理システムを使って行えます。完了した後、オブジェクトストレージ" -"ノードのローカルディスクをオブジェクトストレージリングに追加する必要がありま" -"す。これを実行するコマンドは、最初にディスクをリングに追加するのに使用したコ" -"マンドと全く同じです。オブジェクトストレージプロキシサーバーにおいて、このコ" -"マンドを、新しいオブジェクトストレージノードにあるすべてのディスクに対して、" -"再実行するだけです。これが終わったら、リングの再バランスを行い、更新されたリ" -"ングファイルを他のストレージノードにコピーします。" - -msgid "Adding an Object Storage Node" -msgstr "オブジェクトストレージノードの追加" - -msgid "" -"Adding security groups is typically done on instance boot. When launching " -"from the dashboard, you do this on the :guilabel:`Access & Security` tab of " -"the :guilabel:`Launch Instance` dialog. When launching from the command " -"line, append ``--security-groups`` with a comma-separated list of security " -"groups." -msgstr "" -"セキュリティグループの追加は、一般的にインスタンスの起動時に実行されます。" -"ダッシュボードから起動するとき、これは :guilabel:`インスタンスの起動` ダイア" -"ログの :guilabel:`アクセスとセキュリティー` タブにあります。コマンドラインか" -"ら起動する場合には、 ``--security-groups`` にセキュリティグループのコンマ区切" -"り一覧を指定します。" - -msgid "" -"Adding to a RAID array (RAID stands for redundant array of independent " -"disks), based on the number of disks you have available, so that you can add " -"capacity as your cloud grows. Some options are described in more detail " -"below." -msgstr "" -"使用可能なディスクの数をもとに、RAID 配列 (RAID は Redundant Array of " -"Independent Disks の略) に追加します。 こうすることで、クラウドが大きくなった" -"場合も容量を追加できます。オプションは、以下で詳しく説明しています。" - -msgid "" -"Additional optional restrictions on which compute nodes the flavor can run " -"on. This is implemented as key-value pairs that must match against the " -"corresponding key-value pairs on compute nodes. Can be used to implement " -"things like special resources (such as flavors that can run only on compute " -"nodes with GPU hardware)." -msgstr "" -"フレーバーを実行できるコンピュートノードに関する追加の制限。これはオプション" -"です。これは、コンピュートノードにおいて対応するキーバリューペアとして実装さ" -"れ、コンピュートノードでの対応するキーバリューペアと一致するものでなければい" -"けません。(GPU ハードウェアを持つコンピュートノードのみにおいて実行するフレー" -"バーのように) 特別なリソースのようなものを実装するために使用できます。" - -msgid "" -"Additionally, for Identity-related issues, try the tips in :ref:" -"`sql_backend`." -msgstr "" -"さらに、Identity 関連の問題に対して、:ref:`sql_backend` にあるヒントを試して" -"みてください。" - -msgid "" -"Additionally, this instance in question was responsible for a very, very " -"large backup job each night. While \"The Issue\" (as we were now calling it) " -"didn't happen exactly when the backup happened, it was close enough (a few " -"hours) that we couldn't ignore it." 
-msgstr "" -"加えて、問題のインスタンスは毎晩非常に長いバックアップジョブを担っていた。" -"「あの問題」(今では我々はこの障害をこう呼んでいる)はバックアップが行われて" -"いる最中には起こらなかったが、(数時間たっていて)「あの問題」が起こるまであ" -"と少しのところだった。" - -msgid "Administrative Command-Line Tools" -msgstr "管理系コマンドラインツール" - -msgid "Advanced Configuration" -msgstr "高度な設定" - -msgid "After a Compute Node Reboots" -msgstr "コンピュートノードの再起動後" - -msgid "" -"After a cloud controller reboots, ensure that all required services were " -"successfully started. The following commands use :command:`ps` and :command:" -"`grep` to determine if nova, glance, and keystone are currently running:" -msgstr "" -"クラウドコントローラーの再起動後、すべての必要なサービスが正常に起動されたこ" -"とを確認します。以下のコマンドは、 :command:`ps` と :command:`grep` を使用し" -"て、nova、glance、keystone が現在動作していることを確認しています。" - -msgid "After a few minutes of troubleshooting, I saw the following details:" -msgstr "数分間のトラブル調査の後、以下の詳細が判明した。" - -msgid "" -"After digging into the nova (OpenStack Compute) code, I noticed two areas in " -"``api/ec2/cloud.py`` potentially impacting my system:" -msgstr "" -"nova (OpenStack Compute) のコードを深堀りすると、私のシステムに影響を与える可" -"能性がある 2 つの領域を ``api/ec2/cloud.py`` で見つけました。" - -msgid "" -"After finding the instance ID we headed over to ``/var/lib/nova/instances`` " -"to find the ``console.log``:" -msgstr "" -"インスタンスIDの発見後、``console.log`` を探すため ``/var/lib/nova/" -"instances`` にアクセスした。" - -msgid "" -"After learning about scalability in computing from particle physics " -"experiments, such as ATLAS at the Large Hadron Collider (LHC) at CERN, Tom " -"worked on OpenStack clouds in production to support the Australian public " -"research sector. Tom currently serves as an OpenStack community manager and " -"works on OpenStack documentation in his spare time." -msgstr "" -"CERN の Large Hadron Collider (LHC) で ATLAS のような素粒子物理学実験でコン" -"ピューティングのスケーラビリティの経験を積んだ後、現在はオーストラリアの公的" -"な研究部門を支援するプロダクションの OpenStack クラウドに携わっていました。現" -"在は OpenStack のコミュニティマネージャーを務めており、空いた時間で " -"OpenStack ドキュメントプロジェクトに参加しています。" - -msgid "" -"After migration, users see different results from :command:`openstack image " -"list` and :command:`glance image-list`. To ensure users see the same images " -"in the list commands, edit the :file:`/etc/glance/policy.json` file and :" -"file:`/etc/nova/policy.json` file to contain ``\"context_is_admin\": \"role:" -"admin\"``, which limits access to private images for projects." -msgstr "" -"移行後、ユーザーは :command:`openstack image list` と :command:`glance image-" -"list` から異なる結果を見ることになります。ユーザーが一覧コマンドにおいて同じ" -"イメージをきちんと見るために、 ``/etc/glance/policy.json`` と :file:`/etc/" -"nova/policy.json` ファイルを編集して、 ``\"context_is_admin\": \"role:admin" -"\"`` を含めます。これは、プロジェクトのプライベートイメージへのアクセスを制限" -"します。" - -msgid "" -"After reproducing the problem several times, I came to the unfortunate " -"conclusion that this cloud did indeed have a problem. Even worse, my time " -"was up in Kelowna and I had to return back to Calgary." -msgstr "" -"何度か問題が再現した後、私はこのクラウドが実は問題を抱えているという不幸な結" -"論に至った。更に悪いことに、私がケロウナから出発する時間になっており、カルガ" -"リーに戻らなければならなかった。" - -msgid "" -"After restarting the instance, everything was back up and running. We " -"reviewed the logs and saw that at some point, network communication stopped " -"and then everything went idle. We chalked this up to a random occurrence." 
-msgstr "" -"インスタンスの再起動後、全ては元通りに動くようになった。我々はログを見直し、" -"問題の箇所(ネットワーク通信が止まり、全ては待機状態になった)を見た。我々は" -"ランダムな事象の原因はこのインスタンスだと判断した。" - -msgid "After running" -msgstr "実行後" - -msgid "" -"After that, use the :command:`openstack` command to reboot all instances " -"that were on c01.example.com while regenerating their XML files at the same " -"time:" -msgstr "" -"その後、:command:`openstack` コマンドを使って、c01.example.com にあったすべて" -"のインスタンスを再起動します。起動する際にインスタンスの XML ファイルを再生成" -"します:" - -msgid "" -"After the compute node is successfully running, you must deal with the " -"instances that are hosted on that compute node because none of them are " -"running. Depending on your SLA with your users or customers, you might have " -"to start each instance and ensure that they start correctly." -msgstr "" -"コンピュートノードが正常に実行された後、そのコンピュートノードでホストされて" -"いるインスタンスはどれも動作していないので、そのコンピュートノードにおいてホ" -"ストされているインスタンスを処理する必要があります。ユーザーや顧客に対する " -"SLA によっては、各インスタンスを開始し、正常に起動していることを確認する必要" -"がある場合もあるでしょう。" - -msgid "After the dnsmasq processes start again, check whether DNS is working." -msgstr "dnsmasq再起動後に、DNSが動いているか確認します。" - -msgid "" -"After the packet is on this NIC, it transfers to the compute node's default " -"gateway. The packet is now most likely out of your control at this point. " -"The diagram depicts an external gateway. However, in the default " -"configuration with multi-host, the compute host is the gateway." -msgstr "" -"パケットはこのNICに送られた後、コンピュートノードのデフォルトゲートウェイに転" -"送されます。パケットはこの時点で、おそらくあなたの管理範囲外でしょう。図には" -"外部ゲートウェイを描いていますが、マルチホストのデフォルト構成では、コン" -"ピュートホストがゲートウェイです。" - -msgid "" -"After this command it is common practice to call :command:`openstack image " -"create` from your workstation, and once done press enter in your instance " -"shell to unfreeze it. Obviously you could automate this, but at least it " -"will let you properly synchronize." -msgstr "" -"このコマンドの後、お使いの端末から :command:`openstack image create` を呼び出" -"すことが一般的な慣習です。実行した後、インスタンスの中で Enter キーを押して、" -"フリーズ解除します。もちろん、これを自動化できますが、少なくとも適切に同期で" -"きるようになるでしょう。" - -msgid "" -"After you consider these factors, you can determine how many cloud " -"controller cores you require. A typical eight core, 8 GB of RAM server is " -"sufficient for up to a rack of compute nodes — given the above caveats." -msgstr "" -"これらの要素を検討した後、クラウドコントローラにどのくらいのコア数が必要なの" -"か決定することができます。上記で説明した留意事項の下、典型的には、ラック 1 本" -"分のコンピュートノードに対して8 コア、メモリ 8GB のサーバで充分です。" - -msgid "" -"After you establish that the instance booted properly, the task is to figure " -"out where the failure is." -msgstr "" -"インスタンスが正しく起動した後、この手順でどこが問題かを切り分けることができ" -"ます。" - -msgid "" -"After you have the list, you can use the :command:`openstack` command to " -"start each instance:" -msgstr "" -"一覧を取得した後、各インスタンスを起動するために :command:`openstack` コマン" -"ドを使用できます。" - -msgid "" -"Again, it turns out that the image was a snapshot. The three other instances " -"that successfully started were standard cloud images. Was it a problem with " -"snapshots? That didn't make sense." -msgstr "" -"再度、イメージがスナップショットであることが判明した。無事に起動した他の3イ" -"ンスタンスは標準のクラウドイメージであった。これはスナップショットの問題か?" -"それは意味が無かった。" - -msgid "" -"Again, the right answer depends on your environment. You have to make your " -"decision based on the trade-offs between space utilization, simplicity, and " -"I/O performance." -msgstr "" -"ここでも、環境によって適したソリューションが変わります。スペース使用状況、シ" -"ンプルさ、I/O パフォーマンスの長所、短所をベースに意思決定していく必要があり" -"ます。" - -msgid "Ah-hah! So OpenStack was deleting it. But why?" -msgstr "あっはっは!じゃぁ、OpenStack が削除したのか。でも何故?" 
- -msgid "" -"All files and directories in ``/var/lib/nova/instances`` are uniquely named. " -"The files in \\_base are uniquely titled for the glance image that they are " -"based on, and the directory names ``instance-xxxxxxxx`` are uniquely titled " -"for that particular instance. For example, if you copy all data from ``/var/" -"lib/nova/instances`` on one compute node to another, you do not overwrite " -"any files or cause any damage to images that have the same unique name, " -"because they are essentially the same file." -msgstr "" -"``/var/lib/nova/instances`` にあるすべてのファイルとディレクトリは一意に名前" -"が付けられています。 \\_base にあるファイルは元となった glance イメージに対応" -"する一意に名前が付けられています。また、``instance-xxxxxxxx`` という名前が付" -"けられたディレクトリは特定のインスタンスに対して一意にタイトルが付けられてい" -"ます。たとえば、あるコンピュートノードにある ``/var/lib/nova/instances`` のす" -"べてのデータを他のノードにコピーしたとしても、ファイルを上書きすることはあり" -"ませんし、また同じ一意な名前を持つイメージにダメージを与えることもありませ" -"ん。同じ一意な名前を持つものは本質的に同じファイルだからです。" - -msgid "" -"All in all, just issue the :command:`reboot` command. The operating system " -"cleanly shuts down services and then automatically reboots. If you want to " -"be very thorough, run your backup jobs just before you reboot." -msgstr "" -"大体の場合、単に :command:`reboot` コマンドを発行します。オペレーティングシス" -"テムがサービスを正常にシャットダウンして、その後、自動的に再起動します。万全" -"を期したい場合、再起動する前にバックアップジョブを実行します。" - -msgid "" -"All interfaces on the ``br-tun`` are internal to Open vSwitch. To monitor " -"traffic on them, you need to set up a mirror port as described above for " -"``patch-tun`` in the ``br-int`` bridge." -msgstr "" -"``br-tun`` にあるすべてのインターフェースは、Open vSwitch 内部のものです。そ" -"れらの通信を監視する場合、 ``br-int`` にある ``patch-tun`` 向けに上で説明した" -"ようなミラーポートをセットアップする必要があります。" - -msgid "All nodes" -msgstr "全ノード" - -msgid "" -"All of the alert types mentioned earlier can also be used for trend " -"reporting. Some other trend examples include:" -msgstr "" -"これまでに示した全てのアラートタイプは、トレンドレポートに利用可能です。その" -"他のトレンドの例は以下の通りです。" - -msgid "" -"All of the code for OpenStack lives in ``/opt/stack``. Go to the swift " -"directory in the ``shell`` screen and edit your middleware module." -msgstr "" -"すべての OpenStack のコードは ``/opt/stack`` にあります。 ``shell`` セッショ" -"ンの screen の中で swift ディレクトリに移動し、あなたのミドルウェアモジュール" -"を編集してください。" - -msgid "" -"All sites are based on Ubuntu 14.04, with KVM as the hypervisor. The " -"OpenStack version in use is typically the current stable version, with 5 to " -"10 percent back-ported code from trunk and modifications." -msgstr "" -"全サイトは Ubuntu 14.04 をベースにしており、ハイパーバイザとして KVM を使用し" -"ています。使用している OpenStack のバージョンは基本的に安定バージョンであり、" -"5~10%のコードが開発コードからバックポートされたか、修正されています。" - -msgid "" -"All translation of GRE tunnels to and from internal VLANs happens on this " -"bridge." -msgstr "このブリッジで GRE トンネルと内部 VLAN の相互変換が行われます。" - -msgid "Allow DHCP client traffic." -msgstr "DHCP クライアント通信の許可。" - -msgid "Allow IPv6 ICMP traffic to allow RA packets." -msgstr "RA パケットを許可するための IPv6 ICMP 通信の許可。" - -msgid "" -"Allow access to the share with IP access type and 10.254.0.4 IP address:" -msgstr "" -"IP アクセス形式と 10.254.0.4 IP アドレスを持つ共有へのアクセスを許可します。" - -msgid "Allow traffic from defined IP/MAC pairs." -msgstr "定義済み IP/MAC ペアからの通信許可。" - -msgid "" -"Almost all OpenStack components have an underlying database to store " -"persistent information. Usually this database is MySQL. Normal MySQL " -"administration is applicable to these databases. OpenStack does not " -"configure the databases out of the ordinary. Basic administration includes " -"performance tweaking, high availability, backup, recovery, and repairing. 
" -"For more information, see a standard MySQL administration guide." -msgstr "" -"ほとんどすべての OpenStack コンポーネントは、永続的な情報を保存するために内部" -"でデータベースを使用しています。このデータベースは通常 MySQL です。通常の " -"MySQL の管理方法がこれらのデータベースに適用できます。OpenStack は特別な方法" -"でデータベースを設定しているわけではありません。基本的な管理として、パフォー" -"マンス調整、高可用性、バックアップ、リカバリーおよび修理などがあります。さら" -"なる情報は標準の MySQL 管理ガイドを参照してください。" - -msgid "" -"Also check that all services are functioning. The following set of commands " -"sources the ``openrc`` file, then runs some basic glance, nova, and " -"openstack commands. If the commands work as expected, you can be confident " -"that those services are in working condition:" -msgstr "" -"また、すべてのサービスが正しく機能していることを確認します。以下のコマンド群" -"は、 ``openrc`` ファイルを読み込みます。そして、いくつかの基本的な glance、" -"nova、openstack コマンドを実行します。コマンドが期待したとおりに動作すれば、" -"それらのサービスが動作状態にあると確認できます。" - -msgid "Also check that it is functioning:" -msgstr "また、正しく機能していることを確認します。" - -msgid "Also ensure that it has successfully connected to the AMQP server:" -msgstr "AMQP サーバーに正常に接続できることも確認します。" - -msgid "" -"Also, in practice, the ``nova-compute`` services on the compute nodes do not " -"always reconnect cleanly to rabbitmq hosted on the controller when it comes " -"back up after a long reboot; a restart on the nova services on the compute " -"nodes is required." -msgstr "" -"実際には、コンピュートノードの ``nova-compute`` サービスが、コントローラー上" -"で動作している rabbitmq に正しく再接続されない場合があります。時間のかかるリ" -"ブートから戻ってきた場合や、コンピュートノードの nova サービスを再起動する必" -"要がある場合です。" - -msgid "Alter the configuration until it works." -msgstr "正常に動作するまで設定を変更する。" - -msgid "" -"Alternatively, if you want someone to help guide you through the decisions " -"about the underlying hardware or your applications, perhaps adding in a few " -"features or integrating components along the way, consider contacting one of " -"the system integrators with OpenStack experience, such as Mirantis or " -"Metacloud." -msgstr "" -"代わりに、ベースとするハードウェアやアプリケーション、いくつかの新機能の追" -"加、コンポーネントをくみ上げる方法を判断するために、誰かに支援してほしい場" -"合、Mirantis や Metacloud などの OpenStack の経験豊富なシステムインテグレー" -"ターに連絡することを検討してください。" - -msgid "" -"Alternatively, it is possible to configure VLAN-based networks to use " -"external routers rather than the l3-agent shown here, so long as the " -"external router is on the same VLAN:" -msgstr "" -"これとは別に、外部ルーターが同じ VLAN にあれば、ここの示されている L3 エー" -"ジェントの代わりに外部ルーターを使用するよう、VLAN ベースのネットワークを設定" -"できます。" - -msgid "" -"Although the title of this story is much more dramatic than the actual " -"event, I don't think, or hope, that I'll have the opportunity to use " -"\"Valentine's Day Massacre\" again in a title." -msgstr "" -"この物語のタイトルは実際の事件よりかなりドラマティックだが、私はタイトル中に" -"「バレンタインデーの大虐殺」を使用する機会が再びあるとは思わない(し望まな" -"い)。" - -msgid "" -"Although this method is not documented or supported, you can use it when " -"your compute node is permanently offline but you have instances locally " -"stored on it." -msgstr "" -"この方法はドキュメントに書かれておらず、サポートされていない方法ですが、コン" -"ピュートノードが完全にオフラインになってしまったが、インスタンスがローカルに" -"保存されているときに、この方法を使用できます。" - -msgid "Among the log statements you'll see the lines:" -msgstr "ログの中に以下の行があるでしょう。" - -msgid "" -"An OpenStack cloud does not have much value without users. This chapter " -"covers topics that relate to managing users, projects, and quotas. This " -"chapter describes users and projects as described by version 2 of the " -"OpenStack Identity API." 
-msgstr "" -"OpenStack クラウドは、ユーザーなしでは特に価値はありません。本章では、ユー" -"ザー、プロジェクト、クォータの管理に関するトピックを記載します。また、" -"OpenStack Identity API のバージョン 2 で説明されているように、ユーザーとプロ" -"ジェクトについても説明します。" - -msgid "" -"An academic turned software-developer-slash-operator, Lorin worked as the " -"lead architect for Cloud Services at Nimbis Services, where he deploys " -"OpenStack for technical computing applications. He has been working with " -"OpenStack since the Cactus release. Previously, he worked on high-" -"performance computing extensions for OpenStack at University of Southern " -"California's Information Sciences Institute (USC-ISI)." -msgstr "" -"アカデミック出身のソフトウェア開発者・運用者である彼は、Nimbis Services でク" -"ラウドサービスの Lead Architect として働いていました。Nimbis Service では彼は" -"技術計算アプリケーション用の OpenStack を運用しています。 Cactus リリース以" -"来 OpenStack に携わっています。以前は、University of Southern California's " -"Information Sciences Institute (USC-ISI) で OpenStack の high-performance " -"computing 向けの拡張を行いました。" - -msgid "" -"An administrative super user, which has full permissions across all projects " -"and should be used with great care" -msgstr "" -"すべてのプロジェクトにわたり全権限を持つ管理ユーザー。非常に注意して使用する" -"必要があります。" - -msgid "" -"An advanced use of this general concept allows different flavor types to run " -"with different CPU and RAM allocation ratios so that high-intensity " -"computing loads and low-intensity development and testing systems can share " -"the same cloud without either starving the high-use systems or wasting " -"resources on low-utilization systems. This works by setting ``metadata`` in " -"your host aggregates and matching ``extra_specs`` in your flavor types." -msgstr "" -"この一般的なコンセプトを高度なレベルで使用すると、集中度の高いコンピュート" -"ロードや負荷の低い開発やテストシステムが使用量の多いシステムのリソースが不足" -"したり、使用量の低いシステムでリソースを無駄にしたりしないで、同じクラウドを" -"共有できるように、異なるフレーバーの種別が、異なる CPU および RAM 割当の比率" -"で実行できるようになります。 これは、ホストアグリゲートに ``metadata`` を設" -"定して、フレーバー種別の ``extra_specs`` と一致させると機能します。" - -msgid "" -"An alternative to enabling the RabbitMQ web management interface is to use " -"the ``rabbitmqctl`` commands. For example, :command:`rabbitmqctl " -"list_queues| grep cinder` displays any messages left in the queue. If there " -"are messages, it's a possible sign that cinder services didn't connect " -"properly to rabbitmq and might have to be restarted." -msgstr "" -"RabbitMQ Web 管理インターフェイスを有効にするもう一つの方法としては、 " -"``rabbitmqctl`` コマンドを利用します。例えば :command:`rabbitmqctl " -"list_queues| grep cinder` は、キューに残っているメッセージを表示します。メッ" -"セージが存在する場合、Cinder サービスが RabbitMQ に正しく接続できてない可能性" -"があり、再起動が必要かもしれません。" - -msgid "" -"An attempt was made to deprecate ``nova-network`` during the Havana release, " -"which was aborted due to the lack of equivalent functionality (such as the " -"FlatDHCP multi-host high-availability mode mentioned in this guide), lack of " -"a migration path between versions, insufficient testing, and simplicity when " -"used for the more straightforward use cases ``nova-network`` traditionally " -"supported. Though significant effort has been made to address these " -"concerns, ``nova-network`` was not be deprecated in the Juno release. In " -"addition, to a limited degree, patches to ``nova-network`` have again begin " -"to be accepted, such as adding a per-network settings feature and SR-IOV " -"support in Juno." 
-msgstr "" -"Havana リリース中に ``nova-network`` を廃止しようという試みがありました。これ" -"は、このガイドで言及した FlatDHCP マルチホスト高可用性モードなどの同等機能の" -"不足、バージョン間の移行パスの不足、不十分なテスト、伝統的にサポートされる " -"``nova-network`` のより簡単なユースケースに使用する場合のシンプルさ、などの理" -"由により中断されました。甚大な努力によりこれらの心配事を解決してきましたが、 " -"``nova-network`` は Juno リリースにおいて廃止されませんでした。さらに、Juno " -"においてネットワークごとの設定機能や SR-IOV の追加などの限定された範囲で、 " -"``nova-network`` へのパッチが再び受け入れられてきました。" - -msgid "" -"An authorization policy can be composed by one or more rules. If more rules " -"are specified, evaluation policy is successful if any of the rules evaluates " -"successfully; if an API operation matches multiple policies, then all the " -"policies must evaluate successfully. Also, authorization rules are " -"recursive. Once a rule is matched, the rule(s) can be resolved to another " -"rule, until a terminal rule is reached. These are the rules defined:" -msgstr "" -"認可ポリシーは、一つまたは複数のルールにより構成できます。複数のルールを指定" -"すると、いずれかのルールが成功と評価されれば、評価エンジンが成功になります。" -"API 操作が複数のポリシーに一致すると、すべてのポリシーが成功と評価される必要" -"があります。認可ルールは再帰的にもできます。あるルールにマッチした場合、これ" -"以上展開できないルールに達するまで、そのルールは別のルールに展開されます。以" -"下のルールが定義できます。" - -msgid "" -"An automated deployment system installs and configures operating systems on " -"new servers, without intervention, after the absolute minimum amount of " -"manual work, including physical racking, MAC-to-IP assignment, and power " -"configuration. Typically, solutions rely on wrappers around PXE boot and " -"TFTP servers for the basic operating system install and then hand off to an " -"automated configuration management system." -msgstr "" -"自動のデプロイメントシステムは、物理ラッキング、MAC から IP アドレスの割当、" -"電源設定など、必要最小限の手作業のみで、介入なしに新規サーバー上にオペレー" -"ティングシステムのインストールと設定を行います。ソリューションは通常、PXE " -"ブートや TFTP サーバー関連のラッパーに依存して基本のオペレーティングシステム" -"をインストールして、次に自動設定管理システムに委譲されます。" - -msgid "An external server outside of the cloud" -msgstr "クラウド外部のサーバー" - -msgid "" -"An hour later I received the same alert, but for another compute node. Crap. " -"OK, now there's definitely a problem going on. Just like the original node, " -"I was able to log in by SSH. The bond0 NIC was DOWN but the 1gb NIC was " -"active." -msgstr "" -"1時間後、私は同じ警告を受信したが、別のコンピュートノードだった。拍手。OK、" -"問題は間違いなく現在進行中だ。元のノードと全く同様に、私は SSH でログインする" -"ことが出来た。bond0 NIC は DOWN だったが、1Gb NIC は有効だった。" - -msgid "" -"An initial idea was to just increase the lease time. If the instance only " -"renewed once every week, the chances of this problem happening would be " -"tremendously smaller than every minute. This didn't solve the problem, " -"though. It was just covering the problem up." -msgstr "" -"最初のアイデアは、単にリース時間を増やすことだった。もしインスタンスが毎週1" -"回だけIPアドレスを更新するのであれば、毎分更新する場合よりこの問題が起こる可" -"能性は極端に低くなるだろう。これはこの問題を解決しないが、問題を単に取り繕う" -"ことはできる。" - -msgid "An instance running on that compute node" -msgstr "コンピュートノード内のインスタンス" - -msgid "" -"An integral part of a configuration-management system is the item that it " -"controls. You should carefully consider all of the items that you want, or " -"do not want, to be automatically managed. For example, you may not want to " -"automatically format hard drives with user data." -msgstr "" -"設定管理システムの不可欠な部分は、このシステムが制御する項目です。自動管理を" -"する項目、しない項目をすべて慎重に検討していく必要があります。例えば、ユー" -"ザーデータが含まれるハードドライブは自動フォーマットは必要ありません。" - -msgid "" -"An upgrade pre-testing system is excellent for getting the configuration to " -"work. However, it is important to note that the historical use of the system " -"and differences in user interaction can affect the success of upgrades." 
-msgstr "" -"アップグレード前テストシステムは、設定を動作させるために優れています。しかし" -"ながら、システムの歴史的な使用法やユーザー操作における違いにより、アップグ" -"レードの成否に影響することに注意することが重要です。" - -msgid "And finally, you can disassociate the floating IP:" -msgstr "最後に、floating IPを開放します。" - -msgid "" -"And the best part: the same user had just tried creating a CentOS instance. " -"What?" -msgstr "" -"そして、最も重要なこと。同じユーザが CentOS インスタンスを作成しようとしたば" -"かりだった。何だと?" - -msgid "Anne Gentle" -msgstr "Anne Gentle" - -msgid "" -"Anne is the documentation coordinator for OpenStack and also served as an " -"individual contributor to the Google Documentation Summit in 2011, working " -"with the Open Street Maps team. She has worked on book sprints in the past, " -"with FLOSS Manuals’ Adam Hyde facilitating. Anne lives in Austin, Texas." -msgstr "" -"Anne は OpenStack のドキュメントコーディネーターで、2011年の Google Doc " -"Summit では individual contributor (個人コントリビュータ) を努め Open " -"Street Maps チームとともに活動しました。Adam Hyde が進めていた FLOSS Manuals " -"の以前の doc sprint にも参加しています。テキサス州オースティンに住んでいま" -"す。" - -msgid "" -"Another common concept across various OpenStack projects is that of periodic " -"tasks. Periodic tasks are much like cron jobs on traditional Unix systems, " -"but they are run inside an OpenStack process. For example, when OpenStack " -"Compute (nova) needs to work out what images it can remove from its local " -"cache, it runs a periodic task to do this." -msgstr "" -"様々な OpenStack プロジェクトに共通する別の考え方として、周期的タスク " -"(periodic task) があります。周期的タスクは伝統的な Unix システムの cron ジョ" -"ブに似ていますが、OpenStack プロセスの内部で実行されます。例えば、OpenStack " -"Compute (nova) はローカルキャッシュからどのイメージを削除できるかを決める必要" -"がある際に、これを行うために周期的タスクを実行します。" - -msgid "" -"Another example is a user consuming a very large amount of bandwidth. Again, " -"the key is to understand what the user is doing. If she naturally needs a " -"high amount of bandwidth, you might have to limit her transmission rate as " -"to not affect other users or move her to an area with more bandwidth " -"available. On the other hand, maybe her instance has been hacked and is part " -"of a botnet launching DDOS attacks. Resolution of this issue is the same as " -"though any other server on your network has been hacked. Contact the user " -"and give her time to respond. If she doesn't respond, shut down the instance." -msgstr "" -"別の例は、あるユーザーが非常に多くの帯域を消費することです。繰り返しですが、" -"ユーザーが実行していることを理解することが重要です。必ず多くの帯域を使用する" -"必要があれば、他のユーザーに影響を与えないように通信帯域を制限する、または、" -"より多くの帯域を利用可能な別の場所に移動させる必要があるかもしれません。一" -"方、ユーザーのインスタンスが侵入され、DDOS 攻撃を行っているボットネットの一部" -"になっているかもしれません。この問題の解決法は、ネットワークにある他のサー" -"バーが侵入された場合と同じです。ユーザーに連絡し、対応する時間を与えます。も" -"し対応しなければ、そのインスタンスを停止します。" - -msgid "Another example is displaying all properties for a certain image:" -msgstr "" -"もう一つの例は、特定のイメージに関するすべてのプロパティを表示することです。" - -msgid "" -"Any time an instance shuts down unexpectedly, it might have problems on " -"boot. For example, the instance might require an ``fsck`` on the root " -"partition. If this happens, the user can use the dashboard VNC console to " -"fix this." 
-msgstr "" -"予期せずシャットダウンしたときは、ブートに問題があるかもしれません。たとえ" -"ば、インスタンスがルートパーティションにおいて ``fsck`` を実行する必要がある" -"かもしれません。もしこうなっても、これを修復するためにダッシュボード VNC コン" -"ソールを使用できます。" - -msgid "Appendix" -msgstr "付録" - -msgid "Apr 11, 2013" -msgstr "2013年4月11日" - -msgid "Apr 15, 2011" -msgstr "2011年4月15日" - -msgid "Apr 17, 2014" -msgstr "2014年4月17日" - -msgid "Apr 3, 2014" -msgstr "2014年4月3日" - -msgid "Apr 30, 2015" -msgstr "2015年4月30日" - -msgid "Apr 4, 2013" -msgstr "2013年4月4日" - -msgid "Apr 5, 2012" -msgstr "2012年4月5日" - -msgid "" -"Arbitrary local files can also be placed into the instance file system at " -"creation time by using the ``--file `` option. You may " -"store up to five files." -msgstr "" -"``--file `` オプションを使用することにより、任意のローカル" -"ファイルを生成時にインスタンスのファイルシステムの中に置けます。5 ファイルま" -"で保存できます。" - -msgid "" -"Armed with a patched qemu and a way to reproduce, we set out to see if we've " -"finally solved The Issue. After 48 hours straight of hammering the instance " -"with bandwidth, we were confident. The rest is history. You can search the " -"bug report for \"joe\" to find my comments and actual tests." -msgstr "" -"パッチを当てた qemu と再現方法を携えて、我々は「あの問題」を最終的に解決した" -"かを確認する作業に着手した。インスタンスにネットワーク負荷をかけてから丸48時" -"間後、我々は確信していた。その後のことは知っての通りだ。あなたは、joe へのバ" -"グ報告を検索し、私のコメントと実際のテストを見つけることができる。" - -msgid "" -"Artificial scale testing can go only so far. After your cloud is upgraded, " -"you must pay careful attention to the performance aspects of your cloud." -msgstr "" -"人工的なスケールテストは、あくまである程度のものです。クラウドをアップグレー" -"ドした後、クラウドのパフォーマンス観点で十分に注意する必要があります。" - -msgid "" -"As a cloud administrative user, you can use the OpenStack dashboard to " -"create and manage projects, users, images, and flavors. Users are allowed to " -"create and manage images within specified projects and to share images, " -"depending on the Image service configuration. Typically, the policy " -"configuration allows admin users only to set quotas and create and manage " -"services. The dashboard provides an :guilabel:`Admin` tab with a :guilabel:" -"`System Panel` and an :guilabel:`Identity` tab. These interfaces give you " -"access to system information and usage as well as to settings for " -"configuring what end users can do. Refer to the `OpenStack Administrator " -"Guide `__ for " -"detailed how-to information about using the dashboard as an admin user." -msgstr "" -"クラウドの管理ユーザーとして OpenStack Dashboard を使用して、プロジェクト、" -"ユーザー、イメージ、フレーバーの作成および管理を行うことができます。ユーザー" -"は Image service の設定に応じて、指定されたプロジェクト内でイメージを作成/管" -"理したり、共有したりすることができます。通常、ポリシーの設定では、管理ユー" -"ザーのみがクォータの設定とサービスの作成/管理を行うことができます。ダッシュ" -"ボードには :guilabel:`管理` タブがあり、 :guilabel:`システムパネル` と :" -"guilabel:`ユーザー管理タブ` に分かれています。これらのインターフェースによ" -"り、システム情報と使用状況のデータにアクセスすることができるのに加えて、エン" -"ドユーザーが実行可能な操作を設定することもできます。管理ユーザーとしてダッ" -"シュボードを使用する方法についての詳しい説明は `OpenStack Administrator " -"Guide `__ を参照してく" -"ださい。" - -msgid "" -"As a last resort, our network admin (Alvaro) and myself sat down with four " -"terminal windows, a pencil, and a piece of paper. In one window, we ran " -"ping. In the second window, we ran ``tcpdump`` on the cloud controller. In " -"the third, ``tcpdump`` on the compute node. And the forth had ``tcpdump`` on " -"the instance. For background, this cloud was a multi-node, non-multi-host " -"setup." 
-msgstr "" -"結局、我々のネットワーク管理者(Alvao)と私自身は4つのターミナルウィンドウ、" -"1本の鉛筆と紙切れを持って座った。1つのウインドウで我々は ping を実行した。" -"2つ目のウインドウではクラウドコントローラー上の ``tcpdump`` 、3つ目ではコン" -"ピュートノード上の ``tcpdump`` 、4つ目ではインスタンス上の ``tcpdump`` を実" -"行した。前提として、このクラウドはマルチノード、非マルチホスト構成である。" - -msgid "" -"As a specific example, compare a cloud that supports a managed web-hosting " -"platform with one running integration tests for a development project that " -"creates one VM per code commit. In the former, the heavy work of creating a " -"VM happens only every few months, whereas the latter puts constant heavy " -"load on the cloud controller. You must consider your average VM lifetime, as " -"a larger number generally means less load on the cloud controller." -msgstr "" -"特定の例としては、マネージド Web ホスティングプラットフォームをサポートするク" -"ラウドと、コードコミットごとに仮想マシンを1つ作成するような開発プロジェクト" -"の統合テストを実行するクラウドを比較してみましょう。前者では、VMを作成する負" -"荷の大きい処理は数か月に 一度しか発生しないのに対して、後者ではクラウドコント" -"ローラに常に負荷の大きい処理が発生します。一般論として、VMの平均寿命が長いと" -"いうことは、クラウドコントローラの負荷が軽いことを意味するため、平均的なVMの" -"寿命を検討する必要があります。" - -msgid "" -"As an OpenStack cloud is composed of so many different services, there are a " -"large number of log files. This chapter aims to assist you in locating and " -"working with them and describes other ways to track the status of your " -"deployment." -msgstr "" -"OpenStackクラウドは、様々なサービスから構成されるため、多くのログファイルが存" -"在します。この章では、それぞれのログの場所と取り扱い、そしてシステムのさらな" -"る監視方法について説明します。" - -msgid "" -"As an administrative user, you can update the Block Storage service quotas " -"for a tenant, as well as update the quota defaults for a new tenant. See :" -"ref:`table_block_storage_quota`." -msgstr "" -"管理ユーザーは、既存のテナントの Block Storage のクォータを更新できます。ま" -"た、新規テナントのクォータのデフォルト値を更新することもできます。:ref:" -"`table_block_storage_quota` を参照してください。" - -msgid "" -"As an administrative user, you can update the Compute service quotas for an " -"existing tenant, as well as update the quota defaults for a new tenant. See :" -"ref:`table_compute_quota`." -msgstr "" -"管理ユーザーは、既存のテナントの Compute のクォータを更新できます。また、新規" -"テナントのクォータのデフォルト値を更新することもできます。 :ref:" -"`table_compute_quota` を参照してください。" - -msgid "" -"As an administrative user, you can use the :command:`cinder quota-*` " -"commands, which are provided by the ``python-cinderclient`` package, to view " -"and update tenant quotas." -msgstr "" -"管理ユーザーは :command:`cinder quota-*` コマンドを使って、テナントのクォー" -"タを表示したり更新したりできます。コマンドは ``python-cinderclient`` パッケー" -"ジに含まれます。" - -msgid "" -"As an administrative user, you can use the :command:`nova quota-*` commands, " -"which are provided by the ``python-novaclient`` package, to view and update " -"tenant quotas." -msgstr "" -"管理ユーザーは :command:`nova quota-*` コマンドを使って、テナントのクォータ" -"を表示したり更新したりできます。コマンドは ``python-novaclient`` パッケージに" -"含まれます。" - -msgid "" -"As an administrator, you have a few ways to discover what your OpenStack " -"cloud looks like simply by using the OpenStack tools available. This section " -"gives you an idea of how to get an overview of your cloud, its shape, size, " -"and current state." -msgstr "" -"管理者は、利用可能な OpenStack ツールを使用して、OpenStack クラウドが全体像を" -"確認する方法がいくつかあります。本項では、クラウドの概要、形態、サイズ、現在" -"の状態についての情報を取得する方法について説明します。" - -msgid "" -"As an example, recording ``nova-api`` usage can allow you to track the need " -"to scale your cloud controller. By keeping an eye on ``nova-api`` requests, " -"you can determine whether you need to spawn more ``nova-api`` processes or " -"go as far as introducing an entirely new server to run ``nova-api``. 
To get " -"an approximate count of the requests, look for standard INFO messages in ``/" -"var/log/nova/nova-api.log``:" -msgstr "" -"例として、 ``nova-api`` の使用を記録することでクラウドコントローラーをスケー" -"ルする必要があるかを追跡できます。 ``nova-api`` のリクエスト数に注目すること" -"により、 ``nova-api`` プロセスを追加するか、もしくは、 ``nova-api`` を実行す" -"るための新しいサーバーを導入することまで行なうかを決定することができます。リ" -"クエストの概数を取得するには ``/var/log/nova/nova-api.log`` の INFO メッセー" -"ジを検索します。" - -msgid "" -"As an open source project, one of the unique aspects of OpenStack is that it " -"has many different levels at which you can begin to engage with it—you don't " -"have to do everything yourself." -msgstr "" -"OpenStack は、オープンソースプロジェクトとして、ユニークな点があります。その " -"1 つは、さまざまなレベルで OpenStack に携わりはじめることができる点です。すべ" -"てを自分自身で行う必要はありません。" - -msgid "" -"As for your initial deployment, you should ensure that all hardware is " -"appropriately burned in before adding it to production. Run software that " -"uses the hardware to its limits—maxing out RAM, CPU, disk, and network. Many " -"options are available, and normally double as benchmark software, so you " -"also get a good idea of the performance of your system." -msgstr "" -"初期導入時と同じように、本番環境に追加する前に、すべてのハードウェアについて" -"適切な通電テストを行うべきでしょう。ハードウェアを限界まで使用するソフトウェ" -"アを実行します。RAM、CPU、ディスク、ネットワークを限界まで使用します。多くの" -"オプションが利用可能であり、通常はベンチマークソフトウェアとの役割も果たしま" -"す。そのため、システムのパフォーマンスに関する良いアイディアを得ることもでき" -"ます。" - -msgid "" -"As mentioned, there's currently no way to cleanly migrate from ``nova-" -"network`` to neutron. We recommend that you keep a migration in mind and " -"what that process might involve for when a proper migration path is released." -msgstr "" -"言及されたとおり、 ``nova-network`` から neutron にきれいに移行する方法は現在" -"ありません。適切な移行パスがリリースされるまで、移行を心に留めておき、そのプ" -"ロセスに関わることを推奨します。" - -msgid "" -"As noted in the previous chapter, the number of rules per security group is " -"controlled by the ``quota_security_group_rules``, and the number of allowed " -"security groups per project is controlled by the ``quota_security_groups`` " -"quota." -msgstr "" -"前の章で述べたとおり、セキュリティグループごとのルール数は " -"``quota_security_group_rules`` により制御されます。また、プロジェクトごとに許" -"可されるセキュリティグループ数は ``quota_security_groups`` クォータにより制御" -"されます。" - -msgid "As soon as this setting was fixed, everything worked." -msgstr "全力でこの問題を修正した結果、全てが正常に動作するようになった。" - -msgid "As this would be the server's bonded NIC." -msgstr "これはサーバーの冗長化された(bonded)NIC であるべきだからだ。" - -msgid "" -"As with most architecture choices, the right answer depends on your " -"environment. If you are using existing hardware, you know the disk density " -"of your servers and can determine some decisions based on the options above. " -"If you are going through a procurement process, your user's requirements " -"also help you determine hardware purchases. Here are some examples from a " -"private cloud providing web developers custom environments at AT&T. This " -"example is from a specific deployment, so your existing hardware or " -"procurement opportunity may vary from this. AT&T uses three types of " -"hardware in its deployment:" -msgstr "" -"多くのアーキテクチャーの選択肢と同様に、環境により適切なソリューションは変" -"わって来ます。既存のハードウェアを使用する場合、サーバーのディスク密度を把握" -"し、上記のオプションをもとに意思決定していきます。調達プロセスを行っている場" -"合、ユーザー要件などもハードウェア購入決定の一助となります。ここでは AT&T の " -"Web 開発者にカスタムの環境を提供するプライベートクラウドの例をあげています。" -"この例は、特定のデプロイメントであるため、既存のハードウェアや調達機会はこれ" -"と異なる可能性があります。AT&T は、デプロイメントに 3 種類のハードウェアを使" -"用しています。" - -msgid "" -"As with other removable disk technology, it is important that the operating " -"system is not trying to make use of the disk before removing it. 
On Linux " -"instances, this typically involves unmounting any file systems mounted from " -"the volume. The OpenStack volume service cannot tell whether it is safe to " -"remove volumes from an instance, so it does what it is told. If a user tells " -"the volume service to detach a volume from an instance while it is being " -"written to, you can expect some level of file system corruption as well as " -"faults from whatever process within the instance was using the device." -msgstr "" -"他のリムーバブルディスク技術と同じように、ディスクを取り外す前に、オペレー" -"ティングシステムがそのディスクを使用しないようにすることが重要です。Linux イ" -"ンスタンスにおいて、一般的にボリュームからマウントされているすべてのファイル" -"システムをアンマウントする必要があります。OpenStack Volume Service は、インス" -"タンスから安全にボリュームを取り外すことができるかはわかりません。そのため、" -"指示されたことを実行します。ボリュームに書き込み中にインスタンスからボリュー" -"ムの切断を、ユーザーが Volume Service に指示すると、何らかのレベルのファイル" -"システム破損が起きる可能性があります。それだけでなく、デバイスを使用していた" -"インスタンスの中のプロセスがエラーを起こす可能性もあります。" - -msgid "" -"As your cloud grows, MySQL is utilized more and more. If you suspect that " -"MySQL might be becoming a bottleneck, you should start researching MySQL " -"optimization. The MySQL manual has an entire section dedicated to this " -"topic: `Optimization Overview `_." -msgstr "" -"クラウドが大きくなるにつれて、MySQL がさらに使用されてきます。MySQL がボトル" -"ネックになってきたことが疑われる場合、MySQL 最適化の調査から始めるとよいで" -"しょう。MySQL のマニュアルでは、 `Optimization Overview `_ というセクションがあり、一つ" -"のセクション全部をあててこの話題を扱っています。" - -msgid "" -"Aside from connection failures, RabbitMQ log files are generally not useful " -"for debugging OpenStack related issues. Instead, we recommend you use the " -"RabbitMQ web management interface. Enable it on your cloud controller:" -msgstr "" -"接続エラーは別として、RabbitMQ のログファイルは一般的に OpenStack 関連の問題" -"をデバッグするために役立ちません。代わりに、RabbitMQ の Web 管理インター" -"フェースを使用することを推奨します。クラウドコントローラーで Web 管理インター" -"フェースを有効にするには以下のようにします。" - -msgid "" -"Aside from the direct-to-blueprint pathway, there is another very well-" -"regarded mechanism to influence the development roadmap: the user survey. " -"Found at `OpenStack User Survey `_, " -"it allows you to provide details of your deployments and needs, anonymously " -"by default. Each cycle, the user committee analyzes the results and produces " -"a report, including providing specific information to the technical " -"committee and project team leads." -msgstr "" -"開発ロードマップに影響を与えるために、直接ブループリントに関わる道以外に、非" -"常に高く評価された別の方法があります。ユーザー調査です。 `OpenStack User " -"Survey `_ にあります。基本的に匿名" -"で、お使いの環境の詳細、要望を送ることができます。各サイクルで、ユーザーコ" -"ミッティーが結果を分析して、報告書を作成します。具体的な情報を TC や PTL に提" -"供することを含みます。" - -msgid "Aspects to Watch" -msgstr "ウォッチの観点" - -msgid "Associating Security Groups" -msgstr "セキュリティグループの割り当て" - -msgid "Associating Users with Projects" -msgstr "プロジェクトへのユーザーの割り当て" - -msgid "" -"Associating existing users with an additional project or removing them from " -"an older project is done from the :guilabel:`Projects` page of the dashboard " -"by selecting :guilabel:`Manage Members` from the :guilabel:`Actions` column, " -"as shown in the screenshot below." -msgstr "" -"既存のユーザーを追加のプロジェクトに割り当てる、または古いプロジェクトから削" -"除することは、以下のスクリーンショットにあるとおり、ダッシュボードの :" -"guilabel:`プロジェクト` ページから、:guilabel:`アクション` 列のユーザーの変更" -"を選択することにより実行できます。" - -msgid "" -"At that time, our control services were hosted by another team and we didn't " -"have much debugging information to determine what was going on with the " -"master, and we could not reboot it. That team noted that it failed without " -"alert, but managed to reboot it. After an hour, the cluster had returned to " -"its normal state and we went home for the day." 
-msgstr "" -"この時、我々のコントロールサービスは別のチームによりホスティングされており、" -"我々には現用系サーバー上で何が起こっているのかを調査するための大したデバッグ" -"情報がなく、再起動もできなかった。このチームは警報なしで障害が起こったと連絡" -"してきたが、そのサーバーの再起動を管理していた。1時間後、クラスタは通常状態" -"に復帰し、我々はその日は帰宅した。" - -msgid "" -"At the data center, I was finishing up some tasks and remembered the lock-" -"up. I logged into the new instance and ran :command:`ps aux` again. It " -"worked. Phew. I decided to run it one more time. It locked up." -msgstr "" -"データセンターで、私はいくつかの仕事を済ませると、ロックアップのことを思い出" -"した。私は新しいインスタンスにログインし、再度 :command:`ps aux` を実行した。" -"コマンドは機能した。ふぅ。私はもう一度試してみることにした。今度はロックアッ" -"プした。" - -msgid "" -"At the end of 2012, Cybera (a nonprofit with a mandate to oversee the " -"development of cyberinfrastructure in Alberta, Canada) deployed an updated " -"OpenStack cloud for their `DAIR project `_. A " -"few days into production, a compute node locks up. Upon rebooting the node, " -"I checked to see what instances were hosted on that node so I could boot " -"them on behalf of the customer. Luckily, only one instance." -msgstr "" -"2012年の終わり、Cybera (カナダ アルバータ州にある、サイバーインフラのデプロ" -"イを監督する権限を持つ非営利団体)が、彼らの `DAIR project `_ 用に新しい OpenStack クラウドをデプロイした。サービスイ" -"ンから数日後、あるコンピュートノードがロックアップした。問題のノードの再起動" -"にあたり、私は顧客の権限でインスタンスを起動するため、そのノード上で何のイン" -"スタンスがホスティングされていたかを確認した。幸運にも、インスタンスは1つだ" -"けだった。" - -msgid "" -"At the end of August 2012, a post-secondary school in Alberta, Canada " -"migrated its infrastructure to an OpenStack cloud. As luck would have it, " -"within the first day or two of it running, one of their servers just " -"disappeared from the network. Blip. Gone." -msgstr "" -"2012年8月の終わり、カナダ アルバータ州のある大学はそのインフラを OpenStack ク" -"ラウドに移行した。幸か不幸か、サービスインから1~2日間に、彼らのサーバーの1台" -"がネットワークから消失した。ビッ。いなくなった。" - -msgid "" -"At the same time of finding the bug report, a co-worker was able to " -"successfully reproduce The Issue! How? He used ``iperf`` to spew a ton of " -"bandwidth at an instance. Within 30 minutes, the instance just disappeared " -"from the network." -msgstr "" -"バグ報告を発見すると同時に、同僚が「あの問題」を再現することに成功した!どう" -"やって?彼は ``iperf`` を使用して、インスタンス上で膨大なネットワーク負荷をか" -"けた。30 分後、インスタンスはネットワークから姿を消した。" - -msgid "" -"At the time of writing, OpenStack has more than 3,000 configuration options. " -"You can see them documented at the `OpenStack Configuration Reference " -"`_. " -"This chapter cannot hope to document all of these, but we do try to " -"introduce the important concepts so that you know where to go digging for " -"more information." -msgstr "" -"執筆時点では、OpenStack は 3,000 以上の設定オプションがあります。 `OpenStack " -"Configuration Reference `_ にドキュメント化されています。本章は、これらのすべて" -"をドキュメント化できませんが、どの情報を掘り下げて調べるかを理解できるよう、" -"重要な概念を紹介したいと考えています。" - -msgid "" -"At the very base of any operating system are the hard drives on which the " -"operating system (OS) is installed." -msgstr "" -"オペレーティングシステムの基盤は、オペレーティングシステムがインストールされ" -"るハードドライブです。" - -msgid "Attaching Block Storage" -msgstr "ブロックストレージの接続" - -msgid "Attempt to boot a nova instance in the affected environment." 
-msgstr "影響のある環境において、nova インスタンスを起動できるか試します。" - -msgid "Attempt to list the objects in the ``middleware-test`` container:" -msgstr "" -"``middleware-test`` コンテナーにあるオブジェクトを一覧表示しようとします。" - -msgid "Aug 10, 2012" -msgstr "2012年8月10日" - -msgid "Aug 8, 2013" -msgstr "2013年8月8日" - -msgid "Aug 8, 2014" -msgstr "2014年8月8日" - -msgid "Austin" -msgstr "Austin" - -msgid "Availability zone" -msgstr "アベイラビリティゾーン" - -msgid "Availability zones" -msgstr "アベイラビリティゾーン" - -msgid "Available vCPUs" -msgstr "利用可能な vCPU 数" - -msgid "" -"Back in your DevStack instance on the shell screen, add some metadata to " -"your container to allow the request from the remote machine:" -msgstr "" -"シェル画面において DevStack 用インスタンスに戻り、リモートマシンからのリクエ" -"ストを許可するようなコンテナのメタデータを追加します。" - -msgid "" -"Back up HOT template ``yaml`` files, and the ``/etc/heat/`` directory " -"containing Orchestration configuration files." -msgstr "" -"HOT テンプレートの ``yaml`` ファイル、Orchestration の設定ファイルを含む ``/" -"etc/heat/`` ディレクトリーをバックアップします。" - -msgid "" -"Back up the ``/etc/ceilometer`` directory containing Telemetry configuration " -"files." -msgstr "" -"Telemetry の設定ファイルを含む ``/etc/ceilometer`` ディレクトリーをバックアッ" -"プします。" - -msgid "Backing storage services" -msgstr "バックエンドのストレージサービス" - -msgid "Backup and Recovery" -msgstr "バックアップとリカバリー" - -msgid "" -"Backup and subsequent recovery is one of the first tasks system " -"administrators learn. However, each system has different items that need " -"attention. By taking care of your database, image service, and appropriate " -"file system locations, you can be assured that you can handle any event " -"requiring recovery." -msgstr "" -"バックアップ、その後のリカバリーは、最初に学習するシステム管理の 1 つです。し" -"かしながら、各システムは、それぞれ注意を必要とする項目が異なります。データ" -"ベース、Image service、適切なファイルシステムの場所に注意することにより、リカ" -"バリーを必要とするすべてのイベントを処理できることが保証されます。" - -msgid "Bare metal Deployment (ironic)" -msgstr "Bare metal Deployment (ironic)" - -msgid "" -"Be sure that the instance has successfully booted and is at a login screen " -"before doing the above." -msgstr "" -"上記を実行する前に、インスタンスが正常に起動し、ログイン画面になっていること" -"を確認します。" - -msgid "" -"Because it is recommended to not use partitions on a swift disk, simply " -"format the disk as a whole:" -msgstr "" -"Swift ディスクではパーティションを使用しないことが推奨されるので、単にディス" -"ク全体をフォーマットします。" - -msgid "" -"Because network troubleshooting is especially difficult with virtual " -"resources, this chapter is chock-full of helpful tips and tricks for tracing " -"network traffic, finding the root cause of networking failures, and " -"debugging related services, such as DHCP and DNS." -msgstr "" -"ネットワークのトラブルシューティングは、仮想リソースでとくに難しくなります。" -"この章は、ネットワーク通信の追跡、ネットワーク障害の根本原因の調査、DHCP や " -"DNS などの関連サービスのデバッグに関するヒントとコツがたくさん詰まっていま" -"す。" - -msgid "" -"Because of the high redundancy of Object Storage, dealing with object " -"storage node issues is a lot easier than dealing with compute node issues." -msgstr "" -"オブジェクトストレージの高い冗長性のため、オブジェクトストレージのノードに関" -"する問題を処理することは、コンピュートノードに関する問題を処理するよりも簡単" -"です。" - -msgid "" -"Because without sensible quotas a single tenant could use up all the " -"available resources, default quotas are shipped with OpenStack. You should " -"pay attention to which quota settings make sense for your hardware " -"capabilities." -msgstr "" -"妥当なクォータがないと、単一のテナントが利用可能なリソースをすべて使用してし" -"まう可能性があるため、デフォルトのクォータが OpenStack には含まれています。お" -"使いのハードウェア機能には、どのクォータ設定が適切か注意してください。" - -msgid "" -"Because your cloud is most likely composed of many servers, you must check " -"logs on each of those servers to properly piece an event together. 
A better " -"solution is to send the logs of all servers to a central location so that " -"they can all be accessed from the same area." -msgstr "" -"クラウドは多くのサーバーから構成されるため、各サーバー上にあるイベントログを" -"繋ぎあわせて、ログをチェックしなければなりません。よい方法は全てのサーバーの" -"ログを一ヶ所にまとめ、同じ場所で確認できるようにすることです。" - -msgid "" -"Betsy Hagemeier, a Fanatical Executive Assistant, took care of a room " -"reshuffle and helped us settle in for the week." -msgstr "" -"熱狂的なエグゼクティブアシスタントの Betsy Hagemeier は、部屋の改造の面倒を見" -"てくれて、1週間で解決する手助けをしてくれました。" - -msgid "Bexar" -msgstr "Bexar" - -msgid "Block Storage" -msgstr "ブロックストレージ" - -msgid "Block Storage Creation Failures" -msgstr "ブロックストレージの作成エラー" - -msgid "Block Storage Improvements" -msgstr "Block Storage の改善" - -msgid "" -"Block Storage is considered a stable project, with wide uptake and a long " -"track record of quality drivers. The team has discussed many areas of work " -"at the summits, including better error reporting, automated discovery, and " -"thin provisioning features." -msgstr "" -"Block Storage は、品質ドライバーの幅広い理解と長く取られている記録を持つ、安" -"定したプロジェクトと考えられています。このチームは、よりよいエラー報告、自動" -"探索、シンプロビジョニング機能など、さまざまな領域の作業をサミットで議論しま" -"した。" - -msgid "Block Storage nodes" -msgstr "Block Storage ノード" - -msgid "Block Storage service" -msgstr "Block Storage サービス" - -msgid "" -"Block Storage service - Updating the Block Storage service only requires " -"restarting the service." -msgstr "" -"Block Storage サービス - Block Storage サービスの更新は、サービスの再起動のみ" -"を必要とします。" - -msgid "" -"Boolean value that indicates whether the flavor is available to all users or " -"private. Private flavors do not get the current tenant assigned to them. " -"Defaults to ``True``." -msgstr "" -"フレーバーがすべてのユーザーに利用可能であるか、プライベートであるかを示す論" -"理値。プライベートなフレーバーは、現在のテナントをそれらに割り当てません。デ" -"フォルトは ``True`` です。" - -msgid "Boot a test server:" -msgstr "テストサーバーを起動します。" - -msgid "" -"Both Compute and Block Storage rely on schedulers to determine where to " -"place virtual machines or volumes. In Havana, the Compute scheduler " -"underwent significant improvement, while in Icehouse it was the scheduler in " -"Block Storage that received a boost. Further down the track, an effort " -"started this cycle that aims to create a holistic scheduler covering both " -"will come to fruition. Some of the work that was done in Kilo can be found " -"under the `Gantt project `_." -msgstr "" -"Compute と Block Storage はどちらも、仮想マシンやボリュームを配置する場所を決" -"めるためにスケジューラーに頼っています。Havana では、Compute のスケジューラー" -"が大幅に改善されました。これは、Icehouse において支援を受けた Block Storage " -"におけるスケジューラーでした。さらに掘り下げて追跡すると、どちらも取り扱う全" -"体的なスケジューラーを作成することを目指した、このサイクルを始めた努力が実を" -"結ぶでしょう。Kilo において実行されたいくつかの作業は、`Gantt project " -"`_ にあります。" - -msgid "" -"Both Ubuntu and Red Hat Enterprise Linux include mechanisms for configuring " -"the operating system, including preseed and kickstart, that you can use " -"after a network boot. Typically, these are used to bootstrap an automated " -"configuration system. Alternatively, you can use an image-based approach for " -"deploying the operating system, such as systemimager. You can use both " -"approaches with a virtualized infrastructure, such as when you run VMs to " -"separate your control services and physical infrastructure." 
-msgstr "" -"Ubuntu と Red Hat Enterprise Linux にはいずれも、ネットワークブート後に使用可" -"能なpreseed や kickstart といった、オペレーティングシステムを設定するための仕" -"組みがあります。これらは、典型的には自動環境設定システムのブートストラップに" -"使用されます。他の方法としては、systemimager のようなイメージベースのオペレー" -"ティングシステムのデプロイメント手法を使うこともできます。これらの手法はどち" -"らも、物理インフラストラクチャーと制御サービスを分離するために仮想マシンを実" -"行する場合など、仮想化基盤と合わせて使用できます。" - -msgid "Burn-in Testing" -msgstr "エージング試験" - -msgid "" -"But how can you tell whether images are being successfully uploaded to the " -"Image service? Maybe the disk that Image service is storing the images on is " -"full or the S3 back end is down. You could naturally check this by doing a " -"quick image upload:" -msgstr "" -"しかし、Image service にイメージが正しくアップロードされたことをどのように知" -"ればいいのでしょうか? もしかしたら、Image service が保管しているイメージの" -"ディスクが満杯、もしくは S3 のバックエンドがダウンしているかもしれません。簡" -"易的なイメージアップロードを行なうことでこれをチェックすることができます。" - -msgid "" -"By comparing a tenant's hard limit with their current resource usage, you " -"can see their usage percentage. For example, if this tenant is using 1 " -"floating IP out of 10, then they are using 10 percent of their floating IP " -"quota. Rather than doing the calculation manually, you can use SQL or the " -"scripting language of your choice and create a formatted report:" -msgstr "" -"テナントのハード制限と現在の使用量を比較することにより、それらの使用割合を確" -"認できます。例えば、このテナントが Floating IP を 10 個中 1 個使用している場" -"合、Floating IP クォータの 10% を使用していることになります。手動で計算するよ" -"り、SQL やお好きなスクリプト言語を使用して、定型化されたレポートを作成できま" -"す。" - -msgid "By default, Object Storage logs to syslog." -msgstr "デフォルトで Object Storage は syslog にログを出力します。" - -msgid "" -"By mistake, I configured OpenStack to attach all tenant VLANs to vlan20 " -"instead of bond0 thereby stacking one VLAN on top of another. This added an " -"extra 4 bytes to each packet and caused a packet of 1504 bytes to be sent " -"out which would cause problems when it arrived at an interface that only " -"accepted 1500." -msgstr "" -"ミスにより、私は全てのテナント VLAN を bond0 の代わりに vlan20 にアタッチする" -"よう OpenStack を設定した。これにより1つの VLAN が別の VLAN の上に積み重な" -"り、各パケットに余分に4バイトが追加され、送信されるパケットサイズが 1504 バ" -"イトになる原因となった。これがパケットサイズ 1500 のみ許容するインターフェー" -"スに到達した際、問題の原因となったのだった!" - -msgid "" -"By modifying your configuration setup, you can set up IPv6 when using ``nova-" -"network`` for networking, and a tested setup is documented for FlatDHCP and " -"a multi-host configuration. The key is to make ``nova-network`` think a " -"``radvd`` command ran successfully. The entire configuration is detailed in " -"a Cybera blog post, `“An IPv6 enabled cloud” `_." -msgstr "" -"セットアップした設定を変更することにより、ネットワークに ``nova-network`` を" -"使用している場合に、IPv6 をセットアップできます。テストされたセットアップ環境" -"が FlatDHCP とマルチホストの設定向けにドキュメント化されています。重要な点" -"は、``radvd`` を正常に実行されたと、``nova-network`` が考えるようにすることで" -"す。設定全体の詳細は、Cybera のブログ記事 `“An IPv6 enabled cloud” `_ にありま" -"す。" - -msgid "" -"By running this command periodically and keeping a record of the result, you " -"can create a trending report over time that shows whether your ``nova-api`` " -"usage is increasing, decreasing, or keeping steady." -msgstr "" -"このコマンドを定期的に実行し結果を記録することで、トレンドレポートを作ること" -"ができます。これにより ``/var/log/nova/nova-api.log`` の使用量が増えているの" -"か、減っているのか、安定しているのか、を知ることができます。" - -msgid "" -"By taking this script and rolling it into an alert for your monitoring " -"system (such as Nagios), you now have an automated way of ensuring that " -"image uploads to the Image Catalog are working." -msgstr "" -"このスクリプトを(Nagiosのような)監視システムに組込むことで、イメージカタログ" -"のアップロードが動作していることを自動的に確認することができます。" - -msgid "CERN" -msgstr "CERN" - -msgid "" -"CONF.node_availability_zone has been renamed to CONF." 
-"default_availability_zone and is used only by the ``nova-api`` and ``nova-" -"scheduler`` services." -msgstr "" -"CONF.node_availability_zone は、CONF.default_availability_zone に名前が変更さ" -"れ、``nova-api`` および ``nova-scheduler`` サービスのみで使用されます。" - -msgid "CONF.node_availability_zone still works but is deprecated." -msgstr "CONF.node_availability_zone は今も機能しますが、非推奨扱いです。" - -msgid "Cactus" -msgstr "Cactus" - -msgid "Can instances launch and be destroyed?" -msgstr "インスタンスの起動と削除が可能か?" - -msgid "Can objects be stored and deleted?" -msgstr "オブジェクトの保存と削除は可能か?" - -msgid "Can users be created?" -msgstr "ユーザの作成は可能か?" - -msgid "Can volumes be created and destroyed?" -msgstr "ボリュームの作成と削除は可能か?" - -msgid "Capacity Planning" -msgstr "キャパシティプランニング" - -msgid "Cells" -msgstr "セル" - -msgid "" -"Cells and regions, which segregate an entire cloud and result in running " -"separate Compute deployments." -msgstr "" -"セルおよびリージョン。クラウド全体を分離し、個別にコンピュートデプロイメント" -"を稼働します。" - -msgid "Centrally Managing Logs" -msgstr "ログの集中管理" - -msgid "Change access rules for shares, reset share state" -msgstr "共有のアクセスルールの変更、共有状態のリセット" - -msgid "Change to the directory where Object Storage is installed:" -msgstr "Object Storage がインストールされるディレクトリーを変更します。" - -msgid "Check cloud usage:" -msgstr "クラウドの使用量を確認します。" - -msgid "Check for instances in a failed or weird state and investigate why." -msgstr "故障または異常になっているインスタンスを確認し、理由を調査します。" - -msgid "Check for operator accounts that should be removed." -msgstr "削除すべきオペレーターアカウントを確認します。" - -msgid "Check for security patches and apply them as needed." -msgstr "セキュリティパッチを確認し、必要に応じて適用します。" - -msgid "Check for user accounts that should be removed." -msgstr "削除すべきユーザーアカウントを確認します。" - -msgid "Check memory consumption:" -msgstr "メモリー消費を確認します。" - -msgid "Check the attributes of the updated Share1:" -msgstr "更新された Share1 の属性を確認します。" - -msgid "Check the port connection using the netcat utility:" -msgstr "netcat ユーティリティーを使用してポート接続を確認します。" - -msgid "Check the ports for the lost IP address and update the name:" -msgstr "失われた IP アドレス向けのポートを確認して、その名前を更新します。" - -msgid "Check usage and trends over the past month." -msgstr "この 1 か月における使用量および傾向を確認します。" - -msgid "Check your monitoring system for alerts and act on them." -msgstr "監視システムのアラートを確認し、それらに対処します。" - -msgid "Check your ticket queue for new tickets." -msgstr "チケットキューの新しいチケットを確認します。" - -msgid "" -"Clean up after an OpenStack upgrade (any unused or new services to be aware " -"of?)." -msgstr "" -"OpenStack のアップグレード後に後始末を行います (未使用または新しいサービスを" -"把握していますか?)。" - -msgid "" -"Clean up by clearing all mirrors on ``br-int`` and deleting the dummy " -"interface:" -msgstr "" -"``br-int`` にあるすべてのミラーを解除して、ダミーインターフェースを削除するこ" -"とにより、クリーンアップします。" - -msgid "Click the :guilabel:`Create Project` button." -msgstr ":guilabel:`プロジェクトの作成` ボタンをクリックします。" - -msgid "Cloud (General)" -msgstr "Cloud (General)" - -msgid "Cloud Controller and Storage Proxy Failures and Maintenance" -msgstr "クラウドコントローラーとストレージプロキシの故障とメンテナンス" - -msgid "" -"Cloud computing is quite an advanced topic, and this book requires a lot of " -"background knowledge. However, if you are fairly new to cloud computing, we " -"recommend that you make use of the :doc:`common/glossary` at the back of the " -"book, as well as the online documentation for OpenStack and additional " -"resources mentioned in this book in :doc:`app-resources`." 
-msgstr "" -"クラウドコンピューティングは非常に高度な話題です。また、本書は多くの基礎知識" -"を必要とします。しかしながら、クラウドコンピューティングに慣れていない場合、" -"本書の最後にある :doc:`common/glossary` 、OpenStack のオンラインドキュメン" -"ト、:doc:`app-resources` にある本書で参照されている参考資料を使うことを推奨し" -"ます。" - -msgid "Cloud controller" -msgstr "クラウドコントローラー" - -msgid "Cloud controller receives the renewal request and sends a response." -msgstr "クラウドコントローラーは更新リクエストを受信し、レスポンスを返す。" - -msgid "Cloud controller receives the second request and sends a new response." -msgstr "" -"クラウドコントローラーは2度めのリクエストを受信し、新しいレスポンスを返す。" - -msgid "Command-Line Tools" -msgstr "コマンドラインツール" - -msgid "" -"Compare an attribute in the resource with an attribute extracted from the " -"user's security credentials and evaluates successfully if the comparison is " -"successful. For instance, ``\"tenant_id:%(tenant_id)s\"`` is successful if " -"the tenant identifier in the resource is equal to the tenant identifier of " -"the user submitting the request." -msgstr "" -"リソースの属性をユーザーのセキュリティクレデンシャルから抽出した属性と比較" -"し、一致した場合に成功と評価されます。たとえば、リソースのテナント識別子がリ" -"クエストを出したユーザーのテナント識別子と一致すれば、 ``\"tenant_id:" -"%(tenant_id)s\"`` が成功します。" - -msgid "Compute" -msgstr "コンピュート" - -msgid "Compute Node Failures and Maintenance" -msgstr "コンピュートノードの故障とメンテナンス" - -msgid "Compute nodes" -msgstr "コンピュートノード" - -msgid "" -"Compute nodes can fail the same way a cloud controller can fail. A " -"motherboard failure or some other type of hardware failure can cause an " -"entire compute node to go offline. When this happens, all instances running " -"on that compute node will not be available. Just like with a cloud " -"controller failure, if your infrastructure monitoring does not detect a " -"failed compute node, your users will notify you because of their lost " -"instances." -msgstr "" -"コンピュートノードは、クラウドコントローラーの障害と同じように故障します。マ" -"ザーボードや他の種類のハードウェア障害により、コンピュートノード全体がオフラ" -"インになる可能性があります。これが発生した場合、そのコンピュートノードで動作" -"中のインスタンスがすべて利用できなくなります。ちょうどクラウドコントローラー" -"が発生した場合のように、インフラ監視機能がコンピュートノードの障害を検知しな" -"くても、インスタンスが失われるので、ユーザーが気づくでしょう。" - -msgid "" -"Compute nodes have 24 to 48 cores, with at least 4 GB of RAM per core and " -"approximately 40 GB of ephemeral storage per core." -msgstr "" -"コンピュートノードは 24~48コアがあり、1コアあたり 4GB 以上の RAM があり、1" -"コアあたり約 40GB 以上の一時ストレージがあります。" - -msgid "Compute quota descriptions" -msgstr "Compute のクォータの説明" - -msgid "Compute service - Edit the configuration file and restart the service." -msgstr "Compute サービス - 設定ファイルを編集して、サービスを再起動します。" - -msgid "Compute service, including networking components." -msgstr "Compute サービス。ネットワークコンポーネントも含む。 " - -msgid "Conclusion" -msgstr "まとめ" - -msgid "Configuration Management" -msgstr "構成管理" - -msgid "Configuration changes to ``nova.conf``." -msgstr "``nova.conf`` の設定を変更" - -msgid "Connect the qemu-nbd device to the disk." -msgstr "qemu-nbd デバイスをディスクに接続します。" - -msgid "Connect the qemu-nbd device to the disk:" -msgstr "qemu-nbd デバイスをディスクに接続します。" - -msgid "" -"Consider adopting structure and options from the service configuration files " -"and merging them with existing configuration files. The `OpenStack " -"Configuration Reference `_ contains new, updated, and deprecated options for most services." -msgstr "" -"このサービス設定ファイルから構造とオプションを適用して、既存の設定ファイルに" -"マージすることを検討してください。ほとんどのサービスは、`OpenStack " -"Configuration Reference `_ に新しいオプション、更新されたオプション、非推奨になったオプションがあり" -"ます。" - -msgid "" -"Consider the approach to upgrading your environment. You can perform an " -"upgrade with operational instances, but this is a dangerous approach. 
You " -"might consider using live migration to temporarily relocate instances to " -"other compute nodes while performing upgrades. However, you must ensure " -"database consistency throughout the process; otherwise your environment " -"might become unstable. Also, don't forget to provide sufficient notice to " -"your users, including giving them plenty of time to perform their own " -"backups." -msgstr "" -"お使いの環境をアップグレードする方法を検討します。運用中のインスタンスがある" -"状態でアップグレードを実行できます。しかし、これは非常に危険なアプローチで" -"す。アップグレードの実行中は、ライブマイグレーションを使用して、インスタンス" -"を別のコンピュートノードに一時的に再配置することを考慮すべきでしょう。しかし" -"ながら、プロセス全体を通して、データベースの整合性を担保する必要があります。" -"そうでなければ、お使いの環境が不安定になるでしょう。また、ユーザーに十分に注" -"意を促すことを忘れてはいけません。バックアップを実行するために時間の猶予を与" -"えることも必要です。" - -msgid "" -"Consider the example where you want to take a snapshot of a persistent block " -"storage volume, detected by the guest operating system as ``/dev/vdb`` and " -"mounted on ``/mnt``. The fsfreeze command accepts two arguments:" -msgstr "" -"永続ブロックストレージのスナップショットを取得したい例を検討します。ゲストオ" -"ペレーティングシステムにより ``/dev/vdb`` として認識され、 ``/mnt`` にマウン" -"トされているとします。fsfreeze コマンドが 2 つの引数を受け取ります:" - -msgid "Consider the following example:" -msgstr "次のような例を考えてみましょう。" - -msgid "" -"Consider the impact of an upgrade to users. The upgrade process interrupts " -"management of your environment including the dashboard. If you properly " -"prepare for the upgrade, existing instances, networking, and storage should " -"continue to operate. However, instances might experience intermittent " -"network interruptions." -msgstr "" -"アップグレードによるユーザーへの影響を考慮してください。アップグレードプロセ" -"スは、ダッシュボードを含む、環境の管理機能を中断します。このアップグレードを" -"正しく準備する場合、既存のインスタンス、ネットワーク、ストレージは通常通り動" -"作し続けるべきです。しかしながら、インスタンスがネットワークの中断を経験する" -"かもしれません。" - -msgid "" -"Consider updating your SQL server configuration as described in the " -"`Installation Tutorials and Guides `_." -msgstr "" -"`Installation Tutorials and Guides `_ に記載されているように、SQL サーバーの設定の更新を考" -"慮してください。" - -msgid "" -"Consider using a public cloud to test the scalability limits of your cloud " -"controller configuration. Most public clouds bill by the hour, which means " -"it can be inexpensive to perform even a test with many nodes." -msgstr "" -"お使いのクラウドコントローラーの設定に関するスケーラビリティーの限界をテスト" -"するために、パブリッククラウドを使用することを考慮します。多くのパブリックク" -"ラウドは時間単位で課金されます。つまり、多くのノードを用いてテストしても、そ" -"れほど費用がかかりません。" - -msgid "" -"Considered experimental. A new service, nova-cells. Each cell has a full " -"nova installation except nova-api." -msgstr "" -"試験的とみなされます。新しいサービス nova-cells。各セルには nova-api 以外の" -"全 nova 設定が含まれています。" - -msgid "Console (boot up messages) for VM instances:" -msgstr "仮想マシンインスタンスのコンソール (起動メッセージ):" - -msgid "Container quotas" -msgstr "コンテナーのクォータ" - -msgid "" -"Contains a reference listing of all configuration options for core and " -"integrated OpenStack services by release version" -msgstr "" -"リリースバージョン毎に、OpenStack のコアサービス、統合されたサービスのすべて" -"の設定オプションの一覧が載っています" - -msgid "" -"Contains each floating IP address that was added to Compute. This table is " -"related to the ``fixed_ips`` table by way of the ``floating_ips." -"fixed_ip_id`` column." -msgstr "" -"Compute に登録された各 Floating IP アドレス。このテーブルは ``floating_ips." -"fixed_ip_id`` 列で ``fixed_ips`` テーブルと関連付けられます。" - -msgid "" -"Contains each possible IP address for the subnet(s) added to Compute. This " -"table is related to the ``instances`` table by way of the ``fixed_ips." -"instance_uuid`` column." 
-msgstr "" -"nova に登録されたサブネットで利用可能なIPアドレス。このテーブルは " -"``fixed_ips.instance_uuid`` 列で ``instances`` テーブルと関連付けられます。" - -msgid "Contains guidelines for designing an OpenStack cloud" -msgstr "OpenStack クラウドの設計に関するガイドライン" - -msgid "" -"Contains how-to information for managing an OpenStack cloud as needed for " -"your use cases, such as storage, computing, or software-defined-networking" -msgstr "" -"あなたのユースケースに合わせて、ストレージ、コンピューティング、Software-" -"defined-networking など OpenStack クラウドを管理する方法が書かれています" - -msgid "Contents" -msgstr "内容" - -msgid "" -"Continuing the diagnosis the next morning was kick started by another " -"identical failure. We quickly got the message queue running again, and tried " -"to work out why Rabbit was suffering from so much network traffic. Enabling " -"debug logging on nova-api quickly brought understanding. A ``tail -f /var/" -"log/nova/nova-api.log`` was scrolling by faster than we'd ever seen before. " -"CTRL+C on that and we could plainly see the contents of a system log spewing " -"failures over and over again - a system log from one of our users' instances." -msgstr "" -"翌朝の継続調査は別の同様の障害でいきなり始まった。我々は急いで RabbitMQ サー" -"バーを再起動し、何故 RabbitMQ がそのような過剰なネットワーク負荷に直面してい" -"るのかを調べようとした。nova-api のデバッグログを出力することにより、理由はす" -"ぐに判明した。``tail -f /var/log/nova/nova-api.log`` は我々が見たこともない速" -"さでスクロールしていた。CTRL+C でコマンドを止め、障害を吐き出していたシステム" -"ログの内容をはっきり目にすることが出来た。-我々のユーザの1人のインスタンス" -"からのシステムログだった。" - -msgid "" -"Copy contents of configuration backup directories that you created during " -"the upgrade process back to ``/etc/`` directory." -msgstr "" -"アップグレード作業中に作成した、設定ディレクトリーのバックアップの中身を ``/" -"etc/`` にコピーします。" - -msgid "" -"Copy the code as shown below into ``ip_whitelist.py``. The following code is " -"a middleware example that restricts access to a container based on IP " -"address as explained at the beginning of the section. Middleware passes the " -"request on to another application. This example uses the swift \"swob\" " -"library to wrap Web Server Gateway Interface (WSGI) requests and responses " -"into objects for swift to interact with. When you're done, save and close " -"the file." -msgstr "" -"以下の示すコードを ``ip_whitelist.py`` にコピーします。以下のコードは、このセ" -"クションの初めに説明されたように、IP アドレスに基づいてコンテナーへのアクセス" -"を制限するミドルウェアの例です。ミドルウェアは、他のアプリケーションへのリク" -"エストを通過させます。この例は、swift \"swob\" ライブラリーを使用して、swift " -"が通信するオブジェクトに関する Web Server Gateway Interface (WSGI) のリクエス" -"トとレスポンスをラップします。これを実行したとき、ファイルを保存して閉じま" -"す。" - -msgid "Create Share" -msgstr "共有の作成" - -msgid "Create Snapshots" -msgstr "スナップショットの作成" - -msgid "Create a Share Network" -msgstr "共有ネットワークの作成" - -msgid "" -"Create a clone of your automated configuration infrastructure with changed " -"package repository URLs." -msgstr "" -"変更したパッケージリポジトリー URL を用いて、自動化された設定インフラストラク" -"チャーのクローンを作成する。" - -msgid "Create a container called ``middleware-test``:" -msgstr "``middleware-test`` という名前のコンテナーを作成します。" - -msgid "Create a port on the ``Public_AGILE`` network:" -msgstr "``Public_AGILE`` ネットワークにポートを作成します。" - -msgid "Create a public share using :command:`manila create`." 
-msgstr ":command:`manila create` を使用して、パブリック共有を作成します。" - -msgid "Create a share network" -msgstr "共有ネットワークの作成" - -msgid "Create an OpenStack Development Environment" -msgstr "OpenStack 開発環境の作成" - -msgid "Create and bring up a dummy interface, ``snooper0``:" -msgstr "ダミーインターフェース ``snooper0`` を作成して起動します。" - -msgid "Create context" -msgstr "コンテキストの作成" - -msgid "" -"Create mirror of ``patch-tun`` to ``snooper0`` (returns UUID of mirror port):" -msgstr "" -"``patch-tun`` のミラーを ``snooper0`` に作成します (ミラーポートの UUID を返" -"します)。" - -msgid "Create share" -msgstr "共有の作成" - -msgid "Create share networks" -msgstr "共有ネットワークの作成" - -msgid "Create snapshots" -msgstr "スナップショットの作成" - -msgid "Create the ``ip_scheduler.py`` Python source code file:" -msgstr "``ip_scheduler.py`` Python ソースコードファイルを作成します。" - -msgid "Create the ``ip_whitelist.py`` Python source code file:" -msgstr "``ip_whitelist.py`` Python ソースコードファイルを作成します。" - -msgid "Create ways to automatically test these actions." -msgstr "それらのアクションに対して自動テストを作成する" - -msgid "Create, update, delete, and force-delete shares" -msgstr "共有の作成、更新、削除、強制削除" - -msgid "Creating New Users" -msgstr "新規ユーザーの作成" - -msgid "Customization" -msgstr "カスタマイズ" - -msgid "Customizing Authorization" -msgstr "権限のカスタマイズ" - -msgid "Customizing Object Storage (Swift) Middleware" -msgstr "Object Storage (Swift) ミドルウェアのカスタマイズ" - -msgid "Customizing the Dashboard (Horizon)" -msgstr "Dashboard (Horizon) のカスタマイズ" - -msgid "Customizing the OpenStack Compute (nova) Scheduler" -msgstr "OpenStack Compute (nova) スケジューラーのカスタマイズ" - -msgid "DAIR" -msgstr "DAIR" - -msgid "" -"DAIR is hosted at two different data centers across Canada: one in Alberta " -"and the other in Quebec. It consists of a cloud controller at each location, " -"although, one is designated the \"master\" controller that is in charge of " -"central authentication and quotas. This is done through custom scripts and " -"light modifications to OpenStack. DAIR is currently running Havana." -msgstr "" -"DAIR はカナダの2つの異なるデータセンタ(1つはアルバータ州、もう1つはケベック" -"州)でホスティングされています。各拠点にはそれぞれクラウドコントローラがあり" -"ますが、その1つが「マスター」コントローラーとして、認証とクォータ管理を集中" -"して行うよう設計されています。これは、特製スクリプトと OpenStack の軽微な改造" -"により実現されています。DAIR は現在、Havana で運営されています。" - -msgid "" -"DHCP agents running on OpenStack networks run in namespaces similar to the " -"l3-agents. DHCP namespaces are named ``qdhcp-`` and have a TAP device " -"on the integration bridge. Debugging of DHCP issues usually involves working " -"inside this network namespace." -msgstr "" -"OpenStack ネットワークで動作している DHCP エージェントは、l3-agent と同じよう" -"な名前空間で動作します。DHCP 名前空間は、 ``qdhcp-`` という名前を持ち、" -"統合ブリッジに TAP デバイスを持ちます。DHCP の問題のデバッグは、通常この名前" -"空間の中での動作に関連します。" - -msgid "" -"DHCP traffic uses UDP. The client sends from port 68 to port 67 on the " -"server. Try to boot a new instance and then systematically listen on the " -"NICs until you identify the one that isn't seeing the traffic. To use " -"``tcpdump`` to listen to ports 67 and 68 on br100, you would do:" -msgstr "" -"DHCP トラフィックは UDP を使います。そして、クライアントは 68 番ポートから" -"サーバーの 67 番ポートへパケットを送信します。新しいインスタンスを起動し、機" -"械的にNICをリッスンしてください。トラフィックに現れない通信を特定できるまで行" -"います。 ``tcpdump`` で br100 上のポート 67、68 をリッスンするには、こうしま" -"す。" - -msgid "DNS service (designate)" -msgstr "DNS サービス (designate)" - -msgid "Daily" -msgstr "日次" - -msgid "" -"Dashboard - In typical environments, updating Dashboard only requires " -"restarting the Apache HTTP service." 
-msgstr "" -"Dashboard - 一般的な環境では、 Dashboard を更新するのに必要な作業は Apache " -"HTTP サービスの再起動のみです。" - -msgid "Dashboard node" -msgstr "ダッシュボードサービス" - -msgid "Data processing service for OpenStack (sahara)" -msgstr "OpenStack の Data Processing サービス (sahara)" - -msgid "Database Backups" -msgstr "データベースのバックアップ" - -msgid "Database Connectivity" -msgstr "データベース接続性" - -msgid "Database as a Service (trove)" -msgstr "Database as a Service (trove)" - -msgid "Databases" -msgstr "データベース" - -msgid "Date" -msgstr "リリース日" - -msgid "Dealing with Network Namespaces" -msgstr "ネットワーク名前空間への対応" - -msgid "Debugging DHCP Issues with nova-network" -msgstr "nova-network の DHCP 問題の デバッグ" - -msgid "Debugging DNS Issues" -msgstr "DNS の問題をデバッグする" - -msgid "Dec 13, 2012" -msgstr "2012年12月13日" - -msgid "Dec 16, 2013" -msgstr "2013年12月16日" - -msgid "" -"Decrease DHCP timeouts by modifying the :file:`/etc/nova/nova.conf` file on " -"the compute nodes back to the original value for your environment." -msgstr "" -"コンピュートノードにおいて :file:`/etc/nova/nova.conf` ファイルを変更すること" -"により、DHCP タイムアウトを元の環境の値に減らして戻します。" - -msgid "" -"Dedicate entire disks to certain partitions. For example, you could allocate " -"disk one and two entirely to the boot, root, and swap partitions under a " -"RAID 1 mirror. Then, allocate disk three and four entirely to the LVM " -"partition, also under a RAID 1 mirror. Disk I/O should be better because I/O " -"is focused on dedicated tasks. However, the LVM partition is much smaller." -msgstr "" -"全ディスク領域を特定のパーティションに割り当てます。例えば、ディスク 1 と 2 " -"すべてを RAID 1 ミラーとして boot、root、swapパーティションに割り当てます。そ" -"して、ディスク 3 と 4 すべてを、同様に RAID 1 ミラーとしてLVMパーティションに" -"割り当てます。I/O は専用タスクにフォーカスするため、ディスクの I/O は良くなる" -"はずです。しかし、LVM パーティションははるかに小さくなります。" - -msgid "Default drop rule for unmatched traffic." -msgstr "一致しない通信のデフォルト破棄ルール。" - -msgid "Define new share types" -msgstr "新しい共有種別の作成" - -msgid "Delete Share" -msgstr "共有の削除" - -msgid "" -"Delete the instance and create a new instance using the ``--nic port-id`` " -"option." -msgstr "" -"インスタンスを削除し、``--nic port-id`` オプションを使用して新しいインスタン" -"スを作成します。" - -msgid "Delete the ports that are not needed:" -msgstr "必要ないポートを削除します。" - -msgid "Deleting Images" -msgstr "イメージの削除" - -msgid "" -"Depending on the type of server, the contents and order of your package list " -"might vary from this example." -msgstr "" -"サーバーの種類に応じて、パケット一覧の内容や順番がこの例と異なるかもしれませ" -"ん。" - -msgid "" -"Depending on your specific configuration, upgrading all packages might " -"restart or break services supplemental to your OpenStack environment. For " -"example, if you use the TGT iSCSI framework for Block Storage volumes and " -"the upgrade includes new packages for it, the package manager might restart " -"the TGT iSCSI services and impact connectivity to volumes." 
-msgstr "" -"お使いの設定によっては、すべてのパッケージを更新することにより、OpenStack 環" -"境の補助サービスを再起動または破壊するかもしれません。例えば、Block Storage " -"ボリューム向けに TGT iSCSI フレームワークを使用していて、それの新しいパッケー" -"ジがアップグレードに含まれる場合、パッケージマネージャーが TGT iSCSI サービス" -"を再起動して、ボリュームへの接続性に影響を与えるかもしれません。" - -msgid "Deployment" -msgstr "デプロイ" - -msgid "Deprecated" -msgstr "非推奨" - -msgid "Deprecation of Nova Network" -msgstr "nova-network の非推奨" - -msgid "" -"Describes a manual installation process, as in, by hand, without automation, " -"for multiple distributions based on a packaging system:" -msgstr "" -"自動化せずに、手動で行う場合のインストール手順について説明しています。パッ" -"ケージングシステムがある複数のディストリビューション向けのインストールガイド" -"があります。" - -msgid "" -"Describes potential strategies for making your OpenStack services and " -"related controllers and data stores highly available" -msgstr "" -"OpenStack サービス、関連するコントローラーやデータストアを高可用にするために" -"取りうる方策に説明しています" - -msgid "Description" -msgstr "説明" - -msgid "" -"Design and create an architecture for your first nontrivial OpenStack cloud. " -"After you read this guide, you'll know which questions to ask and how to " -"organize your compute, networking, and storage resources and the associated " -"software packages." -msgstr "" -"初めての本格的な OpenStack クラウドのアーキテクチャーの設計と構築。この本を読" -"み終えると、コンピュート、ネットワーク、ストレージのリソースを選ぶにはどんな" -"質問を自分にすればよいのか、どのように組み上げればよいのかや、どんなソフト" -"ウェアパッケージが必要かが分かることでしょう。" - -msgid "" -"Designate a server as the central logging server. The best practice is to " -"choose a server that is solely dedicated to this purpose. Create a file " -"called ``/etc/rsyslog.d/server.conf`` with the following contents:" -msgstr "" -"集中ログサーバーとして使用するサーバーを決めます。ログ専用のサーバーを利用す" -"るのが最も良いです。 ``/etc/rsyslog.d/server.conf`` を次のように作成します。" - -msgid "" -"Despite only outputting the newly added rule, this operation is additive:" -msgstr "新しく追加されたルールのみが出力されますが、この操作は追加操作です:" - -msgid "" -"Determine which OpenStack packages are installed on your system. Use the :" -"command:`dpkg --get-selections` command. Filter for OpenStack packages, " -"filter again to omit packages explicitly marked in the ``deinstall`` state, " -"and save the final output to a file. For example, the following command " -"covers a controller node with keystone, glance, nova, neutron, and cinder:" -msgstr "" -"お使いの環境にインストールされている OpenStack パッケージを判断します。 :" -"command:`dpkg --get-selections` コマンドを使用します。OpenStack パッケージを" -"フィルターします。再びフィルターして、明示的に ``deinstall`` 状態になっている" -"パッケージを省略します。最終出力をファイルに保存します。例えば、以下のコマン" -"ドは、keystone、glance、nova、neutron、cinder を持つコントローラーノードを取" -"り扱います。" - -msgid "Determine which servers the RabbitMQ alarms are coming from." -msgstr "RabbitMQ のアラームが発生しているサーバーを特定します。" - -msgid "Determining Which Component Is Broken" -msgstr "故障しているコンポーネントの特定" - -msgid "" -"Develop an upgrade procedure and assess it thoroughly by using a test " -"environment similar to your production environment." -msgstr "" -"アップグレード手順を作成し、本番環境と同じようなテスト環境を使用して、全体を" -"評価します。" - -msgid "Diablo" -msgstr "Diablo" - -msgid "Diagnose Your Compute Nodes" -msgstr "コンピュートノードの診断" - -msgid "Diane Fleming" -msgstr "Diane Fleming" - -msgid "" -"Diane works on the OpenStack API documentation tirelessly. She helped out " -"wherever she could on this project." -msgstr "" -"Diane は OpenStack API ドキュメントプロジェクトで非常に熱心に活動しています。" -"このプロジェクトでは自分ができるところであれば、どこでも取り組んでくれまし" -"た。" - -msgid "Differences Between Various Drivers" -msgstr "ドライバーによる違い" - -msgid "Direct incoming traffic from VM to the security group chain." -msgstr "仮想マシンからセキュリティグループチェインへの直接受信。" - -msgid "Direct packets associated with a known session to the RETURN chain." 
-msgstr "既知のセッションに関連付けられたパケットの RETURN チェインへの転送。" - -msgid "Direct traffic from the VM interface to the security group chain." -msgstr "仮想マシンインスタンスからセキュリティグループチェインへの直接通信。" - -msgid "" -"Disable scheduling of new VMs to the node, optionally providing a reason " -"comment:" -msgstr "" -"新規 VM のノードへのスケジューリングを無効化し、理由をコメントにします。" - -msgid "Disappearing Images" -msgstr "イメージの消失" - -msgid "Disconnect the qemu-nbd device." -msgstr "qemu-nbd デバイスを切断します。" - -msgid "" -"Discrete regions with separate API endpoints and no coordination between " -"regions." -msgstr "" -"リージョンごとに別々のAPIエンドポイントが必要で、リージョン間で協調する必要が" -"ない場合" - -msgid "Disk" -msgstr "ディスク" - -msgid "Disk partitioning and disk array setup for scalability" -msgstr "" -"スケーラビリティ確保に向けたディスクのパーティショニングおよびディスク配列設" -"定" - -msgid "Disk space" -msgstr "ディスク領域" - -msgid "Disk space is cheap these days. Data recovery is not." -msgstr "今日、ディスクスペースは安価である。データの復元はそうではない。" - -msgid "Disk usage" -msgstr "ディスク使用量" - -msgid "Distributed Virtual Router" -msgstr "分散仮想ルーター" - -msgid "" -"Do a full manual install by using the `Installation Tutorials and Guides " -"`_ for your " -"platform. Review the final configuration files and installed packages." -msgstr "" -"お使いのプラットフォーム用の `Installation Tutorials and Guides `_ を使用して、完全な手動イン" -"ストールを実行する。最終的な設定ファイルとインストールされたパッケージをレ" -"ビューします。" - -msgid "" -"Do not mount a share without an access rule! This can lead to an exception." -msgstr "" -"アクセスルールなしで共有をマウントしてはいけません。これは、例外を引き起こす" -"可能性があります。" - -msgid "Double VLAN" -msgstr "二重 VLAN" - -msgid "Down the Rabbit Hole" -msgstr "ウサギの穴に落ちて" - -msgid "Downgrade OpenStack packages." -msgstr "OpenStack パッケージをダウングレードします。" - -msgid "" -"Downgrading packages is by far the most complicated step; it is highly " -"dependent on the distribution and the overall administration of the system." -msgstr "" -"パッケージのダウングレードは、かなり最も複雑な手順です。ディストリビューショ" -"ン、システム管理全体に非常に依存します。" - -msgid "" -"Downtime, whether planned or unscheduled, is a certainty when running a " -"cloud. This chapter aims to provide useful information for dealing " -"proactively, or reactively, with these occurrences." -msgstr "" -"停止時間(計画的なものと予定外のものの両方)はクラウドを運用するときに確実に" -"発生します。本章は、プロアクティブまたはリアクティブに、これらの出来事に対処" -"するために有用な情報を提供することを目的としています。" - -msgid "Driver Quality Improvements" -msgstr "ドライバー品質の改善" - -msgid "Drop packets that are not associated with a state." -msgstr "どの状態にも関連付けられていないパケットの破棄。" - -msgid "Drop traffic without an IP/MAC allow rule." -msgstr "IP/MAC 許可ルールにない通信の破棄。" - -msgid "" -"During an upgrade, operators can add configuration options to ``nova.conf`` " -"which lock the version of RPC messages and allow live upgrading of the " -"services without interruption caused by version mismatch. The configuration " -"options allow the specification of RPC version numbers if desired, but " -"release name alias are also supported. For example:" -msgstr "" -"運用者は、アップグレード中、RPC バージョンをロックして、バージョン不一致によ" -"り引き起こされる中断なしでサービスのライブアップグレードできるよう、``nova." -"conf`` に設定オプションを追加できます。この設定オプションは、使いたければ " -"RPC バージョン番号を指定できます。リリース名のエイリアスもサポートされます。" -"例:" - -msgid "" -"EC2 compatibility credentials can be downloaded by selecting :guilabel:" -"`Project`, then :guilabel:`Compute`, then :guilabel:`Access & Security`, " -"then :guilabel:`API Access` to display the :guilabel:`Download EC2 " -"Credentials` button. Click the button to generate a ZIP file with server " -"x509 certificates and a shell script fragment. 
Create a new directory in a " -"secure location because these are live credentials containing all the " -"authentication information required to access your cloud identity, unlike " -"the default ``user-openrc``. Extract the ZIP file here. You should have " -"``cacert.pem``, ``cert.pem``, ``ec2rc.sh``, and ``pk.pem``. The ``ec2rc.sh`` " -"is similar to this:" -msgstr "" -"EC2 互換のクレデンシャルをダウンロードするには、 :guilabel:`プロジェクト" -"` 、 :guilabel:`コンピュート` 、 :guilabel:`アクセスとセキュリティ` 、 :" -"guilabel:`API アクセス` の順に選択し、 :guilabel:`EC2 認証情報のダウンロード" -"` ボタンを表示します。このボタンをクリックすると、 サーバーの x509 証明書と" -"シェルスクリプトフラグメントが含まれた ZIP が生成されます。これらのファイル" -"は、デフォルトの ``user-openrc`` とは異なり、クラウドのアイデンティティへのア" -"クセスに必要なすべての認証情報を含む有効なクレデンシャルなので、セキュリティ" -"保護された場所に新規ディレクトリを作成して、そこで ZIP ファイルを展開します。" -"``cacert.pem``、``cert.pem``、``ec2rc.sh``、および ``pk.pem`` が含まれている" -"はずです。``ec2rc.sh`` には、以下と似たような内容が記述されています。" - -msgid "" -"Each OpenStack cloud is different even if you have a near-identical " -"architecture as described in this guide. As a result, you must still test " -"upgrades between versions in your environment using an approximate clone of " -"your environment." -msgstr "" -"このガイドに記載されているような、理想的なアーキテクチャーに近いと思われる場" -"合でも、各 OpenStack クラウドはそれぞれ異なります。そのため、お使いの環境の適" -"切なクローンを使用して、お使いの環境のバージョン間でアップグレードをテストす" -"る必要があります。" - -msgid "" -"Each method provides different functionality and can be best divided into " -"two groups:" -msgstr "" -"メソッド毎に異なる機能を提供しますが、このメソッドは 2 つのグループに分類する" -"と良いでしょう。" - -msgid "" -"Each site runs a different configuration, as a resource cells in an " -"OpenStack Compute cells setup. Some sites span multiple data centers, some " -"use off compute node storage with a shared file system, and some use on " -"compute node storage with a non-shared file system. Each site deploys the " -"Image service with an Object Storage back end. A central Identity, " -"dashboard, and Compute API service are used. A login to the dashboard " -"triggers a SAML login with Shibboleth, which creates an account in the " -"Identity service with an SQL back end. An Object Storage Global Cluster is " -"used across several sites." -msgstr "" -"各サイトは(OpenStack Compute のセル設定におけるリソースセルとして)異なる設" -"定で実行されています。数サイトは複数データセンターに渡り、コンピュートノード" -"外のストレージを共有ストレージで使用しているサイトもあれば、コンピュートノー" -"ド上のストレージを非共有型ファイルシステムで使用しているサイトもあります。各" -"サイトは Object Storage バックエンドを持つ Image service をデプロイしていま" -"す。中央の Identity、dashboard、Compute API サービスが使用されています。" -"dashboard へのログインが Shibboleth の SAML ログインのトリガーになり、SQL " -"バックエンドの Identity サービスのアカウントを作成します。Object Storage " -"Global Cluster は、いくつかの拠点をまたがり使用されます。" - -msgid "" -"Early indications are that it does do this well for a base set of scenarios, " -"such as using the ML2 plug-in with Open vSwitch, one flat external network " -"and VXLAN tenant networks. However, it does appear that there are problems " -"with the use of VLANs, IPv6, Floating IPs, high north-south traffic " -"scenarios and large numbers of compute nodes. It is expected these will " -"improve significantly with the next release, but bug reports on specific " -"issues are highly desirable." -msgstr "" -"初期の目安は、ML2 プラグインと Open vSwitch、1 つのフラットな外部ネットワーク" -"と VXLAN のテナントネットワークなど、基本的なシナリオに対してこれをうまく実行" -"することです。しかしながら、VLAN、IPv6、Floating IP、大量のノース・サウス通信" -"のシナリオ、大量のコンピュートノードなどで問題が発生しはじめます。これらは次" -"のリリースで大幅に改善されることが期待されていますが、特定の問題におけるバグ" -"報告が強く望まれています。" - -msgid "Easier Upgrades" -msgstr "より簡単なアップグレード" - -msgid "" -"Either ``snap``, which means that the volume was created from a snapshot, or " -"anything other than ``snap`` (a blank string is valid). 
In the preceding " -"example, the volume was not created from a snapshot, so we leave this field " -"blank in our following example." -msgstr "" -"ボリュームがスナップショットから作成されたことを意味する ``snap`` 、または " -"``snap`` 以外の何か (空文字列も有効) です。上の例では、ボリュームがスナップ" -"ショットから作成されていません。そのため、この項目を以下の例において空白にし" -"てあります。" - -msgid "" -"Either approach is valid. Use the approach that matches your experience." -msgstr "" -"どのアプローチも有効です。あなたの経験に合うアプローチを使用してください。" - -msgid "ElasticSearch" -msgstr "ElasticSearch" - -msgid "Email address" -msgstr "電子メールアドレス" - -msgid "" -"Emma Richards of Rackspace Guest Relations took excellent care of our lunch " -"orders and even set aside a pile of sticky notes that had fallen off the " -"walls." -msgstr "" -"Rackspace ゲストリレーションズの Emma Richards は、私たちのランチの注文を素晴" -"らしく面倒を見てくれて、更に壁から剥がれ落ちた付箋紙の山を脇においてくれまし" -"た。" - -msgid "Enable scheduling of VMs to the node:" -msgstr "ノードへの仮想マシンのスケジュールを有効化します。" - -msgid "Enabled" -msgstr "有効" - -msgid "Enabling IPv6 Support" -msgstr "IPv6 サポートの有効化" - -msgid "Encode certificate in DER format" -msgstr "証明書を DER 形式でエンコードします" - -msgid "End-User Configuration of Security Groups" -msgstr "セキュリティグループのエンドユーザー設定" - -msgid "End-of-life" -msgstr "エンドオブライフ" - -msgid "" -"Ensure that cryptsetup is installed, and ensure that ``pythin-" -"barbicanclient`` Python package is installed" -msgstr "" -"cryptsetup がきちんとインストールされ、``pythin-barbicanclient`` Python パッ" -"ケージがインストールされていることを確認してください。" - -msgid "Ensure that the operating system has recognized the new disk:" -msgstr "" -"オペレーティングシステムが新しいディスクを認識していることを確認します。" - -msgid "Ephemeral" -msgstr "エフェメラル" - -msgid "Essex" -msgstr "Essex" - -msgid "" -"Evaluate successfully if a field of the resource specified in the current " -"request matches a specific value. For instance, ``\"field:networks:" -"shared=True\"`` is successful if the attribute shared of the network " -"resource is set to ``true``." -msgstr "" -"現在のリクエストに指定されたリソースの項目が指定された値と一致すれば、成功と" -"評価されます。たとえば、ネットワークリソースの shared 属性が ``true`` に設定" -"されている場合、 ``\"field:networks:shared=True\"`` が成功します。" - -msgid "" -"Evaluate successfully if the user submitting the request has the specified " -"role. For instance, ``\"role:admin\"`` is successful if the user submitting " -"the request is an administrator." -msgstr "" -"リクエストを出したユーザーが指定された役割を持っていれば、成功と評価されま" -"す。たとえば、リクエストを出しているユーザーが管理者ならば、 ``\"role:admin" -"\"`` が成功します。" - -msgid "" -"Even at smaller-scale testing, look for excess network packets to determine " -"whether something is going horribly wrong in inter-component communication." -msgstr "" -"より小規模なテストにおいてさえも、過剰なネットワークパケットを探して、コン" -"ポーネント間の通信で何かとてつもなくおかしくなっていないかどうかを判断しま" -"す。" - -msgid "" -"Ever have one of those days where all of the sudden you get the Google " -"results you were looking for? Well, that's what happened here. I was looking " -"for information on dhclient and why it dies when it can't renew its lease " -"and all of the sudden I found a bunch of OpenStack and dnsmasq discussions " -"that were identical to the problem we were seeing!" -msgstr "" -"探し続けてきた Google の検索結果が突然得られたという事態をお分かりだろうか?" -"えっと、それがここで起こったことだ。私は dhclient の情報と、何故 dhclient が" -"そのリースを更新できない場合に死ぬのかを探していて、我々が遭遇したのと同じ問" -"題についての OpenStack と dnsmasq の議論の束を突然発見した。" - -msgid "Everett Toews" -msgstr "Everett Toews" - -msgid "" -"Everett is a developer advocate at Rackspace making OpenStack and the " -"Rackspace Cloud easy to use. 
Sometimes developer, sometimes advocate, and " -"sometimes operator, he's built web applications, taught workshops, given " -"presentations around the world, and deployed OpenStack for production use by " -"academia and business." -msgstr "" -"Everett は Rackspace の Developer Advocate で、OpenStack や Rackspace Cloud " -"を使いやすくする仕事をしています。ある時は開発者、ある時は advocate、またある" -"時は運用者です。彼は、ウェブアプリケーションを作成し、ワークショップを行い、" -"世界中で公演を行い、教育界やビジネスでプロダクションユースとして使われる " -"OpenStack を構築しています。" - -msgid "Example Image service Database Queries" -msgstr "Image service のデータベースクエリーの例" - -msgid "" -"Failures of hardware are common in large-scale deployments such as an " -"infrastructure cloud. Consider your processes and balance time saving " -"against availability. For example, an Object Storage cluster can easily live " -"with dead disks in it for some period of time if it has sufficient capacity. " -"Or, if your compute installation is not full, you could consider live " -"migrating instances off a host with a RAM failure until you have time to " -"deal with the problem." -msgstr "" -"クラウドインフラなどの大規模環境では、ハードウェアの故障はよくあることです。" -"作業内容を考慮し、可用性と時間の節約のバランスを取ります。たとえば、オブジェ" -"クトストレージクラスターは、十分な容量がある場合には、ある程度の期間は死んだ" -"ディスクがあっても問題なく動作します。また、(クラウド内の) コンピュートノード" -"に空きがある場合には、問題に対処する時間が取れるまで、ライブマイグレーション" -"で RAM が故障したホストから他のホストへインスタンスを移動させることも考慮する" -"とよいでしょう。" - -msgid "" -"Feature requests typically start their life in Etherpad, a collaborative " -"editing tool, which is used to take coordinating notes at a design summit " -"session specific to the feature. This then leads to the creation of a " -"blueprint on the Launchpad site for the particular project, which is used to " -"describe the feature more formally. Blueprints are then approved by project " -"team members, and development can begin." -msgstr "" -"機能追加リクエストは、通常 Etherpad で始まります。Etherpad は共同編集ツール" -"で、デザインサミットのその機能に関するセッションで議論を整理するのに使われま" -"す。続けて、プロジェクトの Launchpad サイトに blueprint が作成され、" -"blueprint を使ってよりきちんとした形で機能が規定されていきます。 この後、" -"blueprint はプロジェクトメンバーによって承認され、開発が始まります。" - -msgid "Feb 13, 2014" -msgstr "2014年2月13日" - -msgid "Feb 3, 2011" -msgstr "2011年2月3日" - -msgid "" -"Felix Lee of Academia Sinica Grid Computing Centre in Taiwan contributed " -"this story." -msgstr "" -"台湾の Academia Sinica Grid Computing Centre の Felix Lee さんがこの話を提供" -"してくれました。" - -msgid "Field-based rules" -msgstr "項目に基づいたルール" - -msgid "Figure. Neutron network paths" -msgstr "図: Neutron ネットワーク経路" - -msgid "Figure. Traffic route for ping packet" -msgstr "図: ping パケットの通信ルート" - -msgid "File System Backups" -msgstr "ファイルシステムバックアップ" - -msgid "File injection" -msgstr "ファイルインジェクション" - -msgid "" -"File system to store files and directories, where all the data lives, " -"including the root partition that starts and runs the system." -msgstr "" -"ファイルやディレクトリを格納するファイルシステム。システムを起動、実行する " -"root パーティションなど、全データが設置される場所。" - -msgid "Final steps" -msgstr "最終手順" - -msgid "" -"Finally, Alvaro noticed something. When a packet from the outside hits the " -"cloud controller, it should not be configured with a VLAN. We verified this " -"as true. When the packet went from the cloud controller to the compute node, " -"it should only have a VLAN if it was destined for an instance. This was " -"still true. When the ping reply was sent from the instance, it should be in " -"a VLAN. True. When it came back to the cloud controller and on its way out " -"to the Internet, it should no longer have a VLAN. False. Uh oh. It looked as " -"though the VLAN part of the packet was not being removed." 
-msgstr "" -"遂に、Alvaro が何かを掴んだ。外部からのパケットがクラウドコントローラーを叩い" -"た際、パケットは VLAN で設定されるべきではない。我々はこれが正しいことを検証" -"した。パケットがクラウドコントローラーからコンピュートノードに行く際、パケッ" -"トはインスタンス宛の場合にのみ VLAN を持つべきである。これもまた正しかった。" -"ping のレスポンスがインスタンスから送られる際、パケットは VLAN 中にいるべきで" -"ある。OK。クラウドコントローラーからインターネットにパケットが戻る際、パ" -"ケットには VLAN を持つべきではない。NG。うぉっ。まるで パケットの VLAN 部分" -"が削除されていないように見える。" - -msgid "" -"Finally, I checked StackTach and reviewed the user's events. They had " -"created and deleted several snapshots—most likely experimenting. Although " -"the timestamps didn't match up, my conclusion was that they launched their " -"instance and then deleted the snapshot and it was somehow removed from ``/" -"var/lib/nova/instances/_base``. None of that made sense, but it was the best " -"I could come up with." -msgstr "" -"最後に、私は StackTack をチェックし、ユーザのイベントを見直した。彼らはいくつ" -"かのスナップショットを作ったり消したりしていた-ありそうな操作ではあるが。タ" -"イムスタンプが一致しないとはいえ、彼らがインスタンスを起動して、その後スナッ" -"プショットを削除し、それが何故か ``/var/lib/nova/instances/_base`` から削除" -"されたというのが私の結論だった。大した意味は無かったが、それがその時私が得た" -"全てだった。" - -msgid "Finally, mount the disk:" -msgstr "最後に、ディスクをマウントします。" - -msgid "" -"Finally, reattach volumes using the same method described in the section :" -"ref:`volumes`." -msgstr "" -"最後に、 :ref:`volumes` のセクションで説明されているのと同じ方法を用いて、ボ" -"リュームを再接続します。" - -msgid "" -"Finally, to create a share that uses this share network, get to Create Share " -"use case described earlier in this chapter." -msgstr "" -"最後に、これまでの本章に記載された「共有の作成」ユースケースを参照して、この" -"共有ネットワークを使用する共有を作成します。" - -msgid "" -"Find the ``[filter:ratelimit]`` section in ``/etc/swift/proxy-server.conf``, " -"and copy in the following configuration section after it:" -msgstr "" -"``/etc/swift/proxy-server.conf`` の ``[filter:ratelimit]`` セクションを探し、" -"その後ろに以下の環境定義セクションを貼り付けてください。" - -msgid "" -"Find the ``[pipeline:main]`` section in ``/etc/swift/proxy-server.conf``, " -"and add ``ip_whitelist`` after ratelimit to the list like so. When you're " -"done, save and close the file:" -msgstr "" -"``/etc/swift/proxy-server.conf`` ``[pipeline:main]`` セクションを探し、このよ" -"うに ``ip_whitelist`` リストを ratelimit の後ろに追加してください。完了した" -"ら、ファイルを保存して閉じてください。" - -msgid "" -"Find the ``provider:segmentation_id`` of the network you're interested in. " -"This is the same field used for the VLAN ID in VLAN-based networks:" -msgstr "" -"興味あるネットワークの ``provider:segmentation_id`` を探します。これは、VLAN " -"ベースのネットワークにおける VLAN ID に使用されるものと同じ項目です。" - -msgid "Find the ``scheduler_driver`` config and change it like so:" -msgstr "``scheduler_driver`` 設定を見つけ、このように変更してください。" - -msgid "" -"Find the external VLAN tag of the network you're interested in. This is the " -"``provider:segmentation_id`` as returned by the networking service:" -msgstr "" -"興味のあるネットワークの外部 VLAN タグを見つけます。これは、ネットワークサー" -"ビスにより返される ``provider:segmentation_id`` です。" - -msgid "Find the port corresponding to the instance. 
For example:" -msgstr "インスタンスに対応するポートを見つけます。例:" - -msgid "Finding a Failure in the Path" -msgstr "経路上の障害を見つける" - -msgid "First, find the UUID of the instance in question:" -msgstr "まず、インスタンスのUUIDを確認します。" - -msgid "First, unmount the disk:" -msgstr "まず、ディスクをアンマウントします。" - -msgid "" -"First, you can discover what servers belong to your OpenStack cloud by " -"running:" -msgstr "" -"まず、あなたのOpenStackクラウドに属し、稼働しているサーバーを把握することがで" -"きます。" - -msgid "Fixed IPs" -msgstr "固定 IP" - -msgid "Flavors" -msgstr "フレーバー" - -msgid "" -"Flavors define a number of parameters, resulting in the user having a choice " -"of what type of virtual machine to run—just like they would have if they " -"were purchasing a physical server. :ref:`table_flavor_params` lists the " -"elements that can be set. Note in particular ``extra_specs``, which can be " -"used to define free-form characteristics, giving a lot of flexibility beyond " -"just the size of RAM, CPU, and Disk." -msgstr "" -"フレーバーは、数多くのパラメーターを定義します。これにより、ユーザーが実行す" -"る仮想マシンの種類を選択できるようになります。ちょうど、物理サーバーを購入す" -"る場合と同じようなことです。:ref:`table_flavor_params` は、設定できる要素の一" -"覧です。とくに ``extra_specs`` に注意してください。これは、メモリー、CPU、" -"ディスクの容量以外にもかなり柔軟に、自由形式で特徴を定義するために使用できま" -"す。" - -msgid "Floating IPs" -msgstr "Floating IP" - -msgid "Folsom" -msgstr "Folsom" - -msgid "" -"For Compute, instance metadata is a collection of key-value pairs associated " -"with an instance. Compute reads and writes to these key-value pairs any time " -"during the instance lifetime, from inside and outside the instance, when the " -"end user uses the Compute API to do so. However, you cannot query the " -"instance-associated key-value pairs with the metadata service that is " -"compatible with the Amazon EC2 metadata service." -msgstr "" -"Compute では、インスタンスのメタデータはインスタンスと関連付けられたキーバ" -"リューペアの集まりです。エンドユーザーがこれらのキーバリューペアを読み書きす" -"るために Compute API を使用するとき、Compute がインスタンスの生存期間中にイン" -"スタンスの内外からこれらを読み書きします。しかしながら、Amazon EC2 メタデータ" -"サービスと互換性のあるメタデータサービスを用いて、インスタンスに関連付けられ" -"たキーバリューペアをクエリーできません。" - -msgid "For Object Storage, each region has a swift environment." -msgstr "オブジェクトストレージ用に、各リージョンには swift 環境があります。" - -msgid "" -"For an example of instance metadata, users can generate and register SSH " -"keys using the :command:`openstack keypair create` command:" -msgstr "" -"インスタンスのメタデータの場合、ユーザーが :command:`openstack keypair " -"create` コマンドを使用して SSH 鍵を生成および登録できます。" - -msgid "" -"For details, see subsection `Security Services `__ of “Shared File " -"Systems” section of OpenStack Administrator Guide document." -msgstr "" -"詳細は OpenStack Administrator Guide の Shared File Systems セクションにある " -"`Security Services `__ を参照してください。" - -msgid "" -"For environments using the OpenStack Networking service (neutron), verify " -"the release version of the database. For example:" -msgstr "" -"OpenStack Networking サービス (neutron) を使用している環境では、リリースバー" -"ジョンのデータベースを検証します。例:" - -msgid "For example" -msgstr "例えば" - -msgid "" -"For example, a group of users have instances that are utilizing a large " -"amount of compute resources for very compute-intensive tasks. This is " -"driving the load up on compute nodes and affecting other users. In this " -"situation, review your user use cases. You may find that high compute " -"scenarios are common, and should then plan for proper segregation in your " -"cloud, such as host aggregation or regions." 
-msgstr "" -"例えば、あるユーザーのグループが、非常に計算負荷の高い作業用に大量のコン" -"ピュートリソースを使うインスタンスを持っているとします。これにより、Compute " -"ノードの負荷が高くなり、他のユーザーに影響を与えます。この状況では、ユーザー" -"のユースケースを精査する必要があります。計算負荷が高いシナリオがよくあるケー" -"スだと判明し、ホスト集約やリージョンなど、クラウドを適切に分割することを計画" -"すべき場合もあるでしょう。" - -msgid "" -"For example, let's say you have a special ``authorized_keys`` file named " -"special_authorized_keysfile that for some reason you want to put on the " -"instance instead of using the regular SSH key injection. In this case, you " -"can use the following command:" -msgstr "" -"例えば、何らかの理由で通常の SSH 鍵の注入ではなく、 " -"special_authorized_keysfile という名前の特別な ``authorized_keys`` ファイルを" -"インスタンスに置きたいと言うとします。この場合、以下のコマンドを使用できます:" - -msgid "For example, run the following command:" -msgstr "例えば、以下のコマンドを実行します。" - -msgid "For example, to place a 5 GB quota on an account:" -msgstr "例として、アカウントに 5 GB のクォータを設定します。" - -msgid "For example, to restrict a project's image storage to 5 GB, do this:" -msgstr "" -"たとえば、プロジェクトのイメージストレージを 5GB に制限するには、以下を実行し" -"ます。" - -msgid "" -"For example, you usually cannot configure NICs for VLANs when PXE booting. " -"Additionally, you usually cannot PXE boot with bonded NICs. If you run into " -"this scenario, consider using a simple 1 GB switch in a private network on " -"which only your cloud communicates." -msgstr "" -"例えば、PXE ブートの際には、通常は VLAN の設定は行えません。さらに、通常は " -"bonding された NIC から PXE ブートを行うこともできません。このような状況の場" -"合、クラウド内でのみ通信できるネットワークで、シンプルな 1Gbps のスイッチを使" -"うことを検討してください。" - -msgid "For example:" -msgstr "例えば" - -msgid "" -"For instructions on installing, upgrading, or removing command-line clients, " -"see the `Install the OpenStack command-line clients `_ " -"section in OpenStack End User Guide." -msgstr "" -"コマンドラインクライアントのインストール、アップグレード、削除に関する詳細" -"は、OpenStack エンドユーザーガイドの `OpenStack コマンドラインクライアントの" -"インストール `_ セクションを参照ください。" - -msgid "" -"For more details and additional information on snapshots, see `Share " -"Snapshots `__ of “Shared File Systems” section of “OpenStack " -"Administrator Guide” document." -msgstr "" -"スナップショットに関する詳細は、OpenStack Administrator Guide の Shared File " -"Systems セクションにある `Share Snapshots `__ を参照してください。" - -msgid "" -"For more information about updating Block Storage volumes (for example, " -"resizing or transferring), see the `OpenStack End User Guide `__." -msgstr "" -"Block Storage ボリュームの更新 (例えばリサイズや譲渡など) に関する詳細は、 " -"`OpenStack エンドユーザーガイド `__ を参照してください。" - -msgid "" -"For more information on installing DevStack, see the `DevStack `_ website." -msgstr "" -"DevStack のインストールの詳細は `DevStack `_ の Web サイトにあります。" - -msgid "" -"For more information, see `RabbitMQ documentation `_." -msgstr "" -"詳細は `RabbitMQ のドキュメント `_ " -"を参照してください。" - -msgid "" -"For readers who need to get a specialized feature into OpenStack, this " -"chapter describes how to use DevStack to write custom middleware or a custom " -"scheduler to rebalance your resources." -msgstr "" -"OpenStack に特別な機能を追加したい読者向けに、この章は、カスタムミドルウェア" -"やカスタムスケジューラーを書いて、リソースを再配置するために、DevStack を使用" -"する方法について説明します。" - -msgid "" -"For resource alerting, for example, monitor disk capacity on a compute node " -"with Nagios, add the following to your Nagios configuration:" -msgstr "" -"たとえば、リソースのアラートとして、コンピュートノード上のディスク容量を " -"Nagios を使って監視する場合、次のような Nagios 設定を追加します。" - -msgid "" -"For stable operations, you want to detect failure promptly and determine " -"causes efficiently. With a distributed system, it's even more important to " -"track the right items to meet a service-level target. 
Learning where these " -"logs are located in the file system or API gives you an advantage. This " -"chapter also showed how to read, interpret, and manipulate information from " -"OpenStack services so that you can monitor effectively." -msgstr "" -"安定運用のために、障害を即座に検知して、原因を効率的に見つけたいと思います。" -"分散システムを用いると、目標サービスレベルを満たすために、適切な項目を追跡す" -"ることがより重要になります。ログが保存されるファイルシステムの場所、API が与" -"える利点を学びます。本章は、OpenStack のサービスを効率的に監視できるよう、そ" -"れらからの情報を読み、解釈し、操作する方法も説明しました。" - -msgid "" -"For the cloud controller, the good news is if your cloud is using the " -"FlatDHCP multi-host HA network mode, existing instances and volumes continue " -"to operate while the cloud controller is offline. For the storage proxy, " -"however, no storage traffic is possible until it is back up and running." -msgstr "" -"クラウドコントローラーの場合、良いニュースとしては、クラウドが FlatDHCP マル" -"チホスト HA ネットワークモードを使用していれば、既存のインスタンスとボリュー" -"ムはクラウドコントローラーがオフラインの間も動作を継続するという点がありま" -"す。しかしながら、ストレージプロキシの場合には、サーバーが元に戻され動作状態" -"になるまで、ストレージとの通信ができません。" - -msgid "" -"For the second path, you can write new features and plug them in using " -"changes to a configuration file. If the project where your feature would " -"need to reside uses the Python Paste framework, you can create middleware " -"for it and plug it in through configuration. There may also be specific ways " -"of customizing a project, such as creating a new scheduler driver for " -"Compute or a custom tab for the dashboard." -msgstr "" -"2 番目の方法として、新機能を書き、設定ファイルを変更して、それらをプラグイン" -"することもできます。もし、あなたの機能が必要とされるプロジェクトが Python " -"Paste フレームワークを使っているのであれば、そのための ミドルウェアを作成し、" -"環境設定を通じて組み込めばよいのです。他にもプロジェクトをカスタマイズする方" -"法があるかもしれません。例えば、Compute の新しいスケジューラーやダッシュボー" -"ドのカスタムタブなど。" - -msgid "" -"For the storage proxy, ensure that the :term:`Object Storage service ` has resumed:" -msgstr "" -"ストレージプロキシの場合、:term:`Object Storage サービス ` が再開していることを確認します。" - -msgid "" -"For this example, we will use the Open vSwitch (OVS) back end. Other back-" -"end plug-ins will have very different flow paths. OVS is the most popularly " -"deployed network driver, according to the April 2016 OpenStack User Survey. " -"We'll describe each step in turn, with :ref:`network_paths` for reference." -msgstr "" -"この例のために、Open vSwitch (OVS) バックエンドを使用します。他のバックエンド" -"プラグインは、まったく別のフロー経路になるでしょう。2016 年 4 月の OpenStack " -"User Survey によると、 OVS は、最も一般的に配備されているネットワークドライ" -"バーです。 :ref:`network_paths` を参照しながら、各手順を順番に説明していきま" -"す。" - -msgid "Freeze the system" -msgstr "システムをフリーズします" - -msgid "" -"From here, click the :guilabel:`+` icon to add users to the project. Click " -"the :guilabel:`-` to remove them." -msgstr "" -"ここから、プロジェクトにユーザーを追加するには :guilabel:`+` アイコンをクリッ" -"クします。削除するには :guilabel:`-` をクリックします。" - -msgid "From the command line, do this:" -msgstr "コマンドラインから次のとおり実行します。" - -msgid "" -"From the vnet NIC, the packet transfers to a bridge on the compute node, " -"such as ``br100``." -msgstr "" -"パケットはvnet NICからコンピュートノードのブリッジ、例えば ``br100`` に転送さ" -"れます。" - -msgid "" -"From these tables, you can see that a floating IP is technically never " -"directly related to an instance; it must always go through a fixed IP." -msgstr "" -"これらのテーブルから、Floating IPが技術的には直接インスタンスにひも付けられて" -"おらず、固定IP経由であることがわかります。" - -msgid "" -"From this view, you can do a number of useful things, as well as a few " -"dangerous ones." -msgstr "" -"このビューから、数多くの有用な操作、いくつかの危険な操作を実行できます。" - -msgid "" -"From this you see that the DHCP server on that network is using the " -"``tape6256f7d-31`` device and has an IP address of ``10.0.1.100``. 
Seeing " -"the address ``169.254.169.254``, you can also see that the dhcp-agent is " -"running a metadata-proxy service. Any of the commands mentioned previously " -"in this chapter can be run in the same way. It is also possible to run a " -"shell, such as ``bash``, and have an interactive session within the " -"namespace. In the latter case, exiting the shell returns you to the top-" -"level default namespace." -msgstr "" -"ここから、そのネットワークにある DHCP サーバーが ``tape6256f7d-31`` デバイス" -"を使用していて、IP アドレス ``10.0.1.100`` を持つことを確認します。アドレス " -"``169.254.169.254`` を確認することにより、dhcp-agent が metadata-proxy サービ" -"スを実行していることも確認できます。この章の前の部分で言及したコマンドは、す" -"べて同じ方法で実行できます。 ``bash`` などのシェルを実行して、名前空間の中で" -"対話式セッションを持つこともできます。後者の場合、シェルを抜けることにより、" -"最上位のデフォルトの名前空間に戻ります。" - -msgid "" -"Functional testing like this is not a replacement for proper unit and " -"integration testing, but it serves to get you started." -msgstr "" -"このような機能試験は、正しいユニットテストと結合テストの代わりになるものでは" -"ありませんが、作業を開始することはできます。" - -msgid "Further Reading" -msgstr "参考資料" - -msgid "" -"Further days go by and we catch The Issue in action more and more. We find " -"that dhclient is not running after The Issue happens. Now we're back to " -"thinking it's a DHCP issue. Running ``/etc/init.d/networking`` restart " -"brings everything back up and running." -msgstr "" -"それから何日か過ぎ、我々は「あの問題」に度々遭遇した。我々は「あの問題」の発" -"生後、dhclient が実行されていないことを発見した。今、我々は、それが DHCP の問" -"題であるという考えに立ち戻った。 ``/etc/init.d/networking`` restart を実行す" -"ると、全ては元通りに実行されるようになった。" - -msgid "" -"Further troubleshooting showed that libvirt was not running at all. This " -"made more sense. If libvirt wasn't running, then no instance could be " -"virtualized through KVM. Upon trying to start libvirt, it would silently die " -"immediately. The libvirt logs did not explain why." -msgstr "" -"さらなるトラブルシューティングにより、libvirt がまったく動作していないことが" -"わかりました。これは大きな手がかりです。libvirt が動作していないと、KVM によ" -"るインスタンスの仮想化ができません。libvirt を開始させようとしても、libvirt " -"は何も表示せずすぐに停止しました。libvirt のログでは理由がわかりませんでし" -"た。" - -msgid "" -"GRE-based networks are passed with ``patch-tun`` to the tunnel bridge ``br-" -"tun`` on interface ``patch-int``. This bridge also contains one port for " -"each GRE tunnel peer, so one for each compute node and network node in your " -"network. The ports are named sequentially from ``gre-1`` onward." -msgstr "" -"GRE ベースのネットワークは、 ``patch-tun`` を用いて、 ``patch-int`` インター" -"フェースの ``br-tun`` トンネルブリッジに渡されます。このブリッジは、各 GRE " -"トンネルの 1 つのポートにも含まれます。つまり、ネットワーク上の各コンピュート" -"ノードとネットワークノードに対して 1 つです。ポートの名前は、 ``gre-1`` から" -"順番に増えていきます。" - -msgid "" -"GRE-based networks will be passed to the tunnel bridge ``br-tun``, which " -"behaves just like the GRE interfaces on the compute node." -msgstr "" -"GRE ベースのネットワークは、トンネルブリッジ ``br-tun`` に転送されます。これ" -"は、コンピュートノードにおいて GRE インターフェースのように動作します。" - -msgid "Generate signature of image and convert it to a base64 representation:" -msgstr "イメージの署名を生成して、base64 形式に変換します。" - -msgid "Generic rules" -msgstr "汎用的なルール" - -msgid "Geographical Considerations for Object Storage" -msgstr " Object Storage の地理的考慮事項" - -msgid "Get a list of instances that need to be moved:" -msgstr "移動する必要のあるインスタンスの一覧を取得します。" - -msgid "Getting Credentials" -msgstr "認証情報の取得方法" - -msgid "Getting Started with OpenStack" -msgstr "はじめての OpenStack" - -msgid "Good Luck!" -msgstr "グッドラック!" 
- -msgid "" -"Grep for 0x<``provider:segmentation_id``>, 0x3 in this case, in the output " -"of ``ovs-ofctl dump-flows br-tun``:" -msgstr "" -"この場合、 ``ovs-ofctl dump-flows br-tun`` の出力で 0x<``provider:" -"segmentation_id``>, 0x3 を grep します。" - -msgid "" -"Grep for the ``provider:segmentation_id``, 2113 in this case, in the output " -"of :command:`ovs-ofctl dump-flows br-int`:" -msgstr "" -"この場合、 :command:`ovs-ofctl dump-flows br-int` の出力で ``provider:" -"segmentation_id`` を、この場合は 2113 を grep します。" - -msgid "Grizzly" -msgstr "Grizzly" - -msgid "HDWMY" -msgstr "HDWMY" - -msgid "Handling a Complete Failure" -msgstr "完全な故障の対処" - -msgid "Hardware Procurement" -msgstr "ハードウェア調達" - -msgid "" -"Hardware does not have to be consistent, but it should at least have the " -"same type of CPU to support instance migration." -msgstr "" -"ハードウェアに整合性を持たせる必要はありませんが、インスタンスのマイグレー" -"ションをサポートできるように、最低限、CPU の種類は同じにする必要があります。" - -msgid "" -"Hardware for compute nodes. Typically 256 or 144 GB memory, two processors, " -"24 cores. 4–6 TB direct attached storage, typically in a RAID 5 " -"configuration." -msgstr "" -"コンピュートノードのハードウェア。通常、メモリー 256 GB または 144 GB、プロ" -"セッサー 2 個、コア 24 個、通常 RAID 5 設定のダイレクトアタッチストレージ " -"(DAS)。" - -msgid "" -"Hardware for controller nodes, used for all stateless OpenStack API " -"services. About 32–64 GB memory, small attached disk, one processor, varied " -"number of cores, such as 6–12." -msgstr "" -"コントローラーノードのハードウェア。ステートレスの OpenStack API サービスすべ" -"てに使用します。メモリー約 32-64GB、接続された容量の小さいディスク、プロセッ" -"サー 1 つ、6-12 個程度のコア。" - -msgid "" -"Hardware for storage nodes. Typically for these, the disk space is optimized " -"for the lowest cost per GB of storage while maintaining rack-space " -"efficiency." -msgstr "" -"ストレージノードのハードウェア。通常、ラックスペース効率を確保しつつも、ディ" -"スク容量のコストが GB ベースで最も低く最適化されています。" - -msgid "Havana" -msgstr "Havana" - -msgid "Havana Haunted by the Dead" -msgstr "Havana 死者の幽霊" - -msgid "" -"He re-enabled the switch ports and the two compute nodes immediately came " -"back to life." -msgstr "" -"彼はスイッチポートを再度有効にしたところ、2つのコンピュートノードは即時に復" -"活した。" - -msgid "Here are snippets of the default nova ``policy.json`` file:" -msgstr "これは標準の nova ``policy.json`` ファイルの抜粋です。" - -msgid "Here are some other resources:" -msgstr "他にもいくつかリソースがあります。" - -msgid "Here is an example error log:" -msgstr "これはエラーログの例です。" - -msgid "" -"Here is an example of a log message with the corresponding ERROR (Python " -"traceback) immediately following:" -msgstr "" -"これは、ERROR (Python のトレースバック) に対応するログメッセージの例です。" - -msgid "" -"Here is an example using the ratios for gathering scalability information " -"for the number of VMs expected as well as the storage needed. The following " -"numbers support (200 / 2) × 16 = 1600 VM instances and require 80 TB of " -"storage for ``/var/lib/nova/instances``:" -msgstr "" -"ここでは、期待される仮想マシン数や必要なストレージ数などの拡張性の情報を収集" -"するために、これらの比率を使用した例を紹介しています。以下の数では、 (200 / " -"2) × 16 = 1600 仮想マシンのインスタンスをサポートし、 ``/var/lib/nova/" -"instances`` のストレージ 80 TB が必要となります。" - -msgid "" -"Here we can see that the request was denied because the remote IP address " -"wasn't in the set of allowed IPs." -msgstr "" -"ここで、リモートIPアドレスが、許可されたIPアドレスの中になかったため、リクエ" -"ストが拒否されていることがわかります。" - -msgid "" -"Here you can see packets received on port ID 1 with the VLAN tag 2113 are " -"modified to have the internal VLAN tag 7. 
Digging a little deeper, you can " -"confirm that port 1 is in fact ``int-br-eth1``:" -msgstr "" -"これで VLAN タグ 2113 を持つポート ID 1 で受信したパケットを参照できます。こ" -"れは変換され、内部 VLAN タグ 7 を持ちます。より深く掘り下げると、ポート 1 が" -"実際に ``int-br-eth1`` であることが確認できます。" - -msgid "" -"Here's a quick list of various to-do items for each hour, day, week, month, " -"and year. Please note that these tasks are neither required nor definitive " -"but helpful ideas:" -msgstr "" -"これらは、毎時間、日、週、月および年に実行する To Do 項目の簡単な一覧です。こ" -"れらのタスクは必要なものでも、絶対的なものでもありませんが、役に立つものばか" -"りです。" - -msgid "" -"Here, the ID associated with the instance is ``faf7ded8-4a46-413b-b113-" -"f19590746ffe``. If you search for this string on the cloud controller in the " -"``/var/log/nova-*.log`` files, it appears in ``nova-api.log`` and ``nova-" -"scheduler.log``. If you search for this on the compute nodes in ``/var/log/" -"nova-*.log``, it appears in ``nova-compute.log``. If no ERROR or CRITICAL " -"messages appear, the most recent log entry that reports this may provide a " -"hint about what has gone wrong." -msgstr "" -"ここで、インスタンスのUUIDは ``faf7ded8-4a46-413b-b113-f19590746ffe`` です。" -"クラウドコントローラー上の ``/var/log/nova-*.log`` ファイルをこの文字列で検索" -"すると、 ``nova-api.log`` と ``nova-scheduler.log`` で見つかります。同様にコ" -"ンピュートノードで検索した場合、 ``nova-compute.log`` で見つかります。もし、 " -"ERROR や CRITICAL のメッセージが存在しない場合、最後のログエントリが、何が悪" -"いかのヒントを示しているかもしれません。" - -msgid "" -"Here, the external server received the ping request and sent a ping reply. " -"On the compute node, you can see that both the ping and ping reply " -"successfully passed through. You might also see duplicate packets on the " -"compute node, as seen above, because ``tcpdump`` captured the packet on both " -"the bridge and outgoing interface." -msgstr "" -"外部サーバーはpingリクエストを受信し、pingリプライを送信しています。コン" -"ピュートノード上では、pingとpingリプライがそれぞれ成功していることがわかりま" -"す。また、見ての通り、コンピュートノード上ではパケットが重複していることもわ" -"かるでしょう。なぜなら``tcpdump`` はブリッジと外向けインターフェイスの両方で" -"パケットをキャプチャーするからです。" - -msgid "" -"Here, two floating IPs are available. The first has been allocated to a " -"project, while the other is unallocated." -msgstr "" -"この場合は、2 つの Floating IP アドレスが利用可能です。最初の IP アドレスはプ" -"ロジェクトに確保されていますが、もう一方は確保されていません。" - -msgid "" -"Here, you see three flows related to this GRE tunnel. The first is the " -"translation from inbound packets with this tunnel ID to internal VLAN ID 1. " -"The second shows a unicast flow to output port 53 for packets destined for " -"MAC address fa:16:3e:a6:48:24. The third shows the translation from the " -"internal VLAN representation to the GRE tunnel ID flooded to all output " -"ports. For further details of the flow descriptions, see the man page for " -"``ovs-ofctl``. As in the previous VLAN example, numeric port IDs can be " -"matched with their named representations by examining the output of ``ovs-" -"ofctl show br-tun``." -msgstr "" -"ここで、この GRE トンネルに関連する 3 つのフローを見つけられます。1 番目は、" -"このトンネル ID を持つ受信パケットから内部 VLAN ID 1 に変換したものです。2 番" -"目は、MAC アドレス fa:16:3e:a6:48:24 宛のパケットに対する送信ポート 53 番への" -"ユニキャストフローです。3 番目は、内部 VLAN 表現から、すべての出力ポートにあ" -"ふれ出した GRE トンネル ID に変換したものです。フローの説明の詳細は ``ovs-" -"ofctl`` のマニュアルページを参照してください。前の VLAN の例にあるように、数" -"値ポート ID は、 ``ovs-ofctl show br-tun`` の出力を検査することにより、それら" -"の名前を付けた表現に対応付けられます。" - -msgid "" -"Herein lies a selection of tales from OpenStack cloud operators. Read, and " -"learn from their wisdom." 
-msgstr "" -"ここにあるのは、OpenStack クラウドオペレータ達の苦闘の抜粋である。これを読" -"み、彼らの叡智を学ぶが良い。" - -msgid "High Availability" -msgstr "高可用性" - -msgid "Host aggregates" -msgstr "ホスト・アグリゲート" - -msgid "" -"Host aggregates and instance-type extra specs are used to provide two " -"different resource allocation ratios. The default resource allocation ratios " -"we use are 4:1 CPU and 1.5:1 RAM. Compute-intensive workloads use instance " -"types that require non-oversubscribed hosts where ``cpu_ratio`` and " -"``ram_ratio`` are both set to 1.0. Since we have hyper-threading enabled on " -"our compute nodes, this provides one vCPU per CPU thread, or two vCPUs per " -"physical core." -msgstr "" -"ホストアグリゲートとインスタンス種別の追加スペックが使用され、2 種類の割り当" -"て倍率を提供します。私たちが使用するデフォルトのリソース割り当ては、4:1 CPU " -"と 1.5:1 RAM です。コンピュート中心の処理は、``cpu_ratio`` と ``ram_ratio`` " -"をどちらも 1.0 に設定されている、オーバーサブスクライブしていないホストを必要" -"とするインスタンス種別を使用します。コンピュートノードでハイパースレッディン" -"グを有効化しているので、これは CPU スレッドごとに 1 つの仮想 CPU、または、物" -"理 CPU ごとに 2 つの仮想 CPU を提供します。" - -msgid "Host aggregates zone" -msgstr "ホストアグリゲートゾーン" - -msgid "Hourly" -msgstr "毎時" - -msgid "How Do I Modify an Existing Flavor?" -msgstr "どのように既存のフレーバーを変更しますか?" - -msgid "How This Book Is Organized" -msgstr "この本の構成" - -msgid "How many backups to keep?" -msgstr "いくつのバックアップを持つべきか?" - -msgid "" -"How much storage is required ``(flavor disk size × number of instances)``" -msgstr "" -"必要とされるストレージ容量: ``(フレーバーのディスクサイズ × インスタンス数)``" - -msgid "How often should backups be tested?" -msgstr "どの程度の頻度でバックアップをテストすべきか?" - -msgid "How to Contribute to This Book" -msgstr "この本の作成に参加するには" - -msgid "" -"However, hardware choice is important for many applications, so if that " -"applies to you, consider that there are several software distributions " -"available that you can run on servers, storage, and network products of your " -"choosing. Canonical (where OpenStack replaced Eucalyptus as the default " -"cloud option in 2011), Red Hat, and SUSE offer enterprise OpenStack " -"solutions and support. You may also want to take a look at some of the " -"specialized distributions, such as those from Rackspace, Piston, SwiftStack, " -"or Cloudscaling." -msgstr "" -"また一方、ハードウェアの選択が多くのアプリケーションにとって重要です。そのた" -"め、アプライアンスを適用する場合、自分で選択したサーバー、ストレージ、ネット" -"ワークの製品で実行できる、ソフトウェアディストリビューションがいくつかあるこ" -"とを考慮してください。Canonical (2011 年に標準のクラウド製品を Eucalyptus か" -"ら OpenStack に置き換えました)、Red Hat、SUSE は、エンタープライズレベルの " -"OpenStack ソリューションとサポートを提供しています。Rackspace、Piston、" -"SwiftStack、Cloudscaling などの専門的なディストリビューションも見たいかもしれ" -"ません。" - -msgid "" -"However, that is not to say that it needs to be the same size or use " -"identical hardware as the production environment. It is important to " -"consider the hardware and scale of the cloud that you are upgrading. The " -"following tips can help you minimise the cost:" -msgstr "" -"しかしながら、言うまでもなく、本番環境と同じ大きさや同一のハードウェアを使用" -"する必要がありません。アップグレードするクラウドのハードウェアや規模を考慮す" -"ることは重要です。以下のヒントにより予測不能なコストを最小化する役に立つで" -"しょう。" - -msgid "" -"However, the enticing part of OpenStack might be to build your own private " -"cloud, and there are several ways to accomplish this goal. Perhaps the " -"simplest of all is an appliance-style solution. You purchase an appliance, " -"unpack it, plug in the power and the network, and watch it transform into an " -"OpenStack cloud with minimal additional configuration." 
-msgstr "" -"しかしながら、OpenStack の魅力的な部分は、自分のプライベートクラウドを構築す" -"ることかもしれません。この目標を達成するいくつかの方法があります。おそらく最" -"も簡単な方法は、アプライアンス形式のソリューションです。アプライアンスを購入" -"し、それを展開し、電源とネットワークを接続します。そして、最小限の設定だけで " -"OpenStack クラウドが構築されていくことを見ていてください。" - -msgid "" -"However, this guide has a different audience—those seeking flexibility from " -"the OpenStack framework by deploying do-it-yourself solutions." -msgstr "" -"しかしながら、このガイドは別の想定読者もいます。独自の自作ソリューションを導" -"入することにより、OpenStack フレームワークの柔軟性を求めている人々です。" - -msgid "" -"However, you need more than the core count alone to estimate the load that " -"the API services, database servers, and queue servers are likely to " -"encounter. You must also consider the usage patterns of your cloud." -msgstr "" -"しかし、APIサービスやデータベースサーバー、MQサーバーがおそらく遭遇する負荷を" -"見積もるためには、コア数以外の検討も行う必要があります。クラウドの利用パター" -"ンも考慮しなければなりません。" - -msgid "" -"I checked Glance and noticed that this image was a snapshot that the user " -"created. At least that was good news—this user would have been the only user " -"affected." -msgstr "" -"私は Glance をチェックし、問題のイメージがそのユーザの作成したスナップショッ" -"トであることに注目した。最終的に、それはグッドニュースだった。このユーザが影" -"響を受けた唯一のユーザだった。" - -msgid "" -"I just upgraded OpenStack from Grizzly to Havana 2013.2-2 using the RDO " -"repository and everything was running pretty well—except the EC2 API." -msgstr "" -"RDO リポジトリーを使用して Grizzly から Havana 2013.2-2 に OpenStack を単に" -"アップグレードしました。そして、すべてのものが EC2 API で非常に良く動作してい" -"ました。" - -msgid "" -"I logged into the cloud controller and was able to both ``ping`` and SSH " -"into the problematic compute node which seemed very odd. Usually if I " -"receive this type of alert, the compute node has totally locked up and would " -"be inaccessible." -msgstr "" -"実に奇妙なことだが、私はクラウドコントローラーにログインし、問題のコンピュー" -"トノードに ``ping`` と SSH の両方を実行できた。通常、この種の警告を受け取る" -"と、コンピュートノードは完全にロックしていてアクセス不可になる。" - -msgid "" -"I looked at the status of both NICs in the bonded pair and saw that neither " -"was able to communicate with the switch port. Seeing as how each NIC in the " -"bond is connected to a separate switch, I thought that the chance of a " -"switch port dying on each switch at the same time was quite improbable. I " -"concluded that the 10gb dual port NIC had died and needed replaced. I " -"created a ticket for the hardware support department at the data center " -"where the node was hosted. I felt lucky that this was a new node and no one " -"else was hosted on it yet." -msgstr "" -"私は bonding ペアの両方の NIC の状態を確認し、両方ともスイッチポートへの通信" -"ができないことを知った。bond 中の各 NIC が異なるスイッチに接続されていること" -"を知り、私は、各スイッチのスイッチポートが同時に死ぬ可能性はまずないと思っ" -"た。私は 10Gb デュアルポート NIC が死んで、交換が必要だと結論づけた。私は、そ" -"のノードがホスティングされているデータセンターのハードウェアサポート部門に宛" -"てたチケットを作成した。私は、それが新しいノードで、他のインスタンスがまだそ" -"のノード上でホスティングされていないことを幸運に思った。" - -msgid "" -"I noticed that the API would suffer from a heavy load and respond slowly to " -"particular EC2 requests such as ``RunInstances``." -msgstr "" -"この API は、``RunInstances`` などの特定の EC2 リクエストに対して、高負荷にな" -"り、応答が遅くなることに気がつきました。" - -msgid "" -"I reviewed the ``nova`` database and saw the instance's entry in the ``nova." -"instances`` table. The image that the instance was using matched what virsh " -"was reporting, so no inconsistency there." -msgstr "" -"私は ``nova`` データベースを見直し、 ``nova.instances`` テーブル中の当該イン" -"スタンスのレコードを見た。インスタンスが使用しているイメージは virsh が報告し" -"たものと一致した。よって、ここでは矛盾は発見されなかった。" - -msgid "" -"I was on-site in Kelowna, British Columbia, Canada setting up a new " -"OpenStack cloud. 
The deployment was fully automated: Cobbler deployed the OS " -"on the bare metal, bootstrapped it, and Puppet took over from there. I had " -"run the deployment scenario so many times in practice and took for granted " -"that everything was working." -msgstr "" -"私は、新しい OpenStack クラウドのセットアップをするため、カナダのブリティッ" -"シュコロンビア州ケロウナの現地にいた。デプロイ作業は完全に自動化されていた。" -"Cobbler が物理マシンに OS をデプロイし、それを起動し、その後は Puppet が引き" -"継いだ。私は練習で幾度もデプロイシナリオを実行してきたし、もちろん全て正常で" -"あった。" - -msgid "" -"I was totally confused at this point, so I texted our network admin to see " -"if he was available to help. He logged in to both switches and immediately " -"saw the problem: the switches detected spanning tree packets coming from the " -"two compute nodes and immediately shut the ports down to prevent spanning " -"tree loops:" -msgstr "" -"私はこの時点で完全に混乱した。よって、私はネットワーク管理者に対して、私を助" -"けられるか聞いてみるためメールした。彼は両方のスイッチにログインし、すぐに問" -"題を発見した。そのスイッチは2つのコンピュートノードから来たスパニングツリー" -"パケットを検出し、スパニングツリーループを回避するため、即時にそれらのポート" -"をダウンさせたのだ。" - -msgid "ID" -msgstr "ID" - -msgid "Icehouse" -msgstr "Icehouse" - -msgid "Identity" -msgstr "認証" - -msgid "" -"Identity service - Clear any expired tokens before synchronizing the " -"database." -msgstr "" -"Identity サービス - データベースの同期前に期限切れのトークンを削除します。" - -msgid "" -"If :command:`openstack server show` does not sufficiently explain the " -"failure, searching for the instance UUID in the ``nova-compute.log`` on the " -"compute node it was scheduled on or the ``nova-scheduler.log`` on your " -"scheduler hosts is a good place to start looking for lower-level problems." -msgstr "" -":command:`openstack server show` が十分な失敗の理由が表示されていない場合、そ" -"のインスタンスがスケジューリングされたコンピュートノードの ``nova-compute." -"log`` やスケジューラーホストの ``nova-scheduler.log`` を、インスタンスの " -"UUID で検索するのが、より低レベルの問題を調査する良い出発点となります。" - -msgid "" -"If a compute node fails and won't be fixed for a few hours (or at all), you " -"can relaunch all instances that are hosted on the failed node if you use " -"shared storage for ``/var/lib/nova/instances``." -msgstr "" -"コンピュートノードが故障し、2〜3時間もしくはそれ以上たっても復旧できないと見" -"込まれる場合、 ``/var/lib/nova/instances`` に共有ストレージを使用していれば、" -"故障したノードで動作していたインスタンスをすべて再スタートすることができま" -"す。" - -msgid "" -"If a hard drive fails in an Object Storage node, replacing it is relatively " -"easy. This assumes that your Object Storage environment is configured " -"correctly, where the data that is stored on the failed drive is also " -"replicated to other drives in the Object Storage environment." -msgstr "" -"Object Storage ノードのハードディスクが故障した場合、その交換は比較的簡単で" -"す。Object Storage 環境が正しく設定され、故障したディスクに保存されているデー" -"タが Object Storage 環境内の他のディスクにも複製されていることを前提にしてい" -"ます。" - -msgid "" -"If a storage node requires a reboot, simply reboot it. Requests for data " -"hosted on that node are redirected to other copies while the server is " -"rebooting." -msgstr "" -"ストレージノードを再起動する必要がある場合、単に再起動します。そのノードに配" -"置されているデータへのリクエストは、そのサーバーが再起動している間、別のコ" -"ピーにリダイレクトされます。" - -msgid "" -"If a user tries to create a volume and the volume immediately goes into an " -"error state, the best way to troubleshoot is to grep the cinder log files " -"for the volume's UUID. 
First try the log files on the cloud controller, and " -"then try the storage node where the volume was attempted to be created:" -msgstr "" -"ユーザーがボリュームを作成しようとし、すぐにエラー状態になれば、トラブル解決" -"のために最適な方法は cinder ログファイルをボリュームの UUID で grep すること" -"です。まずクラウドコントローラーにあるログファイルを調べます。次に、ボリュー" -"ムを作成しようとしたストレージノードのログファイルを調べます:" - -msgid "" -"If an instance does not boot, meaning ``virsh list`` never shows the " -"instance as even attempting to boot, do the following on the compute node:" -msgstr "" -"インスタンスがブートしなければ、つまりブートしようとしても ``virsh list`` が" -"インスタンスを表示しなければ、コンピュートノードにおいて以下のとおり実行しま" -"す。" - -msgid "" -"If an instance fails to start and immediately moves to an error state, there " -"are a few different ways to track down what has gone wrong. Some of these " -"can be done with normal user access, while others require access to your log " -"server or compute nodes." -msgstr "" -"インスタンスの開始に失敗し、すぐにエラー状態になるならば、何が問題なのかを追" -"跡するために、いくつかの異なる方法があります。いくつかの方法は通常のユーザー" -"アクセスで実行でき、他の方法ではログサーバーやコンピュートノードへのアクセス" -"が必要です。" - -msgid "" -"If any of these links are missing or incorrect, it suggests a configuration " -"error. Bridges can be added with ``ovs-vsctl add-br``, and ports can be " -"added to bridges with ``ovs-vsctl add-port``. While running these by hand " -"can be useful debugging, it is imperative that manual changes that you " -"intend to keep be reflected back into your configuration files." -msgstr "" -"これらのリンクのどれかが存在しない、または誤っている場合、設定エラーを暗示し" -"ています。ブリッジは ``ovs-vsctl add-br`` で追加できます。ポートは ``ovs-" -"vsctl add-port`` でブリッジに追加できます。これらを手動で実行することはデバッ" -"グに有用ですが、維持することを意図した手動の変更が設定ファイルの中に反映され" -"なければいけません。" - -msgid "" -"If possible, we highly recommend that you dump your production database " -"tables and test the upgrade in your development environment using this data. " -"Several MySQL bugs have been uncovered during database migrations because of " -"slight table differences between a fresh installation and tables that " -"migrated from one version to another. This will have impact on large real " -"datasets, which you do not want to encounter during a production outage." -msgstr "" -"可能ならば、本番環境のデータベースのテーブルをダンプして、このデータを使用し" -"て開発環境においてアップグレードをテストすることを非常に強く推奨します。" -"MySQL バグのいくつかは、新規インストールと旧バージョンから移行したバージョン" -"のテーブルの間のわずかな違いによる、データベース移行中に取り扱われません。こ" -"れは大規模な実データセットにおいてのみ影響するでしょう。本番環境の停止中に遭" -"遇したくないでしょう。" - -msgid "" -"If restarting the dnsmasq process doesn't fix the issue, you might need to " -"use ``tcpdump`` to look at the packets to trace where the failure is. The " -"DNS server listens on UDP port 53. You should see the DNS request on the " -"bridge (such as, br100) of your compute node. Let's say you start listening " -"with ``tcpdump`` on the compute node:" -msgstr "" -"dnsmasq の再起動でも問題が解決しないときは、 ``tcpdump`` で問題がある場所のパ" -"ケットトレースを行う必要があるでしょう。 DNS サーバーは UDP ポート 53 番で" -"リッスンします。あなたのコンピュートノードのブリッジ (br100 など) 上で DNS リ" -"クエストをチェックしてください。コンピュートノード上にて、 ``tcpdump`` でリッ" -"スンを開始すると、" - -msgid "" -"If the affected instances also had attached volumes, first generate a list " -"of instance and volume UUIDs:" -msgstr "" -"影響するインスタンスもボリュームを接続していた場合、まずインスタンスとボ" -"リュームの UUID の一覧を生成します。" - -msgid "" -"If the error indicates that the problem is with another component, switch to " -"tailing that component's log file. For example, if nova cannot access " -"glance, look at the ``glance-api`` log:" -msgstr "" -"エラーから問題が他のコンポーネントにあることが分かる場合には、そのコンポーネ" -"ントのログファイルに表示を切り替えます。nova が glance にアクセスできなけれ" -"ば、 ``glance-api`` ログを確認します:" - -msgid "" -"If the instance fails to resolve the hostname, you have a DNS problem. 
For " -"example:" -msgstr "" -"もしインスタンスがホスト名の解決に失敗するのであれば、DNSに問題があります。例" -"えば、" - -msgid "" -"If the package manager prompts you to update configuration files, reject the " -"changes. The package manager appends a suffix to newer versions of " -"configuration files. Consider reviewing and adopting content from these " -"files." -msgstr "" -"パッケージマネージャーがさまざまな設定ファイルの更新をプロンプトで確認する場" -"合、変更を拒否します。パッケージマネージャーは、新しいバージョンの設定ファイ" -"ルにサフィックスを追加します。これらのファイルの内容を確認して適用することを" -"検討してください。" - -msgid "" -"If the problem does not seem to be related to dnsmasq itself, at this point " -"use ``tcpdump`` on the interfaces to determine where the packets are getting " -"lost." -msgstr "" -"もし問題が dnsmasq と関係しないようであれば、 ``tcpdump`` を使ってパケットロ" -"スがないか確認してください。" - -msgid "" -"If there are no errors, restart the RabbitMQ service on the next controller " -"node." -msgstr "" -"エラーがなければ、次のコントローラーノードで RabbitMQ サービスを再起動しま" -"す。" - -msgid "" -"If there is not enough information in the existing logs, you may need to add " -"your own custom logging statements to the ``nova-*`` services." -msgstr "" -"十分な情報が既存のログにない場合、独自のロギング宣言を ``nova-*`` サービスに" -"追加する必要があるかもしれません。" - -msgid "" -"If there's a suspicious-looking dnsmasq log message, take a look at the " -"command-line arguments to the dnsmasq processes to see if they look correct:" -msgstr "" -"もしdnsmasqのログメッセージで疑わしいものがあれば、コマンドラインにてdnsmasq" -"が正しく動いているか確認してください。" - -msgid "If you access or view the user's content and data, get approval first!" -msgstr "ユーザーのコンテンツやデータを参照したい場合、まず許可を得ましょう。" - -msgid "" -"If you are able to use :term:`SSH ` to log into an " -"instance, but it takes a very long time (on the order of a minute) to get a " -"prompt, then you might have a DNS issue. The reason a DNS issue can cause " -"this problem is that the SSH server does a reverse DNS lookup on the IP " -"address that you are connecting from. If DNS lookup isn't working on your " -"instances, then you must wait for the DNS reverse lookup timeout to occur " -"for the SSH login process to complete." -msgstr "" -"あなたが :term:`SSH ` を使用してインスタンスにログインで" -"きるけれども、プロンプトが表示されるまで長い時間 (約1分) を要する場合、DNS に" -"問題があるかもしれません。SSH サーバーが接続元 IP アドレスの DNS 逆引きするこ" -"と、それがこの問題の原因です。もしあなたのインスタンスで DNS が正しく引けない" -"場合、SSH のログインプロセスが完了するには、DNS の逆引きがタイムアウトするま" -"で待たなければいけません。" - -msgid "" -"If you are encountering any sort of networking difficulty, one good initial " -"troubleshooting step is to make sure that your interfaces are up. For " -"example:" -msgstr "" -"もしあなたがネットワークの問題に直面した場合、まず最初にするとよいのは、イン" -"ターフェイスが UP になっているかを確認することです。例えば、" - -msgid "" -"If you are logged in to an instance and ping an external host, for example, " -"Google, the ping packet takes the route shown in :ref:`figure_traffic_route`." -msgstr "" -"インスタンスにログインして、外部ホスト (例えば Google) に ping する場合、" -"ping パケットは :ref:`figure_traffic_route` に示されたルートを通ります。" - -msgid "" -"If you are using cinder, run the following command to see a similar listing:" -msgstr "" -"cinder を使用している場合は、次のコマンドを実行して同様の一覧を表示します。" - -msgid "" -"If you can't ping the IP address of the compute node, the problem is between " -"the instance and the compute node. This includes the bridge connecting the " -"compute node's main NIC with the vnet NIC of the instance." -msgstr "" -"もしコンピュートノードのIPアドレスにpingできないのであれば、問題はインスタン" -"スとコンピュートノード間にあります。これはコンピュートノードの物理NICとインス" -"タンス vnet NIC間のブリッジ接続を含みます。" - -msgid "" -"If you can't, try pinging the IP address of the compute node where the " -"instance is hosted. 
If you can ping this IP, then the problem is somewhere " -"between the compute node and that compute node's gateway." -msgstr "" -"もしそれができないのであれば、インスタンスがホストされているコンピュートノー" -"ドのIPアドレスへpingを試行してください。もしそのIPにpingできるのであれば、そ" -"のコンピュートノードと、ゲートウェイ間のどこかに問題があります。" - -msgid "" -"If you cannot have any data loss at all, you should also focus on a highly " -"available deployment. The `OpenStack High Availability Guide `_ offers suggestions for elimination of a " -"single point of failure that could cause system downtime. While it is not a " -"completely prescriptive document, it offers methods and techniques for " -"avoiding downtime and data loss." -msgstr "" -"すべてのデータをまったく失いたくない場合、高可用性を持たせた導入に注力すべき" -"です。 `OpenStack High Availability Guide `_ は、システム停止につながる可能性がある、単一障害点の削減" -"に向けた提案があります。完全に規定されたドキュメントではありませんが、停止時" -"間やデータ損失を避けるための方法や技術を提供しています。" - -msgid "If you cannot launch an instance, continue to troubleshoot the issue." -msgstr "" -"インスタンスを起動できない場合、問題のトラブルシューティングを続けます。" - -msgid "" -"If you do not follow last three steps, OpenStack Compute cannot manage the " -"instance any longer. It fails to respond to any command issued by OpenStack " -"Compute, and it is marked as shut down." -msgstr "" -"最後の手順 3 つを省略すると、OpenStack Compute がインスタンスを管理できなくな" -"ります。OpenStack Compute により発行されるすべてのコマンドに対する応答が失敗" -"し、シャットダウンしているように見えます。" - -msgid "" -"If you do not need a share any more, you can delete it using :command:" -"`manila delete share_name_or_ID` command like:" -msgstr "" -"共有が必要なくなった場合、:command:`manila delete share_name_or_ID` のように" -"コマンドを使用して削除できます。" - -msgid "" -"If you do not see the ``DHCPDISCOVER``, a problem exists with the packet " -"getting from the instance to the machine running dnsmasq. If you see all of " -"the preceding output and your instances are still not able to obtain IP " -"addresses, then the packet is able to get from the instance to the host " -"running dnsmasq, but it is not able to make the return trip." -msgstr "" -"もし ``DHCPDISCOVER`` が見つからなければ、 dnsmasq が動いているマシンがインス" -"タンスからパケットを受け取れない何らかの問題があります。もし上記の出力が全て" -"確認でき、かついまだに IP アドレスを取得できないのであれば、パケットはインス" -"タンスから dnsmasq 稼働マシンに到達していますが、その復路に問題があります。" - -msgid "" -"If you find a bug and can't fix it or aren't sure it's really a doc bug, log " -"a bug at `OpenStack Manuals `_. Tag the bug under Extra options with the ``ops-guide`` tag to " -"indicate that the bug is in this guide. You can assign the bug to yourself " -"if you know how to fix it. Also, a member of the OpenStack doc-core team can " -"triage the doc bug." -msgstr "" -"バグを見つけたが、どのように直せばよいか分からない場合や本当にドキュメントの" -"バグか自信が持てない場合は、 `OpenStack Manuals `_ にバグを登録して、バグの Extra オプションで ``ops-" -"guide`` タグを付けて下さい。 ``ops-guide`` タグは、そのバグがこのガイドに関す" -"るものであることを示します。どのように直せばよいか分かる場合には、そのバグの" -"担当者を自分に割り当てることもできます。また、OpenStack doc-core チームのメン" -"バーがドキュメントバグを分類することもできます。" - -msgid "" -"If you find that you have reached or are reaching the capacity limit of your " -"computing resources, you should plan to add additional compute nodes. Adding " -"more nodes is quite easy. The process for adding compute nodes is the same " -"as when the initial compute nodes were deployed to your cloud: use an " -"automated deployment system to bootstrap the bare-metal server with the " -"operating system and then have a configuration-management system install and " -"configure OpenStack Compute. Once the Compute service has been installed and " -"configured in the same way as the other compute nodes, it automatically " -"attaches itself to the cloud. 
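-# A quick sketch (not part of the original guide) of watching for the
-# ``DHCPDISCOVER`` packets described earlier; the bridge name br100 follows
-# the nova-network examples used elsewhere in this document:
-#
-#   tcpdump -n -i br100 udp port 67 or udp port 68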
The cloud controller notices the new node(s) " -"and begins scheduling instances to launch there." -msgstr "" -"コンピューティングリソースのキャパシティ限界に達した、または達しそうとわかれ" -"ば、さらなるコンピュートノードの追加を計画すべきです。さらなるコンピュート" -"ノードを追加することは簡単です。ノードを追加する手順は、最初にコンピュート" -"ノードをクラウドに導入したときと同じです。自動配備システムを使ってベアメタル" -"サーバーにオペレーティングシステムのインストールと起動を行い、次に構成管理シ" -"ステムにより OpenStack Compute サービスのインストールと設定を行います。他のコ" -"ンピュートノードと同じ方法で Compute サービスのインストールと設定が終わると、" -"自動的にクラウドに接続されます。クラウドコントローラーが新しいノードを検知" -"し、そこにインスタンスを起動するようスケジュールし始めます。" - -msgid "" -"If you have previously prepared block storage with a bootable file system " -"image, it is even possible to boot from persistent block storage. The " -"following command boots an image from the specified volume. It is similar to " -"the previous command, but the image is omitted and the volume is now " -"attached as ``/dev/vda``:" -msgstr "" -"ブート可能なファイルシステムイメージでブロックストレージを事前に準備している" -"と、永続ブロックストレージからブートすることもできます。以下のコマンドは、指" -"定したボリュームからイメージを起動します。前のコマンドに似ていますが、イメー" -"ジが省略され、ボリュームが ``/dev/vda`` として接続されます。" - -msgid "" -"If you modify the configuration, it reverts the next time you restart ``nova-" -"network`` or ``neutron-server``. You must use OpenStack to manage iptables." -msgstr "" -"もし iptables の構成を変更した場合、次の ``nova-network`` や ``neutron-" -"server`` の再起動時に前の状態に戻ります。 iptables の管理には OpenStack を" -"使ってください。" - -msgid "" -"If you need to reboot a compute node due to planned maintenance, such as a " -"software or hardware upgrade, perform the following steps:" -msgstr "" -"ソフトウェアやハードウェアのアップグレードなど、計画されたメンテナンスのため" -"に、コンピュートノードを再起動する必要があれば、以下の手順を実行します。" - -msgid "" -"If you need to shut down a storage node for an extended period of time (one " -"or more days), consider removing the node from the storage ring. For example:" -msgstr "" -"ストレージノードを少し長い間 ( 1 日以上) シャットダウンする必要があれば、ノー" -"ドをストレージリングから削除することを検討します。例:" - -msgid "If you only want to backup a single database, you can instead run:" -msgstr "" -"もし、単一のデータベースのみバックアップする場合は次のように実行します。" - -msgid "" -"If you receive alerts for RabbitMQ, take the following steps to troubleshoot " -"and resolve the issue:" -msgstr "" -"RabbitMQ のアラートを受けとった場合、以下の手順を実行して、問題を調査および解" -"決します。" - -msgid "" -"If you removed the release repositories, you must first reinstall them and " -"run the :command:`apt-get update` command." -msgstr "" -"あるリリースのリポジトリーを削除した場合、それらを再インストールして、 :" -"command:`apt-get update` を実行する必要があります。" - -msgid "" -"If you run FlatDHCPManager, one bridge is on the compute node. If you run " -"VlanManager, one bridge exists for each VLAN." -msgstr "" -"もしFlatDHCPManagerを使っているのであれば、ブリッジはコンピュートノード上に一" -"つです。VlanManagerであれば、VLANごとにブリッジが存在します。" - -msgid "If you still cannot find the lost IP address, repeat these steps again." -msgstr "" -"失われた IP アドレスがまだ見つからない場合、これらの手順をまた繰り返します。" - -msgid "" -"If you support the EC2 API on your cloud, you should also install the " -"euca2ools package or some other EC2 API tool so that you can get the same " -"view your users have. Using EC2 API-based tools is mostly out of the scope " -"of this guide, though we discuss getting credentials for use with it." 
-msgstr "" -"クラウド上で EC2 API をサポートする場合には、ユーザーと同じビューを表示できる" -"ように、euca2ools パッケージまたはその他の EC2 API ツールもインストールする必" -"要があります。EC2 API ベースのツールの使用に関する内容の大半は本ガイドの対象" -"範囲外となりますが、このツールで使用する認証情報の取得方法についての説明は記" -"載しています。" - -msgid "" -"If you use a configuration-management system, such as Puppet, that ensures " -"the ``nova-compute`` service is always running, you can temporarily move the " -"``init`` files:" -msgstr "" -"Puppet などの構成管理システムを使って、 ``nova-compute`` サービスが確実に実行" -"されているようにしている場合、 ``init`` ファイルを一時的に移動します。" - -msgid "" -"If you use an external storage plug-in or shared file system with your " -"cloud, you can test whether it works by creating a second share or endpoint. " -"This allows you to test the system before entrusting the new version on to " -"your storage." -msgstr "" -"クラウドに外部ストレージプラグインや共有ファイルシステムを使用している場合、" -"2 つ目の共有やエンドポイントを作成することにより、正常に動作するかどうかをテ" -"ストできます。これにより、ストレージに新しいバージョンを信頼させる前に、シス" -"テムをテストできるようになります。" - -msgid "" -"If you use libvirt version ``1.2.2``, you may experience intermittent " -"problems with live snapshot creation." -msgstr "" -"libvirt バージョン ``1.2.2`` を使用している場合、ライブスナップショットの作成" -"において、断続的な問題を経験するかもしれません。" - -msgid "" -"If you want to back up the root file system, you can't simply run the " -"preceding command because it will freeze the prompt. Instead, run the " -"following one-liner, as root, inside the instance:" -msgstr "" -"ルートファイルシステムをバックアップしたければ、プロンプトがフリーズしてしま" -"すので、上のコマンドを単純に実行できません。代わりに、インスタンスの中で " -"root として以下の 1 行を実行します。" - -msgid "" -"If you're running the Cirros image, it doesn't have the \"host\" program " -"installed, in which case you can use ping to try to access a machine by " -"hostname to see whether it resolves. If DNS is working, the first line of " -"ping would be:" -msgstr "" -"もしあなたがCirrosイメージを使っているのであれば、\"host\"プログラムはインス" -"トールされていません。その場合はpingを使い、ホスト名が解決できているか判断で" -"きます。もしDNSが動いていれば、ping結果の先頭行はこうなるはずです。" - -msgid "" -"If your OpenStack Block Storage nodes are separate from your compute nodes, " -"the same procedure still applies because the same queuing and polling system " -"is used in both services." -msgstr "" -"OpenStack ブロックストレージノードがコンピュートノードから分離いる場合、同じ" -"キュー管理とポーリングのシステムが両方のサービスで使用されるので、同じ手順が" -"適用できます。" - -msgid "If your cloud is not using a shared storage, run:" -msgstr "共有ストレージを使用していない場合、次のように実行します。" - -msgid "If your cloud is using a shared storage:" -msgstr "クラウドが共有ストレージを使用している場合:" - -msgid "" -"If your environment is using Neutron, you can configure security groups " -"settings using the :command:`openstack` command. Get a list of security " -"groups for the project you are acting in, by using following command:" -msgstr "" -"お使いの環境で Neutron を使用している場合、:command:`openstack` コマンドを使" -"用して、セキュリティグループを設定できます。以下のコマンドを使用して、作業し" -"ているプロジェクトのセキュリティグループの一覧を取得します。" - -msgid "" -"If your instance failed to obtain an IP through DHCP, some messages should " -"appear in the console. For example, for the Cirros image, you see output " -"that looks like the following:" -msgstr "" -"もしインスタンスがDHCPからのIP取得に失敗していれば、いくつかのメッセージがコ" -"ンソールで確認できるはずです。例えば、Cirrosイメージでは、以下のような出力に" -"なります。" - -msgid "" -"If your instances are still not able to obtain IP addresses, the next thing " -"to check is whether dnsmasq is seeing the DHCP requests from the instance. " -"On the machine that is running the dnsmasq process, which is the compute " -"host if running in multi-host mode, look at ``/var/log/syslog`` to see the " -"dnsmasq output. 
If dnsmasq is seeing the request properly and handing out an "
-"IP, the output looks like this:"
-msgstr ""
-"もしまだインスタンスが IP アドレスを取得できない場合、次は dnsmasq がインスタ"
-"ンスからのDHCPリクエストを見えているか確認します。 dnsmasq プロセスが動いてい"
-"るマシンで、``/var/log/syslog`` を参照し、 dnsmasq の出力を確認します。なお、"
-"マルチホストモードで動作している場合は、dnsmasqプロセスはコンピュートノードで"
-"動作します。もし dnsmasq がリクエストを正しく受け取り、処理していれば、以下の"
-"ような出力になります。"
-
-msgid ""
-"If your new object storage node has a different number of disks than the "
-"original nodes have, the command to add the new node is different from the "
-"original commands. These parameters vary from environment to environment."
-msgstr ""
-"新しいオブジェクトストレージノードのディスク数が元々のノードのディスク数と異"
-"なる場合には、新しいノードを追加するコマンドが元々のコマンドと異なります。こ"
-"れらのパラメーターは環境により異なります。"
-
-msgid ""
-"If your operating system doesn't have a version of ``fsfreeze`` available, "
-"you can use ``xfs_freeze`` instead, which is available on Ubuntu in the "
-"xfsprogs package. Despite the \"xfs\" in the name, xfs_freeze also works on "
-"ext3 and ext4 if you are using a Linux kernel version 2.6.29 or greater, "
-"since it works at the virtual file system (VFS) level starting at 2.6.29. "
-"The xfs_freeze version supports the same command-line arguments as "
-"``fsfreeze``."
-msgstr ""
-"お使いのオペレーティングシステムに利用可能なバージョンの ``fsfreeze`` がなけ"
-"れば、代わりに ``xfs_freeze`` を使用できます。これは Ubuntu の xfsprogs パッ"
-"ケージにおいて利用可能です。\"xfs\" という名前にもかかわらず、xfs_freeze は "
-"Linux カーネル 2.6.29 またはそれ以降を使用していれば ext3 や ext4 においても"
-"動作します。それは 2.6.29 において開始された仮想ファイルシステム (VFS) レベル"
-"で動作するためです。この xfs_freeze のバージョンは ``fsfreeze`` と同じ名前の"
-"コマンドライン引数をサポートします。"
-
-msgid ""
-"If your preference is to build your own OpenStack expertise internally, a "
-"good way to kick-start that might be to attend or arrange a training "
-"session. The OpenStack Foundation has a `Training Marketplace `_ where you can look for nearby events. "
-"Also, the OpenStack community is `working to produce `_ open source training materials."
-msgstr ""
-"自分たち内部の OpenStack 専門性を高めることを優先する場合、それを始める良い方"
-"法は、トレーニングに参加または手配することかもしれません。OpenStack "
-"Foundation は、お近くのイベントを見つけられる `Training Marketplace `_ を開いています。また、OpenStack コ"
-"ミュニティーは、オープンソースのトレーニング教材を `作成しています `_ 。"
-
-msgid ""
-"Image API v2 supports multiline properties, so this option is not required "
-"for v2 but it can still be used."
-msgstr ""
-"Image API v2 は複数行のイメージプロパティーをサポートします。そのため、このオ"
-"プションは v2 で必要がありませんが、まだ使用できます。"
-
-msgid "Image Catalog and Delivery"
-msgstr "イメージカタログと配布"
-
-msgid "Image Catalog and Delivery services"
-msgstr "イメージカタログとイメージ配信のサービス"
-
-msgid "Image service"
-msgstr "Image サービス"
-
-msgid "Image usage"
-msgstr "イメージ使用量"
-
-msgid "Images"
-msgstr "イメージ"
-
-msgid ""
-"Imagine a scenario where you have public access to one of your containers, "
-"but what you really want is to restrict access to that to a set of IPs based "
-"on a whitelist. In this example, we'll create a piece of middleware for "
-"swift that allows access to a container from only a set of IP addresses, as "
-"determined by the container's metadata items. Only those IP addresses that "
-"you explicitly whitelist using the container's metadata will be able to "
-"access the container."
-msgstr ""
-"お使いのコンテナーの 1 つにパブリックにアクセスできるシナリオを想像してくださ"
-"い。しかし、本当にやりたいことは、ホワイトリストに基づいてアクセスできる IP "
-"を制限することです。この例では、コンテナーのメタデータ項目により決められるよ"
-"う、ある IP アドレス群だけからコンテナーにアクセスを許可する、swift 向けのミ"
-"ドルウェア部品を作成します。コンテナーのメタデータを使用して、明示的にホワイ"
-"トリストに入っている IP アドレスのみが、コンテナーにアクセスできます。"
-
-msgid ""
-"Immediately after create, the security group has only an allow egress rule. 
" -"To make it do what we want, we need to add some rules:" -msgstr "" -"作成後すぐでは、セキュリティグループは送信ルールのみを許可します。やりたいこ" -"とを行うために、いくつかのルールを追加する必要があります。" - -msgid "Implementing Periodic Tasks" -msgstr "定期タスクの実装" - -msgid "" -"In OpenStack user interfaces and documentation, a group of users is referred " -"to as a :term:`project` or :term:`tenant`. These terms are interchangeable." -msgstr "" -"OpenStack ユーザーインターフェースとドキュメントでは、ユーザーのグループは :" -"term:`プロジェクト ` または :term:`テナント ` と呼ばれます。" -"これらの用語は同義です。" - -msgid "" -"In ``nova.conf``, ``vlan_interface`` specifies what interface OpenStack " -"should attach all VLANs to. The correct setting should have been:" -msgstr "" -"``nova.conf`` 中で、 ``vlan_interface`` は OpenStack が全ての VLAN をアタッチ" -"すべきインターフェースがどれかを指定する。正しい設定はこうだった。" - -msgid "" -"In a multi-tenant cloud environment, users sometimes want to share their " -"personal images or snapshots with other projects. This can be done on the " -"command line with the ``glance`` tool by the owner of the image." -msgstr "" -"マルチテナントクラウド環境において、ユーザーはときどき、自分のイメージやス" -"ナップショットを他のプロジェクトと共有したいことがあります。これは、イメージ" -"の所有者がコマンドラインから ``glance`` ツールを使用することによりできます。" - -msgid "" -"In addition, consider remote power control as well. While IPMI usually " -"controls the server's power state, having remote access to the PDU that the " -"server is plugged into can really be useful for situations when everything " -"seems wedged." -msgstr "" -"さらに、リモート電源管理装置も検討してください。通常、IPMI はサーバーの電源状" -"態を制御しますが、サーバーが接続されている PDU にリモートアクセスできれば、す" -"べてが手詰まりに見えるような状況で非常に役に立ちます。" - -msgid "" -"In addition, database migrations are now tested with the Turbo Hipster tool. " -"This tool tests database migration performance on copies of real-world user " -"databases." -msgstr "" -"さらに、データベースの移行が Turbo Hipster ツールを用いてテストされます。この" -"ツールは、実世界のユーザーのデータベースのコピーにおいて、データベースの移行" -"パフォーマンスをテストします。" - -msgid "" -"In addition, many sites write custom tools for local needs to enforce local " -"policies and provide levels of self-service to users that are not currently " -"available with packaged tools." -msgstr "" -"さらに、多くのサイトは個別の要求を満たすために独自ツールを作成し、サイト固有" -"のポリシーを適用し、パッケージツールでは実現できないレベルのセルフサービスを" -"ユーザーに提供しています。" - -msgid "" -"In an OpenStack cloud, the dnsmasq process acts as the DNS server for the " -"instances in addition to acting as the DHCP server. A misbehaving dnsmasq " -"process may be the source of DNS-related issues inside the instance. As " -"mentioned in the previous section, the simplest way to rule out a " -"misbehaving dnsmasq process is to kill all the dnsmasq processes on the " -"machine and restart ``nova-network``. However, be aware that this command " -"affects everyone running instances on this node, including tenants that have " -"not seen the issue. As a last resort, as root:" -msgstr "" -"OpenStack クラウドにおいて、 dnsmasq プロセスは DHCP サーバに加えてDNS サー" -"バーの役割を担っています。 dnsmasq の不具合は、インスタンスにおける DNS 関連" -"問題の原因となりえます。前節で述べたように、 dnsmasq の不具合を解決するもっと" -"もシンプルな方法は、マシン上のすべてのdnsmasq プロセスをkillし、 ``nova-" -"network`` を再起動することです。しかしながら、このコマンドは該当ノード上で動" -"いているすべてのインスタンス、特に問題がないテナントにも影響します。最終手段" -"として、rootで以下を実行します。" - -msgid "" -"In most cases, the error is the result of something in libvirt's XML file " -"(``/etc/libvirt/qemu/instance-xxxxxxxx.xml``) that no longer exists. 
You can " -"enforce re-creation of the XML file as well as rebooting the instance by " -"running the following command:" -msgstr "" -"多くの場合、エラーが libvirt の XML ファイル (``/etc/libvirt/qemu/instance-" -"xxxxxxxx.xml``) の何かになります。以下のコマンドを実行することにより、インス" -"タンスを再起動するのと同時に、強制的に XML ファイルを再作成できます:" - -msgid "" -"In our experience, most operators don't sit right next to the servers " -"running the cloud, and many don't necessarily enjoy visiting the data " -"center. OpenStack should be entirely remotely configurable, but sometimes " -"not everything goes according to plan." -msgstr "" -"経験上、多くのオペレーターはクラウドを動かすサーバのそばにいるわけではありま" -"せんし、多くの人が必ずしも楽しんでデータセンターに訪問してるわけではありませ" -"ん。OpenStackは、完全にリモート設定できるはずですが、計画通りにいかないことも" -"あります。" - -msgid "" -"In releases prior to Mitaka, select the equivalent :guilabel:`Terminate " -"instance` action." -msgstr "" -"Mitaka 以前のリリースでは、同等の :guilabel:`インスタンスの終了` 操作を選択し" -"ます。" - -msgid "" -"In some cases, some operations should be restricted to administrators only. " -"Therefore, as a further example, let us consider how this sample policy file " -"could be modified in a scenario where we enable users to create their own " -"flavors:" -msgstr "" -"いくつかの場合では、いくつかの操作を管理者のみに制限すべきです。そこで、次の" -"例では、ユーザーが自分のフレーバーを作成できるようにするシナリオの場合に、こ" -"のサンプルのポリシーファイルをどのように変更すればよいかを示します。" - -msgid "" -"In some scenarios, instances are running but are inaccessible through SSH " -"and do not respond to any command. The VNC console could be displaying a " -"boot failure or kernel panic error messages. This could be an indication of " -"file system corruption on the VM itself. If you need to recover files or " -"inspect the content of the instance, qemu-nbd can be used to mount the disk." -msgstr "" -"いくつかのシナリオでは、インスタンスが実行中であるにも関わらず、SSH 経由でア" -"クセスできず、あらゆるコマンドに反応がありません。VNC コンソールがブート失敗" -"やカーネルパニックのエラーメッセージを表示している可能性があります。これは仮" -"想マシン自身においてファイルシステム破損の意味する可能性があります。ファイル" -"を復旧したりインスタンスの中身を調査したりする必要があれば、qemu-nbd を使って" -"ディスクをマウントできます。" - -msgid "" -"In the very common case where the underlying snapshot is done via LVM, the " -"filesystem freeze is automatically handled by LVM." -msgstr "" -"ベースとするスナップショットが LVM 経由で取得されている非常に一般的な場合、" -"ファイルシステムのフリーズは、LVM により自動的に処理されます。" - -msgid "" -"In this case, ``gre-1`` is a tunnel from IP 10.10.128.21, which should match " -"a local interface on this node, to IP 10.10.128.16 on the remote side." -msgstr "" -"この場合、 ``gre-1`` が IP 10.10.128.21 からリモートの IP 10.10.128.16 へのト" -"ンネルです。これは、このノードのローカルインターフェースと一致します。" - -msgid "" -"In this case, looking at the ``fault`` message shows ``NoValidHost``, " -"indicating that the scheduler was unable to match the instance requirements." -msgstr "" -"この場合、``fault`` メッセージに ``NoValidHost`` が表示されています。 " -"``NoValidHost`` はスケジューラーがインスタンスの要件を満たせなかったことを意" -"味します。" - -msgid "" -"In this error, a nova service has failed to connect to the RabbitMQ server " -"because it got a connection refused error." -msgstr "" -"このエラーでは、novaサービスがRabbitMQへの接続に失敗していました。接続が拒否" -"されたというエラーが出力されています。" - -msgid "" -"In this example, ``cinder-volumes`` failed to start and has provided a stack " -"trace, since its volume back end has been unable to set up the storage volume" -"—probably because the LVM volume that is expected from the configuration " -"does not exist." 
-msgstr "" -"この例では、ボリュームのバックエンドがストレージボリュームをセットアップがで" -"きなかったため、``cinder-volumes`` が起動に失敗し、スタックトレースを出力して" -"います。おそらく、設定ファイルで指定された LVM ボリュームが存在しないためと考" -"えられます。" - -msgid "In this example, these locations have the following IP addresses:" -msgstr "例では、この環境には以下のIPアドレスが存在します" - -msgid "" -"In this instance, having an out-of-band access into nodes running OpenStack " -"components is a boon. The IPMI protocol is the de facto standard here, and " -"acquiring hardware that supports it is highly recommended to achieve that " -"lights-out data center aim." -msgstr "" -"この場合、OpenStack が動くノードに対して外側からアクセスできるようにすること" -"が重要です。ここでは、IPMIプロトコルが事実上標準となっています。完全自動の" -"データセンタを実現するために、IPMIをサポートしたハードウェアを入手することを" -"強く推奨します。" - -msgid "Influencing the Roadmap" -msgstr "ロードマップへの影響" - -msgid "Information Available to You" -msgstr "利用できる情報" - -msgid "Injected file content bytes" -msgstr "注入ファイルのコンテンツ (バイト)" - -msgid "Injected file path bytes" -msgstr "注入ファイルのパス (バイト)" - -msgid "Injected files" -msgstr "注入ファイル" - -msgid "" -"Inside the book sprint room with us each day was our book sprint facilitator " -"Adam Hyde. Without his tireless support and encouragement, we would have " -"thought a book of this scope was impossible in five days. Adam has proven " -"the book sprint method effectively again and again. He creates both tools " -"and faith in collaborative authoring at `www.booksprints.net `_." -msgstr "" -"私たちは、ブックスプリント部屋の中で、ファシリテーターの Adam Hyde と毎日過" -"ごしました。彼の精力的なサポートと励ましがなければ、この範囲のドキュメントを " -"5 日間で作成できなかったでしょう。Adam は、ブックスプリントの手法が効果的であ" -"ることを何回も実証しました。彼は、`www.booksprints.net `_ にあるコラボレーションにおいて、ツールと信念の両方を作成" -"しました。" - -msgid "Inspecting API Calls" -msgstr "API コールの検査" - -msgid "Inspecting and Recovering Data from Failed Instances" -msgstr "故障したインスタンスからの検査とデータ復旧" - -msgid "Install OpenStack command-line clients" -msgstr "OpenStack コマンドラインクライアントのインストール" - -msgid "Install the ``keystone`` and ``swift`` clients on your local machine:" -msgstr "" -"ローカルマシンに ``keystone`` と ``swif`` クライアントをインストールします。" - -msgid "Installation Tutorials and Guides" -msgstr "インストールチュートリアル・ガイド" - -msgid "Instance \"ignores\" the response and re-sends the renewal request." -msgstr "インスタンスはそのレスポンスを「無視」して、更新リクエストを再送する。" - -msgid "Instance Boot Failures" -msgstr "インスタンスの起動失敗" - -msgid "" -"Instance begins sending a renewal request to ``255.255.255.255`` since it " -"hasn't heard back from the cloud controller." -msgstr "" -"インスタンスはクラウドコントローラーからのレスポンスを受信しなかったため、更" -"新リクエストを ``255.255.255.255`` に送信し始める。" - -msgid "Instance metadata" -msgstr "インスタンスメタデータ" - -msgid "Instance tries to renew IP." -msgstr "インスタンスはIPアドレスを更新しようとする。" - -msgid "Instance user data" -msgstr "インスタンスのユーザーデータ" - -msgid "Instances" -msgstr "インスタンス" - -msgid "" -"Instances are the running virtual machines within an OpenStack cloud. This " -"section deals with how to work with them and their underlying images, their " -"network properties, and how they are represented in the database." -msgstr "" -"インスタンスは OpenStack クラウドの中で実行中の仮想マシンです。このセクション" -"は、インスタンス、インスタンスが使用するイメージ、インスタンスのネットワーク" -"プロパティを扱うための方法について取り扱います。また、それらがデータベースで" -"どのように表現されているかについて取り扱います。" - -msgid "Instances in the Database" -msgstr "データベースにあるインスタンス" - -msgid "Intelligent Alerting" -msgstr "インテリジェントなアラート" - -msgid "" -"Intelligent alerting takes considerably more time to plan and implement than " -"the other alerts described in this chapter. 
A good outline to implement " -"intelligent alerting is:" -msgstr "" -"インテリジェントなアラートは、この章で述べられているの他のアラートよりも計" -"画、実装にかなり時間を要します。インテリジェントなアラートを実装する流れは次" -"のようになります。" - -msgid "Interacting with share networks." -msgstr "共有ネットワークに接続する" - -msgid "Internal network connectivity" -msgstr "内部ネットワーク接続性" - -msgid "Introduction to OpenStack" -msgstr "OpenStack の概要" - -msgid "Is_Public" -msgstr "Is_Public" - -msgid "" -"It is also possible to add and remove security groups when an instance is " -"running. Currently this is only available through the command-line tools. " -"Here is an example:" -msgstr "" -"インスタンスを実行中にセキュリティグループを追加および削除することもできま" -"す。現在、コマンドラインツールからのみ利用可能です。例:" - -msgid "" -"It is also possible to add project members and adjust the project quotas. " -"We'll discuss those actions later, but in practice, it can be quite " -"convenient to deal with all these operations at one time." -msgstr "" -"プロジェクトメンバーの追加やプロジェクトのクォータの調節も可能です。このよう" -"なアクションについては後ほど説明しますが、実際にこれらの操作を扱うと非常に便" -"利です。" - -msgid "" -"It is important to note that powering off an instance does not terminate it " -"in the OpenStack sense." -msgstr "" -"注意すべき大事な点は、インスタンスの電源オフは、OpenStack 的な意味でのインス" -"タンスの終了ではないということです。" - -msgid "It is possible to define other roles, but doing so is uncommon." -msgstr "他の役割を定義できますが、一般的にはそうしません。" - -msgid "" -"It is possible to watch packets on internal interfaces, but it does take a " -"little bit of networking gymnastics. First you need to create a dummy " -"network device that normal Linux tools can see. Then you need to add it to " -"the bridge containing the internal interface you want to snoop on. Finally, " -"you need to tell Open vSwitch to mirror all traffic to or from the internal " -"port onto this dummy port. After all this, you can then run :command:" -"`tcpdump` on the dummy interface and see the traffic on the internal port." -msgstr "" -"内部インターフェースにおいてパケットを監視することもできますが、少しネット" -"ワークを操作する必要があります。まず、通常の Linux ツールが参照できるダミー" -"ネットワークデバイスを作成する必要があります。次に、監視したい内部インター" -"フェースを含むブリッジにそれを追加する必要があります。最後に、内部ポートのす" -"べての通信をこのダミーポートにミラーするよう Open vSwitch に通知する必要があ" -"ります。これをすべて終えた後、ダミーインターフェースで :command:`tcpdump` を" -"実行して、内部ポートの通信を参照できます。" - -msgid "" -"It turns out the reason that this compute node locked up was a hardware " -"issue. We removed it from the DAIR cloud and called Dell to have it " -"serviced. Dell arrived and began working. Somehow or another (or a fat " -"finger), a different compute node was bumped and rebooted. Great." -msgstr "" -"コンピュートノードがロックアップした原因はハードウェアの問題だったことが判明" -"した。我々はそのハードウェアを DAIR クラウドから取り外し、修理するよう Dell " -"に依頼した。Dell が到着して作業を開始した。何とかかんとか(あるいはタイプミ" -"ス)で、異なるコンピュートノードを落としてしまい、再起動した。素晴らしい。" - -msgid "" -"It was funny to read the report. It was full of people who had some strange " -"network problem but didn't quite explain it in the same way." -msgstr "" -"レポートを読むのは楽しかった。同じ奇妙なネットワーク問題にあった人々であふれ" -"ていたが、全く同じ説明はなかった。" - -msgid "" -"It's also helpful to allocate a specific numeric range for custom and " -"private flavors. On UNIX-based systems, nonsystem accounts usually have a " -"UID starting at 500. A similar approach can be taken with custom flavors. " -"This helps you easily identify which flavors are custom, private, and public " -"for the entire cloud." -msgstr "" -"カスタムフレーバーとプライベートフレーバーに特別な数値範囲を割り当てることも" -"有用です。UNIX 系システムでは、システムアカウント以外は通常 500 から始まりま" -"す。同様の方法をカスタムフレーバーにも使用できます。これは、フレーバーがカス" -"タムフレーバー、プライベートフレーバー、パブリックフレーバーであるかをクラウ" -"ド全体で簡単に識別する役に立ちます。" - -msgid "" -"It's worth mentioning this directory in the context of failed compute nodes. 
" -"This directory contains the libvirt KVM file-based disk images for the " -"instances that are hosted on that compute node. If you are not running your " -"cloud in a shared storage environment, this directory is unique across all " -"compute nodes." -msgstr "" -"コンピュートノードの故障の話題に関連して、このディレクトリについては説明して" -"おく価値があるでしょう。このディレクトリには、コンピュートノードにホストされ" -"ているインスタンス用の libvirt KVM のファイル形式のディスクイメージが置かれま" -"す。共有ストレージ環境でクラウドを実行していなければ、このディレクトリはコン" -"ピュートノード全体で一つしかありません。" - -msgid "" -"Items to monitor for RabbitMQ include the number of items in each of the " -"queues and the processing time statistics for the server." -msgstr "" -"RabbitMQで監視すべき項目としては、各キューでのアイテムの数と、サーバーでの処" -"理時間の統計情報があります。" - -msgid "Jan 19, 2012" -msgstr "2012年1月19日" - -msgid "Jan 31, 2013" -msgstr "2013年1月31日" - -msgid "Joe Topjian" -msgstr "Joe Topjian" - -msgid "" -"Joe has designed and deployed several clouds at Cybera, a nonprofit where " -"they are building e-infrastructure to support entrepreneurs and local " -"researchers in Alberta, Canada. He also actively maintains and operates " -"these clouds as a systems architect, and his experiences have generated a " -"wealth of troubleshooting skills for cloud environments." -msgstr "" -"Joe は Cybera で複数のクラウドの設計と構築を行って来ました。 Cybera は、非営" -"利でカナダのアルバータ州の起業家や研究者を支援する電子情報インフラを構築して" -"います。また、システムアーキテクトとしてこれらのクラウドの維持・運用を活発に" -"行なっており、その経験からクラウド環境でのトラブルシューティングの豊富な知識" -"を持っています。" - -msgid "" -"Jon has been piloting an OpenStack cloud as a senior technical architect at " -"the MIT Computer Science and Artificial Intelligence Lab for his researchers " -"to have as much computing power as they need. He started contributing to " -"OpenStack documentation and reviewing the documentation so that he could " -"accelerate his learning." -msgstr "" -"Jon は MIT Computer Science and Artificial Intelligence Lab で上級技術アーキ" -"テクトとして OpenStack クラウドを運用し、研究者が必要なだけの計算能力を使える" -"ようにしています。 OpenStack の勉強を加速しようと思い、OpenStack ドキュメント" -"への貢献とドキュメントのレビューを始めました。" - -msgid "Jonathan Proulx" -msgstr "Jonathan Proulx" - -msgid "Jump to the VM specific chain." -msgstr "仮想マシン固有チェインへのジャンプ。" - -msgid "Jun 22, 2012" -msgstr "2012年6月22日" - -msgid "Jun 6, 2013" -msgstr "2013年6月6日" - -msgid "Jun 9, 2014" -msgstr "2014年6月9日" - -msgid "Juno" -msgstr "Juno" - -msgid "" -"Just as important as a backup policy is a recovery policy (or at least " -"recovery testing)." -msgstr "" -"バックアップポリシーと同じくらい大事なことは、リカバリーポリシーです (少なく" -"ともリカバリーのテストは必要です)。" - -msgid "" -"Just running ``sync`` is not enough to ensure that the file system is " -"consistent. We recommend that you use the ``fsfreeze`` tool, which halts new " -"access to the file system, and create a stable image on disk that is " -"suitable for snapshotting. The ``fsfreeze`` tool supports several file " -"systems, including ext3, ext4, and XFS. If your virtual machine instance is " -"running on Ubuntu, install the util-linux package to get ``fsfreeze``:" -msgstr "" -"ファイルシステムが整合性を持つことを保証するためには、単に ``sync`` を実行す" -"るだけでは不十分です。 ``fsfreeze`` ツールを使用することを推奨します。これ" -"は、ファイルシステムに対する新規アクセスを停止し、スナップショットに適した安" -"定したイメージをディスクに作成します。 ``fsfreeze`` は ext3, ext4 および XFS " -"を含むいくつかのファイルシステムをサポートします。仮想マシンのインスタンスが " -"Ubuntu において実行されていれば、 ``fsfreeze`` を取得するために util-linux " -"パッケージをインストールします:" - -msgid "Kerberos" -msgstr "Kerberos" - -msgid "Key pairs" -msgstr "キーペア" - -msgid "Keystone" -msgstr "Keystone" - -msgid "" -"Keystone is handled a little differently. To modify the logging level, edit " -"the ``/etc/keystone/logging.conf`` file and look at the ``logger_root`` and " -"``handler_file`` sections." 
-msgstr "" -"Keystoneは少し異なる動作をします。ロギングレベルを変更するためには、``/etc/" -"keystone/logging.conf`` を編集し、``logger_root`` と ``handler_file`` を修正" -"する必要があります。" - -msgid "Keystone services" -msgstr "Keystone サービス" - -msgid "Keystone, All nova services" -msgstr "Keystone、すべての Nova サービス" - -msgid "Keystone, ``nova-api``" -msgstr "Keystone, ``nova-api``" - -msgid "Kibana" -msgstr "Kibana" - -msgid "Kilo" -msgstr "Kilo" - -msgid "" -"L3-agent router namespaces are named ``qrouter-``, and dhcp-" -"agent name spaces are named ``qdhcp-``. This output shows a " -"network node with four networks running dhcp-agents, one of which is also " -"running an l3-agent router. It's important to know which network you need to " -"be working in. A list of existing networks and their UUIDs can be obtained " -"by running ``openstack network list`` with administrative credentials." -msgstr "" -"``qrouter-`` という名前の L3 エージェントのルーター名前空間およ" -"び DHCP エージェントの名前空間は、 ``qdhcp-`` という名前です。この" -"出力は、DHCP エージェントを実行している 4 つのネットワークを持つネットワーク" -"ノードを表しています。また、1 つは L3 エージェントルーターも実行しています。" -"作業する必要のあるネットワークを理解することは重要です。既存のネットワークお" -"よび UUID の一覧は、管理クレデンシャルを持って ``openstack network list`` を" -"実行することにより得られます。" - -msgid "LDAP" -msgstr "LDAP" - -msgid "Large instances" -msgstr "大きなインスタンス" - -msgid "Launch several neutron ports:" -msgstr "いくつか neutron ポートを起動します。" - -msgid "Lay of the Land" -msgstr "環境の把握" - -msgid "" -"Learn more about how to contribute to the OpenStack docs at `OpenStack " -"Documentation Contributor Guide `_." -msgstr "" -"OpenStack ドキュメントに貢献する方法は `OpenStack Documentation Contributor " -"Guide `_ にあります。" - -msgid "Liberty" -msgstr "Liberty" - -msgid "" -"Like all major system upgrades, your upgrade could fail for one or more " -"reasons. You can prepare for this situation by having the ability to roll " -"back your environment to the previous release, including databases, " -"configuration files, and packages. We provide an example process for rolling " -"back your environment in :ref:`rolling_back_a_failed_upgrade`." -msgstr "" -"すべてのシステムのメジャーアップグレードと同じく、いくつかの理由により、アッ" -"プグレードに失敗する可能性があります。データベース、設定ファイル、パッケージ" -"など、お使いの環境をロールバックできるようにしておくことにより、この状況に備" -"えられます。お使いの環境をロールバックするためのプロセス例が :ref:" -"`rolling_back_a_failed_upgrade` にあります。" - -msgid "" -"Limit the total size (in bytes) or number of objects that can be stored in a " -"single container." -msgstr "" -"1 つのコンテナーに保存できる、オブジェクトの容量 (バイト単位) や個数の合計を" -"制限します。" - -msgid "" -"Limit the total size (in bytes) that a user has available in the Object " -"Storage service." -msgstr "" -"ユーザーが Object Storage サービス で利用できる合計容量 (バイト単位) を制限し" -"ます。" - -msgid "" -"Linux network namespaces are a kernel feature the networking service uses to " -"support multiple isolated layer-2 networks with overlapping IP address " -"ranges. The support may be disabled, but it is on by default. If it is " -"enabled in your environment, your network nodes will run their dhcp-agents " -"and l3-agents in isolated namespaces. Network interfaces and traffic on " -"those interfaces will not be visible in the default namespace." 
-msgstr "" -"Linux のネットワーク名前空間は、ネットワークサービスが、重複する IP アドレス" -"範囲を持つ、複数の独立した L2 ネットワークをサポートするために使用する、カー" -"ネルの機能です。この機能のサポートが無効化されている可能性がありますが、デ" -"フォルトで有効になっています。お使いの環境で有効化されている場合、ネットワー" -"クノードが DHCP エージェントと L3 エージェントを独立した名前空間で動作しま" -"す。ネットワークインターフェース、それらのインターフェースにおける通信は、デ" -"フォルトの名前空間で見えなくなります。" - -msgid "List all default quotas for all tenants, as follows:" -msgstr "" -"全テナントに対するクォータのデフォルト値を全て表示するには、以下のようにしま" -"す。" - -msgid "List the currently set quota values for a tenant, as follows:" -msgstr "テナントの現在のクォータ値を一覧表示します。" - -msgid "Live Snapshots" -msgstr "ライブスナップショット" - -msgid "" -"Live snapshots is a feature that allows users to snapshot the running " -"virtual machines without pausing them. These snapshots are simply disk-only " -"snapshots. Snapshotting an instance can now be performed with no downtime " -"(assuming QEMU 1.3+ and libvirt 1.0+ are used)." -msgstr "" -"ライブスナップショットは、ユーザーが実行中の仮想マシンのスナップショットを一" -"時停止なしで取得できる機能です。このスナップショットは単にディスクのみのス" -"ナップショットです。現在はインスタンスのスナップショットは停止時間なしで実行" -"できます (QEMU 1.3+ と libvirt 1.0+ が使用されていることが前提です)。" - -msgid "Log Indexer" -msgstr "Log Indexer" - -msgid "Log Pusher" -msgstr "Log Pusher" - -msgid "Log in as an administrative user." -msgstr "管理ユーザーとしてログインします。" - -msgid "Log location" -msgstr "ログの場所" - -msgid "Logging" -msgstr "ロギング" - -msgid "Logging and Monitoring" -msgstr "ロギングと監視" - -msgid "" -"Logging for horizon is configured in ``/etc/openstack_dashboard/" -"local_settings.py``. Because horizon is a Django web application, it follows " -"the `Django Logging framework conventions `_." -msgstr "" -"horizon のロギング設定は ``/etc/openstack_dashboard/local_settings.py`` で行" -"います。 horizon は Django web アプリケーションですので、 `Django Logging " -"framework conventions `_ に従います。" - -msgid "" -"Logical separation within your nova deployment for physical isolation or " -"redundancy." -msgstr "" -"物理的な隔離や冗長性のために、Nova デプロイメントの中で論理的な分離が必要な場" -"合" - -msgid "Logstash" -msgstr "Logstash" - -msgid "Look at your OpenStack service :term:`catalog`:" -msgstr "" -"それではOpenStack サービス :term:`カタログ ` を見てみましょう。" - -msgid "" -"Look for any errors or traces in the log file. For more information, see :" -"doc:`ops-logging-monitoring`." -msgstr "" -"何らかのエラーまたはトレースをログファイルで探します。詳細は :doc:`ops-" -"logging-monitoring` を参照してください。" - -msgid "Look for connection issues identified in the log files." -msgstr "ログファイルにおいて、識別された接続の問題を探します。" - -msgid "" -"Look for the vnet NIC. You can also reference ``nova.conf`` and look for the " -"``flat_interface_bridge`` option." -msgstr "" -"vnet NICを探してください。また、 ``nova.conf`` の ``flat_interface_bridge`` " -"オプションも参考になります。" - -msgid "Lorin Hochstein" -msgstr "Lorin Hochstein" - -msgid "MIT CSAIL" -msgstr "MIT CSAIL" - -msgid "" -"MTU is maximum transmission unit. It specifies the maximum number of bytes " -"that the interface accepts for each packet. If two interfaces have two " -"different MTUs, bytes might get chopped off and weird things happen—such as " -"random session lockups." -msgstr "" -"MTU とは最大転送単位(Maximum Transmission Unit)である。これは、各パケットに" -"対してそのインターフェースが受け取る最大バイト数を指定する。もし2つのイン" -"ターフェースが異なる MTU であった場合、バイトは尻切れトンボとなって変なことが" -"起こり始める。例えばセッションのランダムなロックアップとか。" - -msgid "" -"Maintaining an OpenStack cloud requires that you manage multiple physical " -"servers, and this number might grow over time. Because managing nodes " -"manually is error prone, we strongly recommend that you use a configuration-" -"management tool. 
These tools automate the process of ensuring that all your " -"nodes are configured properly and encourage you to maintain your " -"configuration information (such as packages and configuration options) in a " -"version-controlled repository." -msgstr "" -"OpenStack クラウドをメンテナンスするには、複数の物理サーバーを管理することが" -"必要です。そして、この数は日々増えていきます。ノードを手動で管理することはエ" -"ラーを起こしやすいので、構成管理ツールを使用することを強く推奨します。これら" -"のツールはすべてのノードが適切に設定されていることを保証するプロセスを自動化" -"します。また、これらを使うことで、(パッケージや設定オプションといった) 構成情" -"報のバージョン管理されたリポジトリでの管理が行いやすくなります。" - -msgid "Maintenance, Failures, and Debugging" -msgstr "メンテナンス、故障およびデバッグ" - -msgid "" -"Make a full database backup of your production data. Since the Kilo release, " -"database downgrades are not supported, and restoring from backup is the only " -"method available to retrieve a previous database version." -msgstr "" -"本番データの完全バックアップを取得します。Kilo 以降のリリースでは、データベー" -"スのダウングレードはサポートされません。バックアップからリストアすることが、" -"以前のバージョンのデータベースに戻す唯一の方法です。" - -msgid "Make another storage endpoint on the same system" -msgstr "同じシステムに別のストレージエンドポイントの作成" - -msgid "" -"Make sure that the share has been created successfully and is ready to use " -"(check the share status and see the share export location)" -msgstr "" -"共有が正しく作成され、利用可能であることを確認します (共有の状態を確認し、エ" -"クスポートされている場所を確認します)。" - -msgid "Make sure you are in the ``devstack`` directory:" -msgstr "``devstack`` ディレクトリーにいることを確認します。" - -msgid "Make sure you're in the ``devstack`` directory:" -msgstr "``devstack`` ディレクトリーにいることを確認します。" - -msgid "Manage Access To Shares" -msgstr "共有へのアクセス権の管理" - -msgid "Manage Shares" -msgstr "共有の管理" - -msgid "Manage a Share Network" -msgstr "共有ネットワークの管理" - -msgid "Manage a share network" -msgstr "共有ネットワークの管理" - -msgid "Manage access to shares" -msgstr "共有へのアクセス権の管理" - -msgid "Manage repositories" -msgstr "リポジトリーの管理" - -msgid "Managing Projects" -msgstr "プロジェクトの管理" - -msgid "Managing Projects and Users" -msgstr "プロジェクトとユーザーの管理" - -msgid "Managing floating IP addresses between instances" -msgstr "インスタンス間の Floating IP アドレスの管理" - -msgid "Manually Disassociating a Floating IP" -msgstr "Floating IP の手動割り当て解除" - -msgid "" -"Many OpenStack projects implement a driver layer, and each of these drivers " -"will implement its own configuration options. For example, in OpenStack " -"Compute (nova), there are various hypervisor drivers implemented—libvirt, " -"xenserver, hyper-v, and vmware, for example. Not all of these hypervisor " -"drivers have the same features, and each has different tuning requirements." -msgstr "" -"多くの OpenStack プロジェクトではドライバー層が実装され、各ドライバーはドライ" -"バー固有の設定オプションが実装されています。例えば、OpenStack Compute (nova) " -"では、libvirt, xenserver, hyper-v, vmware などの種々のハイパーバイザードライ" -"バーが実装されていますが、これらのハイパーバイザードライバーすべてが同じ機能" -"を持っている訳ではなく、異なるチューニング要件もドライバー毎に異なります。" - -msgid "" -"Many individual efforts keep a community book alive. Our community members " -"updated content for this book year-round. Also, a year after the first " -"sprint, Jon Proulx hosted a second two-day mini-sprint at MIT with the goal " -"of updating the book for the latest release. Since the book's inception, " -"more than 30 contributors have supported this book. We have a tool chain for " -"reviews, continuous builds, and translations. Writers and developers " -"continuously review patches, enter doc bugs, edit content, and fix doc bugs. " -"We want to recognize their efforts!" 
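-# One plausible form of the full database backup recommended above (assumes
-# MySQL and suitable credentials; the output file name is illustrative):
-#
-#   mysqldump --opt --all-databases > openstack_backup.sql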
-msgstr "" -"数多くの方々の努力がコミュニティのドキュメントを維持しています。私たちのコ" -"ミュニティーのメンバーは、一年を通じて、このドキュメントの内容を更新しまし" -"た。また、最初のスプリントの 1 年後、Jon Proulx さんが 2 回目となる 2 日間の" -"ミニスプリントを主催しました。これは、MIT で行われ、最新リリースに向けた更新" -"を目標としました。ドキュメント作成以降、30 人以上の貢献者がこのドキュメントを" -"サポートしてきました。レビュー、継続的ビルド、翻訳のツールチェインがありま" -"す。執筆者や開発者は継続的に、パッチをレビューし、ドキュメントバグを記入し、" -"内容を編集し、そのバグを修正します。その方々の努力を認めたいと思います。" - -msgid "" -"Many sites run with users being associated with only one project. This is a " -"more conservative and simpler choice both for administration and for users. " -"Administratively, if a user reports a problem with an instance or quota, it " -"is obvious which project this relates to. Users needn't worry about what " -"project they are acting in if they are only in one project. However, note " -"that, by default, any user can affect the resources of any other user within " -"their project. It is also possible to associate users with multiple projects " -"if that makes sense for your organization." -msgstr "" -"多くのサイトは一つのプロジェクトのみに割り当てられているユーザーで実行してい" -"ます。これは、管理者にとってもユーザーにとっても、より保守的で分かりやすい選" -"択です。管理の面では、ユーザーからインスタンスやクォータに関する問題の報告が" -"あった場合、どのプロジェクトに関するものかが明確です。ユーザーが一つのプロ" -"ジェクトのみに所属している場合、ユーザーがどのプロジェクトで操作しているのか" -"を気にする必要がありません。ただし、既定の設定では、どのユーザーも同じプロ" -"ジェクトにいる他のユーザーのリソースに影響を与えることができることに注意して" -"ください。あなたの組織にとって意味があるならば、ユーザーを複数のプロジェクト" -"に割り当てることも可能です。" - -msgid "Mar 20, 2015" -msgstr "2015年3月20日" - -msgid "" -"Matching ``gre-`` interfaces to tunnel endpoints is possible by looking " -"at the Open vSwitch state:" -msgstr "" -"``gre-`` インターフェースとトンネルエンドポイントを一致させることは、おそ" -"らく Open vSwitch の状態を見ることになります。" - -msgid "Max amount of space available for all shares" -msgstr "すべての共有のために利用可能な容量の合計" - -msgid "Max number of share snapshots" -msgstr "共有のスナップショットの最大数" - -msgid "Max number of shared networks" -msgstr "共有ネットワークの最大数" - -msgid "Max number of shares" -msgstr "共有の最大数" - -msgid "Max total amount of all snapshots" -msgstr "すべてのスナップショットの合計数" - -msgid "May 9, 2013" -msgstr "2013年5月9日" - -msgid "Megabytes of instance RAM allowed per project." -msgstr "プロジェクトごとのインスタンスの RAM 容量(メガバイト単位)" - -msgid "Memory" -msgstr "メモリ" - -msgid "Memory usage" -msgstr "メモリー使用量" - -msgid "Memory\\_MB" -msgstr "Memory\\_MB" - -msgid "Message Service (zaqar)" -msgstr "Message サービス (zaqar)" - -msgid "Message queue and database services" -msgstr "メッセージキューとデータベースのサービス" - -msgid "Metadata items" -msgstr "メタデータ項目" - -msgid "Microsoft Active Directory" -msgstr "Microsoft Active Directory" - -msgid "Migrate all instances one by one:" -msgstr "すべてのインスタンスを 1 つずつマイグレーションします。" - -msgid "" -"Modifying users is also done from this :guilabel:`Users` page. If you have a " -"large number of users, this page can get quite crowded. The :guilabel:" -"`Filter` search box at the top of the page can be used to limit the users " -"listing. A form very similar to the user creation dialog can be pulled up by " -"selecting :guilabel:`Edit` from the actions drop-down menu at the end of the " -"line for the user you are modifying." -msgstr "" -"ユーザー情報の変更は、この :guilabel:`ユーザー` ページから実行することもでき" -"ます。かなり多くのユーザーがいるならば、このページにはたくさんのユーザーが表" -"示されることでしょう。ページの上部にある :guilabel:`フィルター` 検索ボックス" -"を使うと、表示されるユーザーの一覧を絞り込むことができます。変更しようとして" -"いるユーザーの行末にあるアクションドロップダウンメニューの :guilabel:`編集` " -"を選択することにより、ユーザー作成ダイアログと非常に似ているフォームを表示で" -"きます。" - -msgid "Monitoring" -msgstr "監視" - -msgid "Monitoring Tools" -msgstr "モニタリングツール" - -msgid "Monthly" -msgstr "月次" - -msgid "" -"Most instances are size m1.medium (two virtual cores, 50 GB of storage)." 
-msgstr "" -"ほとんどのインスタンスのサイズは m1.medium (仮想コア数2、ストレージ50GB) とし" -"ます。" - -msgid "" -"Most services use the convention of writing their log files to " -"subdirectories of the ``/var/log directory``, as listed in :ref:" -"`table_log_locations`." -msgstr "" -"多くのサービスは、慣習に従い、ログファイルを ``/var/log`` ディレクトリーのサ" -"ブディレクトリーに書き込みます。:ref:`table_log_locations` に一覧化されていま" -"す。" - -msgid "Mount the Share:" -msgstr "共有をマウントします。" - -msgid "Mount the qemu-nbd device." -msgstr "qemu-nbd デバイスをマウントします。" - -msgid "" -"Much of OpenStack is driver-oriented, so you can plug in different solutions " -"to the base set of services. This chapter describes some advanced " -"configuration topics." -msgstr "" -"ほとんどの OpenStack は、ドライバーを用いて動作します。そのため、サービスの基" -"本セットに別のソリューションをプラグインできます。この章は、いくつかの高度な" -"設定に関する話題を取り扱います。" - -msgid "" -"Must be ``IPv4`` or ``IPv6``, and addresses represented in CIDR must match " -"the ingress or egress rules." -msgstr "" -"``IPv4`` または ``IPv6`` である必要があります。CIDR 形式のアドレスが受信ルー" -"ルまたは送信ルールに一致する必要があります。" - -msgid "Nagios" -msgstr "Nagios" - -msgid "" -"Nagios alerts you with a `WARNING` when any disk on the compute node is 80 " -"percent full and `CRITICAL` when 90 percent is full." -msgstr "" -"Naigos は、80% のディスク使用率で ``WARNING``、90% で ``CRITICAL`` を警告しま" -"す。" - -msgid "" -"Nagios checks that at least one ``nova-compute`` service is running at all " -"times." -msgstr "" -"Nagiosは常に 1 つ以上の ``nova-compute`` サービスが動作しているかをチェックし" -"ます。" - -msgid "" -"Nagios is an open source monitoring service. It is capable of executing " -"arbitrary commands to check the status of server and network services, " -"remotely executing arbitrary commands directly on servers, and allowing " -"servers to push notifications back in the form of passive monitoring. Nagios " -"has been around since 1999. Although newer monitoring services are " -"available, Nagios is a tried-and-true systems administration staple." -msgstr "" -"Nagios は、オープンソースソフトウェアの監視サービスです。任意のコマンドを実行" -"して、サーバーやネットワークサービスの状態を確認できます。また、任意のコマン" -"ドをリモートのサーバーで直接実行できます。サーバーが受動的な監視形態で通知を" -"送信することもできます。Nagios は 1999 年ごろにできました。より当たらし監視" -"サービスがありますが、Nagios は実績豊富なシステム管理ツールです。" - -msgid "Name" -msgstr "名前" - -msgid "NeCTAR" -msgstr "NeCTAR" - -msgid "Network Configuration in the Database for nova-network" -msgstr "nova-network 用データベースにあるネットワーク設定" - -msgid "Network I/O" -msgstr "ネットワーク I/O" - -msgid "Network Inspection" -msgstr "ネットワークの検査" - -msgid "Network Troubleshooting" -msgstr "ネットワークのトラブルシューティング" - -msgid "" -"Network configuration is a very large topic that spans multiple areas of " -"this book. For now, make sure that your servers can PXE boot and " -"successfully communicate with the deployment server." -msgstr "" -"ネットワーク設定は、本書でも複数の箇所で取り上げられている大きいトピックで" -"す。ここでは、お使いのサーバが PXEブートでき、デプロイメントサーバと正常に通" -"信できることを確認しておいてください。" - -msgid "" -"Network troubleshooting can be challenging. A network issue may cause " -"problems at any point in the cloud. Using a logical troubleshooting " -"procedure can help mitigate the issue and isolate where the network issue " -"is. This chapter aims to give you the information you need to identify any " -"issues for ``nova-network`` or OpenStack Networking (neutron) with Linux " -"Bridge or Open vSwitch." 
-msgstr "" -"ネットワークのトラブルシューティングは難しい場合もあります。ネットワークの問" -"題は、クラウドのいくつかの場所で問題となりえます。論理的な問題解決手順を用い" -"ることで、問題の緩和やネットワークの問題の正確な切り分けにつながります。この" -"章は、``nova-network`` 、Linux ブリッジや Open vSwitch を用いた OpenStack " -"Networking (neutron) に関する何らかの問題を識別するために必要となる情報を提供" -"することを目的とします。" - -msgid "Network usage (bandwidth and IP usage)" -msgstr "ネットワーク使用量 (帯域および IP 使用量)" - -msgid "Networking" -msgstr "ネットワーク" - -msgid "Networking configuration just for PXE booting" -msgstr "PXE ブート用のネットワーク設定" - -msgid "Networking service" -msgstr "ネットワークサービス" - -msgid "" -"Networking service - Edit the configuration file and restart the service." -msgstr "Networking サービス - 設定ファイルを編集して、サービスを再起動します。" - -msgid "New API Versions" -msgstr "新しい API " - -msgid "Next, create ``/etc/rsyslog.d/client.conf`` with the following line:" -msgstr "" -"次に、 ``/etc/rsyslog.d/client.conf`` を作成して、以下の行を書き込みます。" - -msgid "Next, find the fixed IP entry for that UUID:" -msgstr "次に、そのUUIDから固定IPのエントリーを探します。" - -msgid "" -"Next, manually detach and reattach the volumes, where X is the proper mount " -"point:" -msgstr "" -"次に、ボリュームを手動で切断し、再接続します。ここで X は適切なマウントポイン" -"トです。" - -msgid "" -"Next, open a new shell to the instance and then ping the external host where " -"``tcpdump`` is running. If the network path to the external server and back " -"is fully functional, you see something like the following:" -msgstr "" -"次に、新しいシェルを開いて ``tcpdump`` の動いている外部ホストへ ping を行いま" -"す。もし外部サーバーとのネットワーク経路に問題がなければ、以下のように表示さ" -"れます。" - -msgid "" -"Next, physically remove the disk from the server and replace it with a " -"working disk." -msgstr "" -"次に、ディスクを物理的にサーバーから取り外し、正常なディスクと入れ替えます。" - -msgid "Next, redistribute the ring files to the other nodes:" -msgstr "次に、ring ファイルを他のノードに再配布します。" - -msgid "" -"Next, the ``libvirtd`` daemon was run on the command line. Finally a helpful " -"error message: it could not connect to d-bus. As ridiculous as it sounds, " -"libvirt, and thus ``nova-compute``, relies on d-bus and somehow d-bus " -"crashed. Simply starting d-bus set the entire chain back on track, and soon " -"everything was back up and running." -msgstr "" -"次に、 ``libvirtd`` デーモンをコマンドラインにおいて実行しました。最終的に次" -"のような役に立つエラーメッセージが得られました。d-bus に接続できませんでし" -"た。このため、滑稽に聞こえるかもしれませんが、libvirt 、その結果として " -"``nova-compute`` も D-Bus に依存していて、どういう訳か D-Bus がクラッシュしま" -"した。単に D-Bus を開始するだけで、一連のプログラムがうまく動くようになり、す" -"ぐに全部が元に戻り動作状態になりました。" - -msgid "" -"Next, the ``nova`` database contains three tables that store usage " -"information." -msgstr "" -"次に ``nova`` データベースは 利用情報に関して 3 つのテーブルを持っています。" - -msgid "" -"Next, the internal bridge, ``br-int``, contains ``int-eth1-br``, which pairs " -"with ``phy-eth1-br`` to connect to the physical network shown in the " -"previous bridge, ``patch-tun``, which is used to connect to the GRE tunnel " -"bridge and the TAP devices that connect to the instances currently running " -"on the system:" -msgstr "" -"次に、内部ブリッジ ``br-int`` は ``int-eth1-br`` を持ちます。この ``int-eth1-" -"br`` は、 ``phy-eth1-br`` とペアになり、前のブリッジ ``patch-tun`` で示された" -"物理ネットワークに接続されます。この ``patch-tun`` は、GRE トンネルブリッジを" -"接続するために使用され、システムにおいて現在動作しているインスタンスに接続さ" -"れる TAP デバイスです。" - -msgid "" -"Next, the packets from either input go through the integration bridge, again " -"just as on the compute node." 
-msgstr "" -"次に、何かしらの入力パケットは統合ブリッジ経由で送信されます。繰り返します" -"が、コンピュートノードと同じようなものです。" - -msgid "" -"Next, update the nova database to indicate that all instances that used to " -"be hosted on c01.example.com are now hosted on c02.example.com:" -msgstr "" -"次に、c01.example.com においてホストされていたすべてのインスタンスが、今度は " -"c02.example.com でホストされることを伝えるために nova データベースを更新しま" -"す。" - -msgid "Node type" -msgstr "ノード種別" - -msgid "" -"Not all packets have a size of 1500. Running the :command:`ls` command over " -"SSH might only create a single packets less than 1500 bytes. However, " -"running a command with heavy output, such as :command:`ps aux` requires " -"several packets of 1500 bytes." -msgstr "" -"すべてのパケットサイズが 1500 に収まるわけではない。SSH 経由の :command:`ls` " -"コマンド実行は 1500 バイト未満のサイズのパケット1つで収まるかもしれない。し" -"かし、 :command:`ps aux` のように多大な出力を行うコマンドを実行する場合、" -"1500 バイトのパケットが複数必要とある。" - -msgid "" -"Not entirely network specific, but it contains information about the " -"instance that is utilizing the ``fixed_ip`` and optional ``floating_ip``." -msgstr "" -"ネットワーク特有のテーブルではありませんが、 ``fixed_ip`` と ``floating_ip`` " -"を使っているインスタンスの情報を管理します。" - -msgid "Nov 29, 2012" -msgstr "2012年11月29日" - -msgid "" -"Now try the command from Step 10 again and it succeeds. There are no objects " -"in the container, so there is nothing to list; however, there is also no " -"error to report." -msgstr "" -"ここで手順 10 のコマンドを再び試みて、続行します。コンテナーにオブジェクトが" -"ありません。そのため、一覧には何もありません。しかしながら、レポートするエ" -"ラーもありません。" - -msgid "Now you can import a previously backed-up database:" -msgstr "以前にバックアップしたデータベースをインポートします。" - -msgid "Now you can refer to your token on the command line as ``$TOKEN``." -msgstr "" -"これで、コマンドラインでトークンを ``$TOKEN`` として参照できるようになりまし" -"た。" - -msgid "Number of Block Storage snapshots allowed per tenant." -msgstr "テナントごとのブロックストレージスナップショット数" - -msgid "Number of Block Storage volumes allowed per tenant" -msgstr "テナントごとのブロックストレージボリューム数" - -msgid "Number of bytes allowed per injected file path." -msgstr "injected file のパス長の最大バイト数" - -msgid "Number of content bytes allowed per injected file." -msgstr "injected file あたりの最大バイト数" - -msgid "" -"Number of fixed IP addresses allowed per project. This number must be equal " -"to or greater than the number of allowed instances." -msgstr "" -"プロジェクトごとの固定 IP アドレスの最大数。この数はプロジェクトごとの最大イ" -"ンスタンス数以上にしなければなりません。" - -msgid "Number of floating IP addresses allowed per project." -msgstr "プロジェクトごとの最大 Floating IP 数" - -msgid "Number of injected files allowed per project." -msgstr "プロジェクトごとの injected file の最大数" - -msgid "Number of instance cores allowed per project." -msgstr "プロジェクトごとのインスタンスのコア数" - -msgid "Number of instances allowed per project." -msgstr "プロジェクトごとの最大インスタンス数" - -msgid "Number of key pairs allowed per user." -msgstr "ユーザーごとの最大キーペア数" - -msgid "Number of metadata items allowed per instance." -msgstr "インスタンスごとのメタデータ項目数" - -msgid "Number of security groups per project." -msgstr "プロジェクトごとのセキュリティグループ数" - -msgid "Number of server groups per project." -msgstr "プロジェクトごとのサーバーグループ数" - -msgid "Number of servers per server group." -msgstr "サーバーグループごとのサーバー数。" - -msgid "Number of virtual CPUs presented to the instance." -msgstr "インスタンスに存在する仮想 CPU 数。" - -msgid "Number of volume gigabytes allowed per tenant" -msgstr "テナントごとのボリューム容量の最大値(単位はギガバイト)" - -msgid "" -"OK, so where is the MTU issue coming from? Why haven't we seen this in any " -"other deployment? What's new in this situation? 
Well, new data center, new " -"uplink, new switches, new model of switches, new servers, first time using " -"this model of servers… so, basically everything was new. Wonderful. We toyed " -"around with raising the MTU at various areas: the switches, the NICs on the " -"compute nodes, the virtual NICs in the instances, we even had the data " -"center raise the MTU for our uplink interface. Some changes worked, some " -"didn't. This line of troubleshooting didn't feel right, though. We shouldn't " -"have to be changing the MTU in these areas." -msgstr "" -"OK。では MTU の問題はどこから来るのか?なぜ我々は他のデプロイでこの問題に遭遇" -"しなかったのか?この状況は何が新しいのか?えっと、新しいデータセンター、新し" -"い上位リンク、新しいスイッチ、スイッチの新機種、新しいサーバー、サーバーの新" -"機種…つまり、基本的に全てが新しいものだった。素晴らしい。我々は様々な領域で " -"MTU の増加を試してみた。スイッチ、コンピュータのNIC、インスタンスの仮想NIC、" -"データセンターの上位リンク用のインターフェースのMTUまでいじってみた。いくつか" -"の変更ではうまくいったが、他はダメだった。やはり、この線の障害対策はうまく" -"いってないようだった。我々はこれらの領域のMTUは変更すべきではないようだ。" - -msgid "Object Storage" -msgstr "オブジェクトストレージ" - -msgid "Obtain the UUID of the image:" -msgstr "イメージの UUID を取得します。" - -msgid "" -"Obtain the UUID of the project with which you want to share your image, " -"let's call it target project. Unfortunately, non-admin users are unable to " -"use the :command:`openstack` command to do this. The easiest solution is to " -"obtain the UUID either from an administrator of the cloud or from a user " -"located in the target project." -msgstr "" -"イメージを共有したいプロジェクトの UUID を取得します。これを宛先プロジェクト" -"と呼びましょう。残念ながら、管理者以外は、これを実行するために :command:" -"`openstack` コマンドを使用できません。最も簡単な解決方法は、クラウドの管理者" -"やその宛先プロジェクトのユーザーから UUID を教えてもらうことです。" - -msgid "Obtain the tenant ID, as follows:" -msgstr "テナント ID を取得します。" - -msgid "" -"Obtaining consistent snapshots of Windows VMs is conceptually similar to " -"obtaining consistent snapshots of Linux VMs, although it requires additional " -"utilities to coordinate with a Windows-only subsystem designed to facilitate " -"consistent backups." -msgstr "" -"Windows 仮想マシンの整合性あるスナップショットの取得は、Linux マシンのスナッ" -"プショットの場合と同じようなものです。ただし、整合性バックアップを働かせるた" -"めに、Windows 専用のサブコマンドと調整するための追加機能を必要とします。" - -msgid "Oct 12, 2012" -msgstr "2012年10月12日" - -msgid "Oct 16, 2014" -msgstr "2014年10月16日" - -msgid "Oct 17, 2013" -msgstr "2013年10月17日" - -msgid "Oct 2, 2014" -msgstr "2014年10月2日" - -msgid "Oct 21, 2010" -msgstr "2010年10月21日" - -msgid "Oct, 2015" -msgstr "2015年10月" - -msgid "" -"Oisin Feeley read it, made some edits, and provided emailed feedback right " -"when we asked." -msgstr "" -"Oisin Feeley は、このマニュアルを読んで、いくつかの編集をし、私たちが問い合わ" -"せをした際には、メールでフィードバックをくれました。" - -msgid "" -"On Wednesday night we had a fun happy hour with the Austin OpenStack Meetup " -"group and Racker Katie Schmidt took great care of our group." -msgstr "" -"水曜日の夜、オースチン OpenStack ミートアップグループと楽しく幸せな時間を過ご" -"し、Racker Katie Schmidt は私たちのグループを素晴らしい世話をしてくれました。" - -msgid "" -"On all distributions, you must perform some final tasks to complete the " -"upgrade process." -msgstr "" -"すべてのディストリビューションにおいて、アップグレードプロセスを完了するため" -"に、いくつかの最終作業を実行する必要があります。" - -msgid "On all nodes:" -msgstr "すべてのノード:" - -msgid "" -"On compute nodes and nodes running ``nova-network``, use the following " -"command to see information about interfaces, including information about " -"IPs, VLANs, and whether your interfaces are up:" -msgstr "" -"コンピュートノードおよび ``nova-network`` を実行しているノードにおいて、以下" -"のコマンドを使用して、IP、VLAN、起動状態などのインターフェースに関する情報を" -"参照します。" - -msgid "" -"On my last day in Kelowna, I was in a conference call from my hotel. In the " -"background, I was fooling around on the new cloud. I launched an instance " -"and logged in. 
Everything looked fine. Out of boredom, I ran :command:`ps " -"aux` and all of the sudden the instance locked up." -msgstr "" -"ケロウナの最終日、私はホテルから電話会議に参加していた。その裏で、私は新しい" -"クラウドをいじっていた。私はインスタンスを1つ起動し、ログインした。全ては正" -"常に思えた。退屈しのぎに、私が :command:`ps aux` を実行したところ、突然そのイ" -"ンスタンスがロックアップしてしまった。" - -msgid "On the Compute node, create the following NRPE configuration:" -msgstr "コンピュートノードにおいて、次のような NRPE 設定を作成します。" - -msgid "On the command line, do this:" -msgstr "コマンドラインで、このようにします。" - -msgid "On the compute node, add the following to your NRPE configuration:" -msgstr "コンピュートノード上では、次のようなNRPE設定を追加します。" - -msgid "On the compute node:" -msgstr "コンピュートノード上" - -msgid "On the external server:" -msgstr "外部サーバー上" - -msgid "" -"On the first day, we filled white boards with colorful sticky notes to start " -"to shape this nebulous book about how to architect and operate clouds:" -msgstr "" -"最初の日に、アイデアを色とりどりのポストイットでホワイトボードいっぱいに書き" -"出し、クラウドを設計し運用するという漠然とした話題を扱った本の作成を開始しま" -"した。" - -msgid "On the instance:" -msgstr "インスタンス上" - -msgid "" -"On the integration bridge, networks are distinguished using internal VLANs " -"regardless of how the networking service defines them. This allows instances " -"on the same host to communicate directly without transiting the rest of the " -"virtual, or physical, network. These internal VLAN IDs are based on the " -"order they are created on the node and may vary between nodes. These IDs are " -"in no way related to the segmentation IDs used in the network definition and " -"on the physical wire." -msgstr "" -"内部ブリッジにおいて、ネットワークサービスがどのように定義されているかによら" -"ず、ネットワークは内部 VLAN を使用して区別されます。これにより、同じホストに" -"あるインスタンスが、仮想、物理、ネットワークを転送することなく直接通信できる" -"ようになります。これらの内部 VLAN ID は、ノードにおいて作成された順番に基づ" -"き、ノード間で異なる可能性があります。これらの ID は、ネットワーク定義および" -"物理結線において使用されるセグメント ID にまったく関連しません。" - -msgid "" -"Once access to a flavor has been restricted, no other projects besides the " -"ones granted explicit access will be able to see the flavor. This includes " -"the admin project. Make sure to add the admin project in addition to the " -"original project." -msgstr "" -"フレーバーへのアクセスが制限されると、明示的にアクセスを許可されたプロジェク" -"ト以外は、フレーバーを参照できなくなります。これには admin プロジェクトも含ま" -"れます。元のプロジェクトに加えて、きちんと admin プロジェクトを追加してくださ" -"い。" - -msgid "" -"Once allocated, a floating IP can be assigned to running instances from the " -"dashboard either by selecting :guilabel:`Associate` from the actions drop-" -"down next to the IP on the :guilabel:`Floating IPs` tab of the :guilabel:" -"`Access & Security` page or by making this selection next to the instance " -"you want to associate it with on the Instances page. The inverse action, " -"Dissociate Floating IP, is available from the :guilabel:`Floating IPs` tab " -"of the :guilabel:`Access & Security` page and from the :guilabel:`Instances` " -"page." -msgstr "" -"一度確保すると、Floating IP を実行中のインスタンスに割り当てることができま" -"す。ダッシュボードでは、 :guilabel:`アクセスとセキュリティ` ページの :" -"guilabel:`Floating IP` タブにある IP の隣にある、アクションドロップダウンか" -"ら :guilabel:`割り当て` を選択することにより実行できます。または、 :guilabel:" -"`インスタンス` ページにおいて割り当てたいインスタンスの隣にある、リストからこ" -"れを選択することにより実行できます。逆の動作 :guilabel:`Floating IP の割り当" -"て解除` は :guilabel:`アクセスとセキュリティ` ページの :guilabel:`Floating " -"IP` タブから実行できます。 :guilabel:`インスタンス` のページから利用できませ" -"ん。" - -msgid "Once the files are restored, start everything back up:" -msgstr "ファイルをリストア後、サービスを起動します。" - -msgid "" -"Once the volume has been frozen, do not attempt to read from or write to the " -"volume, as these operations hang. 
The operating system stops every I/O " -"operation and any I/O attempts are delayed until the file system has been " -"unfrozen." -msgstr "" -"ボリュームがフリーズ状態になったら、ボリュームの読み書き命令が止まってしまう" -"ので、ボリュームの読み書きを行わないようにしてください。オペレーティングシス" -"テムがすべての I/O 操作を停止し、すべての I/O 試行がファイルシステムがフリー" -"ズ解除されるまで遅延させられます。" - -msgid "" -"Once you have both pieces of information, run the :command:`openstack image " -"add project` command:" -msgstr "" -"情報がそろったら、:command:`openstack image add project` コマンドを実行しま" -"す。" - -msgid "" -"Once you have completed the inspection, unmount the mount point and release " -"the qemu-nbd device:" -msgstr "" -"調査を完了すると、マウントポイントをアンマウントし、qemu-nbd デバイスを解放し" -"ます。" - -msgid "" -"Once you have issued the :command:`fsfreeze` command, it is safe to perform " -"the snapshot. For example, if the volume of your instance was named ``mon-" -"volume`` and you wanted to snapshot it to an image named ``mon-snapshot``, " -"you could now run the following:" -msgstr "" -":command:`fsfreeze` コマンドを発行すると、スナップショットを実行しても安全で" -"す。たとえば、インスタンスのボリュームが ``mon-volume`` という名前で、 ``mon-" -"snapshot`` という名前のイメージにスナップショットを取得したければ、以下のとお" -"り実行します:" - -msgid "" -"Once you mount the disk file, you should be able to access it and treat it " -"as a collection of normal directories with files and a directory structure. " -"However, we do not recommend that you edit or touch any files because this " -"could change the :term:`access control lists (ACLs) ` that are used to determine which accounts can perform what " -"operations on files and directories. Changing ACLs can make the instance " -"unbootable if it is not already." -msgstr "" -"ディスクファイルをマウントすると、それにアクセスでき、ファイルとディレクトリ" -"構造を持つ通常のディレクトリのように取り扱えます。しかしながら、どのファイル" -"の編集も操作もしないことをお薦めします。なぜなら、それにより :term:`アクセス" -"制御リスト (ACL) ` が変更されたり、起動できるイン" -"スタンスが起動できなくなる場合があるからです。ACL は、アカウントがファイルや" -"ディレクトリーにどの操作を実行できるのか判断するために使用されます。" - -msgid "" -"Once you've determined which namespace you need to work in, you can use any " -"of the debugging tools mention earlier by prefixing the command with ``ip " -"netns exec ``. For example, to see what network interfaces exist " -"in the first qdhcp namespace returned above, do this:" -msgstr "" -"作業する必要のある名前空間を決めると、コマンドの前に ``ip netns exec " -"`` を付けることにより、前に言及したデバッグツールをすべて使用でき" -"ます。例えば、上で返された最初の qdhcp 名前空間に存在するネットワークインター" -"フェースを参照する場合、このように実行します。" - -msgid "" -"Once you've gathered this information, creating the user in the dashboard is " -"just another web form similar to what we've seen before and can be found by " -"clicking the :guilabel:`Users` link in the :guilabel:`Identity` navigation " -"bar and then clicking the :guilabel:`Create User` button at the top right." -msgstr "" -"一度この情報を収集すると、ダッシュボードでのユーザーの作成は、これまでに見て" -"きた他の Web フォームと同じです。 :guilabel:`ユーザー管理` ナビゲーションバー" -"の :guilabel:`ユーザー` リンクにあります。そして、右上にある :guilabel:`ユー" -"ザーの作成` ボタンをクリックします。" - -msgid "" -"One cloud controller acted as a gateway to all compute nodes. VlanManager " -"was used for the network config. This means that the cloud controller and " -"all compute nodes had a different VLAN for each OpenStack project. We used " -"the ``-s`` option of ``ping`` to change the packet size. We watched as " -"sometimes packets would fully return, sometimes they'd only make it out and " -"never back in, and sometimes the packets would stop at a random point. We " -"changed ``tcpdump`` to start displaying the hex dump of the packet. We " -"pinged between every combination of outside, controller, compute, and " -"instance." 
-msgstr "" -"1つのクラウドコントローラーが全コンピュートノードのゲートウェイの役割を果た" -"していた。ネットワーク設定には VlanManager が使われていた。これは、クラウドコ" -"ントローラーと全コンピュートノードで、各 OpenStack プロジェクトが異なる VLAN " -"を持つことを意味する。パケットサイズ変更のため、 ``ping`` の ``-s`` オプショ" -"ンを使用していた。パケットが全て戻ってくる時もあれば、パケットが出ていったき" -"り全く戻って来ない時もあれば、パケットはランダムな場所で止まってしまう時もあ" -"る、という状況だった。 ``tcpdump`` を変更し、パケットの16進ダンプを表示するよ" -"うにした。外部、コントローラー、コンピュート、インスタンスのあらゆる組み合わ" -"せの間で ping を実行した。" - -msgid "" -"One common networking problem is that an instance boots successfully but is " -"not reachable because it failed to obtain an IP address from dnsmasq, which " -"is the DHCP server that is launched by the ``nova-network`` service." -msgstr "" -"よくあるネットワークの問題に、インスタンスが起動しているにも関わらず、" -"dnsmasq からの IP アドレス取得に失敗し、到達できないという現象があります。 " -"dnsmasq は ``nova-network`` サービスから起動される DHCP サーバです。" - -msgid "" -"One great, although very in-depth, way of troubleshooting network issues is " -"to use ``tcpdump``. We recommended using ``tcpdump`` at several points along " -"the network path to correlate where a problem might be. If you prefer " -"working with a GUI, either live or by using a ``tcpdump`` capture, check out " -"`Wireshark `_." -msgstr "" -"ネットワーク問題の解決を徹底的に行う方法のひとつは、 ``tcpdump`` です。 " -"``tcpdump`` を使い、ネットワーク経路上の数点、問題のありそうなところから情報" -"を収集することをおすすめします。もし GUI が好みであれば、 `Wireshark `_ を試してみてはいかがでしょう。" - -msgid "" -"One interesting example is modifying the table of images and the owner of " -"that image. This can be easily done if you simply display the unique ID of " -"the owner. This example goes one step further and displays the readable name " -"of the owner:" -msgstr "" -"興味深い例の一つは、イメージとそのイメージの所有者の表の表示内容を変更するこ" -"とです。これは、所有者のユニーク ID を表示するようにするだけで実現できます。" -"この例はさらに一歩進め、所有者の読みやすい形式の名前を表示します:" - -msgid "" -"One key element of systems administration that is often overlooked is that " -"end users are the reason systems administrators exist. Don't go the BOFH " -"route and terminate every user who causes an alert to go off. Work with " -"users to understand what they're trying to accomplish and see how your " -"environment can better assist them in achieving their goals. Meet your users " -"needs by organizing your users into projects, applying policies, managing " -"quotas, and working with them." -msgstr "" -"システム管理の見過ごされがちな大事な要素の一つに、エンドユーザのためにシステ" -"ム管理者が存在するという点があります。BOFH (Bastard Operator From Hell; 「地" -"獄から来た最悪の管理者」) の道に入って、問題の原因となっているユーザーを全員" -"停止させるようなことはしないでください。ユーザーがやりたいことを一緒になって" -"理解し、どうするとあなたの環境がユーザーが目的を達成するのにもっと支援できる" -"かを見つけてください。ユーザーをプロジェクトの中に組織して、ポリシーを適用し" -"て、クォータを管理して、彼らと一緒に作業することにより、ユーザーのニーズを満" -"たしてください。" - -msgid "" -"One last test is to launch a second instance and see whether the two " -"instances can ping each other. If they can, the issue might be related to " -"the firewall on the compute node." -msgstr "" -"最後のテストは、2 つ目のインスタンスを起動して、2 つのインスタンスがお互いに " -"ping できることを確認することです。もしできる場合、問題はコンピュートノードの" -"ファイアウォールに関連するものでしょう。" - -msgid "" -"One morning, a compute node failed to run any instances. The log files were " -"a bit vague, claiming that a certain instance was unable to be started. This " -"ended up being a red herring because the instance was simply the first " -"instance in alphabetical order, so it was the first instance that ``nova-" -"compute`` would touch." 
-msgstr "" -"ある朝、あるノードでインスタンスの実行がすべて失敗するようになりました。ログ" -"ファイルがすこしあいまいでした。特定のインスタンスが起動できなかったことを示" -"していました。これは最終的に偽の手掛かりであることがわかりました。単にそのイ" -"ンスタンスがアルファベット順で最初のインスタンスだったので、 ``nova-" -"compute`` が最初に操作したのがそのインスタンスだったというだけでした。" - -msgid "" -"One of the long-time complaints surrounding OpenStack Networking was the " -"lack of high availability for the layer 3 components. The Juno release " -"introduced Distributed Virtual Router (DVR), which aims to solve this " -"problem." -msgstr "" -"OpenStack Networking を長く取り巻く不満の 1 つは、L3 コンポーネントの高可用性" -"の不足でした。Juno リリースは、これを解決することを目指した、分散仮想ルー" -"ター (DVR) を導入しました。" - -msgid "" -"One of the most complex aspects of an OpenStack cloud is the networking " -"configuration. You should be familiar with concepts such as DHCP, Linux " -"bridges, VLANs, and iptables. You must also have access to a network " -"hardware expert who can configure the switches and routers required in your " -"OpenStack cloud." -msgstr "" -"OpenStack クラウドの最も複雑な点の一つにネットワーク設定があります。DHCP、" -"Linux ブリッジ、VLAN、iptables といった考え方をよく理解していなければなりませ" -"ん。OpenStack クラウドで必要となるスイッチやルータを設定できるネットワーク" -"ハードウェアの専門家と話をする必要もあります。" - -msgid "" -"One of the most requested features since OpenStack began (for components " -"other than Object Storage, which tends to \"just work\"): easier upgrades. " -"In all recent releases internal messaging communication is versioned, " -"meaning services can theoretically drop back to backward-compatible " -"behavior. This allows you to run later versions of some components, while " -"keeping older versions of others." -msgstr "" -"より簡単なアップグレードは、OpenStack の開始以来、もっとも要望されている機能" -"の 1 つです。Object Storage 以外のコンポーネントは「作業中」です。最近のリ" -"リースでは、内部メッセージ通信がバージョン付けされています。サービスが理論的" -"には後方互換性の動作まで戻れることを目的としています。これにより、古いバー" -"ジョンをいくつか残しながら、いくつかのコンポーネントの新しいバージョンを実行" -"できるようになります。" - -msgid "" -"One way to plan for cloud controller or storage proxy maintenance is to " -"simply do it off-hours, such as at 1 a.m. or 2 a.m. This strategy affects " -"fewer users. If your cloud controller or storage proxy is too important to " -"have unavailable at any point in time, you must look into high-availability " -"options." -msgstr "" -"クラウドコントローラーやストレージプロキシのメンテナンスを計画する一つの方法" -"は、単に午前 1 時や 2 時のような利用の少ない時間帯に実行することです。この戦" -"略はあまり多くのユーザーに影響を与えません。クラウドコントローラーやストレー" -"ジプロキシが、いかなる時間帯においても、サービスが利用できないことによる影響" -"が大きければ、高可用性オプションについて検討する必要があります。" - -msgid "" -"Open OpenStack Dashboard and launch an instance. If the instance launches, " -"the issue is resolved." -msgstr "" -"OpenStack Dashboard を開き、インスタンスを起動します。インスタンスが起動する" -"と、問題が解決されています。" - -msgid "" -"Open vSwitch, as used in the previous OpenStack Networking examples is a " -"full-featured multilayer virtual switch licensed under the open source " -"Apache 2.0 license. Full documentation can be found at `the project's " -"website `_. In practice, given the preceding " -"configuration, the most common issues are being sure that the required " -"bridges (``br-int``, ``br-tun``, and ``br-ex``) exist and have the proper " -"ports connected to them." -msgstr "" -"前の OpenStack Networking の例で使用されていたように、Open vSwitch は、オープ" -"ンソースの Apache 2.0 license にてライセンスされている、完全な機能を持つマル" -"チレイヤー仮想スイッチです。ドキュメント全体は ``プロジェクトの Web サイト " -"`_ にあります。実際のところ、前述の設定を用いた場" -"合、最も一般的な問題は、必要となるブリッジ (``br-int`` 、 ``br-tun`` 、 ``br-" -"ex``) が存在し、それらに接続される適切なポートを持つことを確認することです。" - -msgid "OpenStack" -msgstr "OpenStack" - -msgid "" -"OpenStack Block Storage also allows creating snapshots of volumes. 
Remember " -"that this is a block-level snapshot that is crash consistent, so it is best " -"if the volume is not connected to an instance when the snapshot is taken and " -"second best if the volume is not in use on the instance it is attached to. " -"If the volume is under heavy use, the snapshot may have an inconsistent file " -"system. In fact, by default, the volume service does not take a snapshot of " -"a volume that is attached to an image, though it can be forced to. To take a " -"volume snapshot, either select :guilabel:`Create Snapshot` from the actions " -"column next to the volume name on the dashboard :guilabel:`Volumes` page, or " -"run this from the command line:" -msgstr "" -"OpenStack Block Storage では、ボリュームのスナップショットを作成することもで" -"きます。これはブロックレベルのスナップショットであることを覚えておいてくださ" -"い。これはクラッシュに対する一貫性があります。そのため、スナップショットが取" -"得されるとき、ボリュームがインスタンスに接続されていないことが最良です。ボ" -"リュームが接続されたインスタンスにおいて使用されていなければ、次に良いです。" -"ボリュームが高負荷にある場合、スナップショットによりファイルシステムの不整合" -"が起こる可能性があります。実際、デフォルト設定では、Volume Service はイメージ" -"に接続されたボリュームのスナップショットを取得しません。ただし、強制的に実行" -"することができます。ボリュームのスナップショットを取得するには、ダッシュボー" -"ドの :guilabel:`ボリューム` ページにおいて、ボリューム名の隣にあるアクション" -"項目から :guilabel:`スナップショットの作成` を選択します。または、コマンドラ" -"インから次のようにします:" - -msgid "OpenStack Block Storage service" -msgstr "OpenStack Block Storage サービス" - -msgid "" -"OpenStack Compute cells are designed to allow running the cloud in a " -"distributed fashion without having to use more complicated technologies, or " -"be invasive to existing nova installations. Hosts in a cloud are partitioned " -"into groups called *cells*. Cells are configured in a tree. The top-level " -"cell (\"API cell\") has a host that runs the ``nova-api`` service, but no " -"``nova-compute`` services. Each child cell runs all of the other typical " -"``nova-*`` services found in a regular installation, except for the ``nova-" -"api`` service. Each cell has its own message queue and database service and " -"also runs ``nova-cells``, which manages the communication between the API " -"cell and child cells." -msgstr "" -"OpenStack Compute のセルによって、より複雑な技術を持ち込むことなしに、また既" -"存のNovaシステムに悪影響を与えることなしに、クラウドを分散された環境で運用す" -"ることができます。1つのクラウドの中のホストは、 *セル* と呼ばれるグループに" -"分割されます。セルは、木構造に構成されてます。最上位のセル (「API セル」) は " -"``nova-api`` サービスを実行するホストを持ちますが、 ``nova-compute`` サービス" -"を実行するホストは持ちません。それぞれの子セルは、 ``nova-api`` サービス以外" -"の、普通のNovaシステムに見られる他のすべての典型的な ``nova-*`` サービスを実" -"行します。それぞれのセルは自分のメッセージキューとデータベースサービスを持" -"ち、またAPIセルと子セルの間の通信を制御する ``nova-cells`` サービスを実行しま" -"す。" - -msgid "OpenStack Compute service" -msgstr "OpenStack Compute サービス" - -msgid "OpenStack Identity service" -msgstr "OpenStack Identity サービス" - -msgid "OpenStack Image service" -msgstr "OpenStack Image service" - -msgid "" -"OpenStack Networking has many more degrees of freedom than ``nova-network`` " -"does because of its pluggable back end. It can be configured with open " -"source or vendor proprietary plug-ins that control software defined " -"networking (SDN) hardware or plug-ins that use Linux native facilities on " -"your hosts, such as Open vSwitch or Linux Bridge." -msgstr "" -"OpenStack Networking は、バックエンドをプラグインできるので、 ``nova-" -"network`` よりも自由度が大きいです。SDN ハードウェアを制御するオープンソース" -"やベンダー製品のプラグイン、ホストで動作する Open vSwitch や Linux Bridge な" -"どの Linux ネイティブの機能を使用するプラグインを用いて設定できます。" - -msgid "OpenStack Networking service" -msgstr "OpenStack Networking サービス" - -msgid "" -"OpenStack Object Storage, known as swift when reading the code, is based on " -"the Python `Paste `_ framework. 
The best " -"introduction to its architecture is `A Do-It-Yourself Framework `_. Because of the swift " -"project's use of this framework, you are able to add features to a project " -"by placing some custom code in a project's pipeline without having to change " -"any of the core code." -msgstr "" -"OpenStack Object Storage は、コード参照時に swift としても知られ、Python " -"`Paste `_ フレームワークに基づいています。そのアーキ" -"テクチャーは、 `A Do-It-Yourself Framework `_ から始めると最も良いでしょう。swift プロジェクトは" -"このフレームワークを使用しているので、コアのコードを変更することなく、プロ" -"ジェクトのパイプラインにカスタムコードをいくつか配置することにより、プロジェ" -"クトに機能を追加できます。" - -msgid "OpenStack Operations Guide" -msgstr "OpenStack 運用ガイド" - -msgid "OpenStack Specific Resources" -msgstr "OpenStack 固有のリソース" - -msgid "" -"OpenStack believes in open source, open design, and open development, all in " -"an open community that encourages participation by anyone. The long-term " -"vision for OpenStack is to produce a ubiquitous open source cloud computing " -"platform that meets the needs of public and private cloud providers " -"regardless of size. OpenStack services control large pools of compute, " -"storage, and networking resources throughout a data center." -msgstr "" -"OpenStack は、オープンソース、オープン設計、オープン開発を信じています。すべ" -"ては、あらゆる人の参加を奨励するオープンコミュニティにより行われています。" -"OpenStack の長期ビジョンは、規模によらず、パブリッククラウドやプライベートク" -"ラウドのプロバイダーの要求を満たす、ユビキタスなオープンソースのクラウドコン" -"ピューティングソフトウェアを作成することです。OpenStack のサービスは、データ" -"センター全体のコンピュート、ストレージ、ネットワークの大規模なリソースプール" -"を制御します。" - -msgid "OpenStack community members" -msgstr "OpenStack コミュニティーメンバー" - -msgid "" -"OpenStack follows a six month release cycle, typically releasing in April/" -"May and October/November each year. At the start of each cycle, the " -"community gathers in a single location for a design summit. At the summit, " -"the features for the coming releases are discussed, prioritized, and " -"planned. The below figure shows an example release cycle, with dates showing " -"milestone releases, code freeze, and string freeze dates, along with an " -"example of when the summit occurs. Milestones are interim releases within " -"the cycle that are available as packages for download and testing. Code " -"freeze is putting a stop to adding new features to the release. String " -"freeze is putting a stop to changing any strings within the source code." -msgstr "" -"OpenStack は6ヶ月のリリースサイクルを取っており、通常は4/5月と10/11月にリリー" -"スが行われます。各リリースサイクルの最初に、OpenStack コミュニティは一ヶ所に" -"集まりデザインサミットを行います。サミットでは、次のリリースでの機能が議論さ" -"れ、優先度付けと計画が行われます。以下の図は、リリースサイクルの一例で、サ" -"ミットが行われて以降のマイルストーンリリース、Code Freeze、String Freeze など" -"が記載されています。マイルストーンはリリースサイクル内での中間リリースで、テ" -"スト用にパッケージが作成され、ダウンロードできるようになります。 Code Freeze " -"では、そのリリースに向けての新機能の追加が凍結されます。String Freeze は、" -"ソースコード内の文字列の変更が凍結されることを意味します。" - -msgid "" -"OpenStack images can often be thought of as \"virtual machine templates.\" " -"Images can also be standard installation media such as ISO images. " -"Essentially, they contain bootable file systems that are used to launch " -"instances." -msgstr "" -"OpenStack のイメージはしばしば「仮想マシンテンプレート」と考えることができま" -"す。イメージは ISO イメージのような標準的なインストールメディアの場合もありま" -"す。基本的に、インスタンスを起動するために使用されるブート可能なファイルシス" -"テムを含みます。" - -msgid "" -"OpenStack is an open source platform that lets you build an :term:" -"`Infrastructure-as-a-Service (IaaS)` cloud that runs on commodity hardware." 
-msgstr "" -"OpenStack はオープンソースプラットフォームで、OpenStack を使うと、コモディ" -"ティハードウェア上で動作する :term:`Infrastructure as a Service (IaaS)` クラ" -"ウドを自分で構築できます。" - -msgid "" -"OpenStack is designed for horizontal scalability, so you can easily add new " -"compute, network, and storage resources to grow your cloud over time. In " -"addition to the pervasiveness of massive OpenStack public clouds, many " -"organizations, such as PayPal, Intel, and Comcast, build large-scale private " -"clouds. OpenStack offers much more than a typical software package because " -"it lets you integrate a number of different technologies to construct a " -"cloud. This approach provides great flexibility, but the number of options " -"might be daunting at first." -msgstr "" -"OpenStack は水平的にスケールするよう設計されているため、クラウドの拡大に合わ" -"せて新しいコンピュート、ネットワーク、ストレージのリソースを簡単に追加できま" -"す。大規模 OpenStack パブリッククラウドの広播性に加えて、PayPal、Intel、" -"Comcast などの多くの組織が大規模なプライベートクラウドを構築しています。" -"OpenStack は、クラウドを構築するためのいくつかの異なる技術を統合できるので、" -"一般的なソフトウェアよりも多くのものを提供します。このアプローチにより、素晴" -"らしい柔軟性を提供しますが、始めは数多くのオプションにより圧倒されるかもしれ" -"ません。" - -msgid "" -"OpenStack is intended to work well across a variety of installation flavors, " -"from very small private clouds to large public clouds. To achieve this, the " -"developers add configuration options to their code that allow the behavior " -"of the various components to be tweaked depending on your needs. " -"Unfortunately, it is not possible to cover all possible deployments with the " -"default configuration values." -msgstr "" -"OpenStack は、非常に小さなプライベートクラウドから大規模なパブリッククラウド" -"まで様々な構成でうまく動くことを意図して作られています。これを実現するため、" -"開発者はコードに設定オプションを用意し、要件にあわせて種々のコンポーネントの" -"振る舞いを細かく調整できるようにしています。しかし、残念ながら、デフォルトの" -"設定値ですべてのデプロイメントに対応することはできません。" - -msgid "" -"OpenStack might not do everything you need it to do out of the box. To add a " -"new feature, you can follow different paths." -msgstr "" -"OpenStack はあなたが必要とするすべてのことをしてくれるわけではないかもしれま" -"せん。新しい機能を追加するために、いくつかの方法に従うことができます。" - -msgid "OpenStack on OpenStack (TripleO)" -msgstr "OpenStack on OpenStack (TripleO)" - -msgid "" -"OpenStack services use the standard logging levels, at increasing severity: " -"TRACE, DEBUG, INFO, AUDIT, WARNING, ERROR, and CRITICAL. That is, messages " -"only appear in the logs if they are more \"severe\" than the particular log " -"level, with DEBUG allowing all log statements through. For example, TRACE is " -"logged only if the software has a stack trace, while INFO is logged for " -"every message including those that are only for information." -msgstr "" -"OpenStack サービスは標準のロギングレベルを利用しています。重要度のレベルは次" -"の通りです(重要度の低い順): TRACE、DEBUG、INFO、AUDIT、WARNING、ERROR、" -"CRTICAL。特定のログレベルより「重要」な場合のみメッセージはログに出力されま" -"す。ログレベルDEBUGの場合、すべてのログが出力されます。TRACEの場合、ソフト" -"ウェアがスタックトレースを持つ場合にのみログに出力されます。INFOの場合、情報" -"のみのメッセージも含めて出力されます。" - -msgid "" -"OpenStack truly welcomes your ideas (and contributions) and highly values " -"feedback from real-world users of the software. By learning a little about " -"the process that drives feature development, you can participate and perhaps " -"get the additions you desire." -msgstr "" -"OpenStack は、あなたのアイディア (およびコントリビューション) を本当に歓迎し" -"ています。また、実世界のソフトウェアのユーザーからのフィードバックに高い価値" -"をおきます。機能開発を推進するプロセスについて少し理解することにより、参加で" -"き、あなたの要望を追加できるかもしれません。" - -msgid "" -"OpenStack volumes are persistent block-storage devices that may be attached " -"and detached from instances, but they can be attached to only one instance " -"at a time. Similar to an external hard drive, they do not provide shared " -"storage in the way a network file system or object store does. 
It is left to " -"the operating system in the instance to put a file system on the block " -"device and mount it, or not." -msgstr "" -"OpenStack のボリュームは、インスタンスから接続および切断できる、永続的なブ" -"ロックストレージデバイスです。ただし、一度に接続できるのは 1 インスタンスだけ" -"です。外部ハードディスクと似ています。ネットワークファイルシステムやオブジェ" -"クトストアがしているような共有ストレージは提供されません。ブロックデバイス上" -"にファイルシステムを構築し、それをマウントするかどうかは、インスタンス内のオ" -"ペレーティングシステムに任されます。" - -msgid "" -"OpenStack's collection of different components interact with each other " -"strongly. For example, uploading an image requires interaction from ``nova-" -"api``, ``glance-api``, ``glance-registry``, keystone, and potentially " -"``swift-proxy``. As a result, it is sometimes difficult to determine exactly " -"where problems lie. Assisting in this is the purpose of this section." -msgstr "" -"OpenStack は、異なるコンポーネント同士が互いに強く連携して動作しています。た" -"とえば、イメージのアップロードでは、 ``nova-api``, ``glance-api``, ``glance-" -"registry``, keystone が連携する必要があります。 ``swift-proxy`` も関係する場" -"合があります。その結果、時として問題が発生している箇所を正確に特定することが" -"難しくなります。これを支援することがこのセクションの目的です。" - -msgid "Operate with consistency groups" -msgstr "整合性グループの操作" - -msgid "Operating with a share" -msgstr "共有の運用" - -msgid "Operation based" -msgstr "操作ベース" - -msgid "Option 1" -msgstr "オプション 1" - -msgid "Option 2" -msgstr "オプション 2" - -msgid "Option 3" -msgstr "オプション 3" - -msgid "" -"Optional property that allows created servers to have a different bandwidth " -"cap from that defined in the network they are attached to. This factor is " -"multiplied by the rxtx\\_base property of the network. Default value is 1.0 " -"(that is, the same as the attached network)." -msgstr "" -"作成したサーバーが接続されたネットワークにおける定義と異なる帯域制限を持てる" -"ようにするプロパティ。これはオプションです。この要素はネットワークの rxtx" -"\\_base プロパティの倍数です。既定の値は 1.0 です (つまり、接続されたネット" -"ワークと同じです)。" - -msgid "Optional swap space allocation for the instance." -msgstr "インスタンスに割り当てられるスワップ空間。これはオプションです。" - -msgid "Optionally, migrate the instances back to their original compute node." -msgstr "" -"インスタンスを元のコンピュートノードにマイグレーションすることもできます。" - -msgid "Orchestration" -msgstr "オーケストレーション" - -msgid "Orchestration service" -msgstr "Orchestration サービス" - -msgid "Other CLI Options" -msgstr "他の CLI オプション" - -msgid "Other backup considerations include:" -msgstr "さらにバックアップの考慮点として以下があげられます。" - -msgid "" -"Other services follow the same process, with their respective directories " -"and databases." -msgstr "" -"他のサービスもそれぞれのディレクトリとデータベースで同じ処理となります。" - -msgid "Output from ``/var/log/nova/nova-api.log`` on :term:`Grizzly`:" -msgstr ":term:`Grizzly` における ``/var/log/nova/nova-api.log`` の出力:" - -msgid "Output from ``/var/log/nova/nova-api.log`` on :term:`Havana`:" -msgstr ":term:`Havana` における ``/var/log/nova/nova-api.log`` の出力:" - -msgid "" -"Packets, now tagged with the external VLAN tag, then exit onto the physical " -"network via ``eth1``. The Layer2 switch this interface is connected to must " -"be configured to accept traffic with the VLAN ID used. The next hop for this " -"packet must also be on the same layer-2 network." -msgstr "" -"パケットは、いま外部 VLAN タグを付けられ、 ``eth1`` 経由で物理ネットワークに" -"出ていきます。このインターフェースが接続されている L2 スイッチは、使用される " -"VLAN ID を持つ通信を許可するよう設定する必要があります。このパケットの次の" -"ホップも、同じ L2 ネットワーク上になければいけません。" - -msgid "" -"Partition all drives in the same way in a horizontal fashion, as shown in :" -"ref:`partition_setup`." -msgstr "" -":ref:`partition_setup` にあるように、すべてのドライブを同じように並列してパー" -"ティショニングにします。" - -msgid "" -"Partitioning, which provides greater flexibility for layout of operating " -"system and swap space, as described below." 
-msgstr "" -"パーティショニング。以下に説明されている通り、オペレーティングシステムと " -"Swap 領域のレイアウトにおける柔軟性がはるかに高くになります。" - -msgid "Password" -msgstr "パスワード" - -msgid "Perform a backup" -msgstr "バックアップの実行" - -msgid "" -"Perform some cleaning of the environment prior to starting the upgrade " -"process to ensure a consistent state. For example, instances not fully " -"purged from the system after deletion might cause indeterminate behavior." -msgstr "" -"確実に整合性のある状態にするために、アップグレード作業を始める前にいくつか環" -"境のクリーンアップを実行します。例えば、削除後に完全削除されていないインスタ" -"ンスにより、不確実な挙動を引き起こす可能性があります。" - -msgid "Perform source NAT on outgoing traffic." -msgstr "送信方向にソース NAT 実行。" - -msgid "Perform the day-to-day tasks required to administer a cloud." -msgstr "クラウドを管理する上で必要となる日々のタスクの実行。" - -msgid "Performance and Optimizing" -msgstr "パフォーマンスと最適化" - -msgid "" -"Performance increased greatly after deleting the old records and my new " -"deployment continues to behave well." -msgstr "" -"古いレコードの削除後、パフォーマンスが大幅に向上しました。新しい環境は順調に" -"動作しつづけています。" - -msgid "" -"Periodic tasks are important to understand because of limitations in the " -"threading model that OpenStack uses. OpenStack uses cooperative threading in " -"Python, which means that if something long and complicated is running, it " -"will block other tasks inside that process from running unless it " -"voluntarily yields execution to another cooperative thread." -msgstr "" -"周期的タスクは、OpenStack が使用しているスレッドモデルにおける制限を理解する" -"上で重要です。OpenStack は Python の協調スレッドを使用しています。このこと" -"は、何か長く複雑な処理が実行された場合、その処理が自発的に別の協調スレッドに" -"実行を譲らない限り、そのプロセス内の他のタスクの実行が停止されることを意味し" -"ます。" - -msgid "" -"Pick a service endpoint from your service catalog, such as compute. Try a " -"request, for example, listing instances (servers):" -msgstr "" -"サービスカタログから、サービスエンドポイント (例: コンピュート) を選択しま" -"す。要求を試します。例えば、インスタンス (サーバー) の一覧表示を行います。" - -msgid "Place the tenant ID in a variable:" -msgstr "テナント ID を変数に格納します。" - -msgid "Planned Maintenance" -msgstr "計画メンテナンス" - -msgid "Plug and Play OpenStack" -msgstr "プラグアンドプレイ OpenStack" - -msgid "" -"Policies are triggered by an OpenStack policy engine whenever one of them " -"matches an OpenStack API operation or a specific attribute being used in a " -"given operation. For instance, the engine tests the ``create:compute`` " -"policy every time a user sends a ``POST /v2/{tenant_id}/servers`` request to " -"the OpenStack Compute API server. Policies can be also related to specific :" -"term:`API extensions `. For instance, if a user needs an " -"extension like ``compute_extension:rescue``, the attributes defined by the " -"provider extensions trigger the rule test for that operation." -msgstr "" -"ポリシーのいずれかが OpenStack API 操作、もしくは指定された操作で使用されてい" -"る特定の属性に一致する場合、ポリシーが OpenStack ポリシーエンジンにより呼び出" -"されます。たとえば、ユーザーが ``POST /v2/{tenant_id}/servers`` リクエスト" -"を OpenStack Compute API サーバーに送信したときに必ず、エンジンが ``create:" -"compute`` ポリシーを評価します。ポリシーは特定の :term:`API 拡張 ` に関連づけることもできます。たとえば、ユーザーが " -"``compute_extension:rescue`` のような拡張に対して要求を行った場合、プロバイ" -"ダー拡張により定義された属性は、その操作に対するルールテストを呼び出します。" - -msgid "" -"Policies specify access criteria for specific operations, possibly with fine-" -"grained control over specific attributes." -msgstr "" -"特定の操作に対するアクセス基準を指定するポリシー。特定の属性に対する詳細な制" -"御も可能です。" - -msgid "Pre-upgrade considerations" -msgstr "アップグレード前の考慮事項" - -msgid "Pre-upgrade testing environment" -msgstr "テスト環境の事前アップグレード" - -msgid "Preface" -msgstr "はじめに" - -msgid "Prepare any quarterly reports on usage and statistics." -msgstr "使用量と統計に関する四半期レポートを準備します。" - -msgid "Prerequisites" -msgstr "前提" - -msgid "Press **Ctrl+A** followed by **0**." 
-msgstr "**Ctrl+A** に続けて **0** を押します。" - -msgid "Press **Ctrl+A** followed by **3** to check the log output." -msgstr "**Ctrl+A** に続けて **3** を押して、ログ出力を確認します。" - -msgid "" -"Press **Ctrl+A** followed by **3** to check the log output. Look at the " -"swift log statements again, and among the log statements, you'll see the " -"lines:" -msgstr "" -"**Ctrl+A** に続けて **3** を押して、ログ出力を確認します。再び swift のログを" -"確認すると、ログの中に以下の行があるでしょう。" - -msgid "Press **Ctrl+A** followed by **3**." -msgstr "**Ctrl+A** に続けて **3** を押します。" - -msgid "Press **Ctrl+A** followed by **9**." -msgstr "**Ctrl+A** に続けて **9** を押します。" - -msgid "" -"Press **Ctrl+A** followed by **N** until you reach the ``n-sch`` screen." -msgstr "``n-sch`` 画面が表示されるまで **Ctrl+A** に続けて **N** を押します。" - -msgid "Press **Ctrl+C** to kill the service." -msgstr "**Ctrl+C** を押し、サービスを強制停止します。" - -msgid "Press **Enter** to run it." -msgstr "**Enter** キーを押し、実行します。" - -msgid "Press **Up Arrow** to bring up the last command." -msgstr "**上矢印キー** を押し、最後のコマンドを表示させます。" - -msgid "Press Enter to run it." -msgstr "Enter キーを押し、実行します。" - -msgid "Press  **Ctrl+A** followed by **0**." -msgstr "**Ctrl+A** に続けて **0** を押します。" - -msgid "Prevent DHCP Spoofing by VM." -msgstr "仮想マシンによる DHCP スプーフィングの防止。" - -msgid "" -"Previously, all services had an availability zone. Currently, only the " -"``nova-compute`` service has its own availability zone. Services such as " -"``nova-scheduler``, ``nova-network``, and ``nova-conductor`` have always " -"spanned all availability zones." -msgstr "" -"以前のバージョンでは、全サービスにアベイラビリティゾーンがありました。現在" -"は、``nova-compute`` サービスには独自のアベイラビリティゾーンがあります。" -"``nova-scheduler``、``nova-network``、``nova-conductor`` などのサービスは、常" -"にすべてのアベイラビリティゾーンに対応します。" - -msgid "Primary project" -msgstr "主プロジェクト" - -msgid "Priority" -msgstr "優先度" - -msgid "Private Flavors" -msgstr "プライベートフレーバー" - -msgid "Process Monitoring" -msgstr "プロセス監視" - -msgid "" -"Profit. You can now see traffic on ``patch-tun`` by running :command:" -"`tcpdump -i snooper0`." -msgstr "" -"これでうまくいきます。 :command:`tcpdump -i snooper0` を実行して、 ``patch-" -"tun`` の通信を参照できます。" - -msgid "" -"Project ``771ed149ef7e4b2b88665cc1c98f77ca`` will now have access to image " -"``733d1c44-a2ea-414b-aca7-69decf20d810``." -msgstr "" -"これで、プロジェクト ``771ed149ef7e4b2b88665cc1c98f77ca`` がイメージ " -"``733d1c44-a2ea-414b-aca7-69decf20d810`` にアクセスできます。" - -msgid "Projects or Tenants?" -msgstr "プロジェクトかテナントか?" - -msgid "Property name" -msgstr "プロパティ名" - -msgid "" -"Provides best practices and conceptual information about securing an " -"OpenStack cloud" -msgstr "" -"OpenStack クラウドを安全にするためのベストプラクティスと基本的な考え方につい" -"て書かれています" - -msgid "Provision an instance" -msgstr "インスタンスの配備" - -msgid "Public network connectivity for user virtual machines" -msgstr "ユーザーの仮想マシンに対するパブリックネットワーク接続性" - -msgid "" -"Put the image ID for the only installed image into an environment variable:" -msgstr "インストール済みイメージのみのイメージ ID を環境変数に設定します。" - -msgid "Python" -msgstr "Python" - -msgid "" -"QEMU provides a guest agent that can be run in guests running on KVM " -"hypervisors. This guest agent, on Windows VMs, coordinates with the Windows " -"VSS service to facilitate a workflow which ensures consistent snapshots. " -"This feature requires at least QEMU 1.7. 
The relevant guest agent commands " -"are:" -msgstr "" -"QEMU は、KVM ハイパーバイザーにおいて動作しているゲストで実行できるゲストエー" -"ジェントを提供しています。Windows 仮想マシンの場合、このゲストエージェント" -"は、Windows VSS サービスと連携して、スナップショットの整合性を保証する流れを" -"楽にします。この機能は QEMU 1.7 以降を必要とします。関連するゲストエージェン" -"トのコマンドは次のとおりです。" - -msgid "Quarterly" -msgstr "四半期ごと" - -msgid "Quota" -msgstr "クォータ" - -msgid "Quotas" -msgstr "クォータ" - -msgid "" -"RAID is not used in this simplistic one-drive setup because generally for " -"production clouds, you want to ensure that if one disk fails, another can " -"take its place. Instead, for production, use more than one disk. The number " -"of disks determine what types of RAID arrays to build." -msgstr "" -"通常、本番環境のクラウドでは、1 つのディスクに問題が発生した場合、別のディス" -"クが必ず稼働するようにするため、RAID は、このシンプルな、ドライブ 1 つの設定" -"では使用されません。本番環境では、ディスクを 1 つ以上使用します。ディスク数に" -"より、どのようなタイプの RAID 配列を構築するか決定します。" - -msgid "RAM" -msgstr "メモリー" - -msgid "RXTX_Factor" -msgstr "RXTX_Factor" - -msgid "RabbitMQ Web Management Interface or rabbitmqctl" -msgstr "RabbitMQ Web管理インターフェイス および rabbitmqctl" - -msgid "RabbitMQ alerts" -msgstr "RabbitMQ アラート" - -msgid "RabbitMQ service hangs" -msgstr "RabbitMQ サービス停止" - -msgid "RabbitMQ troubleshooting" -msgstr "RabbitMQ トラブルシューティング" - -msgid "Rate limits" -msgstr "レートリミット" - -msgid "" -"Read about how to track the OpenStack roadmap through the open and " -"transparent development processes." -msgstr "" -"オープンかつ透明な OpenStack の開発プロセスからロードマップを把握する方法をま" -"とめています。" - -msgid "" -"Read more detailed instructions for launching an instance from a bootable " -"volume in the `OpenStack End User Guide `__." -msgstr "" -"詳細は `OpenStack エンドユーザーガイド `__ の「ボリュームからのイン" -"スタンスの起動」にある説明を参照してください。" - -msgid "" -"Read through the JSON response to get a feel for how the catalog is laid out." -msgstr "JSONレスポンスを読むことで、カタログを把握することができます。" - -msgid "Reading the Logs" -msgstr "ログの読み方" - -msgid "Rebooting a Cloud Controller or Storage Proxy" -msgstr "クラウドコントローラーとストレージプロキシの再起動" - -msgid "Rebooting a Storage Node" -msgstr "ストレージノードの再起動" - -msgid "Recovering Backups" -msgstr "バックアップのリカバリー" - -msgid "" -"Recovering backups is a fairly simple process. To begin, first ensure that " -"the service you are recovering is not running. For example, to do a full " -"recovery of ``nova`` on the cloud controller, first stop all ``nova`` " -"services:" -msgstr "" -"バックアップのリカバリーは単純です。始めにリカバリー対象のサービスが停止して" -"いることを確認します。例を挙げると、クラウドコントローラー上の ``nova`` の完" -"全リカバリーを行なう場合、最初に全ての ``nova`` サービスを停止します。" - -msgid "" -"Refer to the following upgrade notes for information on upgrading specific " -"OpenStack services:" -msgstr "" -"特定の OpenStack サービスのアップグレードに関する情報は、以下のアップグレード" -"ノートを参照してください。" - -msgid "" -"Regardless of the overcommit ratio, an instance can not be placed on any " -"physical node with fewer raw (pre-overcommit) resources than instance flavor " -"requires." -msgstr "" -"オーバーコミット比率に関係なく、フレーバーの要求するリソースよりも(オーバー" -"コミットの前に)リソースが少ない物理ノードにはインスタンスは配置されません。" - -msgid "Regions" -msgstr "リージョン" - -msgid "" -"Release notes are maintained on the OpenStack wiki, and also shown here:" -msgstr "リリースノートは OpenStack Wiki で管理され、以下で公開されています。" - -msgid "Releases" -msgstr "リリース番号" - -msgid "" -"RemoteGroups are a dynamic way of defining the CIDR of allowed sources. The " -"user specifies a RemoteGroup (security group name) and then all the users' " -"other instances using the specified RemoteGroup are selected dynamically. " -"This dynamic selection alleviates the need for individual rules to allow " -"each new member of the cluster." 
-msgstr "" -"リモートグループは許可されたソースの CIDR を動的に定義する方法です。ユーザー" -"がリモートグループ (セキュリティグループ名) を指定します。これにより、指定さ" -"れたリモートグループを使用する、ユーザーの他のインスタンスが動的にすべて選択" -"されます。この動的な選択により、クラスターのそれぞれの新しいメンバーを許可す" -"る、個別のルールの必要性を軽減できます。" - -msgid "Remove all packages." -msgstr "すべてのパッケージを削除します。" - -msgid "Remove databases." -msgstr "データベースを削除します。" - -msgid "Remove remaining files." -msgstr "残っているファイルを削除します。" - -msgid "Remove the repository for the previous release packages." -msgstr "旧リリースのパッケージのリポジトリーを削除します。" - -msgid "Repeat steps 7-8." -msgstr "手順 7-8 を繰り返します。" - -msgid "Replacement of Open vSwitch Plug-in with Modular Layer 2" -msgstr "Modular Layer 2 プラグインによる Open vSwitch プラグインの置換" - -msgid "Replacing Components" -msgstr "コンポーネントの交換" - -msgid "Replacing a Swift Disk" -msgstr "Swift ディスクの交換" - -msgid "Reset Share State" -msgstr "共有状態のリセット" - -msgid "Resource Alerting" -msgstr "リソースのアラート" - -msgid "" -"Resource alerting provides notifications when one or more resources are " -"critically low. While the monitoring thresholds should be tuned to your " -"specific OpenStack environment, monitoring resource usage is not specific to " -"OpenStack at all—any generic type of alert will work fine." -msgstr "" -"リソースアラート機能は、1 つ以上のリソースが致命的に少なくなった際に通知しま" -"す。閾値監視がお使いの OpenStack 環境で有効化されているべきですが、リソース使" -"用状況の監視は、まったく OpenStack 固有のことではありません。あらゆる汎用のア" -"ラート機能が適切に動作するでしょう。" - -msgid "Resource based" -msgstr "リソースベース" - -msgid "Resources" -msgstr "情報源" - -msgid "" -"Resources such as memory, disk, and CPU are generic resources that all " -"servers (even non-OpenStack servers) have and are important to the overall " -"health of the server. When dealing with OpenStack specifically, these " -"resources are important for a second reason: ensuring that enough are " -"available to launch instances. There are a few ways you can see OpenStack " -"resource usage. The first is through the :command:`nova` command:" -msgstr "" -"メモリ、ディスク、CPUのような一般的なリソースは、全てのサーバー(OpenStackに関" -"連しないサーバーにも)に存在するため、サーバーの状態監視において重要です。" -"OpenStackの場合、インスタンスを起動するために必要なリソースが確実に存在するか" -"の確認という点でも重要です。OpenStackのリソースを見るためには幾つかの方法が存" -"在します。 1 番目は :command:`nova` コマンド経由です。" - -msgid "Restart the RabbitMQ service on all of the controller nodes:" -msgstr "すべてのコントローラーにおいて RabbitMQ サービスを再起動します。" - -msgid "" -"Restart the ``swift proxy`` service to make swift use your middleware. Start " -"by switching to the ``swift-proxy`` screen:" -msgstr "" -"``swift proxy`` にこのミドルウェアを使わせるために、Swift プロキシサービスを" -"再起動します。``swift-proxy`` の screen セッションに切り替えてはじめてくださ" -"い。" - -msgid "" -"Restart the nova scheduler service to make nova use your scheduler. Start by " -"switching to the ``n-sch`` screen:" -msgstr "" -"Nova にこのスケジューラーを使わせるために、Nova スケジューラーサービスを再起" -"動します。 ``n-sch`` screen セッションに切り替えてはじめてください。" - -msgid "Restore databases from backup." -msgstr "バックアップからデータベースをリストアします。" - -msgid "" -"Restore databases from the ``RELEASE_NAME-db-backup.sql`` backup file that " -"you created with the :command:`mysqldump` command during the upgrade process:" -msgstr "" -"アップグレードプロセス中に :command:`mysqldump` コマンドを用いて作成した、 " -"``grizzly-db-backup.sql`` バックアップファイルからデータベースを復元します。" - -msgid "" -"Resume I/O to the disks, similar to the Linux ``fsfreeze -u`` operation." -msgstr "" -"ディスクへの I/O を再開します。Linux の ``fsfreeze -u`` 処理と似ています。" - -msgid "Resume the instance using :command:`virsh`:" -msgstr ":command:`virsh` を使用して、インスタンスを再開します。" - -msgid "Resume the instance." 
-msgstr "インスタンスを再開します。" - -msgid "Retrieve image for upload" -msgstr "アップロードするためのイメージの取得" - -msgid "" -"Reverse the direction to see the path of a ping reply. From this path, you " -"can see that a single packet travels across four different NICs. If a " -"problem occurs with any of these NICs, a network issue occurs." -msgstr "" -"ping 応答のパスを確認するために、方向を反転させます。この経路説明によって、あ" -"なたはパケットが4つの異なるNICの間を行き来していることがわかったでしょう。こ" -"れらのどのNICに問題が発生しても、ネットワークの問題となるでしょう。" - -msgid "Review and plan any major OpenStack upgrades." -msgstr "OpenStack のメジャーアップグレードの内容を確認し、その計画を立てます。" - -msgid "Review and plan any necessary cloud additions." -msgstr "クラウドの追加の必要性を検討し、計画を立てます。" - -msgid "Review common actions in your cloud." -msgstr "構築したクラウドにおいて一般的なアクションをレビューする" - -msgid "Review usage and trends over the past quarter." -msgstr "この四半期における使用量および傾向を確認します。" - -msgid "Role" -msgstr "役割" - -msgid "Role-based rules" -msgstr "ロールに基づいたルール" - -msgid "Roll Your Own OpenStack" -msgstr "自分の OpenStack の展開" - -msgid "Roll back configuration files." -msgstr "設定ファイルをロールバックします。" - -msgid "Roll back packages." -msgstr "パッケージをロールバックします。" - -msgid "Roll these tests into an alerting system." -msgstr "それらのテストをアラートシステムに組み込む" - -msgid "Rolling back a failed upgrade" -msgstr "失敗したアップグレードのロールバック" - -msgid "" -"Rolling back your environment should be the final course of action since you " -"are likely to lose any data added since the backup." -msgstr "" -"お使いの環境をロールバックすることは、バックアップ以降に追加されたデータが失" -"われることになるので、最終手段にすべきです。" - -msgid "" -"Run the :command:`rabbitmqctl status` to view the current file descriptor " -"limits:" -msgstr "" -":command:`rabbitmqctl status` を実行して、現在のファイル記述子の制限を表示し" -"ます。" - -msgid "Run the following command to view the current iptables configuration:" -msgstr "iptablesの現在の構成を見るには、以下のコマンドを実行します。" - -msgid "Run the following command to view the properties of existing images:" -msgstr "" -"既存のイメージのプロパティを表示するために、以下のコマンドを実行します。" - -msgid "Run this on the command line of the following areas:" -msgstr "このコマンドは以下の場所で実行します。" - -msgid "Running Daemons on the CLI" -msgstr "コマンドラインでのデーモンの実行" - -msgid "Running Instances" -msgstr "稼働中のインスタンス" - -msgid "" -"Running ``sync`` writes dirty buffers (buffered blocks that have been " -"modified but not written yet to the disk block) to disk." -msgstr "" -"``sync`` を実行することにより、ダーティーバッファー (変更されたが、ディスクに" -"書き込まれていないバッファー済みブロック) をディスクに書き込みます。" - -msgid "Running programs have written their contents to disk" -msgstr "実行中のプログラムがコンテンツをディスクに書き込んだこと" - -msgid "SQL back end" -msgstr "SQL バックエンド" - -msgid "Save the configuration files on all nodes. For example:" -msgstr "すべてのノードで設定ファイルを保存します。例:" - -msgid "Scalable Hardware" -msgstr "スケーラブルハードウェア" - -msgid "Scheduler Improvements" -msgstr "スケジューラーの改善" - -msgid "Scheduling to hosts with trusted hardware support." -msgstr "" -"トラステッドコンピューティング機能に対応したホスト群に対してスケジューリング" -"したい場合" - -msgid "" -"Secondly, DAIR's shared ``/var/lib/nova/instances`` directory contributed to " -"the problem. Since all compute nodes have access to this directory, all " -"compute nodes periodically review the \\_base directory. If there is only " -"one instance using an image, and the node that the instance is on is down " -"for a few minutes, it won't be able to mark the image as still in use. " -"Therefore, the image seems like it's not in use and is deleted. When the " -"compute node comes back online, the instance hosted on that node is unable " -"to start." 
-msgstr "" -"次に、DAIR の共有された ``/var/lib/nova/instances`` が問題を助長した。全コン" -"ピュートノードがこのディレクトリにアクセスするため、全てのコンピュートノード" -"は定期的に \\_base ディレクトリを見直していた。あるイメージを使用しているイン" -"スタンスが1つだけあり、そのインスタンスが存在するノードが数分間ダウンした場" -"合、そのイメージが使用中であるという印を付けられなくなる。それゆえ、イメージ" -"は使用中に見えず、削除されてしまったのだ。そのコンピュートノードが復帰した" -"際、そのノード上でホスティングされていたインスタンスは起動できない。" - -msgid "Security Configuration for Compute, Networking, and Storage" -msgstr "Compute、Networking、Storage のセキュリティ設定" - -msgid "Security Groups" -msgstr "セキュリティグループ" - -msgid "Security group rules" -msgstr "セキュリティグループルール" - -msgid "Security groups" -msgstr "セキュリティグループ" - -msgid "" -"Security groups are sets of IP filter rules that are applied to an " -"instance's networking. They are project specific, and project members can " -"edit the default rules for their group and add new rules sets. All projects " -"have a \"default\" security group, which is applied to instances that have " -"no other security group defined. Unless changed, this security group denies " -"all incoming traffic." -msgstr "" -"セキュリティグループは、インスタンスのネットワークに適用される、IP フィルター" -"ルールの組です。それらはプロジェクト固有です。プロジェクトメンバーがそれらの" -"グループの標準ルールを編集でき、新しいルールを追加できます。すべてのプロジェ" -"クトが \"default\" セキュリティグループを持ちます。他のセキュリティグループが" -"定義されていないインスタンスには \"default\" セキュリティグループが適用されま" -"す。\"default\" セキュリティグループは、ルールを変更しない限り、すべての受信" -"トラフィックを拒否します。" - -msgid "" -"Security groups for the current project can be found on the OpenStack " -"dashboard under :guilabel:`Access & Security`. To see details of an existing " -"group, select the :guilabel:`Edit Security Group` action for that security " -"group. Obviously, modifying existing groups can be done from this edit " -"interface. There is a :guilabel:`Create Security Group` button on the main :" -"guilabel:`Access & Security` page for creating new groups. We discuss the " -"terms used in these fields when we explain the command-line equivalents." -msgstr "" -"現在のプロジェクトのセキュリティグループが、OpenStack dashboard の :guilabel:" -"`アクセスとセキュリティ` にあります。既存のグループの詳細を表示するには、セ" -"キュリティグループの :guilabel:`編集` を選択します。自明ですが、この :" -"guilabel:`編集` インターフェースから既存のグループを変更できます。新しいグ" -"ループを作成するための :guilabel:`セキュリティグループの作成` ボタンが、メイ" -"ンの :guilabel:`アクセスとセキュリティ` ページにあります。同等のコマンドライ" -"ンを説明するとき、これらの項目において使用される用語について説明します。" - -msgid "" -"Security groups, as discussed earlier, are typically required to allow " -"network traffic to an instance, unless the default security group for a " -"project has been modified to be more permissive." -msgstr "" -"セキュリティグループは、前に記載したように、プロジェクトのデフォルトのセキュ" -"リティグループがより許可するよう変更されていない限り、インスタンスへのネット" -"ワーク通信を許可するために一般的に必要となります。" - -msgid "" -"See `Share Networks `__ of “Shared File Systems” section of " -"OpenStack Administrator Guide document for more details." -msgstr "" -"詳細は OpenStack Administrator Guide の Shared File Systems セクションにある " -"`Share Networks `__ を参照してください。" - -msgid "Select the :guilabel:`Identity` tab in the left navigation bar." -msgstr "" -"左側にあるナビゲーションバーの :guilabel:`ユーザー管理` タブを選択します。" - -msgid "Semiannually" -msgstr "1 年に 2 回" - -msgid "Send unmatched traffic to the fallback chain." -msgstr "一致しない通信のフォールバックチェインへの送信。" - -msgid "Sep 22, 2011" -msgstr "2011年9月22日" - -msgid "Sep 22, 2014" -msgstr "2014年9月22日" - -msgid "Sep 27, 2012" -msgstr "2012年9月27日" - -msgid "Series" -msgstr "シリーズ" - -msgid "Seriously, Google." 
-msgstr "マジ?Google。" - -msgid "Server Group Members" -msgstr "サーバーグループのメンバー" - -msgid "Server Groups" -msgstr "サーバーグループ" - -msgid "Server load" -msgstr "サーバー負荷" - -msgid "Servers and Services" -msgstr "サーバーとサービス" - -msgid "Service" -msgstr "サービス" - -msgid "Service specific upgrade instructions" -msgstr "サービス固有のアップグレード手順" - -msgid "Services" -msgstr "サービス" - -msgid "Set Block Storage Quotas" -msgstr "Block Storage のクォータの設定" - -msgid "Set Compute Service Quotas" -msgstr "コンピュートサービスのクォータの設定" - -msgid "Set Image Quotas" -msgstr "イメージクォータの設定" - -msgid "Set Object Storage Quotas" -msgstr "Object Storage のクォータの設定" - -msgid "" -"Set up the Key Manager service by editing /etc/nova/nova.conf and adding the " -"entries in the codeblock below" -msgstr "" -"``/etc/nova/nova.conf`` を編集して、以下のコードブロックにあるエントリーを追" -"加することにより、Key Manager サービスをセットアップします。" - -msgid "" -"Several minutes after ``nova-network`` is restarted, you should see new " -"dnsmasq processes running:" -msgstr "" -"``nova-network`` の再起動から数分後、新たな dnsmasq プロセスが動いていること" -"が確認できるでしょう。" - -msgid "" -"Several pre-made images exist and can easily be imported into the Image " -"service. A common image to add is the CirrOS image, which is very small and " -"used for testing purposes. To add this image, simply do:" -msgstr "" -"いくつかの構築済みイメージが存在します。簡単に Image service の中にインポート" -"できます。追加する一般的なイメージは、非常に小さく、テスト目的に使用される " -"CirrOS イメージです。このイメージを追加するには、単に次のようにします。" - -msgid "Shared File Systems Service" -msgstr "Shared File Systems サービス" - -msgid "Sharing Images Between Projects" -msgstr "プロジェクト間のイメージの共有" - -msgid "Should backups be kept off-site?" -msgstr "オフサイトにバックアップを置くべきか?" - -msgid "" -"Shows OpenStack end users how to create and manage resources in an OpenStack " -"cloud with the OpenStack dashboard and OpenStack client commands" -msgstr "" -"OpenStack のエンドユーザーが、OpenStack Dashboard と OpenStack クライアントコ" -"マンドを使って、OpenStack クラウドのリソースの作成・管理を行う方法を説明して" -"います" - -msgid "" -"Shows a policy restricting the ability to manipulate flavors to " -"administrators using the Admin API only." -msgstr "" -"インスタンスタイプを操作する権限を、管理 API を使用する管理者だけに限定するポ" -"リシーを表します。" - -msgid "" -"Shows a rule that evaluates successfully if the current user is an " -"administrator or the owner of the resource specified in the request (tenant " -"identifier is equal)." -msgstr "" -"現在のユーザーが、管理者、またはリクエストで指定されたリソースの所有者 (テナ" -"ント識別子が同じ) であれば、成功であると評価されるルールを表します。" - -msgid "" -"Shows the default policy, which is always evaluated if an API operation does " -"not match any of the policies in ``policy.json``." -msgstr "" -"API 操作が ``policy.json`` のどのポリシーとも一致しなかった場合に、必ず評価さ" -"れる規定のポリシーを表します。" - -msgid "" -"Shows you how to obtain, create, and modify virtual machine images that are " -"compatible with OpenStack" -msgstr "" -"OpenStack で利用可能な仮想マシンイメージを取得、作成、更新する方法について説" -"明されています" - -msgid "" -"Shut down your compute node, perform the maintenance, and turn the node back " -"on." 
-msgstr "" -"コンピュートノードをシャットダウンし、メンテナンスを実行し、ノードをオンライ" -"ンに戻します。" - -msgid "Shutting Down a Storage Node" -msgstr "ストレージノードのシャットダウン" - -msgid "Signature hash method = SHA-256" -msgstr "署名のハッシュ方法 = SHA-256" - -msgid "Signature hash methods: SHA-224, SHA-256, SHA-384, and SHA-512" -msgstr "署名のハッシュ方法: SHA-224、SHA-256、SHA-384、SHA-512" - -msgid "Signature key type = RSA-PSS" -msgstr "署名の鍵形式 = RSA-PSS" - -msgid "" -"Signature key types: DSA, ECC_SECT571K1, ECC_SECT409K1, ECC_SECT571R1, " -"ECC_SECT409R1, ECC_SECP521R1, ECC_SECP384R1, and RSA-PSS" -msgstr "" -"署名の鍵形式: DSA、ECC_SECT571K1、ECC_SECT409K1、ECC_SECT571R1、" -"ECC_SECT409R1、ECC_SECP521R1、ECC_SECP384R1、RSA-PSS" - -msgid "Signature verification will occur when Compute boots the signed image" -msgstr "Compute が署名付きイメージを起動するとき、署名が検証されます。" - -msgid "" -"Similarly, if you have an existing cloud and are looking to upgrade from " -"``nova-network`` to OpenStack Networking, you should have the option to " -"delay the upgrade for this period of time. However, each release of " -"OpenStack brings significant new innovation, and regardless of your use of " -"networking methodology, it is likely best to begin planning for an upgrade " -"within a reasonable timeframe of each release." -msgstr "" -"同様に、既存のクラウドを持ち、 ``nova-network`` から OpenStack Networking に" -"アップグレードするつもりである場合、この期間中のアップグレードを遅らせる選択" -"肢もあるでしょう。しかしながら、OpenStack の各リリースは新しい重要なイノベー" -"ションをもたらします。ネットワークの使用法によらず、各リリースの合理的な時間" -"枠の中でアップグレードの計画を始めることが最も良いでしょう。" - -msgid "" -"Since my database contained many records—over 1 million metadata records and " -"over 300,000 instance records in \"deleted\" or \"errored\" states—each " -"search took a long time. I decided to clean up the database by first " -"archiving a copy for backup and then performing some deletions using the " -"MySQL client. For example, I ran the following SQL command to remove rows of " -"instances deleted for over a year:" -msgstr "" -"データベースに 100 万以上のメタデータおよび 300,000 インスタンスのレコードが" -"「削除済み」または「エラー」状態で含まれていました。MySQL クライアントを使用" -"して、まずバックアップを取得し、データベースをクリーンアップし、いくつか削除" -"を実行することにしました。例えば、以下の SQL コマンドを実行して、1 年以上の間" -"に削除されたインスタンスの行を削除しました。" - -msgid "So it was a qemu/kvm bug." -msgstr "つまり、これは qemu/kvm のバグである。" - -msgid "" -"So many OpenStack resources are available online because of the fast-moving " -"nature of the project, but there are also resources listed here that the " -"authors found helpful while learning themselves." -msgstr "" -"プロジェクトの急速な成熟のため、数多くの OpenStack の情報源がオンラインで利用" -"可能ですが、執筆者が学習している間に有用だったリソースを一覧化しています。" - -msgid "" -"So, I found myself wondering what changed in the EC2 API on Havana that " -"might cause this to happen. Was it a bug or a normal behavior that I now " -"need to work around?" -msgstr "" -"そのため、この問題を引き起こしているかもしれない、Havana で EC2 API に行われ" -"た変更を自分で探しはじめました。これはバグなのか、回避策が必要となる通常の動" -"作なのか?" - -msgid "Some of the resources that you want to monitor include:" -msgstr "監視項目に含む幾つかのリソースをあげます。" - -msgid "Some other examples for Intelligent Alerting include:" -msgstr "インテリジェントなアラートのその他の例としては以下があります。" - -msgid "" -"Sometimes a compute node either crashes unexpectedly or requires a reboot " -"for maintenance reasons." -msgstr "" -"コンピュートノードは、予期せずクラッシュしたり、メンテナンスのために再起動が" -"必要になったりすることがときどきあります。" - -msgid "" -"Sometimes a user and a group have a one-to-one mapping. This happens for " -"standard system accounts, such as cinder, glance, nova, and swift, or when " -"only one user is part of a group." 
-msgstr "" -"ユーザーとグループは、一対一でマッピングされる場合があります。このようなマッ" -"ピングは cinder、glance、nova、swift などの標準システムアカウントや、グループ" -"にユーザーが 1 人しかいない場合に発生します。" - -msgid "" -"Sometimes an instance is terminated but the floating IP was not correctly " -"disassociated from that instance. Because the database is in an inconsistent " -"state, the usual tools to disassociate the IP no longer work. To fix this, " -"you must manually update the database." -msgstr "" -"しばしば、 Floating IP を正しく開放しないままインスタンスが終了されることがあ" -"ります。するとデータベースは不整合状態となるため、通常のツールではうまく開放" -"できません。解決するには、手動でデータベースを更新する必要があります。" - -msgid "Source ``openrc`` to set up your environment variables for the CLI:" -msgstr "``openrc`` を読み込み、CLI の環境変数を設定します。" - -msgid "Source openrc to set up your environment variables for the CLI:" -msgstr "openrc を読み込み、CLI の環境変数を設定します。" - -msgid "Specific Configuration Topics" -msgstr "設定に関する個別のトピック" - -msgid "" -"Specifies the size of a secondary ephemeral data disk. This is an empty, " -"unformatted disk and exists only for the life of the instance." -msgstr "" -"二次的な一時データディスクの容量を指定します。これは空の、フォーマットされて" -"いないディスクです。インスタンスの生存期間だけ存在します。" - -msgid "Specify quotas for existing users or tenants" -msgstr "既存のユーザまたはテナントのクォータを表示します" - -msgid "StackTach" -msgstr "StackTach" - -msgid "" -"Standard backup best practices apply when creating your OpenStack backup " -"policy. For example, how often to back up your data is closely related to " -"how quickly you need to recover from data loss." -msgstr "" -"OpenStackバックアップポリシーを作成する際、標準的なバックアップのベストプラク" -"ティスが適用できます。例えば、どの程度の頻度でバックアップを行なうかは、どの" -"くらい早くデータロスから復旧させる必要があるかに密接に関連しています。" - -msgid "Start the ``nova-compute`` service:" -msgstr "``nova-compute`` サービスを起動します。" - -msgid "Starting Instances" -msgstr "インスタンスの起動" - -msgid "Status" -msgstr "状態" - -msgid "Stop all OpenStack services." -msgstr "すべての OpenStack サービスを停止します。" - -msgid "Stop the ``nova-compute`` service:" -msgstr "``nova-compute`` サービスを停止します。" - -msgid "Storage Node Failures and Maintenance" -msgstr "ストレージノードの故障とメンテナンス" - -msgid "Summary" -msgstr "概要" - -msgid "" -"Support for global clustering of object storage servers is available for all " -"supported releases. You would implement these global clusters to ensure " -"replication across geographic areas in case of a natural disaster and also " -"to ensure that users can write or access their objects more quickly based on " -"the closest data center. You configure a default region with one zone for " -"each cluster, but be sure your network (WAN) can handle the additional " -"request and response load between zones as you add more zones and build a " -"ring that handles more zones. Refer to `Geographically Distributed Clusters " -"`_ in the documentation for additional information." -msgstr "" -"オブジェクトストレージサーバーのグローバルクラスターが、すべてのサポートされ" -"ているリリースで利用できます。自然災害の発生時に備えて地理的な地域をまたがっ" -"て確実に複製するために、またユーザーが最も近いデータセンターに基づいてより迅" -"速にオブジェクトにアクセスできるようにするために、これらのグローバルクラス" -"ターを導入できるでしょう。各クラスターに 1 つのゾーンを持つデフォルトのリー" -"ジョンを設定します。しかし、より多くのゾーンを追加して、より多くのゾーンを処" -"理するリングを構築するので、お使いのネットワーク (WAN) が、ゾーン間の追加リク" -"エストとレスポンスの負荷を処理できることを確認してください。詳細は " -"`Geographically Distributed Clusters `_ にあるドキュメ" -"ントを参照してください。" - -msgid "" -"Sure enough, the user had been periodically refreshing the console log page " -"on the dashboard and the 5G file was traversing the Rabbit cluster to get to " -"the dashboard." -msgstr "" -"思った通り、ユーザはダッシュボード上のコンソールログページを定期的に更新して" -"おり、ダッシュボードに向けて5GB のファイルが RabbitMQ クラスタを通過してい" -"た。" - -msgid "" -"Suspend I/O to the disks, similar to the Linux ``fsfreeze -f`` operation." 
-msgstr "" -"ディスクへの I/O を一時停止します。Linux の ``fsfreeze -f`` 処理と似ていま" -"す。" - -msgid "" -"Suspend the instance using the :command:`virsh` command, taking note of the " -"internal ID:" -msgstr "" -":command:`virsh` コマンドを使用してインスタンスを一時停止します。内部 ID を記" -"録します。" - -msgid "Suspend the instance using the ``virsh`` command." -msgstr "``virsh`` コマンドを使用して、インスタンスを一時停止します。" - -msgid "Swap" -msgstr "スワップ" - -msgid "" -"Swap space to free up memory for processes, as an independent area of the " -"physical disk used only for swapping and nothing else." -msgstr "" -"プロセス用にメモリーを空ける Swap 領域。物理ディスクから独立した、スワップの" -"みに使用される領域。" - -msgid "" -"Swift should notice the new disk and that no data exists. It then begins " -"replicating the data to the disk from the other existing replicas." -msgstr "" -"Swift は新しいディスクを認識します。また、データが存在しないことを認識しま" -"す。そうすると、他の既存の複製からディスクにデータを複製しはじめます。" - -msgid "" -"Switch back to the ``n-sch`` screen. Among the log statements, you'll see " -"the line:" -msgstr "" -"``n-sch`` 画面に切り替えます。ログ出力の中に、以下の行を見つけられます。" - -msgid "Syslog choices" -msgstr "Syslog の選択肢" - -msgid "Systems Administration" -msgstr "システム管理" - -msgid "" -"Sébastien Han has written excellent blogs and generously gave his permission " -"for re-use." -msgstr "" -"Sébastien Han は素晴らしいブログを書いてくれて、寛大にも再利用の許可を与えて" -"くれました。" - -msgid "Table OpenStack log locations" -msgstr "表: OpenStack のログの場所" - -msgid "Table. Example service restoration priority list" -msgstr "表: サービス復旧優先度一覧の例" - -msgid "Table. Flavor parameters" -msgstr "表: フレーバーのパラメーター" - -msgid "Table. OpenStack default flavors" -msgstr "表: OpenStack デフォルトのフレーバー" - -msgid "Table. OpenStack segregation methods" -msgstr "表: OpenStack 分離の手法" - -msgid "Table: Block Storage quota descriptions" -msgstr "表: Block Storage のクォータの説明" - -msgid "Tailing Logs" -msgstr "最新ログの確認" - -msgid "Taking Snapshots" -msgstr "スナップショットの取得" - -msgid "" -"Taking the first 11 characters, we can construct a device name of " -"tapff387e54-9e from this output." -msgstr "" -"この出力から最初の 11 文字をとり、デバイス名 tapff387e54-9e を作ることができ" -"ます。" - -msgid "Tales From the Cryp^H^H^H^H Cloud" -msgstr "ハリウッド^H^H^H^H^Hクラウドナイトメア" - -msgid "Telemetry" -msgstr "Telemetry" - -msgid "Telemetry Service" -msgstr "Telemetry サービス" - -msgid "" -"Telemetry service - In typical environments, updating the Telemetry service " -"only requires restarting the service." -msgstr "" -"Telemetry サービス - 一般的な環境では、 Telemetry サービスを更新するために、" -"Apache HTTP サービスの再起動のみが必要となります。" - -msgid "Terminal 1:" -msgstr "端末 1:" - -msgid "Terminal 2:" -msgstr "端末 2:" - -msgid "" -"Test the middleware from outside DevStack on a remote machine that has " -"access to your DevStack instance:" -msgstr "" -" DevStack 環境の外の、DevStack 用インスタンスにアクセス可能なリモートマシンか" -"らミドルウェアをテストします。" - -msgid "" -"Test your middleware with the ``swift`` CLI. Start by switching to the shell " -"screen and finish by switching back to the ``swift-proxy`` screen to check " -"the log output:" -msgstr "" -"``swift``の CLI でミドルウェアのテストをしてください。shell の screen セッ" -"ションに切り替えてテストを開始し、 ``swift-proxy`` の screen セッションにも" -"どってログ出力をチェックして終了します。" - -msgid "" -"Test your scheduler with the nova CLI. Start by switching to the ``shell`` " -"screen and finish by switching back to the ``n-sch`` screen to check the log " -"output:" -msgstr "" -"nova の CLI でスケジューラーのテストをしてください。 ``shell`` の screen セッ" -"ションに切り替えてテストを開始し、 ``n-sch`` screen セッションにもどってログ" -"出力をチェックして終了します。" - -msgid "That made no sense." 
-msgstr "これでは意味が無かった。" - -msgid "Thaw (unfreeze) the system" -msgstr "システムを解凍 (フリーズ解除) します" - -msgid "" -"The \"cluster\" rule allows SSH access from any other instance that uses the " -"``global-http`` group." -msgstr "" -"\"cluster\" ルールにより、``global-http`` グループを使用する他のすべてのイン" -"スタンスから SSH アクセスが許可されます。" - -msgid "The 1gb NIC was still alive and active" -msgstr "1Gb NICはまだ生きていて、有効だった。" - -msgid "" -"The :command:`nova reboot` command wasn't working, so I used :command:" -"`virsh`, but it immediately came back with an error saying it was unable to " -"find the backing disk. In this case, the backing disk is the Glance image " -"that is copied to ``/var/lib/nova/instances/_base`` when the image is used " -"for the first time. Why couldn't it find it? I checked the directory and " -"sure enough it was gone." -msgstr "" -":command:`nova reboot` コマンドは機能しなかったので、 :command:`virsh` を使用" -"したが、すぐに仮想ディスクが見つからないとのエラーが返ってきた。この場合、仮" -"想ディスクは Glance イメージで、イメージが最初に使用する際に ``/var/lib/nova/" -"instances/_base`` にコピーされていた。何故イメージが見つからないのか?私はそ" -"のディレクトリをチェックし、イメージがないことを知った。" - -msgid "" -"The :command:`openstack flavor create` command allows authorized users to " -"create new flavors. Additional flavor manipulation commands can be shown " -"with the following command:" -msgstr "" -":command:`openstack flavor create` コマンドにより、権限のあるユーザーが新しい" -"フレーバーを作成できます。さらなるフレーバーの操作コマンドは、次のコマンドを" -"用いて表示できます。" - -msgid "" -"The :command:`openstack image create` command provides a large set of " -"options for working with your image. For example, the ``--min-disk`` option " -"is useful for images that require root disks of a certain size (for example, " -"large Windows images). To view these options, run:" -msgstr "" -":command:`openstack image-create` コマンドでは、イメージに指定できる多数のオ" -"プションが用意されています。たとえば、 ``--min-disk`` オプションは、特定の容" -"量のルートディスクを必要とするイメージ (例: 大きな Windows イメージ) のために" -"有用です。これらのオプションを表示するには、次のようにします:" - -msgid "" -"The CSAIL cloud is currently 64 physical nodes with a total of 768 physical " -"cores and 3,456 GB of RAM. Persistent data storage is largely outside the " -"cloud on NFS, with cloud resources focused on compute resources. There are " -"more than 130 users in more than 40 projects, typically running 2,000–2,500 " -"vCPUs in 300 to 400 instances." -msgstr "" -"CSAIL クラウドは現在 64 物理ノード、768 物理コア、3,456 GB のメモリがありま" -"す。クラウドリソースがコンピュータリソースに焦点をあてているため、永続データ" -"ストレージの大部分は、クラウド外の NFS 上にあります。40 以上のプロジェクトに " -"130 以上のユーザーがいます。一般的に、300 ~ 400 インスタンスで 2,000 ~ " -"2,500 仮想 CPU が動作しています。" - -msgid "The I/O statistics of your storage services" -msgstr "ストレージサービスの I/O の統計" - -msgid "" -"The ID of the volume to boot from, as shown in the output of :command:" -"`openstack volume list`" -msgstr "" -"起動するボリュームの ID。:command:`openstack volume list` の出力に表示されま" -"す。" - -msgid "The Image service and the Database" -msgstr "Image service とデータベース" - -msgid "" -"The Modular Layer 2 plug-in is a framework allowing OpenStack Networking to " -"simultaneously utilize the variety of layer-2 networking technologies found " -"in complex real-world data centers. It currently works with the existing " -"Open vSwitch, Linux Bridge, and Hyper-V L2 agents and is intended to replace " -"and deprecate the monolithic plug-ins associated with those L2 agents." 
-msgstr "" -"Modular Layer 2 プラグインは、OpenStack Networking が複雑な実世界のデータセン" -"ターに見られるさまざまな L2 ネットワーク技術を同時に利用できるようにするフ" -"レームワークです。現在、既存の Open vSwitch、Linux Bridge、Hyper-V L2 エー" -"ジェントと一緒に動作します。それらの L2 エージェントと関連付けられたモノリ" -"シックなプラグインを置き換えて廃止することを意図しています。" - -msgid "" -"The Open vSwitch driver should and usually does manage this automatically, " -"but it is useful to know how to do this by hand with the :command:`ovs-" -"vsctl` command. This command has many more subcommands than we will use " -"here; see the man page or use :command:`ovs-vsctl --help` for the full " -"listing." -msgstr "" -"Open vSwitch ドライバーは、これを自動的に管理すべきです。また、一般的に管理し" -"ます。しかし、 :command:`ovs-vsctl` コマンドを用いて、これを手動で実行する方" -"法を知ることは有用です。このコマンドは、ここで使用している以上に、数多くのサ" -"ブコマンドがあります。完全な一覧は、マニュアルページを参照するか、 :command:" -"`ovs-vsctl --help` を使用してください。" - -msgid "" -"The OpenStack Dashboard provides a graphical interface to manage users. This " -"section describes user management with the Dashboard." -msgstr "" -"OpenStack Dashboard はユーザーを管理するグラフィカルインターフェースを提供し" -"ます。このセクションは Dashboard を用いたユーザー管理を説明します。" - -msgid "" -"The OpenStack Foundation supported the creation of this book with plane " -"tickets to Austin, lodging (including one adventurous evening without power " -"after a windstorm), and delicious food. For about USD $10,000, we could " -"collaborate intensively for a week in the same room at the Rackspace Austin " -"office. The authors are all members of the OpenStack Foundation, which you " -"can join. Go to the `Foundation web site `_." -msgstr "" -"OpenStack Foundationは、オースチンへの航空券、(暴風後の停電によるドキドキの夜" -"を含む)宿、そして美味しい食事で、この本の作成をサポートしました。約10,000USド" -"ルで、Rackspaceのオースチンオフィスの同じ部屋の中で、私たちは1週間で集中的に" -"共同作業をすることができました。著者たちはすべて OpenStack Foundation のメン" -"バーであり、あなたも OpenStack Foundation に参加できます。`Foundation のウェ" -"ブサイト `_ に行ってみてください。" - -msgid "The OpenStack command-line client can provide some additional details:" -msgstr "" -"OpenStack コマンドラインクライアントでは、もう少し詳しい情報が得られます。" - -msgid "" -"The OpenStack community has had a database-as-a-service tool in development " -"for some time, and we saw the first integrated release of it in Icehouse. " -"From its release it was able to deploy database servers out of the box in a " -"highly available way, initially supporting only MySQL. Juno introduced " -"support for Mongo (including clustering), PostgreSQL and Couchbase, in " -"addition to replication functionality for MySQL. In Kilo, more advanced " -"clustering capability was delivered, in addition to better integration with " -"other OpenStack components such as Networking." -msgstr "" -"OpenStack コミュニティーは、何回か開発中の database-as-a-service ツールがあり" -"ました。Icehouse において最初の統合リリースがありました。そのリリース以降、高" -"可用な方法でそのまま使えるデータベースサーバーを配備できます。最初は MySQL の" -"みをサポートしていました。Juno では、Mongo (クラスターを含む)、PostgreSQL、" -"Couchbase、MySQL の複製機能をサポートしました。Kilo では、さらに高度なクラス" -"ター機能が導入されました。また、Networking などの他の OpenStack コンポーネン" -"トとより統合されました。" - -msgid "" -"The OpenStack dashboard (horizon) can be configured to use multiple regions. " -"This can be configured through the ``AVAILABLE_REGIONS`` parameter." -msgstr "" -"OpenStack dashboard (horizon) は、複数のリージョンを使用するよう設定できま" -"す。これは ``AVAILABLE_REGIONS`` パラメーターにより設定できます。" - -msgid "" -"The OpenStack dashboard simulates the ability to modify a flavor by deleting " -"an existing flavor and creating a new one with the same name." -msgstr "" -"OpenStack dashboard は、既存のフレーバーを削除し、同じ名前の新しいものを作成" -"することにより、フレーバーを変更する機能を模倣しています。" - -msgid "" -"The OpenStack service's policy engine matches a policy directly. 
A rule " -"indicates evaluation of the elements of such policies. For instance, in a " -"``compute:create: \"rule:admin_or_owner\"`` statement, the policy is " -"``compute:create``, and the rule is ``admin_or_owner``." -msgstr "" -"OpenStack サービスのポリシーエンジンがポリシーと直接照合を行います。ルールは" -"そのようなポリシーの要素の評価を意味します。たとえば、 ``compute:create: " -"\"rule:admin_or_owner\"`` 文において、ポリシーは ``compute:create`` で、ルー" -"ルは ``admin_or_owner`` です。" - -msgid "" -"The OpenStack snapshot mechanism allows you to create new images from " -"running instances. This is very convenient for upgrading base images or for " -"taking a published image and customizing it for local use. To snapshot a " -"running instance to an image using the CLI, do this:" -msgstr "" -"OpenStack のスナップショット機能により、実行中のインスタンスから新しいイメー" -"ジを作成することもできます。これは、ベースイメージをアップグレードするため、" -"公開されているイメージをカスタマイズするために、非常に便利です。このように " -"CLI を使用して、実行中のインスタンスをイメージにスナップショットをとります。" - -msgid "" -"The RabbitMQ web management interface is accessible on your cloud controller " -"at *http://localhost:55672*." -msgstr "" -"RabbitMQ Web 管理インターフェイスは、クラウドコントローラーから *http://" -"localhost:55672* でアクセスできます。" - -msgid "" -"The Real Estate team at Rackspace in Austin, also known as \"The Victors,\" " -"were super responsive." -msgstr "" -"「The Victors」としても知られている、オースチンの Rackspace の不動産チーム" -"は、素晴らしい応答をしてくれました。" - -msgid "" -"The Shared File Systems service provides a mechanism of snapshots to help " -"users to restore their own data. To create a snapshot, use :command:`manila " -"snapshot-create` command like:" -msgstr "" -"Shared File Systems サービスは、ユーザーが自身のデータをリストアする支援をす" -"るために、スナップショットの機能を提供します。次のような :command:`manila " -"snapshot-create` コマンドを使用して、スナップショットを作成します。" - -msgid "" -"The TAP device is connected to the integration bridge, ``br-int``. This " -"bridge connects all the instance TAP devices and any other bridges on the " -"system. In this example, we have ``int-br-eth1`` and ``patch-tun``. ``int-br-" -"eth1`` is one half of a veth pair connecting to the bridge ``br-eth1``, " -"which handles VLAN networks trunked over the physical Ethernet device " -"``eth1``. ``patch-tun`` is an Open vSwitch internal port that connects to " -"the ``br-tun`` bridge for GRE networks." -msgstr "" -"TAP デバイスは統合ブリッジ ``br-int`` に接続されます。このブリッジは、すべて" -"のインスタンスの TAP デバイスや他のシステム上のブリッジを接続します。この例で" -"は、 ``int-br-eth1`` と ``patch-tun`` があります。``int-br-eth1`` は、ブリッ" -"ジ ``br-eth1`` に接続している veth ペアの片側です。これは、物理イーサネットデ" -"バイス ``eth1`` 経由でトランクされる VLAN ネットワークを処理します。``patch-" -"tun`` は、GRE ネットワークの ``br-tun`` ブリッジに接続している Open vSwitch " -"内部ポートです。" - -msgid "" -"The TAP device name is constructed using the first 11 characters of the port " -"ID (10 hex digits plus an included '-'), so another means of finding the " -"device name is to use the :command:`neutron` command. This returns a pipe-" -"delimited list, the first item of which is the port ID. For example, to get " -"the port ID associated with IP address 10.0.0.10, do this:" -msgstr "" -"TAP デバイス名は、ポート ID の先頭 11 文字 (10 桁の16進数とハイフン) を使用し" -"て作られます。そのため、デバイス名を見つける別の手段として、 ``neutron`` コマ" -"ンドを使用できます。これは、パイプ区切りの一覧を返し、最初の項目がポート ID " -"です。例えば、次のように IP アドレス 10.0.0.10 に関連づけられているポート ID " -"を取得します。" - -msgid "" -"The TAP devices and veth devices are normal Linux network devices and may be " -"inspected with the usual tools, such as :command:`ip` and :command:" -"`tcpdump`. Open vSwitch internal devices, such as ``patch-tun``, are only " -"visible within the Open vSwitch environment. 
If you try to run :command:"
-"`tcpdump -i patch-tun`, it will raise an error, saying that the device does "
-"not exist."
-msgstr ""
-"TAP デバイスと veth デバイスは、通常の Linux ネットワークデバイスです。 :"
-"command:`ip` や :command:`tcpdump` などの通常のツールを用いて調査できるでしょ"
-"う。 ``patch-tun`` のような Open vSwitch 内部デバイスは、Open vSwitch 環境の"
-"中だけで参照できます。 :command:`tcpdump -i patch-tun` を実行しようとした場"
-"合、デバイスが存在しないというエラーが発生するでしょう。"
-
-msgid ""
-"The Telemetry service (:term:`ceilometer`) collects metering and event data "
-"relating to OpenStack services. Data collected by the Telemetry service "
-"could be used for billing. Depending on deployment configuration, collected "
-"data may be accessible to users. The Telemetry service provides a REST API "
-"documented at `ceilometer V2 Web API `_. You can read "
-"more about the module in the `OpenStack Administrator Guide `_ or in the `developer "
-"documentation `_."
-msgstr ""
-"Telemetry サービス (:term:`ceilometer`) は、OpenStack のサービスに関連する"
-"メータリングとイベントデータを収集します。Telemetry サービスにより収集される"
-"データは、課金のために使用できます。環境の設定によっては、収集されたデータに"
-"ユーザーがアクセスできるかもしれません。Telemetry サービスは "
-"`ceilometer V2 Web API `_ にドキュメント化されている REST API を提供します。このモ"
-"ジュールの詳細は、 `OpenStack Administrator Guide `_ や `developer documentation `_ にあります。"
-
-msgid "The Valentine's Day Compute Node Massacre"
-msgstr "バレンタインデーのコンピュートノード大虐殺"
-
-msgid ""
-"The `OpenStack High Availability Guide `_ offers suggestions for elimination of a single point of "
-"failure that could cause system downtime. While it is not a completely "
-"prescriptive document, it offers methods and techniques for avoiding "
-"downtime and data loss."
-msgstr ""
-"`OpenStack High Availability Guide `_ は、システム停止につながる可能性がある、単一障害点の削減に向け"
-"た提案を示しています。完全に規定されたドキュメントではありませんが、停止時間"
-"やデータ損失を避けるための方法や技術を提供しています。"
-
-msgid ""
-"The `OpenStack Security Guide `_ "
-"provides a deep dive into securing an OpenStack cloud, including SSL/TLS, "
-"key management, PKI and certificate management, data transport and privacy "
-"concerns, and compliance."
-msgstr ""
-"`OpenStack セキュリティーガイド `_ は、OpenStack クラウドのセキュア化に関する深い考察を提供します。"
-"SSL/TLS、鍵管理、PKI および証明書管理、データ転送およびプライバシーの懸念事"
-"項、コンプライアンスなど。"
-
-msgid ""
-"The ``-H`` flag is required when running the daemons with sudo because some "
-"daemons will write files relative to the user's home directory, and this "
-"write may fail if ``-H`` is left off."
-msgstr ""
-"sudo を用いてデーモンを実行するとき、 ``-H`` フラグが必要です。いくつかのデー"
-"モンは、ユーザーのホームディレクトリーからの相対パスのファイルに書き込みを行"
-"うため、 ``-H`` がないと、この書き込みが失敗してしまいます。"
-
-msgid ""
-"The ``-s`` flag used in the cURL commands above is used to prevent "
-"the progress meter from being shown. If you are having trouble running cURL "
-"commands, you'll want to remove it. Likewise, to help you troubleshoot cURL "
-"commands, you can include the ``-v`` flag to show you the verbose output. "
-"There are many more extremely useful features in cURL; refer to the man page "
-"for all the options."
-msgstr ""
-"上記の cURL コマンドで使用している ``-s`` フラグは、進行状況メーターが表示さ"
-"れないようにするために使用します。cURL コマンドの実行で問題が生じた場合には、"
-"このオプションを削除してください。また、cURL コマンドのトラブルシューティング"
-"を行う場合には、 ``-v`` フラグを指定してより詳細な出力を表示すると役立ちま"
-"す。cURL には他にも多数の役立つ機能があります。全オプションは、man ページで参"
-"照してください。"
-
-msgid ""
-"The ``/etc/nova`` directory on both the cloud controller and compute nodes "
-"should be regularly backed up."
-msgstr "" -"クラウドコントローラーとコンピュートノードの ``/etc/nova`` ディレクトリーは、" -"定期的にバックアップすべきです。" - -msgid "" -"The ``deleted`` field is set to ``1`` if the instance has been deleted and " -"``NULL`` if it has not been deleted. This field is important for excluding " -"deleted instances from your queries." -msgstr "" -"``deleted`` フィールドは、インスタンスが削除されていると ``1`` がセットされま" -"す。削除されていなければ ``NULL`` です。このフィールドは、クエリーから削除済" -"みインスタンスを除外するために重要です。" - -msgid "The ``host`` field tells which compute node is hosting the instance." -msgstr "" -"``host`` フィールドは、どのコンピュートノードがインスタンスをホストしているか" -"を示します。" - -msgid "" -"The ``hostname`` field holds the name of the instance when it is launched. " -"The display-name is initially the same as hostname but can be reset using " -"the nova rename command." -msgstr "" -"``hostname`` フィールドは、インスタンスが起動したときのインスタンス名を保持し" -"ます。display-name は、最初は hostname と同じですが、nova rename コマンドを" -"使って再設定することができます。" - -msgid "" -"The ``nova.quota_usages`` table keeps track of how many resources the tenant " -"currently has in use:" -msgstr "" -"``nova.quota_usages`` テーブルはどのくらいリソースをテナントが利用しているか" -"を記録しています。" - -msgid "" -"The ``nova.quotas`` and ``nova.quota_usages`` tables store quota " -"information. If a tenant's quota is different from the default quota " -"settings, its quota is stored in the ``nova.quotas`` table. For example:" -msgstr "" -"``nova.quotas`` と``nova.quota_usages`` テーブルはクォータの情報が保管されて" -"います。もし、テナントのクォータがデフォルト設定と異なる場合、 ``nova." -"quotas`` テーブルに保管されます。以下に例を示します。" - -msgid "" -"The ``qg-`` interface in the l3-agent router namespace sends the packet " -"on to its next hop through device ``eth2`` on the external bridge ``br-ex``. " -"This bridge is constructed similarly to ``br-eth1`` and may be inspected in " -"the same way." -msgstr "" -"l3-agent のルーターの名前空間にある ``qg-`` インターフェースは、外部ブリッ" -"ジ ``br-ex`` にある ``eth2`` デバイス経由で次のホップにパケットを送信します。" -"このブリッジは、 ``br-eth1`` と同じように作られ、同じ方法で検査できるでしょ" -"う。" - -msgid "" -"The ``segmentation_id``, ``cidr``, ``ip_version``, and ``network_type`` " -"share network attributes are automatically set to the values determined by " -"the network provider." -msgstr "" -"``segmentation_id``, ``cidr``, ``ip_version``, ``network_type`` という共有の" -"属性は、ネットワークプロバイダーにより指定された値に自動的に設定されます。" - -msgid "" -"The ``tcpdump`` looked very, very weird. In short, it looked as though " -"network communication stopped before the instance tried to renew its IP. " -"Since there is so much DHCP chatter from a one minute lease, it's very hard " -"to confirm it, but even with only milliseconds difference between packets, " -"if one packet arrives first, it arrived first, and if that packet reported " -"network issues, then it had to have happened before DHCP." -msgstr "" -"``tcpdump`` の結果は非常に奇妙だった。一言で言えば、インスタンスが IP アドレ" -"スを更新しようとする前に、まるでネットワーク通信が停止しているように見えた。" -"1分間のリース期間で大量の DHCP ネゴシエーションがあるため、確認作業は困難を" -"極めた。しかし、パケット間のたった数ミリ秒の違いであれ、あるパケットが最初に" -"到着する際、そのパケットが最初に到着し、そのパケットがネットワーク障害を報告" -"した場合、DHCP より前にネットワーク障害が発生していることになる。" - -msgid "" -"The ``user-data`` key is a special key in the metadata service that holds a " -"file that cloud-aware applications within the guest instance can access. For " -"example, `cloudinit `__ is an " -"open source package from Ubuntu, but available in most distributions, that " -"handles early initialization of a cloud instance that makes use of this user " -"data." 
-msgstr "" -"``user-data`` 鍵は、メタデータサービス内の特別キーです。ゲストインスタンスに" -"あるクラウド対応アプリケーションがアクセス可能なファイルを保持します。たとえ" -"ば、 `cloudinit `__ は、Ubuntu " -"発祥のオープンソースパッケージですが、ほとんどのディストリビューションで利用" -"可能です。このユーザーデータを使用するクラウドインスタンスの初期設定を処理し" -"ます。" - -msgid "" -"The ``uuid`` field is the UUID of the instance and is used throughout other " -"tables in the database as a foreign key. This ID is also reported in logs, " -"the dashboard, and command-line tools to uniquely identify an instance." -msgstr "" -"``uuid`` フィールドはインスタンスの UUID です。データベースにある他の表におい" -"て外部キーとして使用されます。この ID は、インスタンスを一意に識別するため" -"に、ログ、ダッシュボードおよびコマンドラインツールにおいて表示されます。" - -msgid "" -"The admin is global, not per project, so granting a user the ``admin`` role " -"in any project gives the user administrative rights across the whole cloud." -msgstr "" -"管理者はプロジェクトごとではなく、グローバルです。そのため、ユーザーに " -"``admin`` ロールを与えることにより、クラウド全体にわたるユーザー管理権限を与" -"えることになります。" - -msgid "" -"The api_class [keymgr] is deprecated as of Newton, so it should not be " -"included in this release or beyond." -msgstr "" -"api_class [keymgr] は Newton で非推奨になります。これ以降のリリースには、含ま" -"れるべきではありません。" - -msgid "" -"The asterisk * indicates which screen window you are viewing. This example " -"shows we are viewing the key (for keystone) screen window:" -msgstr "" -"アスタリスク (*) は、表示している screen ウィンドウを表しています。この例は、" -"keystone 用の key という screen ウィンドウを表示していることを表しています。" - -msgid "" -"The bare-metal deployment has been widely lauded, and development continues. " -"The Juno release brought the OpenStack Bare metal drive into the Compute " -"project, and it was aimed to deprecate the existing bare-metal driver in " -"Kilo. If you are a current user of the bare metal driver, a particular " -"blueprint to follow is `Deprecate the bare metal driver `_" -msgstr "" -"ベアメタル配備は幅広く叫ばれていて、開発が続いています。Juno リリースは、" -"OpenStack Bare metal ドライブを Compute プロジェクトの中に持ち込みました。" -"Kilo において、既存のベアメタルドライバーを目指していました。現在ベアメタルド" -"ライバーを使用している場合、従うべき具体的なブループリントは `Deprecate the " -"bare metal driver `_ です。" - -msgid "" -"The block device mapping format is ``=:::" -"``, where:" -msgstr "" -"ブロックデバイスマッピングのフォーマットは ``=::" -":`` です。" - -msgid "The bonded 10gb network device (bond0) was in a DOWN state" -msgstr "冗長化された 10Gb ネットワークデバイス(bond0)は DOWN 状態だった。" - -msgid "" -"The chances of failure for the server's hardware are high at the start and " -"the end of its life. As a result, dealing with hardware failures while in " -"production can be avoided by appropriate burn-in testing to attempt to " -"trigger the early-stage failures. The general principle is to stress the " -"hardware to its limits. Examples of burn-in tests include running a CPU or " -"disk benchmark for several days." -msgstr "" -"サーバーハードウェアの故障確率は、そのライフタイムの最初と最後に高くなりま" -"す。結論として、初期故障を誘発する適切なエージングテストを行うことによって、" -"運用中の故障に対応するための多くの労力を避けることができます。一般的な原則" -"は、限界まで負荷をかけることです。エージング試験の例としては、数日間にわたっ" -"てCPUやディスクベンチマークを走行させることが含まれます。" - -msgid "" -"The choice of central logging engine will be dependent on the operating " -"system in use as well as any organizational requirements for logging tools." -msgstr "" -"一元化したロギングエンジンの選択は、使用するオペレーティングシステム、組織の" -"ロギングツールに関する要件に依存します。" - -msgid "" -"The cloud controller and storage proxy are very similar to each other when " -"it comes to expected and unexpected downtime. One of each server type " -"typically runs in the cloud, which makes them very noticeable when they are " -"not running." 
-msgstr "" -"想定内の場合も想定外の場合も停止時間が発生した場合の挙動が、クラウドコント" -"ローラーとストレージプロキシは互いに似ています。クラウドコントローラーとスト" -"レージプロキシはそれぞれクラウドで一つ実行されるので、動作していない場合、非" -"常に目立ちます。" - -msgid "" -"The cloud controller could completely fail if, for example, its motherboard " -"goes bad. Users will immediately notice the loss of a cloud controller since " -"it provides core functionality to your cloud environment. If your " -"infrastructure monitoring does not alert you that your cloud controller has " -"failed, your users definitely will. Unfortunately, this is a rough " -"situation. The cloud controller is an integral part of your cloud. If you " -"have only one controller, you will have many missing services if it goes " -"down." -msgstr "" -"クラウドコントローラーは、例えばマザーボードがおかしくなった場合に、完全に故" -"障するでしょう。これはクラウド環境の中核的な機能を提供しているため、ユーザー" -"はクラウドコントローラーの損失にすぐに気づくでしょう。お使いのインフラ監視機" -"能が、クラウド環境の障害のアラートを上げなかった場合でも、ユーザーは絶対に気" -"づきます。残念ながら、これは大まかな状況です。クラウドコントローラーは、クラ" -"ウドの必須部分です。コントローラーが 1 つだけの場合、ダウンした際に多くのサー" -"ビスが失われるでしょう。" - -msgid "" -"The cloud controller receives the ``255.255.255.255`` request and sends a " -"third response." -msgstr "" -"クラウドコントローラーは ``255.255.255.255`` 宛のリクエストを受信し、3番めの" -"レスポンスを返す。" - -msgid "" -"The code for OpenStack lives in ``/opt/stack``, so go to the ``nova`` " -"directory and edit your scheduler module. Change to the directory where " -"``nova`` is installed:" -msgstr "" -"OpenStack のコードは ``/opt/stack`` にあるので、``nova`` ディレクトリに移動し" -"てあなたのスケジューラーモジュールを編集します。``nova`` をインストールした" -"ディレクトリーに移動します。" - -msgid "" -"The code is similar to the above example of :command:`openstack security " -"group rule create`. To use RemoteGroup, specify ``--remote-group`` instead " -"of ``--remote-ip``. For example:" -msgstr "" -"コードは、上の :command:`openstack security group rule create` の例と似ていま" -"す。RemoteGroup を使用するために、``--remote-ip`` の代わりに ``--remote-" -"group`` を指定します。例:" - -msgid "" -"The code shown below is a driver that will schedule servers to hosts based " -"on IP address as explained at the beginning of the section. Copy the code " -"into ``ip_scheduler.py``. When you are done, save and close the file." -msgstr "" -"以下に示すコードはドライバーです。セクションの最初に説明されているように IP " -"アドレスに基づいて、サーバーをホストにスケジュールします。コードを " -"``ip_scheduler.py`` にコピーします。完了すると、ファイルを保存して閉じます。" - -msgid "" -"The command output lists the currently installed version of the package, " -"newest candidate version, and all versions along with the repository that " -"contains each version. Look for the appropriate release version— " -"``2:14.0.1-0ubuntu1~cloud0`` in this case. The process of manually picking " -"through this list of packages is rather tedious and prone to errors. You " -"should consider using a script to help with this process. For example:" -msgstr "" -"コマンド出力の一覧により、パッケージの現在インストールされているバージョン、" -"候補となる最新バージョン、各バージョンを含むリポジトリーにある全バージョンを" -"把握できます。適切なリリースバージョン、この場合 " -"``2:14.0.1-0ubuntu1~cloud0`` を探します。このパッケージ一覧から手動で探し当て" -"る手順は、むしろ退屈で間違えやすいです。この手順を支援するために、以下のスク" -"リプトを使用することを検討すべきです。例:" - -msgid "" -"The command-line tools can be made to show the OpenStack API calls they make " -"by passing the ``--debug`` flag to them. For example:" -msgstr "" -"コマンドラインツールに ``--debug`` フラグを渡すことにより、実行する " -"OpenStack API コールを表示することができます。例えば、以下のようになります。" - -msgid "The connection strings take this format:" -msgstr "connection 文字列は以下の形式をとります。" - -msgid "" -"The currently implemented hypervisors are listed on the `OpenStack " -"Configuration Reference `__. 
You can see a matrix of the various features " -"in OpenStack Compute (nova) hypervisor drivers at the `Hypervisor support " -"matrix page `_." -msgstr "" -"現在実装されているハイパーバイザーは、 `OpenStack Configuration Reference " -"`__ に一覧化されています。 `Hypervisor support matrix page `_ に、OpenStack " -"Compute (nova) ハイパーバイザーのドライバーにおけるさまざまな機能の組み合わせ" -"表があります。" - -msgid "" -"The dangerous possibility comes with the ability to change member roles. " -"This is the dropdown list below the username in the :guilabel:`Project " -"Members` list. In virtually all cases, this value should be set to :guilabel:" -"`Member`. This example purposefully shows an administrative user where this " -"value is ``admin``." -msgstr "" -"危険な点としては、メンバーの役割を変更する機能があることです。これは :" -"guilabel:`プロジェクトメンバー` 一覧のユーザー名の後ろにあるドロップダウンリ" -"ストです。事実上すべての場合で、この値は :guilabel:`メンバー` に設定されてい" -"ます。この例では意図的に、この値が ``admin`` になっている管理ユーザーを示して" -"います。" - -msgid "" -"The dashboard interface for snapshots can be confusing because the snapshots " -"and images are displayed in the :guilabel:`Images` page. However, an " -"instance snapshot *is* an image. The only difference between an image that " -"you upload directly to the Image Service and an image that you create by " -"snapshot is that an image created by snapshot has additional properties in " -"the glance database. These properties are found in the ``image_properties`` " -"table and include:" -msgstr "" -"ダッシュボードのインターフェースでは、スナップショットとイメージが両方とも :" -"guilabel:`イメージ` のページに表示されるため、まぎらわしいかもしれません。し" -"かしながら、インスタンスのスナップショットはイメージ *です* 。Image service " -"に直接アップロードしたイメージと、スナップショットにより作成したイメージとの" -"唯一の違いは、スナップショットにより作成されたイメージが glance データベース" -"において追加のプロパティを持つことです。これらのプロパティは " -"``image_properties`` テーブルで確認でき、次の項目を含みます:" - -msgid "" -"The dashboard is based on the Python `Django `_ web application framework. To know how to build your Dashboard, see " -"`Building a Dashboard using Horizon `_." -msgstr "" -"dashboard は、Python `Django `_ Web アプリ" -"ケーションフレームワークを利用しています。Dashboard を構築する方法の詳細は " -"`Building a Dashboard using Horizon `_ を参照してください。" - -msgid "" -"The default :term:`authorization` settings allow administrative users only " -"to create resources on behalf of a different project. OpenStack handles two " -"kinds of authorization policies:" -msgstr "" -"デフォルトの :term:`認可 ` 設定では、管理ユーザーのみが他のプ" -"ロジェクトのリソースを作成できます。OpenStack では以下の 2 種類の認可ポリシー" -"を使うことができます。" - -msgid "" -"The default OpenStack flavors are shown in :ref:`table_default_flavors`." -msgstr "" -"デフォルトの OpenStack フレーバーは :ref:`table_default_flavors` に表示されて" -"います。" - -msgid "" -"The direction in which the security group rule is applied. Valid values are " -"``ingress`` or ``egress``." -msgstr "" -"セキュリティグループルールが適用される通信方向。有効な値は ``ingress`` と " -"``egress`` です。" - -msgid "" -"The environment is largely based on Scientific Linux 6, which is Red Hat " -"compatible. We use KVM as our primary hypervisor, although tests are ongoing " -"with Hyper-V on Windows Server 2008." -msgstr "" -"この環境は、大部分は Red Hat 互換の Scientific Linux 6 ベースです。主なハイ" -"パーバイザとして KVM を使用していますが、一方 Windows Server 2008 上の Hyper-" -"V を使用したテストも進行中です。" - -msgid "" -"The example OpenStack architecture designates the cloud controller as the " -"MySQL server. This MySQL server hosts the databases for nova, glance, " -"cinder, and keystone. 
With all of these databases in one place, it's very " -"easy to create a database backup:" -msgstr "" -"参考アーキテクチャーでは、クラウドコントローラーを MySQL サーバにしています。" -"この MySQL サーバーは nova, glance, cinder, そして keystone のデータベースを" -"保持しています。全てのデータベースが一か所にある場合、データベースバックアッ" -"プは非常に容易となります。" - -msgid "" -"The existence of the ``*-manage`` tools is a legacy issue. It is a goal of " -"the OpenStack project to eventually migrate all of the remaining " -"functionality in the ``*-manage`` tools into the API-based tools. Until that " -"day, you need to SSH into the :term:`cloud controller node` to perform some " -"maintenance operations that require one of the ``*-manage`` tools." -msgstr "" -"``*-manage`` ツールの存在は、レガシーの問題です。OpenStack プロジェクトでは、" -"最終的には ``*-manage`` ツールの残りの機能をすべて API ベースのツールに移行す" -"ることを目標としています。移行が完了するまで、``*-manage`` ツールを必要とする" -"メンテナンス操作は、 :term:`クラウドコントローラーノード ` に SSH 接続して実行する必要があります。" - -msgid "" -"The file system does not have any \"dirty\" buffers: where programs have " -"issued the command to write to disk, but the operating system has not yet " -"done the write" -msgstr "" -"ファイルシステムが「ダーティー」バッファーを持たないこと: 「ダーティー」バッ" -"ファーがあるとは、プログラムがディスクに書き込むためにコマンドを発行しました" -"が、オペレーティングシステムがまだ書き込みを完了していないことです。" - -msgid "" -"The first column of this form, named :guilabel:`All Users`, includes a list " -"of all the users in your cloud who are not already associated with this " -"project. The second column shows all the users who are. These lists can be " -"quite long, but they can be limited by typing a substring of the username " -"you are looking for in the filter field at the top of the column." -msgstr "" -":guilabel:`すべてのユーザー` (All Users) という見出しが付けられた、このフォー" -"ムの最初の列に、このプロジェクトにまだ割り当てられていない、クラウドのすべて" -"のユーザーが一覧表示されます。2 列目には、すべての割り当て済みユーザーが一覧" -"表示されます。これらの一覧は非常に長い可能性があります。しかし、それぞれの列" -"の上部にあるフィルターフィールドに、探しているユーザー名の部分文字列を入力す" -"ることにより、表示を絞り込むことができます。" - -msgid "" -"The first is the ``_base`` directory. This contains all the cached base " -"images from glance for each unique image that has been launched on that " -"compute node. Files ending in ``_20`` (or a different number) are the " -"ephemeral base images." -msgstr "" -"一つ目は ``_base`` ディレクトリです。ここには、そのコンピュートノードで起動さ" -"れたそれぞれのイメージに関して、glance から取得したすべてのベースイメージの" -"キャッシュが置かれます。``_20`` (または他の番号) で終わるファイルは一時ディス" -"クのベースイメージです。" - -msgid "" -"The first place to look is the log file related to the command you are " -"trying to run. For example, if ``openstack server list`` is failing, try " -"tailing a nova log file and running the command again:" -msgstr "" -"最初に確認する場所は、実行しようとしているコマンドに関連するログファイルで" -"す。たとえば、``openstack server list`` が失敗していれば、nova ログファイル" -"を tail 表示しながら、次のコマンドを再実行してください。" - -msgid "" -"The first step in finding the source of an error is typically to search for " -"a CRITICAL, or ERROR message in the log starting at the bottom of the log " -"file." -msgstr "" -"エラーの原因を見つけるための典型的な最初のステップは、 CRTICAL、ERRORなどの" -"メッセージがログファイルの終わりで出力されていないかを確認することです。" - -msgid "" -"The first thing you must do is authenticate with the cloud using your " -"credentials to get an :term:`authentication token`." 
-msgstr "" -"まずはじめに、クラウドの認証が必要です。あなたの認証情報を用いて :term:`認証" -"トークン ` を入手してください。" - -msgid "" -"The following are steps needed to create the signature used for the signed " -"images:" -msgstr "" -"以下の手順が、署名付きイメージのために使用される署名を作成するために必要にな" -"ります。" - -msgid "" -"The following command requires you to have your shell environment configured " -"with the proper administrative variables:" -msgstr "" -"以下のコマンドを実行するには、管理系の変数を正しく設定したシェル環境が必要で" -"す。" - -msgid "" -"The following command will boot a new instance and attach a volume at the " -"same time. The volume of ID 13 will be attached as ``/dev/vdc``. It is not a " -"snapshot, does not specify a size, and will not be deleted when the instance " -"is terminated:" -msgstr "" -"以下のコマンドは、新しいインスタンスを起動して、同時にボリュームを接続しま" -"す。ID 13 のボリュームが ``/dev/vdc`` として接続されます。これは、スナップ" -"ショットではなく、容量を指定せず、インスタンスの削除時に一緒に削除されます。" - -msgid "The following comments are added to the rule set as appropriate:" -msgstr "以下のコメントが、適切にルールセットに追加されます。" - -msgid "" -"The following implicit values are being used to create the signature in this " -"example:" -msgstr "以下の暗黙的な値が、この例において署名を作成するために使用されます。" - -msgid "The following options are currently supported:" -msgstr "以下のオプションが現在サポートされます。" - -msgid "" -"The following people have contributed to this book: Akihiro Motoki, " -"Alejandro Avella, Alexandra Settle, Andreas Jaeger, Andy McCallum, Benjamin " -"Stassart, Chandan Kumar, Chris Ricker, David Cramer, David Wittman, Denny " -"Zhang, Emilien Macchi, Gauvain Pocentek, Ignacio Barrio, James E. Blair, Jay " -"Clark, Jeff White, Jeremy Stanley, K Jonathan Harker, KATO Tomoyuki, Lana " -"Brindley, Laura Alves, Lee Li, Lukasz Jernas, Mario B. Codeniera, Matthew " -"Kassawara, Michael Still, Monty Taylor, Nermina Miller, Nigel Williams, Phil " -"Hopkins, Russell Bryant, Sahid Orentino Ferdjaoui, Sandy Walsh, Sascha " -"Peilicke, Sean M. Collins, Sergey Lukjanov, Shilla Saebi, Stephen Gordon, " -"Summer Long, Uwe Stuehler, Vaibhav Bhatkar, Veronica Musso, Ying Chun \"Daisy" -"\" Guo, Zhengguang Ou, and ZhiQiang Fan." -msgstr "" -"以下の方々がこのドキュメントに貢献しています: Akihiro Motoki, Alejandro " -"Avella, Alexandra Settle, Andreas Jaeger, Andy McCallum, Benjamin Stassart, " -"Chandan Kumar, Chris Ricker, David Cramer, David Wittman, Denny Zhang, " -"Emilien Macchi, Gauvain Pocentek, Ignacio Barrio, James E. Blair, Jay Clark, " -"Jeff White, Jeremy Stanley, K Jonathan Harker, KATO Tomoyuki, Lana Brindley, " -"Laura Alves, Lee Li, Lukasz Jernas, Mario B. Codeniera, Matthew Kassawara, " -"Michael Still, Monty Taylor, Nermina Miller, Nigel Williams, Phil Hopkins, " -"Russell Bryant, Sahid Orentino Ferdjaoui, Sandy Walsh, Sascha Peilicke, Sean " -"M. Collins, Sergey Lukjanov, Shilla Saebi, Stephen Gordon, Summer Long, Uwe " -"Stuehler, Vaibhav Bhatkar, Veronica Musso, Ying Chun \"Daisy\" Guo, " -"Zhengguang Ou, ZhiQiang Fan." - -msgid "The following signature properties are used:" -msgstr "以下の署名のプロパティーが使用されます。" - -msgid "" -"The following steps described for Ubuntu have worked on at least one " -"production environment, but they might not work for all environments." -msgstr "" -"以下の手順は、Ubuntu 向けに記載しています。少なくとも 1 つの本番環境で動作し" -"ましたが、すべての環境で動作するとは限りません。" - -msgid "" -"The frequency is defined separately for each periodic task. Therefore, to " -"disable every periodic task in OpenStack Compute (nova), you would need to " -"set a number of configuration options to zero. 
The current list of " -"configuration options you would need to set to zero are:" -msgstr "" -"実行頻度は周期的タスク別に定義されています。したがって、OpenStack Compute " -"(nova) ではすべての周期的タスクは無効にするためには、多くの設定オプションを " -"0 に設定する必要があることでしょう。現在のところ 0 に設定する必要がある設定オ" -"プションの一覧は以下のとおりです。" - -msgid "" -"The general case for this is setting key-value pairs in the aggregate " -"metadata and matching key-value pairs in flavor's ``extra_specs`` metadata. " -"The ``AggregateInstanceExtraSpecsFilter`` in the filter scheduler will " -"enforce that instances be scheduled only on hosts in aggregates that define " -"the same key to the same value." -msgstr "" -"この一般的なケースは、アグリゲートメタデータで key-value ペアを設定して、フ" -"レーバーの ``extra_specs`` メタデータで key-value ペアを一致させます。フィル" -"タースケジューラーの ``AggregateInstanceExtraSpecsFilter`` は、強制的にインス" -"タンスが、同じ値に同じキーが定義されているアグリゲートのホストに対してのみス" -"ケジューリングするようにします。" - -msgid "The generated file looks something like this:" -msgstr "出力は以下のようになります。" - -msgid "" -"The genesis of this book was an in-person event, but now that the book is in " -"your hands, we want you to contribute to it. OpenStack documentation follows " -"the coding principles of iterative work, with bug logging, investigating, " -"and fixing. We also store the source content on GitHub and invite " -"collaborators through the OpenStack Gerrit installation, which offers " -"reviews. For the O'Reilly edition of this book, we are using the company's " -"Atlas system, which also stores source content on GitHub and enables " -"collaboration among contributors." -msgstr "" -"この本の元は人が集まったイベントで作成されましたが、今やこの本はみなさんも貢" -"献できる状態になっています。 OpenStack のドキュメント作成は、バグ登録、調査、" -"修正を繰り返して行うというコーディングの基本原則に基いて行われています。我々" -"はこの本のソースコンテンツを GitHub にも置いており、レビューシステムである " -"OpenStack Gerrit 経由で協力をお待ちしています。この本の O'Reilly 版では、我々" -"は O'Reilly の Atlas システムを使用していますが、ソースコンテンツは GitHub に" -"も格納され、コントリビュータ間での共同作業ができるようになっています。" - -msgid "" -"The good news: OpenStack has unprecedented transparency when it comes to " -"providing information about what's coming up. The bad news: each release " -"moves very quickly. The purpose of this appendix is to highlight some of the " -"useful pages to track, and take an educated guess at what is coming up in " -"the next release and perhaps further afield." -msgstr "" -"良いお知らせ: OpenStack は、次に行われることに関する情報を提供する際に、前例" -"がないくらいにオープンです。悪いお知らせ: 各リリースが非常に迅速に行われま" -"す。この付録の目的は、参照しておく価値のあるページをいくつか紹介すること、次" -"のリリースやその先に起こることを根拠を持って推測することです。" - -msgid "The horizon dashboard web application" -msgstr "horizon dashboard Web アプリケーション" - -msgid "" -"The initial implementation of OpenStack Compute had its own authentication " -"system and used the term ``project``. When authentication moved into the " -"OpenStack Identity (keystone) project, it used the term ``tenant`` to refer " -"to a group of users. Because of this legacy, some of the OpenStack tools " -"refer to projects and some refer to tenants." -msgstr "" -"OpenStack Compute の初期実装は独自の認証システムを持ち、``プロジェクト`` とい" -"う用語を使用していました。認証が OpenStack Identity (Keystone) プロジェクトに" -"移行したとき、ユーザーのグループを参照するために``テナント``という用語が使用" -"されました。このような経緯のため、いくつかの OpenStack ツールはプロジェクトを" -"使用し、いくつかはテナントを使用します。" - -msgid "The instance finally gives up." -msgstr "最終的に、インスタンスはIPアドレス取得を諦める。" - -msgid "" -"The instance generates a packet and places it on the virtual NIC inside the " -"instance, such as eth0." -msgstr "" -"インスタンスはパケットを生成し、インスタンス内の仮想NIC、例えば eth0にそれを" -"渡します。" - -msgid "" -"The instance generates a packet and places it on the virtual Network " -"Interface Card (NIC) inside the instance, such as ``eth0``." 
-msgstr "" -"インスタンスはパケットを生成し、インスタンス内の仮想NIC、例えば ``eth0`` にそ" -"れを渡します。" - -msgid "" -"The instances table carries most of the information related to both running " -"and deleted instances. It has a bewildering array of fields; for an " -"exhaustive list, look at the database. These are the most useful fields for " -"operators looking to form queries:" -msgstr "" -"インスタンスのテーブルは、実行中および削除済みの両方のインスタンスに関連する" -"情報のほとんどを保持しています。データベースで完全なリストを見ると、このテー" -"ブルには目が回るほどたくさんのフィールドがあることがわかります。以下に、クエ" -"リーを行おうとしている運用者にとって非常に有用なフィールドを挙げます。" - -msgid "" -"The internal availability zone is hidden in euca-describe-availability_zones " -"(nonverbose)." -msgstr "" -"内部のアベイラビリティゾーンは、euca-describe-availability_zones " -"(nonverbose) に隠し設定されています。" - -msgid "" -"The inverse operation is called :command:`openstack security group rule " -"delete`, specifying security-group-rule ID. Whole security groups can be " -"removed with :command:`openstack security group delete`." -msgstr "" -"逆の操作が :command:`openstack security group rule delete` です。セキュリ" -"ティーグループのルールの ID を指定します。:command:`openstack security group " -"delete` を使用して、セキュリティグループ全体を削除できます。" - -msgid "The keystone processes are run within Apache as WSGI applications." -msgstr "" -"keystone プロセスは、WSGI アプリケーションとして Apache の中で動作します。" - -msgid "The keystone service" -msgstr "keystone サービス" - -msgid "The load shot up to 8 right before I received the alert" -msgstr "私が警告を受け取る直前、負荷率は8に急増した。" - -msgid "" -"The maximum port number in the range that is matched by the security group " -"rule. The ``port_range_min`` attribute constrains the ``port_range_max`` " -"attribute. If the protocol is ICMP or ICMPv6, this value must be an ICMP or " -"ICMPv6 type, respectively." -msgstr "" -"セキュリティグループルールに一致する、ポート番号の範囲の最大値。" -"``port_range_min`` 属性が ``port_range_max`` 属性を制限します。プロトコルが " -"ICMP または ICMPv6 の場合、この値はそれぞれ ICMP または ICMPv6 タイプでなけれ" -"ばいけません。" - -msgid "" -"The minimum port number in the range that is matched by the security group " -"rule. If the protocol is TCP or UDP, this value must be less than or equal " -"to the ``port_range_max`` attribute value. If the protocol is ICMP or " -"ICMPv6, this value must be an ICMP or ICMPv6 type, respectively." -msgstr "" -"セキュリティグループルールに一致する、ポート番号の範囲の最小値。プロトコルが " -"TCP や UDP の場合、この値は ``port_range_max`` 属性の値以下でなければいけませ" -"ん。プロトコルが ICMP あるいは ICMPv6 の場合、この値はそれぞれ ICMP あるいは " -"ICMPv6 タイプでなければいけません。" - -msgid "" -"The most important step is the pre-upgrade testing. If you are upgrading " -"immediately after release of a new version, undiscovered bugs might hinder " -"your progress. Some deployers prefer to wait until the first point release " -"is announced. However, if you have a significant deployment, you might " -"follow the development and testing of the release to ensure that bugs for " -"your use cases are fixed." -msgstr "" -"すべて中で最も大切なステップは事前のアップグレードテストです。新しいバージョ" -"ンのリリース後すぐにアップグレードする場合、未発見のバグによってアップグレー" -"ドがうまくいかないこともあるでしょう。管理者によっては、最初のアップデート版" -"が出るまで待つことを選ぶ場合もあります。しかしながら、重要な環境の場合には、" -"リリース版の開発やテストに参加することで、あなたのユースケースでのバグを確実" -"に修正することもできるでしょう。" - -msgid "" -"The networking chapter of the `OpenStack Administrator Guide `_ shows a variety of networking " -"scenarios and their connection paths. The purpose of this section is to give " -"you the tools to troubleshoot the various components involved however they " -"are plumbed together in your environment." 
-msgstr "" -"`OpenStack Administrator Guide `_ のネットワークの章に、さまざまな種類のネットワークのシナリ" -"オや接続パスがあります。このセクションの目的は、どのようにお使いの環境に一緒" -"に関わっているかによらず、さまざまなコンポーネントをトラブルシューティングす" -"るためのツールを提供します。" - -msgid "" -"The next best approach is to use a configuration-management tool, such as " -"Puppet, to automatically build a cloud controller. This should not take more " -"than 15 minutes if you have a spare server available. After the controller " -"rebuilds, restore any backups taken (see :doc:`ops-backup-recovery`)." -msgstr "" -"次に最も優れているアプローチは、クラウドコントローラーを自動的に構築するため" -"に Puppet のような構成管理ツールを使用することです。利用可能な予備サーバーが" -"あれば、15 分もかかりません。コントローラーを再構築後、取得したすべてのバック" -"アップを復元します (:doc:`ops-backup-recovery` 参照)。" - -msgid "" -"The next step depends on whether the virtual network is configured to use " -"802.1q VLAN tags or GRE:" -msgstr "" -"次の手順は、仮想ネットワークが 802.1q VLAN タグや GRE を使用するよう設定して" -"いるかどうかに依存します。" - -msgid "The nova scheduler service" -msgstr "nova スケジューラーサービス" - -msgid "The nova services" -msgstr "nova サービス" - -msgid "The number of Object Storage requests each hour" -msgstr "1時間あたりの Object Storage リクエスト数" - -msgid "The number of ``nova-api`` requests each hour" -msgstr "1時間あたりの ``nova-api`` リクエスト数" - -msgid "The number of instances on each compute node" -msgstr "各コンピュートノード上のインスタンス数" - -msgid "" -"The number of virtual machines (VMs) you expect to run, ``((overcommit " -"fraction × cores) / virtual cores per instance)``" -msgstr "" -"実行する必要のある仮想マシン数: ``((オーバーコミット比率 × コア数) / インスタ" -"ンスあたりのコア数)``" - -msgid "The number of volumes in use" -msgstr "使用中のボリューム数" - -msgid "" -"The only thing the Image service does not store in a database is the image " -"itself. The Image service database has two main tables:" -msgstr "" -"Image サービスがデータベースに保存しない唯一のものは、イメージ自体です。" -"Image サービスのデータベースは、主要なテーブルが 2 つあります。" - -msgid "" -"The order you should upgrade services, and any changes from the general " -"upgrade process is described below:" -msgstr "" -"サービスをアップグレードすべき順番、一般的なアップグレード手順との違いは、以" -"下に示す通りです。" - -msgid "" -"The other directories are titled ``instance-xxxxxxxx``. These directories " -"correspond to instances running on that compute node. The files inside are " -"related to one of the files in the ``_base`` directory. They're essentially " -"differential-based files containing only the changes made from the original " -"``_base`` directory." -msgstr "" -"もう一つのディレクトリは ``instance-xxxxxxxx`` という名前です。これらのディレ" -"クトリはコンピュートノードにおいて実行中のインスタンスと対応します。中にある" -"ファイルは ``_base`` ディレクトリにあるファイルのどれかと関連があります。これ" -"らは基本的に、元々の ``_base`` ディレクトリからの変更点のみ含む、差分ベースの" -"ファイルです。" - -msgid "The output looks like the following:" -msgstr "出力は以下のようになります。" - -msgid "The output looks something like the following:" -msgstr "出力は以下のようになります。" - -msgid "" -"The output of this command varies depending on the hypervisor because " -"hypervisors support different attributes. The following demonstrates the " -"difference between the two most popular hypervisors. Here is example output " -"when the hypervisor is Xen:" -msgstr "" -"ハイパーバイザーによってサポートする属性が異なるため、コマンドの出力はハイ" -"パーバイザーによって異なります。以下の実例は、最もよく使用されている 2 つのハ" -"イパーバイザーの間で出力がどのように異なるかを示しています。ハイパーバイザー" -"が Xen の場合の例は以下のようになります。" - -msgid "" -"The output shows that there are five compute nodes and one cloud controller. " -"You see all the services in the up state, which indicates that the services " -"are up and running. If a service is in a down state, it is no longer " -"available. This is an indication that you should troubleshoot why the " -"service is down." 
-msgstr "" -"出力には、5 つのコンピュートノードと 1 つのクラウドコントローラーが表示されて" -"います。すべてのサービスが up 状態であることが分かります。 UP 状態は、サービ" -"スが起動しており稼働中であることを示します。あるサービスが down 状態の場合、" -"そのサービスは利用できません。この場合は、サービスが停止している理由をトラブ" -"ルシューティングする必要があります。" - -msgid "" -"The output shows three different dnsmasq processes. The dnsmasq process that " -"has the DHCP subnet range of 192.168.122.0 belongs to libvirt and can be " -"ignored. The other two dnsmasq processes belong to ``nova-network``. The two " -"processes are actually related—one is simply the parent process of the " -"other. The arguments of the dnsmasq processes should correspond to the " -"details you configured ``nova-network`` with." -msgstr "" -"出力は 3 種類の dnsmasq プロセスを示しています。192.168.122.0 の DHCP サブ" -"ネット範囲を持つ dnsmasq プロセスが、libvirt に属していますが、無視できます。" -"他の 2 つのプロセスは実際に関連します。1 つは単純なもう一つの親プロセスです。" -"dnsmasq プロセスの引数は、 ``nova-network`` に設定した詳細に対応するでしょ" -"う。" - -msgid "" -"The packet is then received on the network node. Note that any traffic to " -"the l3-agent or dhcp-agent will be visible only within their network " -"namespace. Watching any interfaces outside those namespaces, even those that " -"carry the network traffic, will only show broadcast packets like Address " -"Resolution Protocols (ARPs), but unicast traffic to the router or DHCP " -"address will not be seen. See :ref:`dealing_with_network_namespaces` for " -"detail on how to run commands within these namespaces." -msgstr "" -"次に、パケットはネットワークノードで受信されます。L3 エージェントや DHCP エー" -"ジェントへのすべての通信は、それらのネットワーク名前空間の中のみで参照できま" -"す。それらの名前空間の外部にあるすべてのインターフェースを監視することによ" -"り、ネットワーク通信を転送している場合でも、ARP のようなブロードキャストパ" -"ケットのみが表示されます。しかし、ルーターや DHCP アドレスへのユニキャスト通" -"信は表示されません。これらの名前空間の中でコマンドを実行する方法の詳細は、 :" -"ref:`dealing_with_network_namespaces` を参照してください。" - -msgid "" -"The packet then makes it to the l3-agent. This is actually another TAP " -"device within the router's network namespace. Router namespaces are named in " -"the form ``qrouter-``. Running :command:`ip a` within the " -"namespace will show the TAP device name, qr-e6256f7d-31 in this example:" -msgstr "" -"そして、パケットが L3 エージェントに到達します。これは実際には、ルーターの名" -"前空間の中にある別の TAP デバイスです。ルーター名前空間は、 ``qrouter-" -"`` という形式の名前です。名前空間の中で :command:`ip a` を実行す" -"ることにより、TAP デバイスの名前を表示します。この例では qr-e6256f7d-31 で" -"す。" - -msgid "" -"The packet transfers to a Test Access Point (TAP) device on the compute " -"host, such as tap690466bc-92. You can find out what TAP is being used by " -"looking at the ``/etc/libvirt/qemu/instance-xxxxxxxx.xml`` file." -msgstr "" -"そのパケットはコンピュートホストの Test Access Point (TAP)、例えば " -"tap690466bc-92 に転送されます。TAP の構成は、 ``/etc/libvirt/qemu/instance-" -"xxxxxxxx.xml`` を見ることで把握できます。" - -msgid "" -"The packet transfers to the main NIC of the compute node. You can also see " -"this NIC in the :command:`brctl` output, or you can find it by referencing " -"the ``flat_interface`` option in ``nova.conf``." -msgstr "" -"パケットはコンピュートノードの物理NICに送られます。このNICは :command:" -"`brctl` コマンドの出力から、もしくは ``nova.conf`` の ``flat_interface`` オプ" -"ションから確認できます。" - -msgid "" -"The packet transfers to the virtual NIC of the compute host, such as, " -"``vnet1``. You can find out what vnet NIC is being used by looking at the ``/" -"etc/libvirt/qemu/instance-xxxxxxxx.xml`` file." -msgstr "" -"そのパケットはコンピュートホストの仮想NIC、例えば ``vnet1`` に転送されます。 " -"vnet NIC の構成は、 ``/etc/libvirt/qemu/instance-xxxxxxxx.xml`` を見ることで" -"把握できます。" - -msgid "" -"The pip utility is used to manage package installation from the PyPI archive " -"and is available in the python-pip package in most Linux distributions. 
" -"While each OpenStack project has its own client, they are being deprecated " -"in favour of a common OpenStack client. It is generally recommended to " -"install the OpenStack client." -msgstr "" -"pip ユーティリティは、PyPI アーカイブからのパッケージインストールの管理に使" -"用するツールで、大半の Linux ディストリビューションの python-pip パッケージに" -"含まれています。各 OpenStack プロジェクトにはそれぞれ独自のクライアントがあり" -"ますが、それらは共通の OpenStack クライアントにより置き換えられていきます。一" -"般的に、OpenStack クライアントをインストールすることが推奨されます。" - -msgid "" -"The point we are trying to make here is that just because an option exists " -"doesn't mean that option is relevant to your driver choices. Normally, the " -"documentation notes which drivers the configuration applies to." -msgstr "" -"ここで言っておきたいことは、オプションが存在するからといって、そのオプション" -"があなたが選んだドライバーに関係するとは限らないということである。通常は、ド" -"キュメントには、その設定オプションが適用されるドライバーについての記載があり" -"ます。" - -msgid "" -"The policy engine reads entries from the ``policy.json`` file. The actual " -"location of this file might vary from distribution to distribution: for " -"nova, it is typically in ``/etc/nova/policy.json``. You can update entries " -"while the system is running, and you do not have to restart services. " -"Currently, the only way to update such policies is to edit the policy file." -msgstr "" -"ポリシーエンジンは ``policy.json`` ファイルから項目を読み込みます。このファイ" -"ルの実際の位置はディストリビューションにより異なります。一般的に Nova 用の設" -"定ファイルは ``/etc/nova/policy.json`` にあります。システムの実行中に項目を更" -"新でき、サービスを再起動する必要がありません。今のところ、ポリシーファイルの" -"編集がこのようなポリシーを更新する唯一の方法です。" - -msgid "" -"The preceding information was generated by using a custom script that can be " -"found on `GitHub `_." -msgstr "" -"前の情報は `GitHub `_ にあるカスタムスクリプトを使用して生成されました。" - -msgid "" -"The preceding output has been truncated to show only two services. You will " -"see one service entry for each service that your cloud provides. Note how " -"the endpoint domain can be different depending on the endpoint type. " -"Different endpoint domains per type are not required, but this can be done " -"for different reasons, such as endpoint privacy or network traffic " -"segregation." -msgstr "" -"上記の出力は、2 つのサービスのみを表示するようにカットされています。クラウド" -"が提供するサービスごとにサービスエントリーが 1 つ表示されているのがわかりま" -"す。エンドポイントタイプによってエンドポイントドメインが異なる場合がある点に" -"注意してください。タイプによってエンドポイントドメインを別にする必要はありま" -"せんが、エンドポイントのプライバシーやネットワークトラフィックの分離などの異" -"なる理由で分けることができます。" - -msgid "" -"The protocol that is matched by the security group rule. Valid values are " -"``null``, ``tcp``, ``udp``, ``icmp``, and ``icmpv6``." -msgstr "" -"セキュリティーグループルールによって突き合わせが行われるプロトコル。有効な値" -"は、 ``null``、``tcp``、``udp``、 ``icmp`` および ``icmpv6`` です。" - -msgid "" -"The purpose of automatic configuration management is to establish and " -"maintain the consistency of a system without using human intervention. You " -"want to maintain consistency in your deployments so that you can have the " -"same cloud every time, repeatably. Proper use of automatic configuration-" -"management tools ensures that components of the cloud systems are in " -"particular states, in addition to simplifying deployment, and configuration " -"change propagation." -msgstr "" -"自動環境設定管理の目的は、人間の介在なしにシステムの整合性を確保、維持するこ" -"とにあります。毎回、同じクラウド環境を繰り返し作るために、デプロイメントにお" -"ける整合性を確保します。自動環境設定管理ツールを正しく利用することによって、" -"デプロイメントと環境設定の変更を伝搬する作業を簡素化するだけでなく、クラウド" -"システムのコンポーネントが必ず特定の状態にあるようにすることができます。" - -msgid "The purpose of the screen windows are as follows:" -msgstr "screen ウィンドウの目的は、以下のとおりです。" - -msgid "" -"The qemu-nbd device tries to export the instance disk's different partitions " -"as separate devices. 
For example, if vda is the disk and vda1 is the root " -"partition, qemu-nbd exports the device as ``/dev/nbd0`` and ``/dev/nbd0p1``, " -"respectively:" -msgstr "" -"qemu-nbd デバイスはインスタンスのディスクの個々のパーティションを別々のデバイ" -"スとしてエクスポートしようとします。たとえば、ディスクが vda で、ルートパー" -"ティションが vda1 の場合、qemu-nbd はそれぞれ ``/dev/nbd0`` と ``/dev/" -"nbd0p1`` としてデバイスをエクスポートします。" - -msgid "" -"The roadmap for the next release as it is developed can be seen at `Releases " -"`_." -msgstr "" -"開発されている次のリリースのロードマップは `Releases `_ にあります。" - -msgid "" -"The simplest option to get started is to use one hard drive with two " -"partitions:" -msgstr "" -"最もシンプルに使用を開始できるオプションは、1台のハードディスクを2つのパー" -"ティションに分割することです。" - -msgid "" -"The simplest place to start testing the next version of OpenStack is by " -"setting up a new environment inside your own cloud. This might seem odd, " -"especially the double virtualization used in running compute nodes. But it " -"is a sure way to very quickly test your configuration." -msgstr "" -"OpenStack の次のバージョンをテストするための一番簡単な方法は、お使いのクラウ" -"ドの中に新しい環境をセットアップすることです。これは奇妙に思えるかもしれませ" -"ん。とくに、動作中のコンピュートノードで使用される二重仮想化です。しかし、お" -"使いの設定を非常に手軽にテストする確実な方法です。" - -msgid "" -"The simplest reasons for nodes to fail to launch are quota violations or the " -"scheduler being unable to find a suitable compute node on which to run the " -"instance. In these cases, the error is apparent when you run a :command:" -"`openstack server show` on the faulted instance:" -msgstr "" -"ノードが起動に失敗する最も簡単な理由は、クォータ違反、またはスケジューラーが" -"インスタンスを実行するのに適したコンピュートノードを見つけられなかった場合で" -"す。これらの場合、失敗したインスタンスに対して :command:`openstack server " -"show` を実行するとエラーが表示されます。" - -msgid "" -"The simplest way to identify that this is the problem with your instance is " -"to look at the console output of your instance. If DHCP failed, you can " -"retrieve the console log by doing:" -msgstr "" -"もっともシンプルにこの問題を特定する方法は、インスタンス上のコンソール出力を" -"確認することです。もしDHCPが正しく動いていなければ、下記のようにコンソールロ" -"グを参照してください。" - -msgid "" -"The size of the volume in gigabytes. It is safe to leave this blank and have " -"the Compute Service infer the size." -msgstr "" -"ボリュームのギガバイト単位の容量。このフィールドを空欄にして、Compute サービ" -"スに容量を推定させるのが安全です。" - -msgid "" -"The software stack is still Ubuntu 12.04 LTS, but now with OpenStack Havana " -"from the Ubuntu Cloud Archive. KVM is the hypervisor, deployed using `FAI " -"`_ and Puppet for configuration management. The FAI " -"and Puppet combination is used lab-wide, not only for OpenStack. There is a " -"single cloud controller node, which also acts as network controller, with " -"the remainder of the server hardware dedicated to compute nodes." -msgstr "" -"ソフトウェアスタックは Ubuntu 12.04 LTS と Ubuntu Cloud Archive からの " -"OpenStack Havana です。KVM がハイパーバイザで、`FAI `_ と Puppet を設定管理に使用してデプロイされています。FAI と Puppet の組み" -"合わせはOpenStack のみならず研究所全体で使用されています。単一のクラウドコン" -"トローラーノードがあり、これはネットワークコントローラーとしても動作します" -"が、他のコンピュータ・ハードウェアはコンピュートノードに使用されています。" - -msgid "" -"The source files are located in ``/usr/lib/python2.7/dist-packages/nova``." -msgstr "" -"ソースファイルは ``/usr/lib/python2.7/dist-packages/nova`` にあります。" - -msgid "The swift services" -msgstr "swift サービス" - -msgid "" -"The take away from this is if you observe an OpenStack process that appears " -"to \"stop\" for a while and then continue to process normally, you should " -"check that periodic tasks aren't the problem. One way to do this is to " -"disable the periodic tasks by setting their interval to zero. 
Additionally, " -"you can configure how often these periodic tasks run—in some cases, it might " -"make sense to run them at a different frequency from the default." -msgstr "" -"これから分かることは、OpenStack のプロセスが少しの間「停止」したように見え" -"て、それから通常通りの動作を継続するような状況を見つけた場合、周期的タスクが" -"問題になっていないかを確認すべきだということです。取りうる一つの方法は、間隔" -"を 0 に設定して周期的タスクを無効にすることです。また、周期的タスクの実行頻度" -"を設定することもできます。デフォルトとは異なる頻度で周期的タスクを実行する方" -"が意味がある場合もあります。" - -msgid "The team includes:" -msgstr "以下が執筆チームのメンバーです。" - -msgid "" -"The technology behind OpenStack consists of a series of interrelated " -"projects delivering various components for a cloud infrastructure solution. " -"Each service provides an open API so that all of these resources can be " -"managed through a dashboard that gives administrators control while " -"empowering users to provision resources through a web interface, a command-" -"line client, or software development kits that support the API. Many " -"OpenStack APIs are extensible, meaning you can keep compatibility with a " -"core set of calls while providing access to more resources and innovating " -"through API extensions. The OpenStack project is a global collaboration of " -"developers and cloud computing technologists. The project produces an open " -"standard cloud computing platform for both public and private clouds. By " -"focusing on ease of implementation, massive scalability, a variety of rich " -"features, and tremendous extensibility, the project aims to deliver a " -"practical and reliable cloud solution for all types of organizations." -msgstr "" -"OpenStack の背後にある技術は、クラウドインフラストラクチャーソリューション向" -"けのさまざまなコンポーネントを提供する、一連の相互に関連するプロジェクトから" -"構成されます。各サービスは、これらのリソースをすべてダッシュボードから管理で" -"きるよう、オープンな API を提供します。これは、権限を与えられたユーザーが、" -"API をサポートする Web インターフェース、コマンドラインクライアント、ソフト" -"ウェア開発キット (SDK) 経由でリソースを配備する管理者権限を与えます。多くの " -"OpenStack API は拡張できます。API 拡張経由でより多くのリソースにアクセスして" -"革新しながら、コアなコール群の互換性を保つことができます。OpenStack プロジェ" -"クトは、開発者とクラウドコンピューティング技術者のグローバルなコラボレーショ" -"ンです。プロジェクトは、パブリッククラウドおよびプライベートクラウド向けの" -"オープン標準なクラウドコンピューティングプラットフォームを生産します。実装の" -"しやすさ、大規模なスケーラビリティ、さまざまな豊富な機能、ものすごい拡張性に" -"注力することにより、プロジェクトはあらゆる種類の組織に対して実践的かつ信頼で" -"きるクラウドソリューションを提供することを目指しています。" - -msgid "" -"The third version of the Compute API was broadly discussed and worked on " -"during the Havana and Icehouse release cycles. Current discussions indicate " -"that the V2 API will remain for many releases, and the next iteration of the " -"API will be denoted v2.1 and have similar properties to the existing v2.0, " -"rather than an entirely new v3 API. This is a great time to evaluate all API " -"and provide comments while the next generation APIs are being defined. A new " -"working group was formed specifically to `improve OpenStack APIs `_ and create design guidelines, " -"which you are welcome to join." 
-msgstr "" -"Compute API の 3 番目のバージョンが幅広く議論され、Havana と Icehouse のリ" -"リースサイクル期間中に取り組まれました。現在の議論は、V2 API が多くのリリース" -"のために残され、次の API の繰り返しは v2.1 と表示されます。これは、完全に新し" -"い v3 API ではなく、既存の v2.0 と同じようなプロパティーを持ちます。これは、" -"すべての API を評価するための良い機会です。次世代の API が定義されるまでにコ" -"メントを出します。新しいワーキンググループが特別に形成され、`OpenStack API を" -"改善して `_、設計のガイド" -"ラインを作成します。あなたの参加も歓迎されています。" - -msgid "" -"The tunnel bridge, ``br-tun``, contains the ``patch-int`` interface and " -"``gre-`` interfaces for each peer it connects to via GRE, one for each " -"compute and network node in your cluster:" -msgstr "" -"トンネルブリッジ ``br-tun`` は、GRE 経由で接続するお互いの接続相手のために " -"``patch-int`` インターフェースと ``gre-`` インターフェース、クラスター内で" -"各コンピュートノードとネットワークノードのためのものを含みます。" - -msgid "The types of flavors in use" -msgstr "使用中のフレーバー" - -msgid "" -"The typical way is to trace the UUID associated with an instance across the " -"service logs." -msgstr "" -"一般的な方法はインスタンスのUUIDをキーにして、各サービスのログを追跡すること" -"です。" - -msgid "" -"Then check if the network became created by requesting the networks list " -"once again:" -msgstr "" -"次にネットワークの一覧を要求して、ネットワークが正常に作成されたことを確認し" -"ます。" - -msgid "Then it all made sense…" -msgstr "やっと事の全容が判明した…" - -msgid "" -"Then, if needed, update the name and description of the created snapshot:" -msgstr "" -"次に、必要に応じて、作成されたスナップショットの名前と説明を更新します。" - -msgid "" -"Then, if you use SSH to log into your instance and try ``ping openstack." -"org``, you should see something like:" -msgstr "" -"インスタンスに SSH ログインして、 ``ping openstack.org`` を試すと、以下のよう" -"なメッセージが確認できるでしょう。" - -msgid "" -"There are a large number of syslogs engines available, each have differing " -"capabilities and configuration requirements." -msgstr "" -"数多くの syslog エンジンが利用できます。それぞれ異なる機能や設定要件を持ちま" -"す。" - -msgid "" -"There are a number of optional items that can be specified. You should read " -"the rest of this section before trying to start an instance, but this is the " -"base command that later details are layered upon." -msgstr "" -"指定できる多くのオプション項目があります。インスタンスを起動しようとする前" -"に、このセクションを最後まで読んでください。しかし、これが今から説明する詳細" -"の基本となるコマンドです。" - -msgid "" -"There are also several :command:`*-manage` command-line tools. These are " -"installed with the project's services on the cloud controller and do not " -"need to be installed separately:" -msgstr "" -"``*-manage`` のコマンドラインツールも複数あります。これらは、プロジェクトの" -"サービスとともにクラウドコントローラーにインストールされるので、別途インス" -"トールする必要はありません。 " - -msgid "There are currently two categories of quotas for Object Storage:" -msgstr "現在、Object Storage に対する 2 種類のクォータがあります。" - -msgid "There are four major layers in Logstash setup which are:" -msgstr "Logstash は 4 つのおもなレイヤーがあります。" - -msgid "" -"There are other books on the `OpenStack documentation website `_ that can help you get the job done." -msgstr "" -"作業を完了するために役立つ、その他のガイドは `OpenStack ドキュメント Web サイ" -"ト `_ にあります。" - -msgid "" -"There are several good sources of information available that you can use to " -"track your OpenStack development desires." -msgstr "" -"OpenStack 環境の要望を把握するために使用できる、いくつかの良い情報源がありま" -"す。" - -msgid "" -"There are three clouds currently running at CERN, totaling about 4,700 " -"compute nodes, with approximately 120,000 cores. The CERN IT cloud aims to " -"expand to 300,000 cores by 2015." -msgstr "" -"CERN では現在 3 つのクラウドが稼働しており、合計で約 4,700 台のコンピュート" -"ノード、120,000 コアがあります。 CERN IT クラウドは 2015年までに 300,000 コ" -"アにまで拡張される予定です。" - -msgid "" -"There are two main types of instance-specific data: metadata and user data." 
-msgstr "" -"インスタンス固有のデータには 2 種類あります。メタデータとユーザーデータです。" - -msgid "" -"There are two types of monitoring: watching for problems and watching usage " -"trends. The former ensures that all services are up and running, creating a " -"functional cloud. The latter involves monitoring resource usage over time in " -"order to make informed decisions about potential bottlenecks and upgrades." -msgstr "" -"二つの監視のタイプがあります。問題の監視と、利用傾向の監視です。前者は全ての" -"サービスが動作していることを保証するものであり、後者は時間に沿ったリソース利" -"用状況を監視することで、潜在的なボトルネックの発見とアップグレードのための情" -"報を得るものです。" - -msgid "" -"There are two ways to ensure stability with this directory. The first is to " -"make sure this directory is run on a RAID array. If a disk fails, the " -"directory is available. The second way is to use a tool such as rsync to " -"replicate the images to another server:" -msgstr "" -"このディレクトリの永続性を保証するために二つの方法があります。一つ目はRAIDア" -"レイ上にこのディレクトリを置くことで、ディスク障害時にもこのディレクトリが利" -"用できます。二つ目の方法はrsyncのようなツールを用いてイメージを他のサーバーに" -"複製することです。" - -msgid "" -"There is a configuration option in ``/etc/glance/glance-api.conf`` that " -"limits the number of members allowed per image, called " -"``image_member_quota``, set to 128 by default. That setting is a different " -"quota from the storage quota." -msgstr "" -"イメージごとに許可されるメンバーの数を制限する、 ``/etc/glance/glance-api." -"conf`` の設定オプションがあります。これは、 ``image_member_quota`` であり、デ" -"フォルトで 128 です。その設定は、保存容量のクォータとは別のクォータです。" - -msgid "" -"There is a lot of useful information in ``context``, ``request_spec``, and " -"``filter_properties`` that you can use to decide where to schedule the " -"instance. To find out more about what properties are available, you can " -"insert the following log statements into the ``schedule_run_instance`` " -"method of the scheduler above:" -msgstr "" -"``context`` と ``request_spec`` と ``filter_properties`` には、どこにインスタ" -"ンスをスケジュールするのか決定するのに使える有用な情報が多数含まれています。" -"どんなプロパティが利用可能なのかを知るには、以下のログ出力文を上記の " -"``schedule_run_instance`` メソッドに挿入してください。" - -msgid "" -"There is a lot of useful information in ``env`` and ``conf`` that you can " -"use to decide what to do with the request. To find out more about what " -"properties are available, you can insert the following log statement into " -"the ``__init__`` method:" -msgstr "" -"``env`` と ``conf`` には、リクエストについて何をするのか判断するのに使える有" -"用な情報が多数含まれています。どんなプロパティが利用可能なのかを知るには、以" -"下のログ出力文を ``__init__`` メソッドに挿入してください。" - -msgid "" -"There is a pair of useful commands that help manipulate share networks. To " -"start, check the network list:" -msgstr "" -"共有ネットワークを操作する役に立つ、有用なコマンドがいくつかあります。まず" -"ネットワークの一覧を確認します。" - -msgid "" -"There is nothing OpenStack-specific in being aware of the steps needed to " -"access block devices from within the instance operating system, potentially " -"formatting them for first use and being cautious when removing them. What is " -"specific is how to create new volumes and attach and detach them from " -"instances. These operations can all be done from the :guilabel:`Volumes` " -"page of the dashboard or by using the ``openstack`` command-line client." -msgstr "" -"ブロックデバイスにアクセスするために、インスタンスのオペレーティングシステム" -"において必要となる手順に、OpenStack 固有の事項はありません。初めて使用すると" -"きにフォーマットが必要になる、デバイスを取り外すときに注意する、などが考えら" -"れます。固有の事項は、新しいボリュームを作成し、それらをインスタンスに接続お" -"よび切断する方法です。これらの操作は、ダッシュボードの :guilabel:`ボリューム" -"` ページからすべて実行できます。または、 ``openstack`` コマンドラインクライア" -"ントを使用します。" - -msgid "" -"Therefore, the fastest way to get your feature request up for consideration " -"is to create an Etherpad with your ideas and propose a session to the design " -"summit. 
If the design summit has already passed, you may also create a "
-"blueprint directly. Read this `blog post about how to work with blueprints "
-"`_ from the perspective of Victoria Martínez, a developer intern."
-msgstr ""
-"あなたの機能追加リクエストを新機能として検討してもらうのに一番早い方法は、ア"
-"イデアを書いた Etherpad を作成し、デザインサミットのセッションを提案すること"
-"です。デザインサミットが終わっている場合には、blueprint を直接作成することも"
-"できます。 Victoria Martínez の `blueprint での開発の進め方についてのブログ "
-"`_ を是非読んでください。 OpenStack 開発者のインターンの視点で書か"
-"れた記事です。"
-
-msgid ""
-"These actions effectively take the storage node out of the storage cluster."
-msgstr ""
-"これらの操作により、ストレージノードは実質的にストレージクラスターから切り離"
-"されます。"
-
-msgid ""
-"These are configurable by admin users (the rights may also be delegated to "
-"other users by redefining the access controls for ``compute_extension:"
-"flavormanage`` in ``/etc/nova/policy.json`` on the ``nova-api`` server). To "
-"get the list of available flavors on your system, run:"
-msgstr ""
-"これらは管理ユーザーにより設定できます。``nova-api`` サーバーにおいて ``/etc/"
-"nova/policy.json`` の ``compute_extension:flavormanage`` にあるアクセス制限を"
-"再定義することにより、この権限を他のユーザーに委譲することもできます。次のよ"
-"うに、お使いのシステムで利用可能なフレーバーの一覧を取得できます。"
-
-msgid ""
-"These are shared legendary tales of image disappearances, VM massacres, and "
-"crazy troubleshooting techniques that result in hard-learned lessons and "
-"wisdom."
-msgstr ""
-"これらは、得がたい教訓や知見が得られた、イメージの消失、仮想マシンの大虐殺、"
-"クレイジーなトラブルシューティング技術に関する、共有されてきた伝説的な物語で"
-"す。"
-
-msgid ""
-"These changes have facilitated the first proper OpenStack upgrade guide, "
-"found in :doc:`ops-upgrades`, and will continue to improve in the next "
-"release."
-msgstr ""
-"これらの変更により、 :doc:`ops-upgrades` にある初の本格的な OpenStack アップ"
-"グレードガイドが実現しました。次のリリースでも改善が続けられます。"
-
-msgid ""
-"These rules are all \"allow\" type rules, as the default is deny. This "
-"example shows the full port range for all protocols allowed from all IPs. "
-"This section describes the most common security group rule parameters:"
-msgstr ""
-"デフォルトは拒否なので、これらのルールはすべて「許可」形式のルールです。この"
-"例は、すべての IP からの、すべてのプロトコルの全ポート範囲が許可されることを"
-"示しています。このセクションは、最も一般的なセキュリティーグループルールのパ"
-"ラメーターを説明します。"
-
-msgid ""
-"These steps depend on your underlying distribution, but in general you "
-"should be looking for :command:`purge` commands in your package manager, "
-"like :command:`aptitude purge ~c $package`. Following this, you can look for "
-"orphaned files in the directories referenced throughout this guide. To "
-"uninstall the database properly, refer to the manual appropriate for the "
-"product in use."
-msgstr ""
-"これらの手順はお使いのディストリビューションにより異なりますが、一般には :"
-"command:`aptitude purge ~c $package` のようなパッケージマネージャーの :"
-"command:`purge` (完全削除)コマンドを探すとよいでしょう。その後で、このガイ"
-"ドの中に出てきたディレクトリにある不要なファイルを探します。データベースを適"
-"切にアンインストールする方法については、使用しているソフトウェアの適切なマ"
-"ニュアルを参照して下さい。"
-
-msgid ""
-"These tools also make it possible to test and roll back changes, as they are "
-"fully repeatable. Conveniently, a large body of work has been done by the "
-"OpenStack community in this space. Puppet, a configuration management tool, "
-"even provides official modules for OpenStack projects in an OpenStack "
-"infrastructure system known as `Puppet OpenStack `_. Chef configuration management is provided within https://git."
-"openstack.org/cgit/openstack/openstack-chef-repo. Additional configuration "
-"management systems include Juju, Ansible, and Salt. Also, PackStack is a "
-"command-line utility for Red Hat Enterprise Linux and derivatives that uses "
-"Puppet modules to support rapid deployment of OpenStack on existing servers "
-"over an SSH connection."
-msgstr "" -"また、これらのツールでは、完全に繰り返しが可能であるため、変更のテストやロー" -"ルバックが可能です。従来、OpenStack コミュニティーにより多くの作業が行われて" -"いました。Puppet (設定管理ツール) は、 `Puppet OpenStack ` __ で知られる OpenStack インフラストラクチャーシ" -"ステム内の OpenStack の公式モジュールも提供しています。 Chef の設定管理は、 " -"https://github.com/openstack/openstack-chef-repo で提供されています。他の設定" -"管理システムには、Juju、Ansible、Salt などがあります。また、PackStack は Red " -"Hat Enterprise Linux のコマンドラインユーティリティーで、SSH 接続を使用して既" -"存のサーバーに OpenStack を迅速にデプロイできるように Puppet モジュールを使用" -"する派生プロダクトです。" - -msgid "" -"These tunnels use the regular routing tables on the host to route the " -"resulting GRE packet, so there is no requirement that GRE endpoints are all " -"on the same layer-2 network, unlike VLAN encapsulation." -msgstr "" -"これらのトンネルは、ホストにおいて通常のルーティングテーブルを使用して、でき" -"あがった GRE パケットを中継します。そのため、VLAN カプセル化と異なり、GRE エ" -"ンドポイントがすべて同じ L2 ネットワークにあるという要件は必要ありません。" - -msgid "" -"These two statements are produced by our middleware and show that the " -"request was sent from our DevStack instance and was allowed." -msgstr "" -"これらの2行は、このミドルウェアによって出力されており、リクエストが " -"DevStack インスタンスから送られており、許可されていることを示しています。" - -msgid "" -"Thinking it was just a one-off issue, I terminated the instance and launched " -"a new one. By then, the conference call ended and I was off to the data " -"center." -msgstr "" -"これは単なる1回限りの問題と思ったので、私はインスタンスを削除して、新しいイ" -"ンスタンスを起動した。その後電話会議は終了し、私はデータセンターを離れた。" - -msgid "" -"This allows for a single API server being used to control access to multiple " -"cloud installations. Introducing a second level of scheduling (the cell " -"selection), in addition to the regular ``nova-scheduler`` selection of " -"hosts, provides greater flexibility to control where virtual machines are " -"run." -msgstr "" -"これによって、複数のクラウドシステムに対するアクセスを、1つのAPIサーバで制御" -"することができます。通常の ``nova-scheduler`` によるホストの選択に加えて、第" -"二段階のスケジューリング(セルの選択)を導入することにより、仮想マシンを実行す" -"る場所の制御の柔軟性が大きく向上します。" - -msgid "" -"This appendix contains a small selection of use cases from the community, " -"with more technical detail than usual. Further examples can be found on the " -"`OpenStack website `_." -msgstr "" -"この付録ではコミュニティからを事例をいくつか紹介します。これらでは通常より多" -"くの技術的詳細情報が提供されています。他の事例は `OpenStack ウェブサイト " -"`_ で探して下さい。" - -msgid "" -"This attribute value matches the specified IP prefix as the source IP " -"address of the IP packet." -msgstr "" -"この属性値は、IP パケットの送信元 IP アドレスとして、指定された IP プレフィッ" -"クスと一致します。" - -msgid "" -"This book is for those of you starting to run OpenStack clouds as well as " -"those of you who were handed an operational one and want to keep it running " -"well. Perhaps you're on a DevOps team, perhaps you are a system " -"administrator starting to dabble in the cloud, or maybe you want to get on " -"the OpenStack cloud team at your company. This book is for all of you." -msgstr "" -"この本は、OpenStack クラウドの運用を始めようとして人も、運用中の OpenStack ク" -"ラウドを引き継ぎ、うまく動かし続けようとしている人も対象にしています。おそら" -"く、あなたは devops チームの一員であったり、クラウドを始めたばかりのシステム" -"管理者なのでしょう。あなたの会社の OpenStack クラウドチームに入ろうとしている" -"のかもしれません。この本はそのような皆さん全員に向けたものです。" - -msgid "" -"This bug report was the key to everything: `KVM images lose connectivity " -"with bridged network `_." -msgstr "" -"このバグ報告は、すべてに対して重要です。 `KVMイメージがブリッジネットワークで" -"接続を失う `_ 。" - -msgid "" -"This chapter describes only how to back up configuration files and databases " -"that the various OpenStack components need to run. This chapter does not " -"describe how to back up objects inside Object Storage or data contained " -"inside Block Storage. Generally these areas are left for users to back up on " -"their own." 
-msgstr "" -"この章では、OpenStackコンポーネントを動作させるのに必要な設定ファイルとデータ" -"ベースについてのバックアップ方法のみを説明します。オブジェクトストレージ内の" -"オブジェクトや、ブロックストレージ内のデータのバックアップについては説明して" -"いません。一般的にこれらの領域はユーザー自身でバックアップを行います。" - -msgid "" -"This chapter describes what you need to back up within OpenStack as well as " -"best practices for recovering backups." -msgstr "" -"この章は、OpenStack 中でバックアップする必要があるもの、バックアップのリカバ" -"リーのベストプラクティスについて説明します。" - -msgid "" -"This chapter focuses on the second path for customizing OpenStack by " -"providing two examples for writing new features. The first example shows how " -"to modify Object Storage service (swift) middleware to add a new feature, " -"and the second example provides a new scheduler feature for Compute service " -"(nova). To customize OpenStack this way you need a development environment. " -"The best way to get an environment up and running quickly is to run DevStack " -"within your cloud." -msgstr "" -"本章は、新しい機能を書くために 2 つの例を提供することにより、OpenStack をカス" -"タマイズするための 2 つ目のパスに焦点を当てます。1 番目の例は、新しい機能を追" -"加するために Object Storage サービス (swift) ミドルウェアを変更する方法を示し" -"ます。2 番目の例は、Compute サービス (nova) に新しいスケジューラー機能を提供" -"します。このように OpenStack をカスタマイズするために、開発環境が必要になりま" -"す。迅速に環境を準備して動作させる最良の方法は、クラウド内で DevStack を実行" -"することです。" - -msgid "" -"This chapter goes into the common failures that the authors have seen while " -"running clouds in production, including troubleshooting." -msgstr "" -"この章は、執筆者がこれまで本番環境を運用してきて、トラブルシューティングする" -"中で見てきた、一般的な障害について詳細に検討します。" - -msgid "" -"This chapter helps you set up your working environment and use it to take a " -"look around your cloud." -msgstr "" -"本章では、作業環境を設定し、クラウドの全体像を概観するのに役立つ内容を記載し" -"ます。" - -msgid "" -"This chapter is written to let you get your hands wrapped around your " -"OpenStack cloud through command-line tools and understanding what is already " -"set up in your cloud." -msgstr "" -"この章は、コマンドラインツールを用いてお使いの OpenStack クラウド全体を理解で" -"きるようにするために書かれました。また、お使いのクラウドにすでにセットアップ" -"されているものを理解することもできます。" - -msgid "" -"This chapter provides upgrade information based on the architectures used in " -"this book." -msgstr "" -"この章は、本書で使用されるアーキテクチャーに基づいて、アップグレードに関する" -"情報を提供します。" - -msgid "" -"This chapter shows you how to use OpenStack cloud resources and how to train " -"your users." -msgstr "" -"この章は、OpenStack リソースの使用方法、ユーザーの教育方法について説明しま" -"す。" - -msgid "" -"This chapter shows you where OpenStack places logs and how to best read and " -"manage logs for monitoring purposes." -msgstr "" -"この章は、OpenStack のログ保存場所、監視目的にログを参照および管理する最良の" -"方法について説明します。" - -msgid "" -"This chapter walks through user-enabling processes that all admins must face " -"to manage users, give them quotas to parcel out resources, and so on." -msgstr "" -"この章は、すべての管理者がユーザーを管理し、リソースを小さくまとめるクォータ" -"を設定するために必ず直面する、ユーザー有効化のプロセスを詳細に説明します。" - -msgid "" -"This command creates a project named ``demo``. Optionally, you can add a " -"description string by appending ``--description PROJECT_DESCRIPTION``, which " -"can be very useful. You can also create a project in a disabled state by " -"appending ``--disable`` to the command. By default, projects are created in " -"an enabled state." -msgstr "" -"このコマンドは、``demo`` という名前のプロジェクトを作成します。オプション" -"で、 ``--description PROJECT_DESCRIPTION`` を追加することで、説明の文字列を追" -"加することができ、非常に便利です。また、コマンドに ``--disable`` を追加して、" -"プロジェクトを無効な状態で作成することもできます。デフォルトでは、有効化され" -"た状態でプロジェクトが作成されます。" - -msgid "" -"This command displays a list of how many instances a tenant has running and " -"some light usage statistics about the combined instances. 
This command is " -"useful for a quick overview of your cloud, but it doesn't really get into a " -"lot of details." -msgstr "" -"このコマンドはテナント上で実行されるインスタンスのリストと、インスタンス全体" -"の簡単な利用統計を表示します。クラウドの簡単な概要を得るのに便利なコマンドで" -"すが、より詳細な情報については表示しません。" - -msgid "" -"This command will create a public share with the following parameters: " -"``name = netapp1``, ``spec_driver_handles_share_servers = False``" -msgstr "" -"このコマンドは、次のパラメーターを持つパブリックな共有を作成します: ``name = " -"netapp1``, ``spec_driver_handles_share_servers = False``" - -msgid "" -"This configuration will result in a separate log file for each compute node " -"as well as an aggregated log file that contains nova logs from all nodes." -msgstr "" -"この設定により、全ノードからの nova のログを含む集約されたログだけでなく、" -"個々のコンピュートノードのログも持つことになります。" - -msgid "" -"This creates a 10 GB volume. To list existing volumes and the instances they " -"are connected to, if any:" -msgstr "" -"これは 10 GB のボリュームを作成します。既存のボリュームの一覧を表示するには以" -"下のようにします。それらが接続されているインスタンスがあれば、インスタンス情" -"報も表示されます:" - -msgid "" -"This creates a key named ``mykey``, which you can associate with instances. " -"The file ``mykey.pem`` is the private key, which should be saved to a secure " -"location because it allows root access to instances the ``mykey`` key is " -"associated with." -msgstr "" -"これにより、インスタンスと関連付けられる ``mykey`` という名前の鍵が生成されま" -"す。 ``mykey.pem`` というファイルが秘密鍵です。これは、 ``mykey`` 鍵が関連付" -"けられたインスタンスに root アクセスできるので、安全な場所に保存すべきです。" - -msgid "" -"This does not save your password in plain text, which is a good thing. But " -"when you source or run the script, it prompts you for your password and then " -"stores your response in the environment variable ``OS_PASSWORD``. It is " -"important to note that this does require interactivity. It is possible to " -"store a value directly in the script if you require a noninteractive " -"operation, but you then need to be extremely cautious with the security and " -"permissions of this file." -msgstr "" -"この場合には、パスワードがプレーンテキスト形式で保存されないのがこの方法の利" -"点となりますが、このスクリプトを元データとして使用したり、実行したりする際に" -"は、パスワードが要求され、その回答は環境変数 ``OS_PASSWORD`` に保存されます。" -"この操作は対話的に実行される必要がある点は、注意すべき重要なポイントです。 操" -"作を非対話的に行う必要がある場合には、値をスクリプトに直接に保存することも可" -"能ですが、その場合にはこのファイルのセキュリティとアクセス権を極めて慎重に管" -"理する必要があります。" - -msgid "" -"This enables you to arrange OpenStack compute hosts into logical groups and " -"provides a form of physical isolation and redundancy from other availability " -"zones, such as by using a separate power supply or network equipment." -msgstr "" -"アベイラビリティーゾーンにより、OpenStack Compute ホストを論理グループにまと" -"めて、(独立した電源系統やネットワーク装置を使うことなどで)他のアベイラビリ" -"ティーゾーンとの物理的な分離や冗長性を実現できます。" - -msgid "" -"This enables you to partition OpenStack Compute deployments into logical " -"groups for load balancing and instance distribution. You can use host " -"aggregates to further partition an availability zone. For example, you might " -"use host aggregates to partition an availability zone into groups of hosts " -"that either share common resources, such as storage and network, or have a " -"special property, such as trusted computing hardware." -msgstr "" -"これにより、OpenStack コンピュートデプロイメントを負荷分散やインスタンスディ" -"ストリビューション用の論理グループに分割することができます。ホストアグリゲー" -"トを使用して、アベイラビリティゾーンをさらに分割することができます。例えば、" -"ホストアグリゲートを使用してアベイラビリティゾーンをストレージやネットワーク" -"などの共通のリソースを共有するか、信頼済みのコンピューティングハードウェアな" -"どの特別なプロパティを持つホストグループに分割することができます。" - -msgid "This example assumes that ``/dev/sdb`` has failed." -msgstr "この例では、 ``/dev/sdb`` が故障したと仮定します。" - -msgid "" -"This example configuration handles the nova service only. 
It first " -"configures rsyslog to act as a server that runs on port 514. Next, it " -"creates a series of logging templates. Logging templates control where " -"received logs are stored. Using the last example, a nova log from c01." -"example.com goes to the following locations:" -msgstr "" -"これはnovaサービスのみを扱っています。はじめに rsyslog を UDP 514番ポートで動" -"作するサーバーとして設定します。次に一連のログテンプレートを作成します。ログ" -"テンプレートは受け取ったログをどこに保管するかを指定します。最後の例を用いる" -"と、c01.example.comから送られるnovaのログは次の場所に保管されます。" - -msgid "" -"This example creates a security group that allows web traffic anywhere on " -"the Internet. We'll call this group ``global_http``, which is clear and " -"reasonably concise, encapsulating what is allowed and from where. From the " -"command line, do:" -msgstr "" -"この例は、インターネットのどこからでも Web 通信を許可するセキュリティグループ" -"を作成します。このグループを ``global_http`` と呼ぶことにします。許可されるも" -"のと許可されるところを要約した、明白で簡潔な名前になっています。コマンドライ" -"ンから以下のようにします。" - -msgid "" -"This example is for illustrative purposes only. It should not be used as a " -"container IP whitelist solution without further development and extensive " -"security testing." -msgstr "" -"この例は実証目的のみのためにあります。さらなる作りこみと広範なセキュリティテ" -"ストなしにコンテナのIPホワイトリスト・ソリューションとして使用するべきではあ" -"りません。" - -msgid "" -"This example is for illustrative purposes only. It should not be used as a " -"scheduler for Compute without further development and testing." -msgstr "" -"この例は実証目的のみのためにあります。さらなる作りこみとテストなしで、" -"Compute のスケジューラーとして使用するべきではありません。" - -msgid "" -"This example shows the HTTP requests from the client and the responses from " -"the endpoints, which can be helpful in creating custom tools written to the " -"OpenStack API." -msgstr "" -"この例は、クライアントからのHTTPリクエストとエンドポイントからのレスポンスを" -"表示しています。これはOpenStack APIを使ったカスタムツールを作る際に役立ちま" -"す。" - -msgid "" -"This external bridge also includes a physical network interface, ``eth2`` in " -"this example, which finally lands the packet on the external network " -"destined for an external router or destination." -msgstr "" -"この外部ブリッジも物理ネットワークインターフェースに含まれます。この例では " -"``eth2`` です。これは、最終的に外部ルーターや送信先に向けた外部ネットワークの" -"パケットを受け取ります。" - -msgid "" -"This formatting is used to support translation of logging messages into " -"different languages using the `gettext `_ internationalization library. You don't need to do this for " -"your own custom log messages. However, if you want to contribute the code " -"back to the OpenStack project that includes logging statements, you must " -"surround your log messages with underscores and parentheses." -msgstr "" -"このフォーマットは、ログメッセージを異なる言語に翻訳するために `gettext " -"`_ 国際化ライブラリーを利用し" -"ているためです。カスタムログには必要ありませんが、もし、OpenStackプロジェクト" -"にログステートメントを含むコードを提供する場合は、アンダースコアと括弧でログ" -"メッセージを囲わなければなりません。" - -msgid "" -"This functionality is an important piece of the puzzle when it comes to live " -"upgrades and is conceptually similar to the existing API versioning that " -"allows OpenStack services of different versions to communicate without issue." -msgstr "" -"この機能は、ライブアップグレードするということになると、パズルの重要な部品に" -"なります。異なるバージョンの OpenStack サービスが問題なく通信できるようにする" -"ために、既存の API バージョンと概念的に同じになります。" - -msgid "" -"This guide assumes that you are familiar with a Linux distribution that " -"supports OpenStack, SQL databases, and virtualization. You must be " -"comfortable administering and configuring multiple Linux machines for " -"networking. You must install and maintain an SQL database and occasionally " -"run queries against it." 
-msgstr "" -"このガイドは、OpenStack をサポートする Linux ディストリビューション、SQL デー" -"タベースや仮想化に関してよく知っていることを前提にしています。複数台の Linux " -"マシンのネットワーク設定・管理にも慣れている必要があります。SQL データベース" -"のインストールと管理を行い、場合によってはデータベースに対してクエリーを実行" -"することもあります。" - -msgid "" -"This guide is for OpenStack operators and does not seek to be an exhaustive " -"reference for users, but as an operator, you should have a basic " -"understanding of how to use the cloud facilities. This chapter looks at " -"OpenStack from a basic user perspective, which helps you understand your " -"users' needs and determine, when you get a trouble ticket, whether it is a " -"user issue or a service issue. The main concepts covered are images, " -"flavors, security groups, block storage, shared file system storage, and " -"instances." -msgstr "" -"このガイドは OpenStack の運用者向けです。ユーザー向けの膨大なリファレンスを目" -"指すものではありません。しかし運用者として、クラウド設備を使用する方法につい" -"て基本的な理解を持つべきです。本章は、基本的なユーザーの観点から OpenStack を" -"見ていきます。ユーザーが必要とすることを理解する手助けになります。また、トラ" -"ブルのチケットを受け取ったときに、ユーザーの問題またはサービスの問題のどちら" -"かを判断する手助けになります。取り扱っている主な概念はイメージ、フレーバー、" -"セキュリティグループ、ブロックストレージ、共有ファイルシステムストレージおよ" -"びインスタンスです。" - -msgid "" -"This guide targets OpenStack administrators seeking to deploy and manage " -"OpenStack Networking (neutron)." -msgstr "" -"このガイドは、OpenStack Networking (neutron) を導入して管理する方法を探してい" -"る、OpenStack 管理者を対象にしています。" - -msgid "" -"This guide uses the term ``project``, unless an example shows interaction " -"with a tool that uses the term ``tenant``." -msgstr "" -"このガイドは ``プロジェクト`` という用語を使用します。 ``テナント`` という用" -"語を使用するツールとやりとりする例もあります。" - -msgid "" -"This infrastructure includes systems to automatically install the operating " -"system's initial configuration and later coordinate the configuration of all " -"services automatically and centrally, which reduces both manual effort and " -"the chance for error. Examples include Ansible, CFEngine, Chef, Puppet, and " -"Salt. You can even use OpenStack to deploy OpenStack, named TripleO " -"(OpenStack On OpenStack)." -msgstr "" -"このインフラストラクチャーには、オペレーティングシステムの初期設定を自動にイ" -"ンストールするシステムや、全サーバーを自動的かつ一元的に連携、設定するシステ" -"ムが含まれており、手作業やエラーの発生する可能性を減らします。例えば、" -"Ansible、CFEngine、Chef、Puppet、Salt などのシステムです。OpenStack を使用し" -"て、OpenStack をデプロイすることも可能です。これは、TripleO (OpenStack 上の " -"OpenStack) という愛称で呼ばれています。" - -msgid "" -"This instructs rsyslog to send all logs to the IP listed. In this example, " -"the IP points to the cloud controller." -msgstr "" -"これは、rsyslogに全てのログを指定したIPアドレスに送るように命令しています。こ" -"の例では、IPアドレスはクラウドコントローラーを指しています。" - -msgid "" -"This is an ongoing and hot topic in networking circles —especially with the " -"raise of virtualization and virtual switches." -msgstr "" -"これはネットワーク業界で進行中で話題のトピックである。特に仮想マシンと仮想ス" -"イッチで発生する。" - -msgid "" -"This is the first half of the equation. To get flavor types that are " -"guaranteed a particular ratio, you must set the ``extra_specs`` in the " -"flavor type to the key-value pair you want to match in the aggregate. For " -"example, if you define ``extra_specs`` ``cpu_allocation_ratio`` to \"1.0\", " -"then instances of that type will run in aggregates only where the metadata " -"key ``cpu_allocation_ratio`` is also defined as \"1.0.\" In practice, it is " -"better to define an additional key-value pair in the aggregate metadata to " -"match on rather than match directly on ``cpu_allocation_ratio`` or " -"``core_allocation_ratio``. This allows better abstraction. 
For example, by " -"defining a key ``overcommit`` and setting a value of \"high,\" \"medium,\" " -"or \"low,\" you could then tune the numeric allocation ratios in the " -"aggregates without also needing to change all flavor types relating to them." -msgstr "" -"これは前半部分です。特定の比率を保証するフレーバー種別を取得するには、フレー" -"バー種別の ``extra_specs`` をアグリゲートでマッチする key-value ペアに設定す" -"る必要があります。たとえば、``extra_specs`` ``cpu_allocation_ratio`` を 1.0 " -"に定義すると、その種別のインスタンスは、メタデータキー " -"``cpu_allocation_ratio`` も ``1.0`` と定義されているアグリゲートのみで実行さ" -"れます。実際は、 ``cpu_allocation_ratio`` または ``core_allocation_ratio`` で" -"直接マッチさせるのではなく、マッチするアグリゲートメタデータに追加の key-" -"value ペアを定義すると良いでしょう。これにより抽象化が改善されます。たとえ" -"ば、``overcommit`` キーを定義して、高、中、低の値を設定することで、関連するフ" -"レーバー種別をすべて変更する必要なしに、アグリゲートの割合比を調節することが" -"できます。" - -msgid "This is useful, as logs from c02.example.com go to:" -msgstr "c02.example.comから送られたログはこちらに保管されます。" - -msgid "" -"This leaves you with an important point of decision when designing your " -"cloud. OpenStack Networking is robust enough to use with a small number of " -"limitations (performance issues in some scenarios, only basic high " -"availability of layer 3 systems) and provides many more features than ``nova-" -"network``. However, if you do not have the more complex use cases that can " -"benefit from fuller software-defined networking capabilities, or are " -"uncomfortable with the new concepts introduced, ``nova-network`` may " -"continue to be a viable option for the next 12 months." -msgstr "" -"これは、クラウドを設計するときに、重要な判断ポイントを残します。OpenStack " -"Networking は、いくつかのシナリオにおける性能問題、L3 システムの基本的な高可" -"用性のみなど、少しの制限を持ちますが、十分使用できる堅牢さを持ちます。 " -"``nova-network`` よりも多くの機能を提供します。しかしながら、より完全な SDN " -"の機能から利益を得る、より複雑なユースケースを持たない場合、または新しく導入" -"された概念になじめない場合、 ``nova-network`` は次の 12 か月間の主要な選択肢" -"であり続けるかもしれません。" - -msgid "" -"This list will change between releases, so please refer to your " -"configuration guide for up-to-date information." -msgstr "" -"上記のリストはリリース間で変更されることもあります。最新の情報については設定" -"ガイドを参照してください。" - -msgid "" -"This may be a dnsmasq and/or ``nova-network`` related issue. (For the " -"preceding example, the problem happened to be that dnsmasq did not have any " -"more IP addresses to give away because there were no more fixed IPs " -"available in the OpenStack Compute database.)" -msgstr "" -"これは dnsmasq の、もしくは dnsmasq と``nova-network`` 両方の問題です。(例え" -"ば上記では、OpenStack Compute データベース上に利用可能な固定IPがなく、 " -"dnsmasq が IP アドレスを払い出せない問題が発生しています)" - -msgid "This might print the error and cause of the problem." -msgstr "これにより、エラーと問題の原因が表示されるかもしれません。" - -msgid "" -"This output shows that an instance named ``devstack`` was created from an " -"Ubuntu 12.04 image using a flavor of ``m1.small`` and is hosted on the " -"compute node ``c02.example.com``." -msgstr "" -"この出力は、 ``devstack`` という名前のインスタンスが Ubuntu 12.04 イメージか" -"ら ``m1.small`` のフレーバーで作成され、コンピュートノード ``c02.example." -"com`` でホストされていることを示しています。" - -msgid "" -"This output shows that two networks are configured, each network containing " -"255 IPs (a /24 subnet). The first network has been assigned to a certain " -"project, while the second network is still open for assignment. You can " -"assign this network manually; otherwise, it is automatically assigned when a " -"project launches its first instance." 
-msgstr "" -"この出力は、2 つのネットワークが設定されており、各ネットワークには 255 の IP " -"アドレス (/24 サブネットが 1 つ) が含まれていることを示しています。1 番目の" -"ネットワークは、特定のプロジェクトに割り当て済みですが、2 番目のネットワーク" -"はまだ割り当てができる状態です。このネットワークは手動で割り当てることができ" -"ます。手動での割り当てを行わなかった場合には、プロジェクトで最初のインスタン" -"スが起動されたときに自動で割り当てられます。" - -msgid "" -"This past Valentine's Day, I received an alert that a compute node was no " -"longer available in the cloud—meaning," -msgstr "" -"この前のバレンタインデーに、クラウド中にあるコンピュートノードが最早動いてい" -"ないとの警告を受け取った。つまり、" - -msgid "" -"This project continues to improve and you may consider using it for " -"greenfield deployments, though according to the latest user survey results " -"it remains to see widespread uptake." -msgstr "" -"このプロジェクトは改善を続けています。greenfield の導入のために使用することを" -"検討する可能性があります。最新のユーザー調査の結果によると、大幅な理解が得ら" -"れたままです。" - -msgid "" -"This request took over two minutes to process, but executed quickly on " -"another co-existing Grizzly deployment using the same hardware and system " -"configuration." -msgstr "" -"このリクエストは、処理に 2 分以上かかりました。しかし、同じハードウェアとシス" -"テム設定を使用している、他の一緒に動いている Grizzly 環境は迅速に実行されまし" -"た。" - -msgid "" -"This script dumps the entire MySQL database and deletes any backups older " -"than seven days." -msgstr "" -"このスクリプトは MySQL データベース全体をダンプし、7日間より古いバックアップ" -"を削除します。" - -msgid "" -"This script is specific to a certain OpenStack installation and must be " -"modified to fit your environment. However, the logic should easily be " -"transferable." -msgstr "" -"このスクリプトは特定のOpenStackインストール環境向けなので、自身の環境に適用す" -"る際には変更しなくてはいけませんが、ロジックは簡単に変更できるでしょう。" - -msgid "" -"This section covers specific examples of configuration options you might " -"consider tuning. It is by no means an exhaustive list." -msgstr "" -"この節では、調整を検討した方がよい設定オプションの個別の具体例を扱います。決" -"して完全なリストではありません。" - -msgid "" -"This section describes the process to upgrade a basic OpenStack deployment " -"based on the basic two-node architecture in the `Installation Tutorials and " -"Guides `_. All " -"nodes must run a supported distribution of Linux with a recent kernel and " -"the current release packages." -msgstr "" -"このセクションは、 `Installation Tutorials and Guides `_ にある、基本的な 2 ノードアーキ" -"テクチャーを参照しています。すべてのノードでは、サポートする Linux ディストリ" -"ビューションで、最新のカーネルとカレントのリリースパッケージが実行されている" -"必要があります。" - -msgid "" -"This section discusses which files and directories should be backed up " -"regularly, organized by service." -msgstr "" -"このセクションは、サービスにより整理される、定期的にバックアップすべきファイ" -"ルやディレクトリーについて議論します。" - -msgid "" -"This section lists several of the most important Use Cases that demonstrate " -"the main functions and abilities of Shared File Systems service:" -msgstr "" -"このセクションは、主要な Shared File Systems サービスの機能を説明するために、" -"いくつかの重要なユースケースを示します。" - -msgid "" -"This section provides guidance for rolling back to a previous release of " -"OpenStack. All distributions follow a similar procedure." -msgstr "" -"このセクションは、前のリリースの OpenStack にロールバックするためのガイドを提" -"供します。すべてのディストリビューションは、同様の手順に従います。" - -msgid "This section provides tips on resolving common RabbitMQ issues." -msgstr "このセクションは一般的な RabbitMQ の問題を解決するヒントを提供します。" - -msgid "" -"This section was intended as a brief introduction to some of the most useful " -"of many OpenStack commands. For an exhaustive list, please refer to the " -"`OpenStack Administrator Guide `__. " -"We hope your users remain happy and recognize your hard work! 
(For more hard "
-"work, turn the page to the next chapter, where we discuss the system-facing "
-"operations: maintenance, failures and debugging.)"
-msgstr ""
-"このセクションは、多くの OpenStack コマンドの中から、特に有用なものをいくつか"
-"簡単に紹介することを意図していました。完全な一覧は、 `OpenStack 管理者ガイド "
-"`__ を参照してください。ユーザーが幸せ"
-"なままであり、あなたのハードワークに気づいてくれることを願います。(さらにハー"
-"ドワークをしたい方は、次の章に進んでください。次章では、メンテナンス、障害、"
-"デバッグといったシステム側の運用について説明します。)"
-
-msgid ""
-"This step applies if you have already restarted only the OpenStack "
-"components, and cannot connect to the RabbitMQ service."
-msgstr ""
-"すでに OpenStack コンポーネントのみを再起動して、RabbitMQ サービスに接続でき"
-"ない場合、この手順が適用されます。"
-
-msgid ""
-"This step completes the rollback procedure. You should remove the upgrade "
-"release repository and run :command:`apt-get update` to prevent accidental "
-"upgrades until you solve whatever issue caused you to roll back your "
-"environment."
-msgstr ""
-"この手順でロールバック作業は完了です。環境をロールバックすることになった問題"
-"を解決するまで、誤ってアップグレードしてしまわないように、アップグレード先リ"
-"リースのリポジトリーを削除して、 :command:`apt-get update` を実行してくださ"
-"い。"
-
-msgid ""
-"This user data can be put in a file on your local system and then passed in "
-"at instance creation with the flag ``--user-data ``."
-msgstr ""
-"このユーザーデータは、ローカルマシンのファイルに保存され、 ``--user-data "
-"`` フラグを用いてインスタンスの生成時に渡されます。"
-
-msgid "This user was the only user on the node (new node)"
-msgstr "このユーザはそのノード(新しいノード)上の唯一のユーザだった。"
-
-msgid ""
-"Thoroughly review the `release notes `_ to "
-"learn about new, updated, and deprecated features. Find incompatibilities "
-"between versions."
-msgstr ""
-"`リリースノート `_ を全体的に確認して、新機能、更新さ"
-"れた機能、非推奨の機能について調べてください。バージョン間の非互換性も確認し"
-"てください。"
-
-msgid ""
-"Though many successfully use the various python-\*client code as an "
-"effective SDK for interacting with OpenStack, consistency between the "
-"projects and documentation availability waxes and wanes. To combat this, an "
-"`effort to improve the experience `_ has started. Cross-project development efforts in "
-"OpenStack have a checkered history, such as the `unified client project "
-"`_ having several false "
-"starts. However, the early signs for the SDK project are promising, and we "
-"expect to see results during the Juno cycle."
-msgstr ""
-"多くの人が、OpenStack とやり取りするための効果的な SDK として、さまざまな "
-"python-\*client コードをうまく活用していますが、プロジェクト間の整合性やド"
-"キュメントの充実度にはばらつきがあります。これに対処するために、`エクスペリエ"
-"ンスを改善するための取り組み `_ が開始されました。OpenStack におけるクロスプロジェクトの取り"
-"組みには、何度か仕切り直しになった `統一クライアントのプロジェクト `_ など、波乱の歴史があります。しか"
-"しながら、SDK プロジェクトには有望な初期の兆候が見られ、Juno サイクル中に成果"
-"が出ることを期待しています。"
-
-msgid ""
-"Through ``nova-network`` or ``neutron``, OpenStack Compute automatically "
-"manages iptables, including forwarding packets to and from instances on a "
-"compute node, forwarding floating IP traffic, and managing security group "
-"rules. In addition to managing the rules, comments (if supported) will be "
-"inserted in the rules to help indicate the purpose of the rule."
-msgstr ""
-"``nova-network`` と ``neutron`` のいずれの場合も、OpenStack Compute は自動的"
-"に iptables を管理します。これには、コンピュートノードにあるインスタンスとの"
-"間のパケット転送、Floating IP 通信の転送、セキュリティーグループルールの管理"
-"が含まれます。ルールの管理に加えて、(サポートされる場合) コメントがルールに挿"
-"入され、ルールの目的を理解しやすくします。"
-
-msgid ""
-"Tim Bell from CERN gave us feedback on the outline before we started and "
-"reviewed it mid-week."
-msgstr "" -"CERN の Tim Bell は、私たちが作業を開始する前に、その概要についてフィードバッ" -"クを与えてくれて、週の半ばにはレビューをしてくれました。" - -msgid "" -"To access the instance's disk (``/var/lib/nova/instances/instance-xxxxxx/" -"disk``), use the following steps:" -msgstr "" -"インスタンスのディスク (``/var/lib/nova/instances/instance-xxxxxx/disk``) に" -"アクセスするには、以下の手順を実行します。" - -msgid "To add a DEBUG logging statement, you would do:" -msgstr "DEBUGログステートメントを追加するには次のようにします。" - -msgid "" -"To add a project through the command line, you must use the OpenStack " -"command line client." -msgstr "" -"コマンドラインからのプロジェクト追加する場合、OpenStack コマンドラインクライ" -"アントを使用する必要があります。" - -msgid "" -"To add logging statements, the following line should be near the top of the " -"file. For most files, these should already be there:" -msgstr "" -"ログステートメントを追加するには、次の行をファイルの先頭に置きます。ほとんど" -"のファイルでは、これらは既に存在します。" - -msgid "" -"To add new volumes, you need only a volume size in gigabytes. Either put " -"these into the :guilabel:`Create Volume` web form or use the command line:" -msgstr "" -"新しいボリュームを追加する際に必要なのは、名前とギガバイト単位のボリューム容" -"量だけです。これらを :guilabel:`ボリュームの作成` ウェブフォームに記入しま" -"す。または、コマンドラインを使用します:" - -msgid "To address this issue:" -msgstr "この問題の解決手順:" - -msgid "To apply or update account quotas on a project:" -msgstr "プロジェクトのアカウントのクォータを適用または更新します。" - -msgid "" -"To associate a key with an instance on boot, add ``--key-name mykey`` to " -"your command line. For example:" -msgstr "" -"起動時にインスタンスに鍵を関連付けるには、たとえば、コマンドラインに ``--key-" -"name mykey`` を追加します。例えば、" - -msgid "" -"To associate or disassociate a floating IP with a server from the command " -"line, use the following commands:" -msgstr "" -"以下のコマンドを使用して、コマンドラインからサーバーに Floating IP アドレスを" -"関連付けまたは関連付け解除します。" - -msgid "" -"To avoid this situation, create a highly available cloud controller cluster. " -"This is outside the scope of this document, but you can read more in the " -"`OpenStack High Availability Guide `_." -msgstr "" -"この状況を避けるために、高可用なクラウドコントローラークラスターを作成しま" -"す。このことは、このドキュメントの範囲外ですが、 `OpenStack High " -"Availability Guide `_ に情報" -"があります。" - -msgid "" -"To begin, configure all OpenStack components to log to the syslog log file " -"in addition to their standard log file location. Also, configure each " -"component to log to a different syslog facility. This makes it easier to " -"split the logs into individual components on the central server:" -msgstr "" -"まず始めに、全ての OpenStack コンポーネントのログを標準ログに加えて syslog の" -"ログファイルに出力するように設定します。また、各コンポーネントが異なる " -"syslog ファシリティーになるように設定します。これによりログサーバー上で、個々" -"のコンポーネントのログを分離しやすくなります。" - -msgid "" -"To boot normally from an image and attach block storage, map to a device " -"other than vda. You can find instructions for launching an instance and " -"attaching a volume to the instance and for copying the image to the attached " -"volume in the `OpenStack End User Guide `__." -msgstr "" -"通常通りイメージから起動し、ブロックストレージを接続するために、vda 以外のデ" -"バイスを対応付けます。 `OpenStack エンドユーザーガイド `__ に、インスタンス" -"を起動して、それにボリュームを接続する方法、接続されたボリュームにイメージを" -"コピーする方法が説明されています。" - -msgid "" -"To confirm that creation has been successful, see the share in the share " -"list:" -msgstr "共有が共有一覧にあることを確認して、正常に作成されたことを確認します。" - -msgid "" -"To create a development environment, you can use DevStack. DevStack is " -"essentially a collection of shell scripts and configuration files that " -"builds an OpenStack development environment for you. You use it to create " -"such an environment for developing a new feature." 
-msgstr "" -"開発環境を作成するために、DevStack を使用できます。DevStack は本質的に、" -"OpenStack の開発環境を構築する、シェルスクリプトと設定ファイルの塊です。新し" -"い機能を開発するために、そのような環境を構築するために使用します。" - -msgid "To create a project through the OpenStack dashboard:" -msgstr "OpenStack Dashboard でプロジェクトを作成します。" - -msgid "" -"To create a scheduler, you must inherit from the class ``nova.scheduler." -"driver.Scheduler``. Of the five methods that you can override, you *must* " -"override the two methods marked with an asterisk (\\*) below:" -msgstr "" -"スケジューラーを作成するには、 ``nova.scheduler.driver.Scheduler`` クラスを継" -"承しなければなりません。オーバーライド可能な 5 つのメソッドのうち、以下のアス" -"タリスク (\\*) で示される 2 つのメソッドはオーバーライド *しなければなりませ" -"ん* 。" - -msgid "To create a user, you need the following information:" -msgstr "ユーザーを作成するには、以下の情報が必要です。" - -msgid "" -"To create security group rules for a cluster of instances, use RemoteGroups." -msgstr "" -"インスタンスのクラスター向けにセキュリティグループのルールを作成するために、" -"リモートグループを使用します。" - -msgid "" -"To deal with the \"dirty\" buffer issue, we recommend using the sync command " -"before snapshotting:" -msgstr "" -"「ダーティー」バッファーの問題を解決するために、スナップショットの前に sync " -"コマンドを使用することを推奨します。" - -msgid "To delete an image, just execute:" -msgstr "イメージを削除する場合、次のようにします。" - -msgid "" -"To delete instances from the dashboard, select the :guilabel:`Delete " -"Instance` action next to the instance on the :guilabel:`Instances` page." -msgstr "" -"ダッシュボードからインスタンスを削除するには、 :guilabel:`インスタンス` ペー" -"ジにおいてインスタンスの隣にある :guilabel:`インスタンスの削除` アクションを" -"選択します。" - -msgid "" -"To demonstrate customizing OpenStack, we'll create an example of a Compute " -"scheduler that randomly places an instance on a subset of hosts, depending " -"on the originating IP address of the request and the prefix of the hostname. " -"Such an example could be useful when you have a group of users on a subnet " -"and you want all of their instances to start within some subset of your " -"hosts." -msgstr "" -"OpenStack のカスタマイズをデモするために、リクエストの送信元IPアドレスとホス" -"ト名のプレフィックスに基づいてインスタンスを一部のホストにランダムに配置する" -"ような Compute のスケジューラーの例を作成します。この例は、1つのユーザのグ" -"ループが1つのサブネットにおり、インスタンスをホスト群の中の一部のサブネット" -"で起動したい場合に有用です。" - -msgid "" -"To determine the potential features going in to future releases, or to look " -"at features implemented previously, take a look at the existing blueprints " -"such as `OpenStack Compute (nova) Blueprints `_, `OpenStack Identity (keystone) Blueprints `_, and release notes." -msgstr "" -"将来のリリースで検討されている機能を確認したり、過去に実装された機能を見るに" -"は、`OpenStack Compute (nova) Blueprints `_、` OpenStack Identity (keystone) Blueprints `_ などの既存の Blueprint やリリースノートを見てくださ" -"い。" - -msgid "" -"To disable DEBUG-level logging, edit ``/etc/nova/nova.conf`` file as follows:" -msgstr "" -"DEBUG レベルのロギングを無効にするには、``/etc/nova/nova.conf`` を以下のよう" -"に編集します。" - -msgid "" -"To discover how API requests should be structured, read the `OpenStack API " -"Reference `_. To chew through the responses using jq, see the `jq Manual `_." -msgstr "" -"API 要求の構成方法については、 `OpenStack API Reference `_ を参照してください。jq を使" -"用した応答についての詳しい説明は `jq Manual `_ を参照してください。" - -msgid "" -"To do this, generate a list of instance UUIDs that are hosted on the failed " -"node by running the following query on the nova database:" -msgstr "" -"これを実行するために、nova データベースにおいて以下のクエリーを実行することに" -"より、故障したノードにおいてホストされているインスタンスの UUID の一覧を生成" -"します。" - -msgid "" -"To effectively disable the libvirt live snapshotting, until the problem is " -"resolved, add the below setting to nova.conf." 
-msgstr "" -"問題が解決するまでは、libvirt のライブスナップショットを効果的に無効化するた" -"めに、以下の設定を nova.conf に追加します。" - -msgid "" -"To enable nova to send notifications, add the following to the ``nova.conf`` " -"configuration file:" -msgstr "" -"以下を ``nova.conf`` 設定ファイルに追加して、nova で通知の送信を有効化しま" -"す。" - -msgid "" -"To enable this feature, edit the ``/etc/glance/glance-api.conf`` file, and " -"under the ``[DEFAULT]`` section, add:" -msgstr "" -"この機能を有効にするには、 ``/etc/glance/glance-api.conf`` ファイルを編集し" -"て ``[DEFAULT]`` セクションに以下を追加します。" - -msgid "" -"To ensure that important services have written their contents to disk (such " -"as databases), we recommend that you read the documentation for those " -"applications to determine what commands to issue to have them sync their " -"contents to disk. If you are unsure how to do this, the safest approach is " -"to simply stop these running services normally." -msgstr "" -"(データベースのような) 重要なサービスがコンテンツをディスクに書き込んだことを" -"保証するために、それらのアプリケーションのドキュメントを読んで、コンテンツを" -"ディスクに同期させるためにどのコマンドを発行する必要があるかを調べることを推" -"奨します。ディスクに同期させるための方法がはっきり分からない場合、最も安全な" -"方法は単にこれらの実行中のサービスを通常通り停止することです。" - -msgid "" -"To examine the secondary or ephemeral disk, use an alternate mount point if " -"you want both primary and secondary drives mounted at the same time:" -msgstr "" -"セカンダリディスクや一時ディスクを調査する際に、プライマリディスクとセカンダ" -"リディスクを同時にマウントしたければ、別のマウントポイントを使用してくださ" -"い。" - -msgid "To find out whether any floating IPs are available in your cloud, run:" -msgstr "" -"クラウドに利用可能な Floating IP アドレスがあるかどうかを確認するには、以下の" -"コマンドを実行します。" - -msgid "" -"To freeze the volume in preparation for snapshotting, you would do the " -"following, as root, inside the instance:" -msgstr "" -"スナップショットの準備においてボリュームをフリーズするには、インスタンスの中" -"で root として次のとおり実行します:" - -msgid "" -"To help understand how OpenStack works, this section describes the end-to-" -"end process and interaction of components when provisioning an instance on " -"OpenStack." -msgstr "" -"このセクションは、OpenStack がどのように機能するのかを理解するために、" -"OpenStack にインスタンスを配備するとき、最初から最後までの流れ、コンポーネン" -"ト間の通信を説明します。" - -msgid "" -"To launch an instance, you need to select an image, a flavor, and a name. " -"The name needn't be unique, but your life will be simpler if it is because " -"many tools will use the name in place of the UUID so long as the name is " -"unique. You can start an instance from the dashboard from the :guilabel:" -"`Launch Instance` button on the :guilabel:`Instances` page or by selecting " -"the :guilabel:`Launch` action next to an image or a snapshot on the :" -"guilabel:`Images` page." -msgstr "" -"インスタンスを起動するには、イメージ、フレーバーおよび名前を選択する必要があ" -"ります。名前は一意である必要がありませんが、名前が一意である限りは、多くの" -"ツールが UUID の代わりに名前を使用できるので、シンプルにできます。インスタン" -"スの起動は、ダッシュボードにおいて、 :guilabel:`インスタンス` ページにある :" -"guilabel:`起動` ボタン、または :guilabel:`イメージ` ページにある :guilabel:`" -"イメージ` または :guilabel:`スナップショット` の隣にある :guilabel:`インスタ" -"ンスの起動` アクションから実行できます。" - -msgid "" -"To list the bridges on a system, use :command:`ovs-vsctl list-br`. This " -"example shows a compute node that has an internal bridge and a tunnel " -"bridge. 
VLAN networks are trunked through the ``eth1`` network interface:" -msgstr "" -":command:`ovs-vsctl list-br` を使用して、システムにあるブリッジを一覧表示しま" -"す。この例は、内部ブリッジと統合ブリッジを持つコンピュートノードを表します。" -"VLAN ネットワークが ``eth1`` ネットワークインターフェース経由でトランクされま" -"す。" - -msgid "To make sure that the snapshot is available, run:" -msgstr "次のとおり実行して、スナップショットが利用できることを確認します。" - -msgid "" -"To make working with subsequent requests easier, store the token in an " -"environment variable:" -msgstr "次の要求での作業をより簡単に行うには、環境変数にトークンを保管します。" - -msgid "" -"To obtain snapshots of a Windows VM these commands can be scripted in " -"sequence: flush the filesystems, freeze the filesystems, snapshot the " -"filesystems, then unfreeze the filesystems. As with scripting similar " -"workflows against Linux VMs, care must be used when writing such a script to " -"ensure error handling is thorough and filesystems will not be left in a " -"frozen state." -msgstr "" -"Windows 仮想マシンのスナップショットを取得する場合、以下のコマンドを連続して" -"実行するスクリプト化できます。ファイルシステムをフラッシュする、ファイルシス" -"テムをフリーズする、ファイルシステムのスナップショットを取得する、ファイルシ" -"ステムをフリーズ解除する。Linux 仮想マシンのワークフローと同じように、そのよ" -"うなスクリプトを書くときに、注意して使用すべきです。エラー処理を徹底して、" -"ファイルシステムがフリーズ状態のままにならないようにします。" - -msgid "To perform this action from command line, run the following command:" -msgstr "" -"このアクションをコマンドラインから実行するには、以下のコマンドを実行します" - -msgid "" -"To plug this middleware into the swift Paste pipeline, you edit one " -"configuration file, ``/etc/swift/proxy-server.conf``:" -msgstr "" -"このミドルウェアを swift Paste のパイプラインに組み込むには、設定ファイル ``/" -"etc/swift/proxy-server.conf`` を編集します。" - -msgid "" -"To plug this scheduler into nova, edit one configuration file, ``/etc/nova/" -"nova.conf``:" -msgstr "" -"このスケジューラーを nova に追加するために、設定ファイル ``/etc/nova/nova." -"conf`` を編集します。" - -msgid "" -"To prevent system capacities from being exhausted without notification, you " -"can set up :term:`quotas `. Quotas are operational limits. For " -"example, the number of gigabytes allowed per tenant can be controlled to " -"ensure that a single tenant cannot consume all of the disk space. Quotas are " -"currently enforced at the tenant (or project) level, rather than the user " -"level." -msgstr "" -"システムの容量が通知なしに完全に消費されないように、 :term:`クォータ " -"` を設定することができます。クォータとは、運用上の制限値です。たとえ" -"ば、各テナントに許容される容量 (GB) を制御して、単一のテナントで全ディスク容" -"量すべてが消費されないようにします。現在、ユーザーレベルではなく、テナント " -"(またはプロジェクト) レベルで、クォータを有効にすることができます。" - -msgid "" -"To put the EC2 credentials into your environment, source the ``ec2rc.sh`` " -"file." -msgstr "" -"EC2 認証情報を環境に適用するには、 ``ec2rc.sh`` ファイルを元データとします。" - -msgid "To schedule a group of hosts with common features." 
-msgstr "共通の機能を持ったホストのグループに対してスケジューリングしたい場合" - -msgid "To see a list of projects that have been added to the cloud, run:" -msgstr "" -"クラウドに追加されたプロジェクトの一覧を確認するには、以下のコマンドを実行し" -"ます:" - -msgid "To see a list of running instances, run:" -msgstr "実行中のインスタンスを確認するには、以下のコマンドを実行します:" - -msgid "To see a list of users, run:" -msgstr "ユーザーのリストを見るためには、" - -msgid "To see whether you are using namespaces, run :command:`ip netns`:" -msgstr "" -":command:`ip netns` を実行して、名前空間を使用しているかどうかを確認します。" - -msgid "To see which bridge the packet will use, run the command:" -msgstr "" -"下記コマンドを実行することで、パケットがどのブリッジを使うか確認できます。" - -msgid "" -"To see which fixed IP networks are configured in your cloud, you can use " -"the :command:`openstack` command-line client to get the IP ranges:" -msgstr "" -"クラウドでどの Fixed IP ネットワークが設定されているかを確認するために、 :" -"command:`openstack` コマンドラインクライアントを使用して IP アドレスの範囲を" -"取得することができます。" - -msgid "" -"To set a configuration option to zero, include a line such as " -"``image_cache_manager_interval=0`` in your ``nova.conf`` file." -msgstr "" -"設定オプションを 0 に設定するには、``nova.conf`` に " -"``image_cache_manager_interval=0`` のような行を入れてください。" - -msgid "To set up the test environment, you can use one of several methods:" -msgstr "テスト環境をセットアップする場合、いくつかの方法を使用できます。" - -msgid "To share an image or snapshot with another project, do the following:" -msgstr "" -"以下のように、イメージやスナップショットを他のプロジェクトと共有します。" - -msgid "" -"To take advantage of either container quotas or account quotas, your Object " -"Storage proxy server must have ``container_quotas`` or ``account_quotas`` " -"(or both) added to the ``[pipeline:main]`` pipeline. Each quota type also " -"requires its own section in the ``proxy-server.conf`` file:" -msgstr "" -"コンテナーのクォータやアカウントのクォータの利点を得るために、Object Storage " -"のプロキシーサーバーが ``container_quotas`` や ``account_quotas`` (または両" -"方) を ``[pipeline:main]`` パイプラインに追加するする必要があります。各クォー" -"タの種類は、``proxy-server.conf`` ファイルにそれ自身のセクションも必要としま" -"す。" - -msgid "" -"To take the first path, you can modify the OpenStack code directly. Learn " -"`how to contribute `_, " -"follow the `Developer's Guide `_, make your changes, and contribute them back to the " -"upstream OpenStack project. This path is recommended if the feature you need " -"requires deep integration with an existing project. The community is always " -"open to contributions and welcomes new functionality that follows the " -"feature-development guidelines. This path still requires you to use DevStack " -"for testing your feature additions, so this chapter walks you through the " -"DevStack environment." -msgstr "" -"まず最初に、 `貢献するには `_ を読んで、 `開発者ガイド `_ に従って、あなたの修正をアップストリームの " -"OpenStack プロジェクトへコントリビュートしてください。もし、あなたが必要な機" -"能が既存のプロジェクトと密にインテグレーションする必要がある場合、これが推奨" -"される選択肢です。コミュニティは、いつでも貢献に対して開かれていますし、機能" -"開発ガイドラインに従う新機能を歓迎します。" - -msgid "" -"To this day, `the issue `__ " -"doesn't have a permanent resolution, but we look forward to the discussion " -"at the next summit." -msgstr "" -"今日に至るまで、`この問題 `__ に" -"は完全な解決策がないが、我々は次回のサミットの議論に期待している。" - -msgid "" -"To understand the difference between user data and metadata, realize that " -"user data is created before an instance is started. User data is accessible " -"from within the instance when it is running. User data can be used to store " -"configuration, a script, or anything the tenant wants." 
-msgstr "" -"ユーザーデータとメタデータの違いを理解するために、インスタンスが起動する前" -"に、ユーザーデータが作成されることに気づいてください。ユーザーデータは、イン" -"スタンスの実行時に、インスタンスの中からアクセスできます。設定、スクリプト、" -"テナントが必要とするものを保存するために使用できます。" - -msgid "" -"To update a default value for a new tenant, update the property in the ``/" -"etc/cinder/cinder.conf`` file." -msgstr "" -"新規テナントのクォータのデフォルト値を更新するには、``/etc/cinder/cinder." -"conf`` ファイルの対応する項目を更新します。" - -msgid "" -"To update a service on each node, you generally modify one or more " -"configuration files, stop the service, synchronize the database schema, and " -"start the service. Some services require different steps. We recommend " -"verifying operation of each service before proceeding to the next service." -msgstr "" -"各ノードにおいてサービスをアップグレードする場合、一般的には 1 つ以上の設定" -"ファイルの変更、サービスの停止、データベーススキーマの同期、サービスの起動を" -"行います。いくつかのサービスは、違う手順を必要とします。次のサービスに進む前" -"に、各サービスの動作を検証することを推奨します。" - -msgid "To verify the quota, run the :command:`swift stat` command again:" -msgstr "再び :command:`swift stat` コマンドを実行して、クォータを検証します。" - -msgid "To view a flavor's access list, do the following:" -msgstr "以下のように、フレーバーのアクセスリストを表示します。" - -msgid "To view a list of options for the ``nova quota-update`` command, run:" -msgstr "" -"以下を実行して、``nova quota-update`` コマンドのオプションリストを表示しま" -"す。" - -msgid "To view account quotas placed on a project:" -msgstr "プロジェクトのアカウントのクォータを表示します。" - -msgid "To view all tenants, run:" -msgstr "全てのテナントを表示するには、以下のコマンドを実行します。" - -msgid "" -"To view and update Object Storage quotas, use the :command:`swift` command " -"provided by the ``python-swiftclient`` package. Any user included in the " -"project can view the quotas placed on their project. To update Object " -"Storage quotas on a project, you must have the role of ResellerAdmin in the " -"project that the quota is being applied to." -msgstr "" -"Object Storage クォータを表示および更新するためには、``python-swiftclient`` " -"パッケージにより提供される ``swift`` コマンドを使用します。プロジェクト内の" -"ユーザーは誰でも、そのプロジェクトに設定されているクォータを表示できます。プ" -"ロジェクトの Object Storage クォータを更新する場合、クォータを適用するプロ" -"ジェクトにおいて ResellerAdmin ロールを持つ必要があります。" - -msgid "To view the details of a security group:" -msgstr "セキュリティグループの詳細を表示する方法:" - -msgid "Tom Fifield" -msgstr "Tom Fifield" - -msgid "Total Cloud Controller Failure" -msgstr "全体的なクラウドコントローラーの故障" - -msgid "Total Compute Node Failure" -msgstr "コンピュートノード全体の故障" - -msgid "Toward a Python SDK" -msgstr "Python SDK へ" - -msgid "Tracing Instance Requests" -msgstr "インスタンスリクエストの追跡" - -msgid "Trending" -msgstr "トレンド" - -msgid "" -"Trending can give you great insight into how your cloud is performing day to " -"day. You can learn, for example, if a busy day was simply a rare occurrence " -"or if you should start adding new compute nodes." -msgstr "" -"傾向は、あなたのクラウドが日々どのように動作しているかについて、素晴らしい洞" -"察を与えられます。例えば、忙しい日が単純にほとんど発生していないかどうか、新" -"しいコンピュートノードを追加しはじめるべきかどうかを学習できます。" - -msgid "" -"Trending takes a slightly different approach than alerting. While alerting " -"is interested in a binary result (whether a check succeeds or fails), " -"trending records the current state of something at a certain point in time. " -"Once enough points in time have been recorded, you can see how the value has " -"changed over time." -msgstr "" -"トレンドはアラートとは全く異なったアプローチです。アラートは0か1かの結果" -"(チェックが成功するか失敗するか)に注目しているのに対して、トレンドはある時点" -"での状態を定期的に記録します。十分な量が記録されれば、時系列でどのように値が" -"変化するかを確認できます。" - -msgid "Troubleshooting Open vSwitch" -msgstr "Open vSwitch のトラブルシューティング" - -msgid "" -"Try executing the :command:`openstack server reboot` command again. You " -"should see an error message about why the instance was not able to boot." 
-msgstr "" -"再び :command:`openstack server reboot` コマンドを実行してみてください。イン" -"スタンスがなぜブートできないかについて、エラーメッセージを確認すべきです。" - -msgid "" -"Typical use is to only create administrative users in a single project, by " -"convention the admin project, which is created by default during cloud " -"setup. If your administrative users also use the cloud to launch and manage " -"instances, it is strongly recommended that you use separate user accounts " -"for administrative access and normal operations and that they be in distinct " -"projects." -msgstr "" -"一般的な使用法は、一つのプロジェクトだけに管理ユーザーを所属させることです。" -"慣例により、\"admin\" プロジェクトがクラウド環境のセットアップ中に標準で作成" -"されます。管理ユーザーもクラウドを使用してインスタンスの起動、管理を行う場合" -"には、管理アクセスと一般アクセス用に別々のユーザーアカウントを使用し、それら" -"のユーザーを別々のプロジェクトにすることを強く推奨します。" - -msgid "" -"Typically, default values are changed because a tenant requires more than " -"the OpenStack default of 10 volumes per tenant, or more than the OpenStack " -"default of 1 TB of disk space on a compute node." -msgstr "" -"テナントには、10 個を超える Block Storage ボリュームまたはコンピュートノード" -"で 1 TB 以上が必要であるため、通常クラウドのオペレーターはデフォルト値を変更" -"します。" - -msgid "" -"Ubuntu 12.04 installs RabbitMQ version 2.7.1, which uses port 55672. " -"RabbitMQ versions 3.0 and above use port 15672 instead. You can check which " -"version of RabbitMQ you have running on your local Ubuntu machine by doing:" -msgstr "" -"Ubuntu 12.04はRabiitMQのバージョン2.7.1を55672番ポートを使うようにインストー" -"ルします。RabbitMQバージョン3.0以降では15672が利用されます。Ubuntuマシン上で" -"どのバージョンのRabbitMQが実行されているかは次のように確認できます。" - -msgid "Under Identity tab, click :guilabel:`Projects`." -msgstr "ユーザー管理タブの :guilabel:`プロジェクト` をクリックします。" - -msgid "" -"Underlying the use of the command-line tools is the OpenStack API, which is " -"a RESTful API that runs over HTTP. There may be cases where you want to " -"interact with the API directly or need to use it because of a suspected bug " -"in one of the CLI tools. The best way to do this is to use a combination " -"of `cURL `_ and another tool, such as `jq `_, to parse the JSON from the responses." -msgstr "" -"コマンドラインツールの使用の根底にあるのは、HTTP を介して実行する RESTful " -"API である OpenStack API です。API と直接対話を行いたい場合や、CLI ツールにバ" -"グがあることが疑われるために使用する必要がある場合があります。この場合の最善" -"の対処方法は、 `cURL `_ と `jq `_ などの他のツールを組み合わせて使用し、その応答から JSON を解析する" -"ことです。" - -msgid "" -"Unfortunately, sometimes the error is not apparent from the log files. In " -"this case, switch tactics and use a different command; maybe run the service " -"directly on the command line. For example, if the ``glance-api`` service " -"refuses to start and stay running, try launching the daemon from the command " -"line:" -msgstr "" -"残念ながら、ときどきエラーがログファイルに表れない場合があります。このような" -"場合、作戦を変更し、違うコマンドを使用します。おそらくコマンドラインにおいて" -"直接サービスを実行することです。たとえば、``glance-api`` サービスが起動しな" -"かったり、実行状態にとどまらない場合は、コマンドラインからデーモンを起動して" -"みます。" - -msgid "" -"Unfortunately, this command does not tell you various details about the " -"running instances, such as what compute node the instance is running on, " -"what flavor the instance is, and so on. You can use the following command to " -"view details about individual instances:" -msgstr "" -"残念ながら、このコマンドは、インスタンスを実行しているコンピュートノードやイ" -"ンスタンスのフレーバーなどのような、実行中のインスタンスについての多様な情報" -"は提供しません。個別のインスタンスについての詳しい情報を確認するには以下のコ" -"マンドを使用してください。" - -msgid "" -"Unfortunately, this story has an open ending... we're still looking into why " -"the CentOS image was sending out spanning tree packets. Further, we're " -"researching a proper way on how to mitigate this from happening. It's a " -"bigger issue than one might think. 
While it's extremely important for " -"switches to prevent spanning tree loops, it's very problematic to have an " -"entire compute node be cut from the network when this happens. If a compute " -"node is hosting 100 instances and one of them sends a spanning tree packet, " -"that instance has effectively DDOS'd the other 99 instances." -msgstr "" -"不幸にも、この話にはエンディングがない…我々は、なぜ CentOS イメージがスパニン" -"グツリーパケットを送信し始める原因をいまだ探している。更に、我々は障害時にス" -"パニングツリーを軽減する正しい方法を調査している。これは誰かが思うより大きな" -"問題だ。スパニングツリーループを防ぐことはスイッチにとって非常に重要である" -"が、スパニングツリーが起こった際に、コンピュートノード全体がネットワークから" -"切り離されることも大きな問題である。コンピュートノードが 100 インスタンスをホ" -"スティングしていて、そのうち1つがスパニングツリーパケットを送信した場合、そ" -"のインスタンスは事実上他の 99 インスタンスを DDoS(サービス不能攻撃)したこと" -"になる。" - -msgid "Uninstalling" -msgstr "アンインストール" - -msgid "Unique ID (integer or UUID) for the flavor." -msgstr "フレーバー向けの一意な ID (整数や UUID)。" - -msgid "" -"Unlike having a single API endpoint, regions have a separate API endpoint " -"per installation, allowing for a more discrete separation. Users wanting to " -"run instances across sites have to explicitly select a region. However, the " -"additional complexity of a running a new service is not required." -msgstr "" -"単独の API エンドポイントを持つ場合と異なり、リージョンは、クラウドごとに別々" -"のAPIエンドポイントを持ち、より細かい分離を実現できます。複数の拠点にまたがっ" -"てインスタンスを実行するユーザーは、明示的にリージョンを指定しなければなりま" -"せん。しかし、新規サービスを実行するなど、複雑化しなくて済みます。" - -msgid "" -"Unlike the CLI tools mentioned above, the :command:`*-manage` tools must be " -"run from the cloud controller, as root, because they need read access to the " -"config files such as ``/etc/nova/nova.conf`` and to make queries directly " -"against the database rather than against the OpenStack :term:`API endpoints " -"`." -msgstr "" -"前述の CLI ツールとは異なり、 ``*-manage`` ツールは、クラウドコントローラーか" -"ら root として実行する必要があります。これは、 ``/etc/nova/nova.conf`` などの" -"設定ファイルへの読み取りアクセスが必要で、かつ OpenStack :term:`API エンドポ" -"イント ` に対してではなくデータベースに対して直接クエリーを実行" -"しなければならないからです。" - -msgid "Unmount the device after inspecting." -msgstr "検査後にディスクをアンマウントします。" - -msgid "Update Share" -msgstr "共有の更新" - -msgid "Update a default value for a new tenant, as follows:" -msgstr "" -"新規テナントに対するクォータのデフォルト値を更新するには、以下のようにしま" -"す。" - -msgid "Update a particular quota value, as follows:" -msgstr "指定したクォータ値を更新します。" - -msgid "" -"Update all ``.ini`` files to match passwords and pipelines as required for " -"the OpenStack release in your environment." -msgstr "" -"すべての ``.ini`` ファイルを更新して、お使いの環境で OpenStack リリース向けに" -"必要となるパスワードおよびパイプラインと一致させます。" - -msgid "Update services" -msgstr "サービスの更新" - -msgid "Update the new port with the IPv4 address:" -msgstr "IPv4 アドレスを持つ新しいポートを更新します。" - -msgid "Update the repository database." -msgstr "リポジトリーデータベースを更新します。" - -msgid "Upgrade Levels" -msgstr "アップグレードレベル" - -msgid "Upgrade OpenStack." -msgstr "OpenStack をアップグレードします。" - -msgid "" -"Upgrade levels are a feature added to OpenStack Compute since the Grizzly " -"release to provide version locking on the RPC (Message Queue) communications " -"between the various Compute services." 
-msgstr "" -"アップグレードレベルは、OpenStack Compute の Grizzly リリースで追加された機能" -"です。これは、さまざまな Compute サービス間の RPC (メッセージキュー) 通信にお" -"いてバージョンを固定できます。" - -msgid "Upgrade packages on each node" -msgstr "各ノードにおけるパッケージのアップグレード" - -msgid "Upgrade planning" -msgstr "アップグレードの計画" - -msgid "Upgrade process" -msgstr "アップグレード手順" - -msgid "Upgrades" -msgstr "アップグレード" - -msgid "Upload Certificate in DER format to Castellan" -msgstr "証明書を DER 形式で Castellan にアップロードします" - -msgid "Upload Image to Image service, with Signature Metadata" -msgstr "" -"署名のメタデータを付けて、イメージをイメージサービスにアップロードします" - -msgid "Use Cases" -msgstr "事例" - -msgid "" -"Use ``openstack-nova-network`` on RHEL/CentOS/Fedora but ``nova-network`` on " -"Ubuntu/Debian." -msgstr "" -"RHEL/CentOS/Fedora の場合は ``openstack-nova-network`` を使用しますが、" -"Ubuntu/Debian の場合は ``nova-network`` を使用します。" - -msgid "Use a public cloud" -msgstr "パブリッククラウドの利用" - -msgid "" -"Use ping to quickly find where a failure exists in the network path. In an " -"instance, first see whether you can ping an external host, such as google." -"com. If you can, then there shouldn't be a network problem at all." -msgstr "" -"ネットワーク経路のどこに障害があるかを素早く見つけるには、pingを使います。ま" -"ずあなたがインスタンス上で、google.comのような外部ホストにpingできるのであれ" -"ば、ネットワークの問題はないでしょう。" - -msgid "Use private key to create a signature of the image" -msgstr "秘密鍵を使用したイメージ署名の作成" - -msgid "Use security services" -msgstr "セキュリティサービスを指定する。" - -msgid "" -"Use the :command:`apt-get install` command to install specific versions of " -"each package by specifying ``=``. The script in the " -"previous step conveniently created a list of ``package=version`` pairs for " -"you:" -msgstr "" -":command:`apt-get install` コマンドに ``=`` を指定し" -"て、各パッケージの特定のバージョンをインストールします。前の手順にあるスクリ" -"プトは、利便性のために ``package=version`` のペアの一覧を作成しました。" - -msgid "Use the port when creating an instance:" -msgstr "インスタンスの作成時にポートを使用します。" - -msgid "Use this command to register an existing key with OpenStack:" -msgstr "このコマンドを使用して、既存の鍵を OpenStack に登録します。" - -msgid "" -"Use this example priority list to ensure that user-affected services are " -"restored as soon as possible, but not before a stable environment is in " -"place. Of course, despite being listed as a single-line item, each step " -"requires significant work. For example, just after starting the database, " -"you should check its integrity, or, after starting the nova services, you " -"should verify that the hypervisor matches the database and fix any " -"mismatches." -msgstr "" -"この例にある優先度一覧を使用すると、きちんと安定した状態になる前であっても、" -"できる限り早くユーザーに影響するサービスを復旧させることができます。もちろ" -"ん、1 行の項目として一覧化されていますが、各ステップは多大な作業が必要です。" -"たとえば、データベースを開始した後、その完全性を確認すべきです。また、nova " -"サービスを開始した後、ハイパーバイザーがデータベースに一致しているかを確認" -"し、不一致があれば修正すべきです。" - -msgid "Use your own cloud" -msgstr "自身のクラウドの使用" - -msgid "User Management" -msgstr "ユーザー管理" - -msgid "User quotas" -msgstr "ユーザークォータ" - -msgid "User virtual machines" -msgstr "ユーザーの仮想マシン" - -msgid "User-Facing Operations" -msgstr "ユーザーによる運用" - -msgid "Username" -msgstr "ユーザー名" - -msgid "" -"Username and email address are self-explanatory, though your site may have " -"local conventions you should observe. The primary project is simply the " -"first project the user is associated with and must exist prior to creating " -"the user. 
Role is almost always going to be \"member.\" Out of the box, " -"OpenStack comes with two roles defined:" -msgstr "" -"ユーザー名と電子メールアドレスは見たとおりです。あなたのサイトは従うべき独自" -"ルールがあるかもしれません。主プロジェクトは単にユーザーが割り当てられる最初" -"のプロジェクトです。ユーザーを作成する前に存在している必要があります。役割は" -"多くの場合ずっと \"メンバー\" のままになります。標準の状態で、OpenStack は次" -"の 2 つの役割が定義されています。" - -msgid "Users Who Disrupt Other Users" -msgstr "他のユーザーに悪影響を与えるユーザー" - -msgid "Users and Projects" -msgstr "ユーザーとプロジェクト" - -msgid "" -"Users and groups are managed through Active Directory and imported into the " -"Identity service using LDAP. CLIs are available for nova and Euca2ools to do " -"this." -msgstr "" -"ユーザとグループは Active Directory で管理され、LDAP を使用して Identity にイ" -"ンポートされます。CLI は nova と euca2ools が使用可能です。" - -msgid "" -"Users being able to retrieve console logs from running instances is a boon " -"for support—many times they can figure out what's going on inside their " -"instance and fix what's going on without bothering you. Unfortunately, " -"sometimes overzealous logging of failures can cause problems of its own." -msgstr "" -"稼働中のインスタンスからコンソールログを取得可能なユーザはサポートの恩恵とな" -"る。インスタンスの中で何が起こっているのか何度も確認できるし、あなたが悩まず" -"に問題を修正することができる。不幸なことに、過剰な失敗の記録は時々、自らの問" -"題となり得る。" - -msgid "" -"Users must be associated with at least one project, though they may belong " -"to many. Therefore, you should add at least one project before adding users." -msgstr "" -"ユーザーは、多数のプロジェクトに所属することは可能ですが、最低でも 1 つのプロ" -"ジェクトと関連付ける必要があります。そのため、ユーザー追加の前にプロジェクト" -"を 1 つ追加しておく必要があります。" - -msgid "" -"Users on your cloud can disrupt other users, sometimes intentionally and " -"maliciously and other times by accident. Understanding the situation allows " -"you to make a better decision on how to handle the disruption." -msgstr "" -"クラウドのユーザーは他のユーザーに悪影響を与える場合があります。意図的に悪意" -"を持って行わる場合もあれば、偶然起こる場合もあります。状況を理解することによ" -"り、このような混乱に対処する方法について、よりよい判断をできるようになりま" -"す。" - -msgid "" -"Using :command:`openstack server show` as an admin user will show the " -"compute node the instance was scheduled on as ``hostId``. If the instance " -"failed during scheduling, this field is blank." -msgstr "" -"管理ユーザーとして :command:`openstack server show` を使用すると、インスタン" -"スがスケジュールされたコンピュートノードが ``hostId`` として表示されます。イ" -"ンスタンスがスケジュール中に失敗していれば、この項目が空白です。" - -msgid "" -"Using Image API v1 requires '-w 0' above, since multiline image properties " -"are not supported." -msgstr "" -"Image API v1 は、複数行のイメージプロパティーをサポートしないので、上の '-w " -"0' が必要になります。" - -msgid "Using Instance-Specific Data" -msgstr "インスタンス固有データの使用" - -msgid "Using OpenStack" -msgstr "OpenStack の使い方" - -msgid "" -"Using OpenStack Compute cells, the NeCTAR Research Cloud spans eight sites " -"with approximately 4,000 cores per site." -msgstr "" -"OpenStack Compute セルを使用して、NeCTAR Research Cloud は8サイトに及び、1" -"サイトあたり約4,000コアがあります。" - -msgid "" -"Using administrator credentials, confirm the lost IP address is still " -"available:" -msgstr "" -"管理者クレデンシャルを使用して、失われた IP アドレスがまだ利用できることを確" -"認します。" - -msgid "Using cURL for further inspection" -msgstr "cURL を使用したさらなる検査" - -msgid "Using the OpenStack Dashboard for Administration" -msgstr "管理目的での OpenStack Dashboard の使用" - -msgid "" -"Using the command-line interface, you can manage quotas for the OpenStack " -"Compute service and the Block Storage service." 
-msgstr "" -"コマンドラインインターフェースを使って、OpenStack Compute と Block Storage の" -"クォータを管理できます。" - -msgid "" -"Using this functionality, ideally one would lock the RPC version to the " -"OpenStack version being upgraded from on nova-compute nodes, to ensure that, " -"for example X+1 version nova-compute processes will continue to work with X " -"version nova-conductor processes while the upgrade completes. Once the " -"upgrade of nova-compute processes is complete, the operator can move onto " -"upgrading nova-conductor and remove the version locking for nova-compute in " -"``nova.conf``." -msgstr "" -"この機能を使用することにより、理想的には、 nova-compute においてアップグレー" -"ドされる OpenStack の RPC バージョンを固定します。例えば、X+1 バージョンの " -"nova-compute プロセスが、アップグレード完了まで、X バージョンの nova-" -"conductor プロセスと一緒に動作しつづけることを保証するためです。 nova-" -"compute プロセスのアップグレードが完了すると、運用者は nova-conductor のアッ" -"プグレードに進み、 ``nova.conf`` において nova-compute のバージョンロックを" -"削除できます。" - -msgid "VCPUs" -msgstr "仮想 CPU" - -msgid "" -"VLAN tags are translated between the external tag defined in the network " -"settings, and internal tags in several places. On the ``br-int``, incoming " -"packets from the ``int-br-eth1`` are translated from external tags to " -"internal tags. Other translations also happen on the other bridges and will " -"be discussed in those sections." -msgstr "" -"VLAN タグは、ネットワーク設定において定義された外部タグといくつかの場所にある" -"内部タグの間で変換されます。 ``br-int`` において、 ``int-br-eth1`` からの受信" -"パケットは、外部タグから内部タグへと変換されます。他の変換が他のブリッジにお" -"いても発生します。これらのセクションで議論されます。" - -msgid "" -"VLAN-based networks are received as tagged packets on a physical network " -"interface, ``eth1`` in this example. Just as on the compute node, this " -"interface is a member of the ``br-eth1`` bridge." -msgstr "" -"VLAN ベースのネットワークは、この例にある物理ネットワークインターフェース " -"``eth1`` においてタグ付きパケットとして受信されます。コンピュートノードで" -"は、このインターフェースが ``br-eth1`` ブリッジのメンバーです。" - -msgid "" -"VLAN-based networks exit the integration bridge via veth interface ``int-br-" -"eth1`` and arrive on the bridge ``br-eth1`` on the other member of the veth " -"pair ``phy-br-eth1``. Packets on this interface arrive with internal VLAN " -"tags and are translated to external tags in the reverse of the process " -"described above:" -msgstr "" -"VLAN ベースのネットワークは、仮想インターフェース ``int-br-eth1`` 経由で統合" -"ブリッジを抜けて、仮想イーサネットペア ``phy-br-eth1`` の他のメンバーにあるブ" -"リッジ ``br-eth1`` に届きます。このインターフェースのパケットは、内部 VLAN タ" -"グとともに届き、上で説明したプロセスの逆順において外部タグに変換されます。" - -msgid "Value" -msgstr "値" - -msgid "Verify RabbitMQ processes are running:" -msgstr "RabbitMQ プロセスが動作していることを確認します。" - -msgid "" -"Verify proper operation of your environment. Then, notify your users that " -"their cloud is operating normally again." -msgstr "" -"お使いの環境が正常に動作することを検証します。そして、クラウドが再び通常どお" -"り動作していることをユーザーに知らせます。" - -msgid "Verify that all hosted instances have been moved off the node:" -msgstr "" -"すべてのホストのインスタンスがノードからいなくなっていることを確認します。" - -msgid "Verify the instance has the correct IP address:" -msgstr "インスタンスが適切な IP アドレスを持っていることを検証します。" - -msgid "Verify your alert mechanisms are still working." 
-msgstr "アラート機能が動作していることを確認します。" - -msgid "View and update Block Storage quotas for a tenant (project)" -msgstr "" -"Block Storage サービスのテナント (プロジェクト) の クォータの表示と更新" - -msgid "View and update compute quotas for a tenant (project)" -msgstr "テナント (プロジェクト) のコンピュートクォータの表示/更新" - -msgid "View quotas for the tenant, as follows:" -msgstr "特定のテナントのクォータを表示するには以下のようにします。" - -msgid "Virtual cores" -msgstr "仮想コア数" - -msgid "" -"Virtual hardware templates are called \"flavors\" in OpenStack, defining " -"sizes for RAM, disk, number of cores, and so on. The default install " -"provides five flavors." -msgstr "" -"仮想ハードウェアのテンプレートは、OpenStack において「フレーバー」と呼ばれま" -"す。メモリー、ディスク、CPU コア数などを定義します。デフォルトインストールで" -"は、5 つのフレーバーが存在します。" - -msgid "Virtual machine memory in megabytes." -msgstr "メガバイト単位の仮想マシンメモリー。" - -msgid "" -"Virtual root disk size in gigabytes. This is an ephemeral disk the base " -"image is copied into. You don't use it when you boot from a persistent " -"volume. The \"0\" size is a special case that uses the native base image " -"size as the size of the ephemeral root volume." -msgstr "" -"ギガバイト単位の仮想ルートディスク容量。これはベースイメージがコピーされる一" -"時ディスクです。永続的なボリュームからブートするとき、これは使用されません。" -"「0」という容量は特別な値で、一時ルートボリュームの容量としてベースイメージの" -"ネイティブ容量をそのまま使用することを意味します。" - -msgid "Virtualization" -msgstr "仮想化" - -msgid "Visualizing OpenStack Networking Service Traffic in the Cloud" -msgstr "クラウド上の OpenStack Networking サービス通信の仮想化" - -msgid "Visualizing nova-network Traffic in the Cloud" -msgstr "クラウド上の nova-network 通信の仮想化" - -msgid "" -"VlanManager is used extensively for network management. All servers have two " -"bonded 10GbE NICs that are connected to two redundant switches. DAIR is set " -"up to use single-node networking where the cloud controller is the gateway " -"for all instances on all compute nodes. Internal OpenStack traffic (for " -"example, storage traffic) does not go through the cloud controller." -msgstr "" -"ネットワーク管理は VlanManager が広範囲に使用されています。全てのサーバーは2" -"つの冗長化(bonding)された 10GbE NIC があり、2つの独立したスイッチに接続さ" -"れています。DAIR はクラウドコントローラーが全コンピュートノード上の全インスタ" -"ンス用のゲートウェイとなる、単一ノードのネットワーキングを使用する設定がされ" -"ています。内部の OpenStack 通信(例:ストレージ通信)はクラウドコントローラー" -"を経由していません。" - -msgid "Volumes" -msgstr "ボリューム" - -msgid "Wash, rinse, and repeat until you find the core cause of the problem." -msgstr "問題の根本となる原因を見つけるまで、洗い出し、精査し、繰り返します。" - -msgid "Watch the network" -msgstr "ネットワークの監視" - -msgid "We also had some excellent input from outside of the room:" -msgstr "私たちは部屋の外から、いくつかの素晴らしいインプットを得ました。" - -msgid "" -"We approximate the older ``nova-network`` multi-host HA setup by using " -"\"provider VLAN networks\" that connect instances directly to existing " -"publicly addressable networks and use existing physical routers as their " -"default gateway. This means that if our network controller goes down, " -"running instances still have their network available, and no single Linux " -"host becomes a traffic bottleneck. We are able to do this because we have a " -"sufficient supply of IPv4 addresses to cover all of our instances and thus " -"don't need NAT and don't use floating IP addresses. We provide a single " -"generic public network to all projects and additional existing VLANs on a " -"project-by-project basis as needed. Individual projects are also allowed to " -"create their own private GRE based networks." 
-msgstr "" -"インスタンスが既存のパブリックにアクセスできるネットワークに直接接続され、デ" -"フォルトゲートウェイとして既存の物理ルーターを使用する、プロバイダー VLAN " -"ネットワークを使用した、より古い ``nova-network`` のマルチホスト HA セット" -"アップに近づいています。このことは、ネットワークコントローラーが停止した場合" -"に、実行中のインスタンスがネットワークを利用可能であり続けること、単独の " -"Linux ホストが通信のボトルネックにならないことを意味します。すべてのインスタ" -"ンスの IPv4 アドレスを十分に提供でき、NAT が必要なく、Floating IP アドレスを" -"使用しないので、これを実行できます。単独の汎用的なパブリックネットワークをす" -"べてのプロジェクトに提供し、必要に応じてプロジェクト単位に追加で既存の VLAN " -"を提供します。個々のプロジェクトは、自身のプライベートな GRE ネットワークを作" -"成することもできます。" - -msgid "" -"We called them and asked them to stop for a while, and they were happy to " -"abandon the horribly broken VM. After that, we started monitoring the size " -"of console logs." -msgstr "" -"我々はユーザを呼び、しばらくダッシュボードの更新を止めるよう申し入れた。する" -"と、恐ろしい VM の破壊は止み、彼らは大いに喜んだ。その後、我々はコンソールロ" -"グのサイズを監視するようになった。" - -msgid "" -"We couldn't have pulled it off without so much supportive help and " -"encouragement." -msgstr "" -"私たちは、これらの多大な協力的な援助と励まし無しには、これを成し遂げることは" -"できなかったでしょう。" - -msgid "" -"We decided to have ``tcpdump`` run on this instance and see if we could " -"catch it in action again. Sure enough, we did." -msgstr "" -"我々は、このインスタンス上で ``tcpdump`` を実行して、操作で再びこの現象に遭遇" -"するか見てみることにした。実際、我々はやってみた。" - -msgid "" -"We hope you have enjoyed this quick tour of your working environment, " -"including how to interact with your cloud and extract useful information. " -"From here, you can use the `OpenStack Administrator Guide `_ as your reference for all of the command-line " -"functionality in your cloud." -msgstr "" -"クラウドとの対話や有用な情報の抽出の方法など、作業環境の概観を確認する手順を" -"簡単にご紹介しました。役立てていただければ幸いです。ここで説明した内容よりも" -"さらに詳しい情報は、クラウドの全コマンドライン機能についての参考資料として" -"`OpenStack 管理ガイド `_ を参照して" -"ください。" - -msgid "" -"We initially deployed on Ubuntu 12.04 with the Essex release of OpenStack " -"using FlatDHCP multi-host networking." -msgstr "" -"最初は、Ubuntu 12.04 に OpenStack Essex を導入しました。FlatDHCP マルチホスト" -"ネットワークを使用しています。" - -msgid "" -"We reached out for help. A networking engineer suggested it was an MTU " -"issue. Great! MTU! Something to go on! What's MTU and why would it cause a " -"problem?" -msgstr "" -"我々は助けを求めた。ネットワークエンジニアは、これは MTU の問題ではないかとい" -"うのだ。素晴らしい!MTU! 事態は動き始めた! MTU とは何で、何故それが問題になる" -"のだろうか?" - -msgid "" -"We recommend that you choose one of the following multiple disk options:" -msgstr "以下に挙げる複数のディスクの選択肢から選ぶことを推奨します。" - -msgid "" -"We recommend that you use the same hardware for new compute and block " -"storage nodes. At the very least, ensure that the CPUs are similar in the " -"compute nodes to not break live migration." -msgstr "" -"新しいコンピュートノードとブロックストレージノードには、同じハードウェアを使" -"用することを推奨します。最低限、ライブマイグレーションが失敗しないように、コ" -"ンピュートノードでは CPU は同様のものにしてください。" - -msgid "" -"We recommend using a combination of the OpenStack command-line interface " -"(CLI) tools and the OpenStack dashboard for administration. Some users with " -"a background in other cloud technologies may be using the EC2 Compatibility " -"API, which uses naming conventions somewhat different from the native API." -msgstr "" -"管理には、OpenStack コマンドラインインターフェース (CLI) ツールと OpenStack " -"Dashboard を組み合わせて使用することをお勧めします。他のクラウドテクノロジー" -"の使用経験のある一部のユーザーは、EC2 互換 API を使用している可能性がありま" -"す。この API は、ネイティブの API とは若干異なる命名規則を採用しています。" - -msgid "" -"We reviewed both sets of logs. The one thing that stood out the most was " -"DHCP. At the time, OpenStack, by default, set DHCP leases for one minute " -"(it's now two minutes). This means that every instance contacts the cloud " -"controller (DHCP server) to renew its fixed IP. For some reason, this " -"instance could not renew its IP. 
We correlated the instance's logs with the " -"logs on the cloud controller and put together a conversation:" -msgstr "" -"我々はログのセットを両方見直した。頻発したログの1つは DHCP だった。当時、" -"OpenStack はデフォルトでは DHCP リース期間を 1分に設定していた (現在は 2分)。" -"これは、各インスタンスが固定 IP アドレスを更新するためにクラウドコントロー" -"ラー(DHCP サーバー)に接続することを意味する。幾つかの理由で、このインスタン" -"スはその IP アドレスを更新できなかった。インスタンスのログとクラウドコント" -"ローラー上のログを突き合わせ、並べてやりとりにしてみた。" - -msgid "" -"We use the Puppet Labs OpenStack modules to configure Compute, Image " -"service, Identity, and dashboard. Puppet is used widely for instance " -"configuration, and Foreman is used as a GUI for reporting and instance " -"provisioning." -msgstr "" -"我々は Compute、Image service、Identity、dashboard の設定に Puppet Labs の" -"OpenStack モジュールを使用しています。Puppet は、インスタンスの設定に幅広く使" -"用されます。Foreman は、レポートおよびインスタンスの配備の GUI として使用され" -"ます。" - -msgid "" -"We want to acknowledge our excellent host Rackers at Rackspace in Austin:" -msgstr "" -"私たちは、オースチンの Rackspace での素晴らしいホスト Rackersに感謝したい。" - -msgid "" -"We wrote furiously from our own experiences and bounced ideas between each " -"other. At regular intervals we reviewed the shape and organization of the " -"book and further molded it, leading to what you see today." -msgstr "" -"私たちは一心不乱に自分たちの経験に基づき執筆を行い、互いに意見をぶつけ合いま" -"した。一定の間隔で、本の現在の状況や構成をレビューし、本を作り上げていき、今" -"皆さんが見ているものができあがりました。" - -msgid "" -"We wrote this book because we have deployed and maintained OpenStack clouds " -"for at least a year and we wanted to share this knowledge with others. After " -"months of being the point people for an OpenStack cloud, we also wanted to " -"have a document to hand to our system administrators so that they'd know how " -"to operate the cloud on a daily basis—both reactively and pro-actively. We " -"wanted to provide more detailed technical information about the decisions " -"that deployers make along the way." -msgstr "" -"私たちは少なくとも1年以上 OpenStack クラウドを構築し運用してきました。そこで" -"得られた知識を多くの人と共有するために、この本を書きました。 OpenStack クラウ" -"ドの責任者として数ヶ月がたつと、そのドキュメントを渡しておけば、システム管理" -"者に日々のクラウドの運用をどのように行なえばよいかが分かるようなドキュメント" -"が欲しくなりました。また、クラウドを構築する際に選択したやり方のより詳細な技" -"術情報を共有したいと思いました。" - -msgid "" -"We wrote this book in a book sprint, which is a facilitated, rapid " -"development production method for books. For more information, see the " -"`BookSprints site `_. Your authors cobbled this " -"book together in five days during February 2013, fueled by caffeine and the " -"best takeout food that Austin, Texas, could offer." -msgstr "" -"私たちはこの本を Book Sprint で執筆しました。 Book Sprint は短い期間で本を建" -"設的に作成できるメソッドです。詳しい情報は、 `BookSprints のサイト `_ を参照して下さい。著者らは2013年2月の5日間でこの本を" -"まとめあげました。カフェインと、テキサス州オースティンの素晴らしいテイクアウ" -"トの食事は力になりました。" - -msgid "We wrote this book to help you:" -msgstr "次のような場面であなたの助けとなるように、この本を書きました。" - -msgid "Weekly" -msgstr "週次" - -msgid "What to Back Up" -msgstr "バックアップ対象" - -msgid "What to do when things are running slowly" -msgstr "動作が遅くなった場合に確認すること" - -msgid "" -"When adding a new security group, you should pick a descriptive but brief " -"name. This name shows up in brief descriptions of the instances that use it " -"where the longer description field often does not. Seeing that an instance " -"is using security group ``http`` is much easier to understand than " -"``bobs_group`` or ``secgrp1``." 
-msgstr "" -"新しいセキュリティグループを追加するとき、内容を表す簡潔な名前をつけるべきで" -"す。この名前はインスタンスの簡単な説明など、より長い説明フィールドが使用され" -"ないところで使用されます。インスタンスがセキュリティグループ ``http`` を使っ" -"ているのを見れば、``bobs_group`` や ``secgrp1`` よりはずっと理解しやすいこと" -"でしょう。" - -msgid "" -"When an instance fails to behave properly, you will often have to trace " -"activity associated with that instance across the log files of various " -"``nova-*`` services and across both the cloud controller and compute nodes." -msgstr "" -"インスタンスが正しく動作していない場合、インスタンスに関連したログを調べる必" -"要があります。これらのログは複数の ``nova-*`` サービスが出力しており、クラウ" -"ドコントローラーとコンピュートノードの両方に存在します。" - -msgid "" -"When booting a server, you can also add arbitrary metadata so that you can " -"more easily identify it among other running instances. Use the ``--" -"property`` option with a key-value pair, where you can make up the string " -"for both the key and the value. For example, you could add a description and " -"also the creator of the server:" -msgstr "" -"サーバーを起動するとき、他の実行中のインスタンスと区別しやすくするために、メ" -"タデータを追加することもできます。``--property`` オプションをキーバリューペア" -"とともに使用します。ここで、キーとバリューの両方の文字列を指定することができ" -"ます。たとえば、説明とサーバーの作成者を追加できます。" - -msgid "" -"When debugging DNS issues, start by making sure that the host where the " -"dnsmasq process for that instance runs is able to correctly resolve. If the " -"host cannot resolve, then the instances won't be able to either." -msgstr "" -"DNS問題のデバッグをするとき、そのインスタンスのdnsmasqが動いているホストが、" -"名前解決できるかを確認することから始めます。もしホストができないのであれば、" -"インスタンスも同様でしょう。" - -msgid "" -"When operating an OpenStack cloud, you may discover that your users can be " -"quite demanding. If OpenStack doesn't do what your users need, it may be up " -"to you to fulfill those requirements. This chapter provided you with some " -"options for customization and gave you the tools you need to get started." -msgstr "" -"OpenStack クラウドの運用時、ユーザーが非常に要望している可能性があることに気" -"が付くかもしれません。OpenStack がユーザーの必要とするものを実施していない場" -"合、それらの要求を満たすことをあなたに任せるかもしれません。本章は、いくつか" -"のカスタマイズの選択肢を提供し、始めるために必要となるツールを提供します。" - -msgid "" -"When the :command:`fsfreeze -f` command is issued, all ongoing transactions " -"in the file system are allowed to complete, new write system calls are " -"halted, and other calls that modify the file system are halted. Most " -"importantly, all dirty data, metadata, and log information are written to " -"disk." -msgstr "" -":command:`fsfreeze -f` コマンドが発行された場合、ファイルシステム内で進行中の" -"すべてのトランザクションが完了することが認められます。新規書き込みのシステム" -"コールは停止されます。そして、ファイルシステムを変更する他のコールは停止され" -"ます。最も重要なこととしては、すべてのダーティーデータ、メタデータ、およびロ" -"グ情報がディスクに書き込まれることです。" - -msgid "" -"When the node is able to rejoin the cluster, just add it back to the ring. " -"The exact syntax you use to add a node to your swift cluster with ``swift-" -"ring-builder`` heavily depends on the original options used when you " -"originally created your cluster. Please refer back to those commands." -msgstr "" -"ノードがクラスターに参加できるようになったら、ただリングに再度追加するだけで" -"す。 ``swift-ring-builder`` を使用して swift クラスターにノードを追加するため" -"の構文は、元々クラスターを作成したときに使用した元々のオプションに強く依存し" -"ます。作成時に使用したコマンドをもう一度見てください。" - -msgid "" -"When the snapshot is done, you can thaw the file system with the following " -"command, as root, inside of the instance:" -msgstr "" -"スナップショットの作成が終わったら、インスタンスの中で root として以下のコマ" -"ンドを用いて、ファイルシステムをフリーズ解除できます。" - -msgid "" -"When this node fully booted, I ran through the same scenario of seeing what " -"instances were running so I could turn them back on. There were a total of " -"four. Three booted and one gave an error. It was the same error as before: " -"unable to find the backing disk. Seriously, what?" 
-msgstr "" -"そのノードが完全に起動した際、インスタンスが起動した時に何が起こるのかを見る" -"ため、私は同じシナリオを実行して、インスタンスを復旧した。インスタンスは全部" -"で4つあった。3つは起動し、1つはエラーになった。このエラーは以前のエラーと" -"同じだった。「unable to find the backing disk.」マジ、何で?" - -msgid "" -"When users provision resources, they can specify from which availability " -"zone they want their instance to be built. This allows cloud consumers to " -"ensure that their application resources are spread across disparate machines " -"to achieve high availability in the event of hardware failure." -msgstr "" -"リソースのプロビジョニングの際には、インスタンスを作成するアベイラビリティ" -"ゾーンを指定することができます。これによって、クラウドの利用者は、アプリケー" -"ションのリソースが異なるマシンに分散して配置され、ハードウェア故障が発生した" -"場合でも高可用性を達成することができます。" - -msgid "" -"When viewing the server information, you can see the metadata included on " -"the metadata line:" -msgstr "" -"サーバーの情報を表示するとき、 metadata 行に含まれるメタデータを参照できます:" - -msgid "" -"When you create a deployment plan, focus on a few vital areas because they " -"are very hard to modify post deployment. The next two sections talk about " -"configurations for:" -msgstr "" -"デプロイメントプランを作成する場合、デプロイメント後の修正は困難であるため、" -"いくつかの重要な分野にフォーカスを当ててください。次の 2 章で以下の設定内容に" -"ついて説明していきます。" - -msgid "" -"When you join the screen session that ``stack.sh`` starts with ``screen -r " -"stack``, you are greeted with many screen windows:" -msgstr "" -"``stack.sh`` が ``screen -r stack`` で作成したセッションに join すると、多数" -"の screen ウィンドウが見えます。" - -msgid "" -"When you join the screen session that ``stack.sh`` starts with ``screen -r " -"stack``, you see a screen for each service running, which can be a few or " -"several, depending on how many services you configured DevStack to run." -msgstr "" -"``stack.sh`` が ``screen -r stack`` で作成したセッションに join すると、動作" -"中の各サービスのスクリーンを参照できます。これは、DevStack が実行するよう設定" -"したサービスの数に依存して、いくつかあるでしょう。" - -msgid "" -"When you reboot a compute node, first verify that it booted successfully. " -"This includes ensuring that the ``nova-compute`` service is running:" -msgstr "" -"コンピュートノードを再起動した場合、まず正常に起動していることを検証します。" -"これには、``nova-compute`` サービスの動作を確認することが含まれます。" - -msgid "" -"When you run any of the following operations, the services appear in their " -"own internal availability zone (CONF.internal_service_availability_zone):" -msgstr "" -"以下の操作のいずれかを実行する場合、サービスは独自の内部アベイラビリティゾー" -"ン(CONF.internal_service_availability_zone) に表示されます。" - -msgid "" -"When your middleware is done, we encourage you to open source it and let the " -"community know on the OpenStack mailing list. Perhaps others need the same " -"functionality. They can use your code, provide feedback, and possibly " -"contribute. If enough support exists for it, perhaps you can propose that it " -"be added to the official swift `middleware `_." -msgstr "" -"あなたのミドルウェアが完成したら、オープンソースにし、OpenStack メーリングリ" -"ストでコミュニティに知らせることをお薦めします。もしかしたら他の人も同じ機能" -"を必要としているかもしれません。彼らはあなたのコードを使い、フィードバック" -"し、おそらくコントリビュートするでしょう。もし十分な支持があれば、もしかした" -"ら公式な swift `ミドルウェア `_ への追加を提案してもよいでしょう。" - -msgid "" -"When your scheduler is done, we encourage you to open source it and let the " -"community know on the OpenStack mailing list. Perhaps others need the same " -"functionality. They can use your code, provide feedback, and possibly " -"contribute. If enough support exists for it, perhaps you can propose that it " -"be added to the official Compute `schedulers `_." -msgstr "" -"あなたのスケジューラーが完成したら、オープンソースにし、OpenStack メーリング" -"リストでコミュニティに知らせることをお薦めします。もしかしたら他の人も同じ機" -"能を必要としているかもしれません。彼らはあなたのコードを使い、フィードバック" -"し、おそらくコントリビュートするでしょう。もし十分な支持があれば、もしかした" -"ら公式な Compute `スケジューラー `_ への追加を提案してもよいでしょう。" - -msgid "Where Are the Logs?" 
-msgstr "ログはどこにあるのか?" - -msgid "" -"Where do you even begin troubleshooting something like this? An instance " -"that just randomly locks up when a command is issued. Is it the image? Nope—" -"it happens on all images. Is it the compute node? Nope—all nodes. Is the " -"instance locked up? No! New SSH connections work just fine!" -msgstr "" -"どこかであなたはこのような障害調査を行ったことがあるだろうか?インスタンスは" -"コマンドを打つ度に全くランダムにロックアップしてしまう。元になったイメージの" -"問題か?No-全てのイメージで同じ問題が発生する。コンピュートノードの問題か?" -"No-全てのノードで発生する。インスタンスはロックアップしたのか?No!新しいSSH" -"接続は問題なく機能する!" - -msgid "" -"Where floating IPs are configured in a deployment, each project will have a " -"limited number of floating IPs controlled by a quota. However, these need to " -"be allocated to the project from the central pool prior to their use—usually " -"by the administrator of the project. To allocate a floating IP to a project, " -"use the :guilabel:`Allocate IP To Project` button on the :guilabel:`Floating " -"IPs` tab of the :guilabel:`Access & Security` page of the dashboard. The " -"command line can also be used:" -msgstr "" -"Floating IP はクラウド全体で設定されますが、各プロジェクトはクォータにより " -"Floating IP 数を制限されているでしょう。使用する前に中央プールからプロジェク" -"トに確保する必要があります。一般的に、プロジェクトの管理者により行われます。" -"ダッシュボードの :guilabel:`アクセスとセキュリティー` ページの :guilabel:" -"`Floating IP` タブの :guilabel:`Floating IP の確保` ボタンを使用して、" -"Floating IP をプロジェクトに確保します。コマンドラインを使用することもできま" -"す。" - -msgid "" -"Whether access to a specific resource might be granted or not according to " -"the permissions configured for the resource (currently available only for " -"the network resource). The actual authorization policies enforced in an " -"OpenStack service vary from deployment to deployment." -msgstr "" -"リソースに対して設定されたパーミッションに基づいて、特性のリソースに対するア" -"クセスを許可するかを決定する (今のところネットワークリソースでのみ利用可能)。" -"OpenStack により強制される実際の認可ポリシーは、導入の仕方により異なります。" - -msgid "" -"While OpenStack is composed of many components and moving parts, backing up " -"the critical data is quite simple." -msgstr "" -"OpenStackは多くのコンポーネントから構成され、注意を払うべき箇所もたくさんあり" -"ますが、大事なデータのバックアップは非常に単純です。" - -msgid "" -"While bouncing this idea around in our heads, I was randomly typing commands " -"on the compute node:" -msgstr "" -"このアイデアが我々の頭を駆け巡る間、私はコンピュートノード上でコマンドをラン" -"ダムに叩いていた。" - -msgid "" -"While instance information is stored in a number of database tables, the " -"table you most likely need to look at in relation to user instances is the " -"instances table." -msgstr "" -"インスタンスの情報が数多くのデータベースのテーブルに保存されますが、ユーザー" -"のインスタンスに関連して参照する必要がありそうなテーブルは、instances テーブ" -"ルです。" - -msgid "" -"While monitoring system resources, I noticed a significant increase in " -"memory consumption while the EC2 API processed this request. I thought it " -"wasn't handling memory properly—possibly not releasing memory. If the API " -"received several of these requests, memory consumption quickly grew until " -"the system ran out of RAM and began using swap. Each node has 48 GB of RAM " -"and the \"nova-api\" process would consume all of it within minutes. Once " -"this happened, the entire system would become unusably slow until I " -"restarted the nova-api service." 
-msgstr "" -"システムリソースを監視しているうちに、EC2 API がこのリクエストを処理している" -"間、メモリー消費量が非常に増えていることに気が付きました。これは、メモリが開" -"放されず、正常に処理されていないと気づきました。API がこれらのいくつかのリク" -"エストを受け取ると、システムがメモリー不足になり、スワップを使い始めるまで、" -"メモリー消費がすぐに大きくなります。各ノードは 48GB メモリーを持ち、\"nova-" -"api\" プロセスが数分以内にそれらをすべて消費します。これが発生すると、nova-" -"api サービスを再起動するまで、システム全体が使えなくなるほど遅くなります。" - -msgid "" -"While the command should work with any hypervisor that is controlled through " -"libvirt (KVM, QEMU, or LXC), it has been tested only with KVM. Here is the " -"example output when the hypervisor is KVM:" -msgstr "" -"このコマンドは、libvirt によって管理されている任意のハイパーバイザー (例: " -"KVM、QEMU、LXC) で機能するはずですが、KVM でのみテスト済みです。ハイパーバイ" -"ザーが KVM の場合の例は以下のようになります。" - -msgid "" -"While we'd always recommend using your automated deployment system to " -"reinstall systems from scratch, sometimes you do need to remove OpenStack " -"from a system the hard way. Here's how:" -msgstr "" -"我々は常に、自動配備システムを使って、まっさらの状態からシステムを再インス" -"トールすることを進めていますが、時として OpenStack を地道にシステムから削除し" -"なければならない場合もあるでしょう。その場合には以下の手順となります。" - -msgid "" -"While you might end up with unused partitions, such as partition 1 in disk " -"three and four of this example, this option allows for maximum utilization " -"of disk space. I/O performance might be an issue as a result of all disks " -"being used for all tasks." -msgstr "" -"この例では、ディスク 3 と 4 のパーティション 1 のように未使用のパーティション" -"が残る可能性もありますが、このオプションにより、ディスク領域の使用状況を最大" -"化することができます。すべてのディスクがすべてのタスクで利用されるため、I/O " -"のパフォーマンスが問題になる可能性があります。" - -msgid "Who This Book Is For" -msgstr "この本の対象読者" - -msgid "" -"Who uses it: DAIR is an integrated virtual environment that leverages the " -"CANARIE network to develop and test new information communication technology " -"(ICT) and other digital technologies. It combines such digital " -"infrastructure as advanced networking and cloud computing and storage to " -"create an environment for developing and testing innovative ICT " -"applications, protocols, and services; performing at-scale experimentation " -"for deployment; and facilitating a faster time to market." -msgstr "" -"利用者:DAIR は新しい情報通信技術(ICT)と他のデジタル技術を開発・評価するた" -"めの CANARIE ネットワークを活用した統合仮想環境です。このシステムは、先進的な" -"ネットワーク、クラウドコンピューティング、ストレージといったデジタルインフラ" -"から構成されており、革新的な ICT アプリケーション、プロトコル、サービス、の開" -"発・評価環境の作成、デプロイのスケールに関する実験の実施、市場へのより早期の" -"投入促進を目的としています。" - -msgid "" -"Who uses it: researchers at CERN (European Organization for Nuclear " -"Research) conducting high-energy physics research." -msgstr "" -"利用者: 高エネルギー物理学の研究を指揮している CERN (European Organization " -"for Nuclear Research) の研究者。" - -msgid "" -"Who uses it: researchers from the Australian publicly funded research " -"sector. Use is across a wide variety of disciplines, with the purpose of " -"instances ranging from running simple web servers to using hundreds of cores " -"for high-throughput computing." -msgstr "" -"利用者:オーストラリアの公的資金による研究部門からの研究者。用途は、シンプル" -"な Web サーバー用のインスタンスから高スループットコンピューティング用の数百の" -"コア使用まで、多種多様な専門分野に渡ります。" - -msgid "" -"Who uses it: researchers from the MIT Computer Science and Artificial " -"Intelligence Lab." -msgstr "" -"利用者:MIT Computer Science and Artificial Intelligence Lab からの研究者。" - -msgid "Why and How We Wrote This Book" -msgstr "この本をなぜ書いたか?どうやって書いたか?" - -msgid "" -"Windows XP and later releases include a Volume Shadow Copy Service (VSS) " -"which provides a framework so that compliant applications can be " -"consistently backed up on a live filesystem. To use this framework, a VSS " -"requestor is run that signals to the VSS service that a consistent backup is " -"needed. 
The VSS service notifies compliant applications (called VSS writers) "
-"to quiesce their data activity. The VSS service then tells the copy provider "
-"to create a snapshot. Once the snapshot has been made, the VSS service "
-"unfreezes VSS writers and normal I/O activity resumes."
-msgstr ""
-"Windows XP 以降には、準拠したアプリケーションが動作中のファイルシステムで整合"
-"性のあるバックアップを取得できるようにするフレームワークを提供する Volume "
-"Shadow Copy Service (VSS) が含まれます。このフレームワークを使用するために、"
-"VSS リクエスターが、整合性バックアップを必要とすることを VSS サービスに対して"
-"シグナルを発行します。VSS サービスは、準拠したアプリケーション (VSS ライター"
-"と言います) に通知して、これらのデータ処理を休止します。そして、VSS サービス"
-"がコピープロバイダーにスナップショットを作成するよう指示します。スナップ"
-"ショットが作成されると、VSS サービスが VSS ライターをフリーズ解除して、通常"
-"の I/O アクティビティーが再開されます。"
-
-msgid ""
-"With ``nova-network``, the nova database contains a few tables with "
-"networking information:"
-msgstr ""
-"``nova-network`` を用いる場合、nova データベースには、ネットワーク情報を持つ"
-"テーブルがいくつかあります。"
-
-msgid ""
-"With our upgrade to Grizzly in August 2013, we moved to OpenStack "
-"Networking, neutron (quantum at the time). Compute nodes have two-gigabit "
-"network interfaces and a separate management card for IPMI management. One "
-"network interface is used for node-to-node communications. The other is used "
-"as a trunk port for OpenStack managed VLANs. The controller node uses two "
-"bonded 10g network interfaces for its public IP communications. Big pipes "
-"are used here because images are served over this port, and it is also used "
-"to connect to iSCSI storage, back-ending the image storage and database. The "
-"controller node also has a gigabit interface that is used in trunk mode for "
-"OpenStack managed VLAN traffic. This port handles traffic to the dhcp-agent "
-"and metadata-proxy."
-msgstr ""
-"2013 年 8 月に Grizzly へとアップグレードしたときに、OpenStack Networking に"
-"移行しました。コンピュートノードは、2 個の GbE NIC を持ち、IPMI 管理専用のマ"
-"ネジメントカードを持ちます。1 つの NIC は、ノード間通信のために使用されます。"
-"もう 1 つは、OpenStack が管理する VLAN のトランクポートとして使用されます。コ"
-"ントローラーノードは、パブリック IP 通信のために、ボンドした 2 つの 10 GbE "
-"NICを持ちます。イメージがこのポート経由で使用されるため、ビッグパイプがここで"
-"使用されます。また、イメージストレージとデータベースのバックエンドとなる "
-"iSCSI ストレージに接続するためにも使用されます。コントローラーノードは、"
-"OpenStack が管理する VLAN 通信のためにトランクモードで使用される GbE NIC も持"
-"ちます。このポートは、DHCP エージェントとメタデータプロキシーへの通信も処理し"
-"ます。"
-
-msgid ""
-"With the exception of Object Storage, upgrading from one version of "
-"OpenStack to another can take a great deal of effort. This chapter provides "
-"some guidance on the operational aspects that you should consider for "
-"performing an upgrade for an OpenStack environment."
-msgstr ""
-"Object Storage 以外は、OpenStack のあるバージョンから別のバージョンにアップグ"
-"レードすることは非常に難しいことです。本章は運用観点でいくつかのガイドライン"
-"を提供します。これは、OpenStack 環境のアップグレードを実行する際の考慮すべき"
-"ことです。"
-
-msgid ""
-"With the introduction of the full software-defined networking stack provided "
-"by OpenStack Networking (neutron) in the Folsom release, development effort "
-"on the initial networking code that remains part of the Compute component "
-"has gradually lessened. While many still use ``nova-network`` in production, "
-"there has been a long-term plan to remove the code in favor of the more "
-"flexible and full-featured OpenStack Networking."
-msgstr ""
-"Folsom リリースにおいて OpenStack Networking (neutron) により提供された完全"
-"な SDN スタックの導入により、Compute のコンポーネントの一部に残っている、初期"
-"のネットワークのコードにおける開発の努力が徐々に少なくなってきました。まだた"
-"くさん本番環境で ``nova-network`` を使用していますが、より柔軟で完全な機能を"
-"持つ OpenStack Networking に移行して、そのコードを削除する長期的な計画があり"
-"ました。"
-
-msgid ""
-"With these two tables, you now have a good overview of what servers and "
-"services make up your cloud."
-msgstr "" -"これら2つの表で、どのサーバーとサービスがあなたのクラウドを構成しているのか、" -"概要を知ることができました。" - -msgid "" -"With this information in hand, we were sure that the problem had to do with " -"DHCP. We thought that for some reason, the instance wasn't getting a new IP " -"address and with no IP, it shut itself off from the network." -msgstr "" -"この情報により、我々は問題が DHCP 実行に起因するものと確信した。何らかの理由" -"でインスタンスが新しいIPアドレスを取得できず、その結果IPアドレスがなくなり、" -"インスタンスは自分自身をネットワークから切り離した、と考えた。" - -msgid "" -"With this option, you can assign different partitions to different RAID " -"arrays. You can allocate partition 1 of disk one and two to the ``/boot`` " -"partition mirror. You can make partition 2 of all disks the root partition " -"mirror. You can use partition 3 of all disks for a ``cinder-volumes`` LVM " -"partition running on a RAID 10 array." -msgstr "" -"このオプションでは、パーティションごとに異なる RAID アレイにおくことができま" -"す。例えば、ディスク 1 とディスク 2 のパーティション 1 を ``/boot`` パーティ" -"ションのミラーとして、すべてのディスクのパーティション 2 をルートパーティショ" -"ンのミラーとして、すべてのディスクのパーティション 3 を RAID10 アレイの上の " -"``cinder-volumes`` の LVM パーティションとして割り当てることができます。" - -msgid "" -"Within this scope, you must complete these steps to successfully roll back " -"your environment:" -msgstr "" -"この範囲内で、これらの手順を完了して、正常に環境をロールバックする必要があり" -"ます。" - -msgid "" -"Without upgrade levels, an X+1 version Compute service can receive and " -"understand X version RPC messages, but it can only send out X+1 version RPC " -"messages. For example, if a nova-conductor process has been upgraded to X+1 " -"version, then the conductor service will be able to understand messages from " -"X version nova-compute processes, but those compute services will not be " -"able to understand messages sent by the conductor service." -msgstr "" -"アップグレードレベルに関係なく、X+1 のバージョンの Compute サービスが X バー" -"ジョンの RPC メッセージを受信して理解できますが、X+1 のバージョンの RPC メッ" -"セージのみを送信できます。例えば、 nova-conductor プロセスが X+1 へとアップグ" -"レードされている場合、コンダクターサービスは、X バージョンの nova-compute プ" -"ロセスからのメッセージを理解できるようになります。しかし、それらのコンピュー" -"トサービスは、コンダクターサービスにより送信されたメッセージを理解できませ" -"ん。" - -msgid "" -"Working directly with the database and SQL queries can provide you with " -"custom lists and reports of images. Technically, you can update properties " -"about images through the database, although this is not generally " -"recommended." -msgstr "" -"データベースと SQL クエリーを直接使うことで、イメージの独自のリストやレポート" -"を得ることができます。一般には、推奨されませんが、技術的にはデータベース経由" -"でイメージのプロパティを更新できます。" - -msgid "" -"Working from the physical interface inwards, we can see the chain of ports " -"and bridges. First, the bridge ``eth1-br``, which contains the physical " -"network interface ``eth1`` and the virtual interface ``phy-eth1-br``:" -msgstr "" -"物理インターフェースより内側に取り組むと、ポートとブリッジのチェインを確認で" -"きます。まず、物理インターフェース ``eth1`` と仮想インターフェース ``phy-" -"eth1-br`` を含むブリッジ ``eth1-br`` です。" - -msgid "Working with Hardware" -msgstr "ハードウェアの取り扱い" - -msgid "Working with Roadmaps" -msgstr "ロードマップの取り扱い" - -msgid "" -"Write out \"dirty\" buffers to disk, similar to the Linux ``sync`` operation." -msgstr "" -"「ダーティー」バッファーをディスクに書き出します。Linux の ``sync`` 処理と似" -"ています。" - -msgid "" -"You *must mount the file system* before you run the :command:`fsfreeze` " -"command." -msgstr "" -"``fsfreeze`` コマンドを実行する前に、 *ファイルシステムをマウントする必要があ" -"ります。*" - -msgid "You also can add and remove the security services to the share network." 
-msgstr "共有ネットワークにセキュリティサービスを追加および削除できます。" - -msgid "" -"You also can see detailed information about the share network including " -"``network_type, segmentation_id`` fields:" -msgstr "" -"``network_type, segmentation_id`` 項目を含む、共有ネットワークに関する詳細を" -"参照することもできます。" - -msgid "" -"You are prompted for a project name and an optional, but recommended, " -"description. Select the check box at the bottom of the form to enable this " -"project. By default, it is enabled, as shown below:" -msgstr "" -"プロジェクト名および任意の説明 (推奨) が要求されます。フォームの一番下の" -"チェックボックスを選択してこのプロジェクトを有効にします。以下のように、デ" -"フォルトでは有効になっています。" - -msgid "" -"You can also `manage projects, users, and roles `_ from the command-" -"line clients." -msgstr "" -"コマンドラインクライアントから `プロジェクト、ユーザー、ロールを管理する " -"`_ こともできます。" - -msgid "You can also restore backed-up nova directories:" -msgstr "バックアップされた nova ディレクトリーもリストアできます。" - -msgid "" -"You can also specify block deviceblock device mapping at instance boot time " -"through the nova command-line client with this option set:" -msgstr "" -"nova コマンドラインクライアントに以下のようにオプションを付けて、インスタンス" -"の起動時にブロックデバイスのマッピングを指定することもできます。" - -msgid "" -"You can also use the Identity service (keystone) to see what services are " -"available in your cloud as well as what endpoints have been configured for " -"the services." -msgstr "" -"また、Identity サービス (keystone) を使用してクラウドで利用可能なサービスと、" -"サービス用に設定済みのエンドポイントを確認することもできます。" - -msgid "" -"You can attach block storage to instances from the dashboard on the :" -"guilabel:`Volumes` page. Click the :guilabel:`Manage Attachments` action " -"next to the volume you want to attach." -msgstr "" -"ダッシュボードの :guilabel:`ボリューム` ページから、インスタンスにブロックス" -"トレージを接続できます。接続したいボリュームの隣にある :guilabel:`接続の編集" -"` をクリックします。" - -msgid "" -"You can create a list of instances that are hosted on the compute node by " -"performing the following command:" -msgstr "" -"以下のコマンドを実行して、コンピュートノードにホストしているインスタンスの一" -"覧を作成できます。" - -msgid "" -"You can create automated alerts for critical processes by using Nagios and " -"NRPE. For example, to ensure that the ``nova-compute`` process is running on " -"the compute nodes, create an alert on your Nagios server:" -msgstr "" -"Nagios と NRPE を使って、クリティカルなプロセスの自動化されたアラートを作成す" -"ることが可能です。 ``nova-compute`` プロセスがコンピュートノードで動作してい" -"ることを保証するために、Nagios サーバー上で次のようなアラートを作成します。" - -msgid "" -"You can determine the package versions available for reversion by using the " -"``apt-cache policy`` command. For example:" -msgstr "" -"``apt-cache policy`` コマンドを使用して、バージョンを戻すために利用できるパッ" -"ケージのバージョンを確認できます。例:" - -msgid "" -"You can easily automate this process by creating a cron job that runs the " -"following script once per day:" -msgstr "" -"以下のようなcronジョブを一日に一度実行することで、簡単に自動化することも出来" -"ます。" - -msgid "" -"You can find the version of the Compute installation by using the OpenStack " -"command-line client:" -msgstr "" -"以下の OpenStack コマンドラインクライアントを使用して、インストールされてい" -"る Compute のバージョンを確認できます。" - -msgid "" -"You can follow a similar pattern in other projects that use the Python Paste " -"framework. Simply create a middleware module and plug it in through " -"configuration. The middleware runs in sequence as part of that project's " -"pipeline and can call out to other services as necessary. No project core " -"code is touched. Look for a ``pipeline`` value in the project's ``conf`` or " -"``ini`` configuration files in ``/etc/`` to identify projects that " -"use Paste." 
-msgstr "" -"Python Paste フレームワークを使う他のすべてのプロジェクトで、類似のパターンに" -"従うことができます。単純にミドルウェアモジュールを作成し、環境定義によって組" -"み込んでください。そのミドルウェアはプロジェクトのパイプラインの一部として順" -"番に実行され、必要に応じて他のサービスを呼び出します。プロジェクトのコア・" -"コードは一切修正しません。Paste を使っているプロジェクトを確認するには、 ``/" -"etc/`` に格納されている、プロジェクトの ``conf`` または ``ini`` 環境" -"定義ファイルの中で ``pipeline`` 変数を探してください。" - -msgid "" -"You can follow the progress being made on IPV6 support by watching the " -"`neutron IPv6 Subteam at work `_." -msgstr "" -"`neutron IPv6 Subteam at work `_ を確認して、進行状況を確認し続けられます。" - -msgid "" -"You can modify this example script on each node to handle different services." -msgstr "" -"さまざまなサービスを処理するために、各ノードでこのサンプルスクリプトを修正で" -"きます。" - -msgid "" -"You can now access the contents of ``/mnt``, which correspond to the first " -"partition of the instance's disk." -msgstr "" -"これで ``/mnt`` の中身にアクセスできます。これは、インスタンスのディスクの 1 " -"番目のパーティションに対応します。" - -msgid "You can now get the related floating IP entry:" -msgstr "関連する Floating IP のエントリーが見つかります。" - -msgid "" -"You can obtain extra information about virtual machines that are running—" -"their CPU usage, the memory, the disk I/O or network I/O—per instance, by " -"running the :command:`nova diagnostics` command with a server ID:" -msgstr "" -"実行中の仮想マシンの CPU 使用状況、メモリー、ディスク I/O、ネットワーク I/O " -"などの追加情報を取得するには、 ``nova diagnostics`` コマンドにサーバー ID を" -"指定して実行します:" - -msgid "" -"You can obtain further statistics by looking for the number of successful " -"requests:" -msgstr "成功したリクエストを検索することで、更なる情報を取得できます。" - -msgid "You can optionally also deallocate the IP from the user's pool:" -msgstr "また、ユーザプールからIPを開放することもできます。" - -msgid "" -"You can perform a couple of tricks with the database to either more quickly " -"retrieve information or fix a data inconsistency error—for example, an " -"instance was terminated, but the status was not updated in the database. " -"These tricks are discussed throughout this book." -msgstr "" -"より迅速に情報を取得したり、データ不整合のエラーを修正したりするために、デー" -"タベースでいくつかの小技を実行できます。たとえば、インスタンスが終了していた" -"が、データベースの状態が更新されていなかった、という状況です。こうした小技が" -"このドキュメント全体を通して議論されています。" - -msgid "You can re-enable the ``nova-compute`` service by undoing the commands:" -msgstr "コマンドを取り消すことにより ``nova-compute`` を再有効化できます。" - -msgid "" -"You can read a small selection of use cases from the OpenStack community " -"with some technical details and further resources." -msgstr "" -"OpenStack コミュニティーのユースケースをいくつか参照できます。少しの技術的な" -"詳細と参考資料もあります。" - -msgid "" -"You can restrict a project's image storage by total number of bytes. " -"Currently, this quota is applied cloud-wide, so if you were to set an Image " -"quota limit of 5 GB, then all projects in your cloud will be able to store " -"only 5 GB of images and snapshots." -msgstr "" -"プロジェクトのイメージ保存容量を合計バイト数で制限できます。現在、このクォー" -"タはクラウド全体に適用されます。そのため、イメージのクォータを 5 GB に設定す" -"る場合、クラウドの全プロジェクトが、5 GB 以内のイメージやスナップショットのみ" -"を保存できます。" - -msgid "" -"You can safely ignore the state of ``virbr0``, which is a default bridge " -"created by libvirt and not used by OpenStack." -msgstr "" -"``virbr0`` の状態は無視することができます。なぜならそれは libvirt が作成する" -"デフォルトのブリッジで、OpenStack からは使われないからです。" - -msgid "" -"You can save resources by looking at the best fit for the hardware you have " -"in place already. You might have some high-density storage hardware " -"available. You could format and repurpose those servers for OpenStack Object " -"Storage. All of these considerations and input from users help you build " -"your use case and your deployment plan." 
-msgstr "" -"すでに設置済みのハードウェアに最適な方法で使用されていることをチェックするこ" -"とで、リソースを節約することができます。高濃度のストレージハードウェアがある" -"とします。このハードウェアをフォーマットして、OpenStack Object Storage 用に" -"サーバーの用途を変更することができます。ユーザーからのこのような検討やイン" -"プットすべてをベースにすることで、ユースケースやデプロイメントプランの作成が" -"容易になります。" - -msgid "" -"You can save time by understanding the use cases for the cloud you want to " -"create. Use cases for OpenStack are varied. Some include object storage " -"only; others require preconfigured compute resources to speed development-" -"environment set up; and others need fast provisioning of compute resources " -"that are already secured per tenant with private networks. Your users may " -"have need for highly redundant servers to make sure their legacy " -"applications continue to run. Perhaps a goal would be to architect these " -"legacy applications so that they run on multiple instances in a cloudy, " -"fault-tolerant way, but not make it a goal to add to those clusters over " -"time. Your users may indicate that they need scaling considerations because " -"of heavy Windows server use." -msgstr "" -"作成するクラウドのユースケースを理解することで時間を節約することあできます。" -"OpenStack のユースケースはさまざまで、オブジェクトストレージのみのもの、開発" -"環境設定を加速するために事前設定されたコンピュートリソースが必要なもの、プラ" -"イベートネットワークでテナントごとにセキュリティが確保されたコンピュートリ" -"ソースの迅速にプロビジョニングするものもあります。ユーザーは、レガシーアプリ" -"ケーションが継続して実行されるように、非常に冗長化されたサーバーが必要な場合" -"もあります。おそらく、時間をかけてこれらのクラスターを追加するのが目的ではな" -"く、クラウドの耐障害性を確保したかたちで、複数のインスタンス上で実行するため" -"に、レガシーのアプリケーションを構築するのが目的の場合もあります。ユーザーに" -"よっては、負荷の高い Windows サーバーを使用するため、スケーリングを考慮する必" -"要があると指定する場合もあるでしょう。" - -msgid "" -"You can use these ratios to determine how much additional infrastructure you " -"need to support your cloud." -msgstr "" -"これらの比率を使用して、クラウドのサポートに必要なインフラストラクチャーがど" -"の程度必要か判断することができます。" - -msgid "" -"You could ask, \"Do I even need to build a cloud?\" If you want to start " -"using a compute or storage service by just swiping your credit card, you can " -"go to eNovance, HP, Rackspace, or other organizations to start using their " -"public OpenStack clouds. Using their OpenStack cloud resources is similar to " -"accessing the publicly available Amazon Web Services Elastic Compute Cloud " -"(EC2) or Simple Storage Solution (S3)." -msgstr "" -"「まだクラウドを構築する必要がありますか?」と質問したことでしょう。クレジット" -"カードを使うだけで、コンピュートサービスやストレージサービスを使いはじめたい" -"場合、eNovance、HP、Rackspace などのパブリック OpenStack クラウドを使うことが" -"できます。それらの OpenStack クラウドのリソースを使うことは、パブリックにアク" -"セスできる Amazon Web Services Elastic Compute Cloud (EC2) や Simple Storage " -"Solution (S3) にアクセスすることと同じです。" - -msgid "" -"You define the availability zone in which a specified compute host resides " -"locally on each server. An availability zone is commonly used to identify a " -"set of servers that have a common attribute. For instance, if some of the " -"racks in your data center are on a separate power source, you can put " -"servers in those racks in their own availability zone. Availability zones " -"can also help separate different classes of hardware." -msgstr "" -"指定したコンピュートホストがローカルでサーバー毎に所属するアベイラビリティ" -"ゾーンを定義します。アベイラビリティゾーンは一般的に、共通の属性を持つサー" -"バーを識別するために使用されます。例えば、データセンターのラックの一部が別の" -"電源を仕様している場合、このラックのサーバーを独自のアベイラビリティゾーンに" -"入れることができます。アベイラビリティゾーンは、異なるハードウェアクラスを分" -"割することもできます。" - -msgid "" -"You may find that you can automate the partitioning itself. For example, MIT " -"uses `Fully Automatic Installation (FAI) `_ to do " -"the initial PXE-based partition and then install using a combination of min/" -"max and percentage-based partitioning." 
-msgstr "" -"パーティショニング自体を自動化可能であることが分かります。例えば、MIT は " -"`Fully Automatic Installation (FAI) `_ を使用して、" -"初期の PXE ベースのパーティション分割を行い、min/max およびパーセントベースの" -"パーティショニングを組み合わせてインストールしていきます。" - -msgid "" -"You may need to explicitly install the ``ipset`` package if your " -"distribution does not install it as a dependency." -msgstr "" -"``ipset`` パッケージが、お使いのディストリビューションにおいて、依存関係でイ" -"ンストールされていない場合、それを明示的にインストールする必要があるかもしれ" -"ません。" - -msgid "" -"You may notice that all the existing logging messages are preceded by an " -"underscore and surrounded by parentheses, for example:" -msgstr "" -"以下に例を示しますが、全てのログメッセージはアンダースコアで始まり、括弧で括" -"られていることに気づいたでしょうか?" - -msgid "You might also see a message such as this:" -msgstr "このようなメッセージも確認できるかもしれません。" - -msgid "" -"You must complete the following configurations on the server's hard drives:" -msgstr "" -"サーバーのハードディスクに対して、以下の環境設定を完了させなければなりませ" -"ん。" - -msgid "" -"You must have the appropriate credentials if you want to use the command-" -"line tools to make queries against your OpenStack cloud. By far, the easiest " -"way to obtain :term:`authentication` credentials to use with command-line " -"clients is to use the OpenStack dashboard. Select :guilabel:`Project`, click " -"the :guilabel:`Project` tab, and click :guilabel:`Access & Security` on the :" -"guilabel:`Compute` category. On the :guilabel:`Access & Security` page, " -"click the :guilabel:`API Access` tab to display two buttons, :guilabel:" -"`Download OpenStack RC File` and :guilabel:`Download EC2 Credentials`, which " -"let you generate files that you can source in your shell to populate the " -"environment variables the command-line tools require to know where your " -"service endpoints and your authentication information are. The user you " -"logged in to the dashboard dictates the filename for the openrc file, such " -"as ``demo-openrc.sh``. When logged in as admin, the file is named ``admin-" -"openrc.sh``." -msgstr "" -"コマンドラインツールを使用して OpenStack クラウドに対してクエリーを実行するに" -"は、適切な認証情報が必要です。コマンドラインクライアントで使用する :term:`認" -"証 ` のクレデンシャルを取得する最も簡単な方法は、OpenStack " -"ダッシュボードを使用する方法です。 :guilabel:`プロジェクト` を選択し、 :" -"guilabel:`プロジェクト` タブをクリックし、 :guilabel:`コンピュート` カテゴ" -"リーにある :guilabel:`アクセスとセキュリティ` をクリックします。 :guilabel:`" -"アクセスとセキュリティ` ページにおいて、 :guilabel:`API アクセス` タブをク" -"リックして、 :guilabel:`OpenStack RC ファイルのダウンロード` と :guilabel:" -"`EC2 認証情報のダウンロード` の 2 つのボタンを表示します。これらのボタンによ" -"り、コマンドラインツールがサービスエンドポイントと認証情報の場所を知るのに必" -"要な環境変数を読み込むために、シェルで元データとして使用することのできるファ" -"イルを生成することができます。ダッシュボードにログインしたユーザーによって、" -"openrc ファイルのファイル名が決定します (例: ``demo-openrc.sh``)。admin とし" -"てログインした場合には、ファイル名は ``admin-openrc.sh`` となります。" - -msgid "" -"You must have the matching private key to access instances associated with " -"this key." -msgstr "" -"この鍵と関連付けられたインスタンスにアクセスするために、対応する秘密鍵を持つ" -"必要があります。" - -msgid "" -"You must remove the image after each test. Even better, test whether you can " -"successfully delete an image from the Image service." -msgstr "" -"毎回テスト後にイメージを削除する必要があります。 Image サービスからイメージが" -"削除できるかのテストにしてしまえば、さらによいです。" - -msgid "" -"You should be doing sanity checks on the interfaces using command such as :" -"command:`ip a` and :command:`brctl show` to ensure that the interfaces are " -"actually up and configured the way that you think that they are." -msgstr "" -"また、 :command:`ip a` や :command:`brctl show` などのコマンドを使って、イン" -"ターフェイスが実際にUPしているか、あなたが考えたとおりに設定されているか、正" -"当性を検査をすべきです。" - -msgid "You should see a message about ``/dev/sdb``." 
-msgstr "``/dev/sdb`` に関するメッセージを確認したほうがいいです。" - -msgid "You should see a result similar to the following:" -msgstr "以下のような結果を確認できます:" - -msgid "" -"You should verify that you have the requisite backups to restore. Rolling " -"back upgrades is a tricky process because distributions tend to put much " -"more effort into testing upgrades than downgrades. Broken downgrades take " -"significantly more effort to troubleshoot and, resolve than broken upgrades. " -"Only you can weigh the risks of trying to push a failed upgrade forward " -"versus rolling it back. Generally, consider rolling back as the very last " -"option." -msgstr "" -"リストアするために必要なバックアップがあることを確認すべきです。ディストリ" -"ビューションは、ダウングレードよりもアップグレードをテストすることにかなりの" -"労力をかける傾向があるため、ローリングバックアップグレードは扱いにくいプロセ" -"スです。失敗したダウングレードは、失敗したアップグレードよりトラブルシュー" -"ティングと解決に非常により多くの労力を必要とします。失敗したアップグレードを" -"前に進め続けるリスク、ロールバックするリスクを比較して重み付けすることだけが" -"できます。一般的に、かなり最後の選択肢としてロールバックを検討してください。" - -msgid "" -"You want to keep an eye on the areas improving within OpenStack. The best " -"way to \"watch\" roadmaps for each project is to look at the blueprints that " -"are being approved for work on milestone releases. You can also learn from " -"PTL webinars that follow the OpenStack summits twice a year." -msgstr "" -"OpenStack の中で改善されている領域を注目しつづけたいでしょう。各プロジェクト" -"のロードマップを「ウォッチ」する最善の方法は、今のマイルストーンリリースにお" -"いて取り組むために承認されたブループリントを確認することです。1 年に 2 回開催" -"されている OpenStack サミットの PTL による webinar からも知ることができます。" - -msgid "" -"Your credentials are a combination of username, password, and tenant " -"(project). You can extract these values from the ``openrc.sh`` discussed " -"above. The token allows you to interact with your other service endpoints " -"without needing to reauthenticate for every request. Tokens are typically " -"good for 24 hours, and when the token expires, you are alerted with a 401 " -"(Unauthorized) response and you can request another token." 
-msgstr "" -"認証情報はユーザー名、パスワード、テナント (プロジェクト) の組み合わせです。" -"これらの値は、前述の ``openrc.sh`` から抽出することができます。トークンによ" -"り、要求ごとに再認証する必要なく他のエンドポイントとの対話を行うことができま" -"す。トークンは通常 24 時間有効です。期限が切れると、401 (Unauthorized) の応答" -"で警告され、トークンをもう 1 つ要求することができます。" - -msgid "\\* ``schedule_run_instance``" -msgstr "\\* ``schedule_run_instance``" - -msgid "\\* ``select_destinations``" -msgstr "\\* ``select_destinations``" - -msgid "`2010.1 `_" -msgstr "`2010.1 `_" - -msgid "`2011.1 `_" -msgstr "`2011.1 `_" - -msgid "`2011.2 `_" -msgstr "`2011.2 `_" - -msgid "`2011.3 `_" -msgstr "`2011.3 `_" - -msgid "`2011.3.1 `_" -msgstr "`2011.3.1 `_" - -msgid "`2012.1 `_" -msgstr "`2012.1 `_" - -msgid "`2012.1.1 `_" -msgstr "`2012.1.1 `_" - -msgid "`2012.1.2 `_" -msgstr "`2012.1.2 `_" - -msgid "`2012.1.3 `_" -msgstr "`2012.1.3 `_" - -msgid "`2012.2 `_" -msgstr "`2012.2 `_" - -msgid "`2012.2.1 `_" -msgstr "`2012.2.1 `_" - -msgid "`2012.2.2 `_" -msgstr "`2012.2.2 `_" - -msgid "`2012.2.3 `_" -msgstr "`2012.2.3 `_" - -msgid "`2012.2.4 `_" -msgstr "`2012.2.4 `_" - -msgid "`2013.1 `_" -msgstr "`2013.1 `_" - -msgid "`2013.1.1 `_" -msgstr "`2013.1.1 `_" - -msgid "`2013.1.2 `_" -msgstr "`2013.1.2 `_" - -msgid "`2013.1.3 `_" -msgstr "`2013.1.3 `_" - -msgid "`2013.1.4 `_" -msgstr "`2013.1.4 `_" - -msgid "`2013.1.5 `_" -msgstr "`2013.1.5 `_" - -msgid "`2013.2 `_" -msgstr "`2013.2 `_" - -msgid "`2013.2.1 `_" -msgstr "`2013.2.1 `_" - -msgid "`2013.2.2 `_" -msgstr "`2013.2.2 `_" - -msgid "`2013.2.3 `_" -msgstr "`2013.2.3 `_" - -msgid "`2013.2.4 `_" -msgstr "`2013.2.4 `_" - -msgid "`2014.1 `_" -msgstr "`2014.1 `_" - -msgid "`2014.1.1 `_" -msgstr "`2014.1.1 `_" - -msgid "`2014.1.2 `_" -msgstr "`2014.1.2 `_" - -msgid "`2014.1.3 `_" -msgstr "`2014.1.3 `_" - -msgid "`2014.2 `_" -msgstr "`2014.2 `_" - -msgid "`2015.1 `_" -msgstr "`2015.1 `_" - -msgid "" -"`A breakdown of current features under development, with their target " -"milestone `_" -msgstr "" -"`現在の開発中の機能、それらの目標マイルストーンの詳細 `_" - -msgid "" -"`A list of all features, including those not yet under development `_" -msgstr "" -"`まだ開発中ではないものを含む、すべての機能の一覧 `_" - -msgid "" -"`Bare Metal service (ironic) upgrades `_" -msgstr "" -"`Bare Metal サービス (ironic) のアップグレード `_" - -msgid "" -"`Block Storage service (cinder) upgrades `_" -msgstr "" -"`Block Storage サービス (cinder) のアップグレード `_" - -msgid "`CSAIL homepage `_" -msgstr "`CSAIL ホームページ `_" - -msgid "" -"`Compute service (nova) upgrades `_" -msgstr "" -"`Compute サービス (nova) のアップグレード `_" - -msgid "" -"`Current stable release, security-supported `_" -msgstr "" -"`現在の安定版リリース、セキュリティアップデート対象 `_" - -msgid "`DAIR homepage `__" -msgstr "`DAIR ホームページ `__" - -msgid "`Dive Into Python (Apress) `_" -msgstr "`Dive Into Python (Apress) `_" - -msgid "" -"`End-of-life `_" -msgstr "" -"`エンドオブライフ `_" - -msgid "" -"`Identity service (keystone) upgrades `_" -msgstr "" -"`Identity サービス (keystone) のアップグレード `_" - -msgid "" -"`Image service (glance) rolling upgrades `_" -msgstr "" -"`Image サービス (glance) のローリングアップグレード `_" - -msgid "" -"`Image service (glance) zero downtime database upgrades `_" -msgstr "" -"`Image サービス (glance) の無停止データベースアップグレード `_" - -msgid "" -"`List of individual code changes under review `_" -msgstr "" -"`レビュー中の個々のコードの変更の一覧 `_" - -msgid "`NeCTAR website `_" -msgstr "`NeCTAR Web サイト `_" - -msgid "`NeCTAR-RC GitHub `_" -msgstr "`NeCTAR-RC GitHub `_" - -msgid "" -"`Networking service (neutron) upgrades `_" -msgstr "" -"`Networking サービス (neutron) のアップグレード `_" - -msgid "" -"`Object Storage service (swift) upgrades `_" -msgstr "" -"`Object Storage サービス (swift) のアップグレード `_" 
- -msgid "" -"`OpenStack API Guide `_" -msgstr "" -"`OpenStack API ガイド `_" - -msgid "" -"`OpenStack Administrator Guide `_" -msgstr "" -"`OpenStack Administrator Guide `_" - -msgid "" -"`OpenStack Architecture Design Guide `_" -msgstr "" -"`OpenStack アーキテクチャー設計ガイド `_" - -msgid "" -"`OpenStack Cloud Computing Cookbook (Packt Publishing) `_" -msgstr "" -"`OpenStack Cloud Computing Cookbook (Packt Publishing) `_" - -msgid "" -"`OpenStack Configuration Reference `_" -msgstr "" -"`OpenStack Configuration Reference `_" - -msgid "`OpenStack End User Guide `_" -msgstr "" -"`OpenStack エンドユーザーガイド `_" - -msgid "" -"`OpenStack High Availability Guide `_" -msgstr "`OpenStack 高可用性ガイド `_" - -msgid "" -"`OpenStack Installation Tutorial for Red Hat Enterprise Linux and CentOS " -"`_" -msgstr "" -"`OpenStack インストールチュートリアル Red Hat Enterprise Linux、CentOS 版 " -"`_" - -msgid "" -"`OpenStack Installation Tutorial for Ubuntu `_" -msgstr "" -"`OpenStack Installation Tutorial for Ubuntu `_" - -msgid "" -"`OpenStack Installation Tutorial for Ubuntu Server `_" -msgstr "" -"`OpenStack インストールチュートリアル Ubuntu 版 `_" - -msgid "" -"`OpenStack Installation Tutorial for openSUSE and SUSE Linux Enterprise " -"`_" -msgstr "" -"`OpenStack Installation Tutorial for openSUSE and SUSE Linux Enterprise " -"`_" - -msgid "" -"`OpenStack Installation Tutorial for openSUSE and SUSE Linux Enterprise " -"Server `_" -msgstr "" -"`OpenStack インストールチュートリアル openSUSE、SUSE Linux Enterprise Server " -"版 `_" - -msgid "" -"`OpenStack Networking Guide `_" -msgstr "" -"`OpenStack Networking Guide `_" - -msgid "" -"`OpenStack Security Guide `_" -msgstr "" -"`OpenStack セキュリティーガイド `_" - -msgid "" -"`OpenStack.org case study `_" -msgstr "" -"`OpenStack.org ケーススタディー `_" - -msgid "`Pro Puppet (Apress) `_" -msgstr "`Pro Puppet (Apress) `_" - -msgid "" -"`Problem with Heavy Network IO and Dnsmasq `_." -msgstr "" -"`高負荷ネットワーク IO と dnsmasq の問題 `_ 。" - -msgid "`Puppet Labs Documentation `_" -msgstr "`Puppet Labs Documentation `_" - -msgid "" -"`Rough-draft design discussions (\"etherpads\") from the last design summit " -"`_" -msgstr "" -"`直近のデザインサミットからの大まかな設定に関する議論 (etherpad) `_" - -msgid "" -"`Security-supported `_" -msgstr "" -"`セキュリティアップデート対象 `_" - -msgid "" -"`TCP/IP Illustrated, Volume 1: The Protocols, 2/E (Pearson) `_" -msgstr "" -"`TCP/IP Illustrated, Volume 1: The Protocols, 2/E (Pearson) `_" - -msgid "" -"`Telemetry service (ceilometer) upgrades `_" -msgstr "" -"`Telemetry サービス (ceilometer) のアップグレード `_" - -msgid "`The Book of Xen (No Starch Press) `_" -msgstr "`The Book of Xen (No Starch Press) `_" - -msgid "" -"`The TCP/IP Guide (No Starch Press) `_" -msgstr "" -"`The TCP/IP Guide (No Starch Press) `_" - -msgid "" -"`UNIX and Linux Systems Administration Handbook (Prentice Hall) `_" -msgstr "" -"`UNIX and Linux Systems Administration Handbook (Prentice Hall) `_" - -msgid "" -"`Under Development `_" -msgstr "`開発中 `_" - -msgid "" -"`Virtual Machine Image Guide `_" -msgstr "" -"`仮想マシンイメージガイド `_" - -msgid "" -"``/etc/cinder`` and ``/var/log/cinder`` follow the same rules as other " -"components." -msgstr "" -"``/etc/cinder`` と ``/var/log/cinder`` は他のコンポーネントの場合と同じルール" -"に従います。" - -msgid "" -"``/etc/glance`` and ``/var/log/glance`` follow the same rules as their nova " -"counterparts." -msgstr "" -"``/etc/keystone`` と ``/var/log/keystone`` は、対応する nova コンポーネントと" -"同じルールに従います。" - -msgid "" -"``/etc/keystone`` and ``/var/log/keystone`` follow the same rules as other " -"components." 
-msgstr "" -"``/etc/keystone`` と ``/var/log/keystone`` は他のコンポーネントの場合と同じ" -"ルールに従います。" - -msgid "" -"``/etc/neutron`` and ``/var/log/neutron`` follow the same rules as other " -"components." -msgstr "" -"``/etc/neutron`` と ``/var/log/neutron`` は他のコンポーネントの場合と同じルー" -"ルに従います。" - -msgid "" -"``/etc/swift`` is very important to have backed up. This directory contains " -"the swift configuration files as well as the ring files and ring :term:" -"`builder files `, which if lost, render the data on your " -"cluster inaccessible. A best practice is to copy the builder files to all " -"storage nodes along with the ring files. Multiple backup copies are spread " -"throughout your storage cluster." -msgstr "" -"``/etc/swift`` は非常に重要ですのでバックアップが必要です。このディレクトリに" -"は、swift の設定ファイル以外に、Ring ファイルや Ring :term:`ビルダーファイル " -"` が置かれています。これらのファイルを消失した場合はクラスター" -"上のデータにアクセスできなくなります。ベストプラクティスとしては、ビルダー" -"ファイルを全てのストレージノードに ring ファイルと共に置くことです。この方法" -"でストレージクラスター上にバックアップコピーが分散されて保存されます。" - -msgid "``/var/lib/cinder`` should also be backed up." -msgstr "``/var/lib/cinder`` もまたバックアップされるべきです。" - -msgid "" -"``/var/lib/glance`` should also be backed up. Take special notice of ``/var/" -"lib/glance/images``. If you are using a file-based back end of glance, ``/" -"var/lib/glance/images`` is where the images are stored and care should be " -"taken." -msgstr "" -"``/var/lib/glance`` もバックアップすべきです。 ``/var/lib/glance/images`` に" -"は特段の注意が必要です。もし、ファイルベースのバックエンドを利用しており、こ" -"のディレクトリがイメージの保管ディレクトリならば特にです。" - -msgid "" -"``/var/lib/keystone``, although it should not contain any data being used, " -"can also be backed up just in case." -msgstr "" -"``/var/lib/keystone`` は、使用されるデータは含まれていないはずですが、念のた" -"めバックアップします。" - -msgid "``/var/lib/neutron`` should also be backed up." -msgstr "``/var/lib/neutron`` もまたバックアップされるべきです。" - -msgid "``/var/lib/nova/instances/instance-/console.log``" -msgstr "``/var/lib/nova/instances/instance-/console.log``" - -msgid "``/var/lib/nova/instances`` contains two types of directories." -msgstr "``/var/lib/nova/instances`` には 2 種類のディレクトリがあります。" - -msgid "" -"``/var/lib/nova`` is another important directory to back up. The exception " -"to this is the ``/var/lib/nova/instances`` subdirectory on compute nodes. " -"This subdirectory contains the KVM images of running instances. You would " -"want to back up this directory only if you need to maintain backup copies of " -"all instances. Under most circumstances, you do not need to do this, but " -"this can vary from cloud to cloud and your service levels. Also be aware " -"that making a backup of a live KVM instance can cause that instance to not " -"boot properly if it is ever restored from a backup." 
-msgstr "" -"``/var/lib/nova`` がバックアップする他の重要なディレクトリです。これの例外が" -"コンピュートノードにある ``/var/lib/nova/instances`` サブディレクトリです。こ" -"のサブディレクトリには実行中のインスタンスの KVM イメージが置かれます。この" -"ディレクトリをバックアップしたいと思うのは、すべてのインスタンスのバックアッ" -"プコピーを保持する必要がある場合だけでしょう。多くの場合において、これを実行" -"する必要がありません。ただし、クラウドごとに異なり、サービスレベルによっても" -"異なる可能性があります。稼働中の KVM インスタンスのバックアップは、バックアッ" -"プから復元したときでも、正しく起動しない可能性があることに気をつけてくださ" -"い。" - -msgid "``/var/log/apache2/``" -msgstr "``/var/log/apache2/``" - -msgid "``/var/log/cinder/cinder-volume.log``" -msgstr "``/var/log/cinder/cinder-volume.log``" - -msgid "``/var/log/cinder``" -msgstr "``/var/log/cinder``" - -msgid "``/var/log/glance``" -msgstr "``/var/log/glance``" - -msgid "``/var/log/keystone``" -msgstr "``/var/log/keystone``" - -msgid "``/var/log/libvirt/libvirtd.log``" -msgstr "``/var/log/libvirt/libvirtd.log``" - -msgid "``/var/log/neutron``" -msgstr "``/var/log/neutron``" - -msgid "``/var/log/nova``" -msgstr "``/var/log/nova``" - -msgid "" -"``/var/log/nova`` does not need to be backed up if you have all logs going " -"to a central area. It is highly recommended to use a central logging server " -"or back up the log directory." -msgstr "" -"``/var/log/nova`` については、全てのログをリモートで集中管理しているのであれ" -"ば、バックアップの必要はありません。ログ集約システムの導入か、ログディレクト" -"リのバックアップを強く推奨します" - -msgid "``/var/log/rsyslog/c01.example.com/nova.log``" -msgstr "``/var/log/rsyslog/c01.example.com/nova.log``" - -msgid "``/var/log/rsyslog/c02.example.com/nova.log``" -msgstr "``/var/log/rsyslog/c02.example.com/nova.log``" - -msgid "``/var/log/rsyslog/nova.log``" -msgstr "``/var/log/rsyslog/nova.log``" - -msgid "``/var/log/syslog``" -msgstr "``/var/log/syslog``" - -msgid "``bandwidth_poll_interval``" -msgstr "``bandwidth_poll_interval``" - -msgid "``base_image_ref``" -msgstr "``base_image_ref``" - -msgid "``cinder-*``" -msgstr "``cinder-*``" - -msgid "``cinder-api``" -msgstr "``cinder-api``" - -msgid "``cinder-scheduler``" -msgstr "``cinder-scheduler``" - -msgid "``cinder.conf``:" -msgstr "``cinder.conf``:" - -msgid "``cores``" -msgstr "``cores``" - -msgid "``created_at``" -msgstr "``created_at``" - -msgid "``deleted_at``" -msgstr "``deleted_at``" - -msgid "``fixed-ips``" -msgstr "``fixed-ips``" - -msgid "``fixed_ips``" -msgstr "``fixed_ips``" - -msgid "``floating-ips``" -msgstr "``floating-ips``" - -msgid "``floating_ips``" -msgstr "``floating_ips``" - -msgid "``glance-*``" -msgstr "``glance-*``" - -msgid "``glance-api.conf`` and ``glance-registry.conf``:" -msgstr "``glance-api.conf`` と ``glance-registry.conf``:" - -msgid "``group_hosts``" -msgstr "``group_hosts``" - -msgid "``heal_instance_info_cache_interval``" -msgstr "``heal_instance_info_cache_interval``" - -msgid "``horizon``" -msgstr "``horizon``" - -msgid "``host_state_interval``" -msgstr "``host_state_interval``" - -msgid "``hosts_up``" -msgstr "``hosts_up``" - -msgid "``image_cache_manager_interval``" -msgstr "``image_cache_manager_interval``" - -msgid "``image_location``" -msgstr "``image_location``" - -msgid "``image_properties``" -msgstr "``image_properties``" - -msgid "``image_type``" -msgstr "``image_type``" - -msgid "``images``" -msgstr "``images``" - -msgid "``injected-file-content-bytes``" -msgstr "``injected-file-content-bytes``" - -msgid "``injected-file-path-bytes``" -msgstr "``injected-file-path-bytes``" - -msgid "``injected-files``" -msgstr "``injected-files``" - -msgid "``instance_delete_interval``" -msgstr "``instance_delete_interval``" - -msgid "``instance_uuid``" -msgstr "``instance_uuid``" - -msgid "``instances``" -msgstr "``instances``" - -msgid "" -"``ip``: 
authenticates an instance through its IP address. A valid format is " -"XX.XX.XX.XX orXX.XX.XX.XX/XX. For example 0.0.0.0/0." -msgstr "" -"``ip``: インスタンスの IP アドレスによりインスタンスを認証します。有効な形式" -"は XX.XX.XX.XX または XX.XX.XX.XX/XX です。例えば 0.0.0.0/0 です。" - -msgid "``key*``" -msgstr "``key*``" - -msgid "``key-pairs``" -msgstr "``key-pairs``" - -msgid "``key``" -msgstr "``key``" - -msgid "``keystone-*``" -msgstr "``keystone-*``" - -msgid "``keystone.conf``:" -msgstr "``keystone.conf``:" - -msgid "``launched_at``" -msgstr "``launched_at``" - -msgid "``metadata-items``" -msgstr "``metadata-items``" - -msgid "``n-sch``" -msgstr "``n-sch``" - -msgid "``n-{name}``" -msgstr "``n-{name}``" - -msgid "``neutron-*``" -msgstr "``neutron-*``" - -msgid "``nova-*``" -msgstr "``nova-*``" - -msgid "``nova-api`` services" -msgstr "``nova-api`` サービス" - -msgid "``nova-compute``, cinder hosts" -msgstr "``nova-compute``, cinder ホスト" - -msgid "``nova-scheduler`` services" -msgstr "``nova-scheduler`` サービス" - -msgid "``nova.conf``:" -msgstr "``nova.conf``:" - -msgid "``ram``" -msgstr "``ram``" - -msgid "``reclaim_instance_interval``" -msgstr "``reclaim_instance_interval``" - -msgid "``ro:`` read-only (RO) access." -msgstr "``ro:`` 読み取り専用アクセス。" - -msgid "``rw``: read and write (RW) access. This is the default value." -msgstr "``rw``: 読み書きアクセス。デフォルト。" - -msgid "``s-{name}``" -msgstr "``s-{name}``" - -msgid "``scheduled_at``" -msgstr "``scheduled_at``" - -msgid "``security-group-rules``" -msgstr "``security-group-rules``" - -msgid "``security-groups``" -msgstr "``security-groups``" - -msgid "``server_group_members``" -msgstr "``server_group_members``" - -msgid "``server_groups``" -msgstr "``server_groups``" - -msgid "``shell``" -msgstr "``shell``" - -msgid "``shelved_offload_time``" -msgstr "``shelved_offload_time``" - -msgid "``shelved_poll_interval``" -msgstr "``shelved_poll_interval``" - -msgid "``sync_power_state_interval``" -msgstr "``sync_power_state_interval``" - -msgid "``terminated_at``" -msgstr "``terminated_at``" - -msgid "``update_service_capabilities``" -msgstr "``update_service_capabilities``" - -msgid "``updated_at``" -msgstr "``updated_at``" - -msgid "``volume_usage_poll_interval``" -msgstr "``volume_usage_poll_interval``" - -msgid "" -"`instances losing IP address while running, due to No DHCPOFFER `_." 
-msgstr "" -"`DHCPOFFERが送信されない事による、起動中のインスタンスのIPアドレスの消失 " -"`_ 。" - -msgid "admin" -msgstr "admin" - -msgid "and the following log statement into the ``__call__`` method:" -msgstr "そして以下のログ出力分を ``__call__`` メソッドに挿入してください。" - -msgid "check the share's status:" -msgstr "共有の状態を確認します。" - -msgid "cinder-api" -msgstr "cinder-api" - -msgid "cinder-scheduler" -msgstr "cinder-scheduler" - -msgid "cinder-volume" -msgstr "cinder-volume" - -msgid "delete-on-terminate" -msgstr "delete-on-terminate" - -msgid "dev-name" -msgstr "dev-name" - -msgid "direction" -msgstr "方向" - -msgid "ethertype" -msgstr "ethertype" - -msgid "extra_specs" -msgstr "extra_specs" - -msgid "gigabytes" -msgstr "gigabytes" - -msgid "glance-api" -msgstr "glance-api" - -msgid "glance-registry" -msgstr "glance-registry" - -msgid "guest-file-flush" -msgstr "guest-file-flush" - -msgid "guest-fsfreeze" -msgstr "guest-fsfreeze" - -msgid "guest-fsfreeze-thaw" -msgstr "guest-fsfreeze-thaw" - -msgid "horizon" -msgstr "horizon" - -msgid "id" -msgstr "ID" - -msgid "img_signature uses the signature called signature_64" -msgstr "img_signature は signature_64 という署名を使用します" - -msgid "" -"img_signature_certificate_uuid uses the value from cert_uuid in section 5 " -"above" -msgstr "" -"img_signature_certificate_uuid は、上のセクション 5 にある cert_uuid の値を使" -"用します" - -msgid "img_signature_hash_method matches 'SHA-256' in section 2 above" -msgstr "" -"img_signature_hash_method は、上のセクション 2 にある SHA-256 と一致します" - -msgid "img_signature_key_type matches 'RSA-PSS' in section 2 above" -msgstr "" -"img_signature_key_type は、上のセクション 2 にある RSA-PSS と一致します" - -msgid "iptables" -msgstr "iptables" - -msgid "libvirt" -msgstr "libvirt" - -msgid "m1.large" -msgstr "m1.large" - -msgid "m1.medium" -msgstr "m1.medium" - -msgid "m1.small" -msgstr "m1.small" - -msgid "m1.tiny" -msgstr "m1.tiny" - -msgid "m1.xlarge" -msgstr "m1.xlarge" - -msgid "member" -msgstr "member" - -msgid "misc (swift, dnsmasq)" -msgstr "その他 (swift, dnsmasq)" - -msgid "neutron-api" -msgstr "neutron-api" - -msgid "neutron-dhcp-agent" -msgstr "neutron-dhcp-agent" - -msgid "neutron-l3-agent" -msgstr "neutron-l3-agent" - -msgid "neutron-metadata-agent" -msgstr "neutron-metadata-agent" - -msgid "neutron-openvswitch-agent" -msgstr "neutron-openvswitch-agent" - -msgid "neutron-server" -msgstr "neutron-server" - -msgid "nova-api" -msgstr "nova-api" - -msgid "nova-compute" -msgstr "nova-compute" - -msgid "nova-compute servers first need to be updated by the following steps:" -msgstr "まず nova-compute を以下の手順でアップデートする必要があります。" - -msgid "nova-conductor" -msgstr "nova-conductor" - -msgid "nova-novncproxy" -msgstr "nova-novncproxy" - -msgid "nova-scheduler" -msgstr "nova-scheduler" - -msgid "" -"or the `Command-Line Interface Reference `__." -msgstr "" -"または `コマンドラインインターフェースリファレンス `__ 。" - -msgid "port_range_max" -msgstr "port_range_max" - -msgid "port_range_min" -msgstr "port_range_min" - -msgid "protocol" -msgstr "プロトコル" - -msgid "remote_ip_prefix" -msgstr "remote_ip_prefix" - -msgid "rsyslog" -msgstr "rsyslog" - -msgid "rsyslog client configuration" -msgstr "rsyslog クライアント設定" - -msgid "rsyslog server configuration" -msgstr "rsyslog サーバー設定" - -msgid "showed this particular node in a down state." -msgstr "出力で、この特定のノードの状態が XXX になっていた。" - -msgid "size (GB)" -msgstr "size (GB)" - -msgid "snapshot" -msgstr "スナップショット" - -msgid "snapshots" -msgstr "snapshots" - -msgid "tcpdump" -msgstr "tcpdump" - -msgid "type" -msgstr "タイプ" - -msgid "" -"vlan20 is the VLAN that the data center gave us for outgoing Internet " -"access. 
It's a correct VLAN and is also attached to bond0." -msgstr "" -"vlan20 はデータセンターが外向けのインターネットアクセス用に我々に付与した " -"VLAN である。これは正しい VLAN で bond0 にアタッチされている。" - -msgid "volumes" -msgstr "volumes" - -msgid "where ``nova`` is the database you want to back up." -msgstr "ここで ``nova`` はバックアップ対象のデータベースです。" - -msgid "" -"will keep the RPC version locked across the specified services to the RPC " -"version used in X+1. As all instances of a particular service are upgraded " -"to the newer version, the corresponding line can be removed from ``nova." -"conf``." -msgstr "" -"指定したサービスをまたがり、RPC バージョンを X+1 の RPC バージョンに固定しま" -"す。特定のサービスのすべてのインスタンスがより新しいバージョンにアップグレー" -"ドされた後、対応する行を ``nova.conf`` から削除できます。" diff --git a/doc/ops-guide/source/ops-advanced-configuration.rst b/doc/ops-guide/source/ops-advanced-configuration.rst deleted file mode 100644 index 502749b244..0000000000 --- a/doc/ops-guide/source/ops-advanced-configuration.rst +++ /dev/null @@ -1,151 +0,0 @@ -====================== -Advanced Configuration -====================== - -OpenStack is intended to work well across a variety of installation -flavors, from very small private clouds to large public clouds. To -achieve this, the developers add configuration options to their code -that allow the behavior of the various components to be tweaked -depending on your needs. Unfortunately, it is not possible to cover all -possible deployments with the default configuration values. - -At the time of writing, OpenStack has more than 3,000 configuration -options. You can see them documented at the -`OpenStack Configuration Reference -`_. -This chapter cannot hope to document all of these, but we do try to -introduce the important concepts so that you know where to go digging -for more information. - -Differences Between Various Drivers -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Many OpenStack projects implement a driver layer, and each of these -drivers will implement its own configuration options. For example, in -OpenStack Compute (nova), there are various hypervisor drivers -implemented—libvirt, xenserver, hyper-v, and vmware, for example. Not -all of these hypervisor drivers have the same features, and each has -different tuning requirements. - -.. note:: - - The currently implemented hypervisors are listed on the `OpenStack - Configuration Reference - `__. - You can see a matrix of the various features in OpenStack Compute - (nova) hypervisor drivers at the `Hypervisor support matrix - page `_. - -The point we are trying to make here is that just because an option -exists doesn't mean that option is relevant to your driver choices. -Normally, the documentation notes which drivers the configuration -applies to. - -Implementing Periodic Tasks -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Another common concept across various OpenStack projects is that of -periodic tasks. Periodic tasks are much like cron jobs on traditional -Unix systems, but they are run inside an OpenStack process. For example, -when OpenStack Compute (nova) needs to work out what images it can -remove from its local cache, it runs a periodic task to do this. - -Periodic tasks are important to understand because of limitations in the -threading model that OpenStack uses. OpenStack uses cooperative -threading in Python, which means that if something long and complicated -is running, it will block other tasks inside that process from running -unless it voluntarily yields execution to another cooperative thread. - -A tangible example of this is the ``nova-compute`` process. 
In order to manage the image cache with libvirt, ``nova-compute`` has a
-periodic process that scans the contents of the image cache. Part of
-this scan is calculating a checksum for each of the images and making
-sure that checksum matches what ``nova-compute`` expects it to be.
-However, images can be very large, and these checksums can take a long
-time to generate. At one point, before it was reported as a bug and
-fixed, ``nova-compute`` would block on this task and stop responding to
-RPC requests. This was visible to users as failure of operations such
-as spawning or deleting instances.
-
-The takeaway from this is that if you observe an OpenStack process that
-appears to "stop" for a while and then continues to process normally,
-you should check that periodic tasks aren't the problem. One way to do
-this is to disable the periodic tasks by setting their interval to
-zero. Additionally, you can configure how often these periodic tasks
-run—in some cases, it might make sense to run them at a different
-frequency from the default.
-
-The frequency is defined separately for each periodic task. Therefore,
-to disable every periodic task in OpenStack Compute (nova), you would
-need to set a number of configuration options to zero. The current list
-of configuration options you would need to set to zero is:
-
-* ``bandwidth_poll_interval``
-* ``sync_power_state_interval``
-* ``heal_instance_info_cache_interval``
-* ``host_state_interval``
-* ``image_cache_manager_interval``
-* ``reclaim_instance_interval``
-* ``volume_usage_poll_interval``
-* ``shelved_poll_interval``
-* ``shelved_offload_time``
-* ``instance_delete_interval``
-
-To set a configuration option to zero, include a line such as
-``image_cache_manager_interval=0`` in your ``nova.conf`` file, as shown
-in the sketch below.
-
-This list will change between releases, so please refer to your
-configuration guide for up-to-date information.
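-
-As a minimal sketch (assuming the option names above match your
-release), disabling two of these tasks in ``nova.conf`` looks like
-this:
-
-.. code-block:: ini
-
-   [DEFAULT]
-   # A periodic task whose interval is set to zero is disabled.
-   image_cache_manager_interval = 0
-   sync_power_state_interval = 0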
-
-Specific Configuration Topics
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This section covers specific examples of configuration options you
-might consider tuning. It is by no means an exhaustive list.
-
-Security Configuration for Compute, Networking, and Storage
------------------------------------------------------------
-
-The `OpenStack Security Guide `_
-provides a deep dive into securing an OpenStack cloud, including
-SSL/TLS, key management, PKI and certificate management, data transport
-and privacy concerns, and compliance.
-
-High Availability
------------------
-
-The `OpenStack High Availability
-Guide `_ offers
-suggestions for elimination of a single point of failure that could
-cause system downtime. While it is not a completely prescriptive
-document, it offers methods and techniques for avoiding downtime and
-data loss.
-
-Enabling IPv6 Support
----------------------
-
-You can follow the progress being made on IPv6 support by watching the
-`neutron IPv6 Subteam at
-work `_.
-
-By modifying your configuration setup, you can set up IPv6 when using
-``nova-network`` for networking, and a tested setup is documented for
-FlatDHCP and a multi-host configuration. The key is to make
-``nova-network`` think a ``radvd`` command ran successfully. The entire
-configuration is detailed in a Cybera blog post, `“An IPv6 enabled
-cloud” `_.
-
-Geographical Considerations for Object Storage
-----------------------------------------------
-
-Support for global clustering of object storage servers is available
-for all supported releases. You would implement these global clusters
-to ensure replication across geographic areas in case of a natural
-disaster and also to ensure that users can write or access their
-objects more quickly based on the closest data center. You configure a
-default region with one zone for each cluster, but be sure your network
-(WAN) can handle the additional request and response load between zones
-as you add them and build a ring that spans them. Refer to
-`Geographically Distributed Clusters
-`_
-in the documentation for additional information.
diff --git a/doc/ops-guide/source/ops-backup-recovery.rst b/doc/ops-guide/source/ops-backup-recovery.rst
deleted file mode 100644
index d1263d2b87..0000000000
--- a/doc/ops-guide/source/ops-backup-recovery.rst
+++ /dev/null
@@ -1,219 +0,0 @@
-===================
-Backup and Recovery
-===================
-
-Standard backup best practices apply when creating your OpenStack
-backup policy. For example, how often to back up your data is closely
-related to how quickly you need to recover from data loss.
-
-.. note::
-
-   If you cannot have any data loss at all, you should also focus on a
-   highly available deployment. The `OpenStack High Availability
-   Guide `_ offers
-   suggestions for elimination of a single point of failure that could
-   cause system downtime. While it is not a completely prescriptive
-   document, it offers methods and techniques for avoiding downtime and
-   data loss.
-
-Other backup considerations include:
-
-* How many backups to keep?
-* Should backups be kept off-site?
-* How often should backups be tested?
-
-Just as important as a backup policy is a recovery policy (or at least
-recovery testing).
-
-What to Back Up
-~~~~~~~~~~~~~~~
-
-While OpenStack is composed of many components and moving parts,
-backing up the critical data is quite simple.
-
-This chapter describes only how to back up configuration files and
-databases that the various OpenStack components need to run. This
-chapter does not describe how to back up objects inside Object Storage
-or data contained inside Block Storage. Generally these areas are left
-for users to back up on their own.
-
-Database Backups
-~~~~~~~~~~~~~~~~
-
-The example OpenStack architecture designates the cloud controller as
-the MySQL server. This MySQL server hosts the databases for nova,
-glance, cinder, and keystone. With all of these databases in one place,
-it's very easy to create a database backup:
-
-.. code-block:: console
-
-   # mysqldump --opt --all-databases > openstack.sql
-
-If you want to back up only a single database, you can instead run:
-
-.. code-block:: console
-
-   # mysqldump --opt nova > nova.sql
-
-where ``nova`` is the database you want to back up.
-
-You can easily automate this process by creating a cron job that runs
-the following script once per day:
-
-.. code-block:: bash
-
-   #!/bin/bash
-   backup_dir="/var/lib/backups/mysql"
-   filename="${backup_dir}/mysql-$(hostname)-$(date +%Y%m%d).sql.gz"
-   # Dump the entire MySQL database
-   /usr/bin/mysqldump --opt --all-databases | gzip > "$filename"
-   # Delete backups older than 7 days
-   find "$backup_dir" -ctime +7 -type f -delete
-
-This script dumps the entire MySQL database and deletes any backups
-older than seven days.
-
-File System Backups
-~~~~~~~~~~~~~~~~~~~
-
-This section discusses which files and directories should be backed up
-regularly, organized by service.
-
-Compute
--------
-
-The ``/etc/nova`` directory on both the cloud controller and compute
-nodes should be regularly backed up.
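-
-A minimal sketch of one way to do this, reusing the ``/var/lib/backups``
-location from the database script above, is a dated tarball:
-
-.. code-block:: console
-
-   # tar czf /var/lib/backups/etc-nova-$(hostname)-$(date +%Y%m%d).tar.gz /etc/nova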
- -``/var/log/nova`` does not need to be backed up if you have all logs -going to a central area. It is highly recommended to use a central -logging server or back up the log directory. - -``/var/lib/nova`` is another important directory to back up. The -exception to this is the ``/var/lib/nova/instances`` subdirectory on -compute nodes. This subdirectory contains the KVM images of running -instances. You would want to back up this directory only if you need to -maintain backup copies of all instances. Under most circumstances, you -do not need to do this, but this can vary from cloud to cloud and your -service levels. Also be aware that making a backup of a live KVM -instance can cause that instance to not boot properly if it is ever -restored from a backup. - -Image Catalog and Delivery --------------------------- - -``/etc/glance`` and ``/var/log/glance`` follow the same rules as their -nova counterparts. - -``/var/lib/glance`` should also be backed up. Take special notice of -``/var/lib/glance/images``. If you are using a file-based back end of -glance, ``/var/lib/glance/images`` is where the images are stored and -care should be taken. - -There are two ways to ensure stability with this directory. The first is -to make sure this directory is run on a RAID array. If a disk fails, the -directory is available. The second way is to use a tool such as rsync to -replicate the images to another server: - -.. code-block:: console - - # rsync -az --progress /var/lib/glance/images backup-server:/var/lib/glance/images/ - -Identity --------- - -``/etc/keystone`` and ``/var/log/keystone`` follow the same rules as -other components. - -``/var/lib/keystone``, although it should not contain any data being -used, can also be backed up just in case. - -Block Storage -------------- - -``/etc/cinder`` and ``/var/log/cinder`` follow the same rules as other -components. - -``/var/lib/cinder`` should also be backed up. - -Networking ----------- - -``/etc/neutron`` and ``/var/log/neutron`` follow the same rules as other -components. - -``/var/lib/neutron`` should also be backed up. - -Object Storage --------------- - -``/etc/swift`` is very important to have backed up. This directory -contains the swift configuration files as well as the ring files and -ring :term:`builder files `, which if lost, render the data -on your cluster inaccessible. A best practice is to copy the builder files -to all storage nodes along with the ring files. Multiple backup copies are -spread throughout your storage cluster. - -Telemetry ---------- - -Back up the ``/etc/ceilometer`` directory containing Telemetry configuration -files. - -Orchestration -------------- - -Back up HOT template ``yaml`` files, and the ``/etc/heat/`` directory -containing Orchestration configuration files. - -Recovering Backups -~~~~~~~~~~~~~~~~~~ - -Recovering backups is a fairly simple process. To begin, first ensure -that the service you are recovering is not running. For example, to do a -full recovery of ``nova`` on the cloud controller, first stop all -``nova`` services: - -.. code-block:: console - - # stop nova-api - # stop nova-consoleauth - # stop nova-novncproxy - # stop nova-objectstore - # stop nova-scheduler - -Now you can import a previously backed-up database: - -.. code-block:: console - - # mysql nova < nova.sql - -You can also restore backed-up nova directories: - -.. code-block:: console - - # mv /etc/nova{,.orig} - # cp -a /path/to/backup/nova /etc/ - -Once the files are restored, start everything back up: - -.. 
code-block:: console - - # start mysql - # for i in nova-api nova-consoleauth nova-novncproxy \ - nova-objectstore nova-scheduler - > do - > start $i - > done - -Other services follow the same process, with their respective -directories and databases. - -Summary -~~~~~~~ - -Backup and subsequent recovery is one of the first tasks system -administrators learn. However, each system has different items that need -attention. By taking care of your database, image service, and -appropriate file system locations, you can be assured that you can -handle any event requiring recovery. diff --git a/doc/ops-guide/source/ops-capacity-planning-scaling.rst b/doc/ops-guide/source/ops-capacity-planning-scaling.rst deleted file mode 100644 index f9c408b956..0000000000 --- a/doc/ops-guide/source/ops-capacity-planning-scaling.rst +++ /dev/null @@ -1,423 +0,0 @@ -.. _capacity-planning-scaling: - -============================= -Capacity planning and scaling -============================= - -Cloud-based applications typically request more discrete hardware (horizontal -scaling) as opposed to traditional applications, which require larger hardware -to scale (vertical scaling). - -OpenStack is designed to be horizontally scalable. Rather than switching -to larger servers, you procure more servers and simply install identically -configured services. Ideally, you scale out and load balance among groups of -functionally identical services (for example, compute nodes or ``nova-api`` -nodes), that communicate on a message bus. - -Determining cloud scalability -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Determining the scalability of your cloud and how to improve it requires -balancing many variables. No one solution meets everyone's scalability goals. -However, it is helpful to track a number of metrics. You can define -virtual hardware templates called "flavors" in OpenStack, which will impact -your cloud scaling decisions. These templates define sizes for memory in RAM, -root disk size, amount of ephemeral data disk space available, and the number -of CPU cores. - -The default OpenStack flavors are shown in :ref:`table_default_flavors`. - -.. _table_default_flavors: - -.. list-table:: Table. OpenStack default flavors - :widths: 20 20 20 20 20 - :header-rows: 1 - - * - Name - - Virtual cores - - Memory - - Disk - - Ephemeral - * - m1.tiny - - 1 - - 512 MB - - 1 GB - - 0 GB - * - m1.small - - 1 - - 2 GB - - 10 GB - - 20 GB - * - m1.medium - - 2 - - 4 GB - - 10 GB - - 40 GB - * - m1.large - - 4 - - 8 GB - - 10 GB - - 80 GB - * - m1.xlarge - - 8 - - 16 GB - - 10 GB - - 160 GB - -The starting point is the core count of your cloud. By applying -some ratios, you can gather information about: - -- The number of virtual machines (VMs) you expect to run, - ``((overcommit fraction × cores) / virtual cores per instance)`` - -- How much storage is required ``(flavor disk size × number of instances)`` - -You can use these ratios to determine how much additional infrastructure -you need to support your cloud. - -Here is an example using the ratios for gathering scalability -information for the number of VMs expected as well as the storage -needed. The following numbers support (200 / 2) × 16 = 1600 VM instances -and require 80 TB of storage for ``/var/lib/nova/instances``: - -- 200 physical cores. - -- Most instances are size m1.medium (two virtual cores, 50 GB of - storage). - -- Default CPU overcommit ratio (``cpu_allocation_ratio`` in the ``nova.conf`` - file) of 16:1. - -.. 
note::
-   Regardless of the overcommit ratio, an instance cannot be placed
-   on any physical node with fewer raw (pre-overcommit) resources than
-   the instance flavor requires.
-
-However, you need more than the core count alone to estimate the load
-that the API services, database servers, and queue servers are likely
-to encounter. You must also consider the usage patterns of your cloud.
-
-As a specific example, compare a cloud that supports a managed
-web-hosting platform with one running integration tests for a
-development project that creates one VM per code commit. In the former,
-the heavy work of creating a VM happens only every few months, whereas
-the latter puts constant heavy load on the cloud controller. You must
-consider your average VM lifetime, as a larger number generally means
-less load on the cloud controller.
-
-Aside from the creation and termination of VMs, you must consider the
-impact of users accessing the service, particularly on ``nova-api`` and
-its associated database. Listing instances garners a great deal of
-information and, given the frequency with which users run this
-operation, a cloud with a large number of users can increase the load
-significantly. This can occur even without their knowledge. For
-example, leaving the OpenStack dashboard instances tab open in the
-browser refreshes the list of VMs every 30 seconds.
-
-After you consider these factors, you can determine how many cloud
-controller cores you require. A typical eight-core, 8 GB of RAM server
-is sufficient for up to a rack of compute nodes — given the above
-caveats.
-
-You must also consider key hardware specifications for the performance
-of user VMs, as well as budget and performance needs, including storage
-performance (spindles/core), memory availability (RAM/core), network
-bandwidth (Gbps/core), and overall CPU performance (CPU/core).
-
-.. tip::
-
-   For a discussion of metric tracking, including how to extract
-   metrics from your cloud, see the `OpenStack Operations Guide
-   `_.
-
-Adding cloud controller nodes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You can facilitate the horizontal expansion of your cloud by adding
-nodes. Adding compute nodes is straightforward since they are easily
-picked up by the existing installation. However, you must consider some
-important points when you design your cluster to be highly available.
-
-A cloud controller node runs several different services. You can
-install services that communicate only using the message queue
-internally (``nova-scheduler`` and ``nova-console``) on a new server
-for expansion. However, other integral parts require more care.
-
-You should load balance user-facing services such as dashboard,
-``nova-api``, or the Object Storage proxy. Use any standard HTTP
-load-balancing method (DNS round robin, hardware load balancer, or
-software such as Pound or HAProxy); a sketch of the HAProxy approach
-follows below. One caveat with dashboard is the VNC proxy, which uses
-the WebSocket protocol—something that an L7 load balancer might
-struggle with. See also `Horizon session storage
-`_.
-
-You can configure some services, such as ``nova-api`` and
-``glance-api``, to use multiple processes by changing a flag in their
-configuration file, allowing them to share work between multiple cores
-on one machine.
-
-.. tip::
-
-   Several options are available for MySQL load balancing, and the
-   supported AMQP brokers have built-in clustering support. Information
-   on how to configure these and many of the other services can be
-   found in the `Operations Guide
-   `_.
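-
-Returning to the HTTP load-balancing suggestion above, here is a
-minimal, hypothetical HAProxy sketch for two ``nova-api`` back ends;
-all host names and addresses are placeholders, not part of any real
-deployment:
-
-.. code-block:: none
-
-   # Illustrative haproxy.cfg fragment; tune timeouts and health
-   # checks for your own environment.
-   frontend nova-api
-       bind 203.0.113.10:8774
-       default_backend nova-api-nodes
-
-   backend nova-api-nodes
-       balance roundrobin
-       server controller1 192.0.2.11:8774 check
-       server controller2 192.0.2.12:8774 check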
-
-Segregating your cloud
-~~~~~~~~~~~~~~~~~~~~~~
-
-Segregating your cloud is necessary when users require different
-regions for legal reasons concerning data storage, for redundancy
-across earthquake fault lines, or for low-latency API calls. It can be
-segregated by *cells*, *regions*, *availability zones*, or *host
-aggregates*.
-
-Each method provides different functionality and can be best divided
-into two groups:
-
-- Cells and regions, which segregate an entire cloud and result in
-  running separate Compute deployments.
-
-- :term:`Availability zones ` and host aggregates,
-  which merely divide a single Compute deployment.
-
-:ref:`table_segregation_methods` provides a comparison view of each
-segregation method currently provided by OpenStack Compute.
-
-.. _table_segregation_methods:
-
-.. list-table:: Table. OpenStack segregation methods
-   :widths: 20 20 20 20 20
-   :header-rows: 1
-
-   * -
-     - Cells
-     - Regions
-     - Availability zones
-     - Host aggregates
-   * - **Use**
-     - A single :term:`API endpoint` for compute, or you require a
-       second level of scheduling.
-     - Discrete regions with separate API endpoints and no coordination
-       between regions.
-     - Logical separation within your nova deployment for physical
-       isolation or redundancy.
-     - To schedule a group of hosts with common features.
-   * - **Example**
-     - A cloud with multiple sites where you can schedule VMs
-       "anywhere" or on a particular site.
-     - A cloud with multiple sites, where you schedule VMs to a
-       particular site and you want a shared infrastructure.
-     - A single-site cloud with equipment fed by separate power
-       supplies.
-     - Scheduling to hosts with trusted hardware support.
-   * - **Overhead**
-     - Considered experimental. A new service, nova-cells. Each cell
-       has a full nova installation except nova-api.
-     - A different API endpoint for every region. Each region has a
-       full nova installation.
-     - Configuration changes to ``nova.conf``.
-     - Configuration changes to ``nova.conf``.
-   * - **Shared services**
-     - Keystone, ``nova-api``
-     - Keystone
-     - Keystone, All nova services
-     - Keystone, All nova services
-
-Cells and regions
------------------
-
-OpenStack Compute cells are designed to allow running the cloud in a
-distributed fashion without having to use more complicated
-technologies, or be invasive to existing nova installations. Hosts in a
-cloud are partitioned into groups called *cells*. Cells are configured
-in a tree. The top-level cell ("API cell") has a host that runs the
-``nova-api`` service, but no ``nova-compute`` services. Each child cell
-runs all of the other typical ``nova-*`` services found in a regular
-installation, except for the ``nova-api`` service. Each cell has its
-own message queue and database service and also runs ``nova-cells``,
-which manages the communication between the API cell and child cells.
-
-This allows a single API server to be used to control access to
-multiple cloud installations. Introducing a second level of scheduling
-(the cell selection), in addition to the regular ``nova-scheduler``
-selection of hosts, provides greater flexibility to control where
-virtual machines are run.
-
-Unlike having a single API endpoint, regions have a separate API
-endpoint per installation, allowing for a more discrete separation.
-Users wanting to run instances across sites have to explicitly select a
-region. However, the additional complexity of running a new service is
-not required.
-
-The OpenStack dashboard (horizon) can be configured to use multiple
-regions. This can be configured through the ``AVAILABLE_REGIONS``
-parameter.
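-
-As a sketch, the corresponding fragment of horizon's
-``local_settings.py`` might look like the following; the endpoint URLs
-and region names are placeholders:
-
-.. code-block:: python
-
-   # Each entry is an (Identity API endpoint, display name) pair.
-   AVAILABLE_REGIONS = [
-       ('https://region-one.example.com:5000/v3', 'RegionOne'),
-       ('https://region-two.example.com:5000/v3', 'RegionTwo'),
-   ]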
-
-Availability zones and host aggregates
---------------------------------------
-
-You can use availability zones, host aggregates, or both to partition a
-nova deployment. Both methods are configured and implemented in a
-similar way.
-
-Availability zone
-^^^^^^^^^^^^^^^^^
-
-This enables you to arrange OpenStack compute hosts into logical groups
-and provides a form of physical isolation and redundancy from other
-availability zones, such as by using a separate power supply or network
-equipment.
-
-You define the availability zone in which a specified compute host
-resides locally on each server. An availability zone is commonly used
-to identify a set of servers that have a common attribute. For
-instance, if some of the racks in your data center are on a separate
-power source, you can put servers in those racks in their own
-availability zone. Availability zones can also help separate different
-classes of hardware.
-
-When users provision resources, they can specify from which
-availability zone they want their instance to be built. This allows
-cloud consumers to ensure that their application resources are spread
-across disparate machines to achieve high availability in the event of
-hardware failure.
-
-Host aggregates
-^^^^^^^^^^^^^^^
-
-This enables you to partition OpenStack Compute deployments into
-logical groups for load balancing and instance distribution. You can
-use host aggregates to further partition an availability zone. For
-example, you might use host aggregates to partition an availability
-zone into groups of hosts that either share common resources, such as
-storage and network, or have a special property, such as trusted
-computing hardware.
-
-A common use of host aggregates is to provide information for use with
-the ``nova-scheduler``. For example, you might use a host aggregate to
-group a set of hosts that share specific flavors or images.
-
-The general case for this is setting key-value pairs in the aggregate
-metadata and matching key-value pairs in a flavor's ``extra_specs``
-metadata. The ``AggregateInstanceExtraSpecsFilter`` in the filter
-scheduler will enforce that instances be scheduled only on hosts in
-aggregates that define the same key to the same value.
-
-An advanced use of this general concept allows different flavor types
-to run with different CPU and RAM allocation ratios so that
-high-intensity computing loads and low-intensity development and
-testing systems can share the same cloud without either starving the
-high-use systems or wasting resources on low-utilization systems. This
-works by setting ``metadata`` in your host aggregates and matching
-``extra_specs`` in your flavor types.
-
-The first step is setting the aggregate metadata keys
-``cpu_allocation_ratio`` and ``ram_allocation_ratio`` to a
-floating-point value, as sketched below. The filter schedulers
-``AggregateCoreFilter`` and ``AggregateRamFilter`` will use those
-values rather than the global defaults in ``nova.conf`` when scheduling
-to hosts in the aggregate. Be cautious when using this feature, since
-each host can be in multiple aggregates but should have only one
-allocation ratio for each resource. It is up to you to avoid putting a
-host in multiple aggregates that define different values for the same
-resource.
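-
-A minimal command-line sketch of this first step (the aggregate and
-host names here are hypothetical):
-
-.. code-block:: console
-
-   $ openstack aggregate create --zone nova high-overcommit
-   $ openstack aggregate add host high-overcommit compute1
-   $ openstack aggregate set --property cpu_allocation_ratio=16.0 high-overcommit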
-
-This is the first half of the equation. To get flavor types that are
-guaranteed a particular ratio, you must set the ``extra_specs`` in the
-flavor type to the key-value pair you want to match in the aggregate.
-For example, if you define ``extra_specs`` ``cpu_allocation_ratio`` to
-"1.0", then instances of that type will run in aggregates only where
-the metadata key ``cpu_allocation_ratio`` is also defined as "1.0." In
-practice, it is better to define an additional key-value pair in the
-aggregate metadata to match on rather than match directly on
-``cpu_allocation_ratio`` or ``core_allocation_ratio``. This allows
-better abstraction. For example, by defining a key ``overcommit`` and
-setting a value of "high," "medium," or "low," you could then tune the
-numeric allocation ratios in the aggregates without also needing to
-change all flavor types relating to them.
-
-.. note::
-
-   Previously, all services had an availability zone. Currently, only
-   the ``nova-compute`` service has its own availability zone. Services
-   such as ``nova-scheduler``, ``nova-network``, and ``nova-conductor``
-   have always spanned all availability zones.
-
-   When you run any of the following operations, the services appear in
-   their own internal availability zone
-   (CONF.internal_service_availability_zone):
-
-   - :command:`openstack host list` (os-hosts)
-
-   - :command:`euca-describe-availability-zones verbose`
-
-   - :command:`openstack compute service list`
-
-   The internal availability zone is hidden in
-   euca-describe-availability_zones (nonverbose).
-
-   CONF.node_availability_zone has been renamed to
-   CONF.default_availability_zone and is used only by the
-   ``nova-api`` and ``nova-scheduler`` services.
-
-   CONF.node_availability_zone still works but is deprecated.
-
-Scalable Hardware
-~~~~~~~~~~~~~~~~~
-
-While several resources already exist to help with deploying and
-installing OpenStack, it's very important to make sure that you have
-your deployment planned out ahead of time. This guide presumes that you
-have set aside a rack for the OpenStack cloud but also offers
-suggestions for when and what to scale.
-
-Hardware Procurement
---------------------
-
-“The Cloud” has been described as a volatile environment where servers
-can be created and terminated at will. While this may be true, it does
-not mean that your servers must be volatile. Ensuring that your cloud's
-hardware is stable and configured correctly means that your cloud
-environment remains up and running.
-
-OpenStack can be deployed on any hardware supported by an
-OpenStack compatible Linux distribution.
-
-Hardware does not have to be consistent, but it should at least have
-the same type of CPU to support instance migration.
-
-The typical hardware recommended for use with OpenStack is the standard
-value-for-money offerings that most hardware vendors stock. It should
-be straightforward to divide your procurement into building blocks such
-as "compute," "object storage," and "cloud controller," and request as
-many of these as you need. Alternatively, any existing servers you have
-that meet your performance requirements and support virtualization
-technology are likely to support OpenStack.
-
-Capacity Planning
------------------
-
-OpenStack is designed to increase in size in a straightforward manner.
-Taking into account the considerations previously mentioned,
-particularly the sizing of the cloud controller, it should be possible
-to procure additional compute or object storage nodes as needed.
-New nodes do not need to be the same specification or vendor as
-existing nodes.
-
-For compute nodes, ``nova-scheduler`` will manage differences in
-sizing with core count and RAM. However, you should consider that the
-user experience changes with differing CPU speeds. When adding object
-storage nodes, a :term:`weight` should be specified that reflects the
-:term:`capability` of the node.
-
-Monitoring the resource usage and user growth will enable you to know
-when to procure. The `Logging and Monitoring
-`_
-chapter in the Operations Guide details some useful metrics.
-
-Burn-in Testing
----------------
-
-The chances of failure for the server's hardware are high at the start
-and the end of its life. As a result, dealing with hardware failures
-while in production can be avoided by appropriate burn-in testing to
-attempt to trigger the early-stage failures. The general principle is
-to stress the hardware to its limits. Examples of burn-in tests include
-running a CPU or disk benchmark for several days.
diff --git a/doc/ops-guide/source/ops-customize-compute.rst b/doc/ops-guide/source/ops-customize-compute.rst
deleted file mode 100644
index 47568c3cd5..0000000000
--- a/doc/ops-guide/source/ops-customize-compute.rst
+++ /dev/null
@@ -1,309 +0,0 @@
-==================================================
-Customizing the OpenStack Compute (nova) Scheduler
-==================================================
-
-Many OpenStack projects allow for customization of specific features
-using a driver architecture. You can write a driver that conforms to a
-particular interface and plug it in through configuration. For example,
-you can easily plug in a new scheduler for Compute. The existing
-schedulers for Compute are full featured and well documented at
-`Scheduling `_. However, depending on your users' use cases, the
-existing schedulers might not meet your requirements. You might need to
-create a new scheduler.
-
-To create a scheduler, you must inherit from the class
-``nova.scheduler.driver.Scheduler``. Of the five methods that you can
-override, you *must* override the two methods marked with an asterisk
-(\*) below:
-
-- ``update_service_capabilities``
-
-- ``hosts_up``
-
-- ``group_hosts``
-
-- \* ``schedule_run_instance``
-
-- \* ``select_destinations``
-
-To demonstrate customizing OpenStack, we'll create an example of a
-Compute scheduler that randomly places an instance on a subset of
-hosts, depending on the originating IP address of the request and the
-prefix of the hostname. Such an example could be useful when you have a
-group of users on a subnet and you want all of their instances to start
-within some subset of your hosts.
-
-.. warning::
-
-   This example is for illustrative purposes only. It should not be
-   used as a scheduler for Compute without further development and
-   testing.
-
-When you join the screen session that ``stack.sh`` starts with
-``screen -r stack``, you are greeted with many screen windows:
-
-.. code-block:: console
-
-   0$ shell*  1$ key  2$ horizon  ...  9$ n-api  ...  14$ n-sch ...
-
-
-``shell``
-   A shell where you can get some work done
-
-``key``
-   The keystone service
-
-``horizon``
-   The horizon dashboard web application
-
-``n-{name}``
-   The nova services
-
-``n-sch``
-   The nova scheduler service
-
-**To create the scheduler and plug it in through configuration**
-
-#. The code for OpenStack lives in ``/opt/stack``, so go to the ``nova``
-   directory and edit your scheduler module. Change to the directory where
-   ``nova`` is installed:
-
-   ..
code-block:: console - - $ cd /opt/stack/nova - -#. Create the ``ip_scheduler.py`` Python source code file: - - .. code-block:: console - - $ vim nova/scheduler/ip_scheduler.py - -#. The code shown below is a driver that will - schedule servers to hosts based on IP address as explained at the - beginning of the section. Copy the code into ``ip_scheduler.py``. When - you are done, save and close the file. - - .. code-block:: python - - # vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2014 OpenStack Foundation - # All Rights Reserved. - # - # Licensed under the Apache License, Version 2.0 (the "License"); you may - # not use this file except in compliance with the License. You may obtain - # a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - # License for the specific language governing permissions and limitations - # under the License. - - """ - IP Scheduler implementation - """ - - import random - - from oslo_config import cfg - - from nova.compute import rpcapi as compute_rpcapi - from nova import exception - from nova.openstack.common import log as logging - from nova.openstack.common.gettextutils import _ - from nova.scheduler import driver - - CONF = cfg.CONF - CONF.import_opt('compute_topic', 'nova.compute.rpcapi') - LOG = logging.getLogger(__name__) - - class IPScheduler(driver.Scheduler): - """ - Implements Scheduler as a random node selector based on - IP address and hostname prefix. - """ - - def __init__(self, *args, **kwargs): - super(IPScheduler, self).__init__(*args, **kwargs) - self.compute_rpcapi = compute_rpcapi.ComputeAPI() - - def _filter_hosts(self, request_spec, hosts, filter_properties, - hostname_prefix): - """Filter a list of hosts based on hostname prefix.""" - - hosts = [host for host in hosts if host.startswith(hostname_prefix)] - return hosts - - def _schedule(self, context, topic, request_spec, filter_properties): - """Picks a host that is up at random.""" - - elevated = context.elevated() - hosts = self.hosts_up(elevated, topic) - if not hosts: - msg = _("Is the appropriate service running?") - raise exception.NoValidHost(reason=msg) - - remote_ip = context.remote_address - - if remote_ip.startswith('10.1'): - hostname_prefix = 'doc' - elif remote_ip.startswith('10.2'): - hostname_prefix = 'ops' - else: - hostname_prefix = 'dev' - - hosts = self._filter_hosts(request_spec, hosts, filter_properties, - hostname_prefix) - if not hosts: - msg = _("Could not find another compute") - raise exception.NoValidHost(reason=msg) - - host = random.choice(hosts) - LOG.debug("Request from %(remote_ip)s scheduled to %(host)s" % locals()) - - return host - - def select_destinations(self, context, request_spec, filter_properties): - """Selects random destinations.""" - num_instances = request_spec['num_instances'] - # NOTE(timello): Returns a list of dicts with 'host', 'nodename' and - # 'limits' as keys for compatibility with filter_scheduler. 
- dests = [] - for i in range(num_instances): - host = self._schedule(context, CONF.compute_topic, - request_spec, filter_properties) - host_state = dict(host=host, nodename=None, limits=None) - dests.append(host_state) - - if len(dests) < num_instances: - raise exception.NoValidHost(reason='') - return dests - - def schedule_run_instance(self, context, request_spec, - admin_password, injected_files, - requested_networks, is_first_time, - filter_properties, legacy_bdm_in_spec): - """Create and run an instance or instances.""" - instance_uuids = request_spec.get('instance_uuids') - for num, instance_uuid in enumerate(instance_uuids): - request_spec['instance_properties']['launch_index'] = num - try: - host = self._schedule(context, CONF.compute_topic, - request_spec, filter_properties) - updated_instance = driver.instance_update_db(context, - instance_uuid) - self.compute_rpcapi.run_instance(context, - instance=updated_instance, host=host, - requested_networks=requested_networks, - injected_files=injected_files, - admin_password=admin_password, - is_first_time=is_first_time, - request_spec=request_spec, - filter_properties=filter_properties, - legacy_bdm_in_spec=legacy_bdm_in_spec) - except Exception as ex: - # NOTE(vish): we don't reraise the exception here to make sure - # that all instances in the request get set to - # error properly - driver.handle_schedule_error(context, ex, instance_uuid, - request_spec) - - There is a lot of useful information in ``context``, ``request_spec``, - and ``filter_properties`` that you can use to decide where to schedule - the instance. To find out more about what properties are available, you - can insert the following log statements into the - ``schedule_run_instance`` method of the scheduler above: - - .. code-block:: python - - LOG.debug("context = %(context)s" % {'context': context.__dict__}) - LOG.debug("request_spec = %(request_spec)s" % locals()) - LOG.debug("filter_properties = %(filter_properties)s" % locals()) - -#. To plug this scheduler into nova, edit one configuration file, - ``/etc/nova/nova.conf``: - - .. code-block:: console - - $ vim /etc/nova/nova.conf - -#. Find the ``scheduler_driver`` config and change it like so: - - .. code-block:: ini - - scheduler_driver=nova.scheduler.ip_scheduler.IPScheduler - -#. Restart the nova scheduler service to make nova use your scheduler. - Start by switching to the ``n-sch`` screen: - - #. Press **Ctrl+A** followed by **9**. - - #. Press **Ctrl+A** followed by **N** until you reach the ``n-sch`` screen. - - #. Press **Ctrl+C** to kill the service. - - #. Press **Up Arrow** to bring up the last command. - - #. Press **Enter** to run it. - -#. Test your scheduler with the nova CLI. Start by switching to the - ``shell`` screen and finish by switching back to the ``n-sch`` screen to - check the log output: - - #. Press  **Ctrl+A** followed by **0**. - - #. Make sure you are in the ``devstack`` directory: - - .. code-block:: console - - $ cd /root/devstack - - #. Source ``openrc`` to set up your environment variables for the CLI: - - .. code-block:: console - - $ . openrc - - #. Put the image ID for the only installed image into an environment - variable: - - .. code-block:: console - - $ IMAGE_ID=`openstack image list | egrep cirros | egrep -v "kernel|ramdisk" | awk '{print $2}'` - - #. Boot a test server: - - .. code-block:: console - - $ openstack server create --flavor 1 --image $IMAGE_ID scheduler-test - -#. Switch back to the ``n-sch`` screen. Among the log statements, you'll - see the line: - - .. 
code-block:: console - - 2014-01-23 19:57:47.262 DEBUG nova.scheduler.ip_scheduler - [req-... demo demo] Request from xx.xx.xx.xx scheduled to devstack-havana - _schedule /opt/stack/nova/nova/scheduler/ip_scheduler.py:76 - -.. warning:: - - Functional testing like this is not a replacement for proper unit - and integration testing, but it serves to get you started. - -A similar pattern can be followed in other projects that use the driver -architecture. Simply create a module and class that conform to the -driver interface and plug it in through configuration. Your code runs -when that feature is used and can call out to other services as -necessary. No project core code is touched. Look for a "driver" value in -the project's ``.conf`` configuration files in ``/etc/`` to -identify projects that use a driver architecture. - -When your scheduler is done, we encourage you to open source it and let -the community know on the OpenStack mailing list. Perhaps others need -the same functionality. They can use your code, provide feedback, and -possibly contribute. If enough support exists for it, perhaps you can -propose that it be added to the official Compute -`schedulers `_. diff --git a/doc/ops-guide/source/ops-customize-conclusion.rst b/doc/ops-guide/source/ops-customize-conclusion.rst deleted file mode 100644 index 6854eb20e8..0000000000 --- a/doc/ops-guide/source/ops-customize-conclusion.rst +++ /dev/null @@ -1,9 +0,0 @@ -========== -Conclusion -========== - -When operating an OpenStack cloud, you may discover that your users can -be quite demanding. If OpenStack doesn't do what your users need, it may -be up to you to fulfill those requirements. This chapter provided you -with some options for customization and gave you the tools you need to -get started. diff --git a/doc/ops-guide/source/ops-customize-dashboard.rst b/doc/ops-guide/source/ops-customize-dashboard.rst deleted file mode 100644 index 097edcc05a..0000000000 --- a/doc/ops-guide/source/ops-customize-dashboard.rst +++ /dev/null @@ -1,8 +0,0 @@ -=================================== -Customizing the Dashboard (Horizon) -=================================== - -The dashboard is based on the Python -`Django `_ web application framework. -To know how to build your Dashboard, see `Building a Dashboard using Horizon -`_. diff --git a/doc/ops-guide/source/ops-customize-development.rst b/doc/ops-guide/source/ops-customize-development.rst deleted file mode 100644 index a5ec4c8526..0000000000 --- a/doc/ops-guide/source/ops-customize-development.rst +++ /dev/null @@ -1,11 +0,0 @@ -=========================================== -Create an OpenStack Development Environment -=========================================== - -To create a development environment, you can use DevStack. DevStack is -essentially a collection of shell scripts and configuration files that -builds an OpenStack development environment for you. You use it to -create such an environment for developing a new feature. - -For more information on installing DevStack, see the -`DevStack `_ website. 
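DevStack gives you a full cloud in which to exercise changes end to
end, but the earlier warning about unit testing still applies: the
interesting part of a customization such as the IP scheduler above is
often plain Python that you can check without any running services. The
following sketch distills the example scheduler's routing decision into
a standalone function with a few self-checks; the function names and
prefix table here are ours, for illustration only, and are not part of
nova:

.. code-block:: python

   # A distilled, dependency-free version of the IPScheduler host-selection
   # logic from the scheduler example, so the routing rules can be checked
   # without a running cloud. The prefix table mirrors the example's
   # hard-coded 10.1/10.2 subnets; everything here is illustrative.
   import random

   PREFIX_MAP = [("10.1", "doc"), ("10.2", "ops")]
   DEFAULT_PREFIX = "dev"

   def hostname_prefix_for(remote_ip):
       """Map the requester's IP address to a hostname prefix."""
       for ip_prefix, host_prefix in PREFIX_MAP:
           if remote_ip.startswith(ip_prefix):
               return host_prefix
       return DEFAULT_PREFIX

   def pick_host(remote_ip, hosts):
       """Pick a random host whose name matches the requester's prefix."""
       prefix = hostname_prefix_for(remote_ip)
       candidates = [h for h in hosts if h.startswith(prefix)]
       if not candidates:
           raise RuntimeError("no valid host for prefix %r" % prefix)
       return random.choice(candidates)

   if __name__ == "__main__":
       hosts = ["doc01", "doc02", "ops01", "dev01"]
       assert pick_host("10.1.0.5", hosts).startswith("doc")
       assert pick_host("10.2.9.9", hosts).startswith("ops")
       assert pick_host("192.0.2.1", hosts) == "dev01"
       print("routing checks passed")

Checks like these run in milliseconds and catch regressions in the
routing logic long before a DevStack round trip would.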
diff --git a/doc/ops-guide/source/ops-customize-objectstorage.rst b/doc/ops-guide/source/ops-customize-objectstorage.rst deleted file mode 100644 index 4963c663d1..0000000000 --- a/doc/ops-guide/source/ops-customize-objectstorage.rst +++ /dev/null @@ -1,341 +0,0 @@ -============================================= -Customizing Object Storage (Swift) Middleware -============================================= - -OpenStack Object Storage, known as swift when reading the code, is based -on the Python `Paste `_ framework. The best -introduction to its architecture is `A Do-It-Yourself -Framework `_. -Because of the swift project's use of this framework, you are able to -add features to a project by placing some custom code in a project's -pipeline without having to change any of the core code. - -Imagine a scenario where you have public access to one of your -containers, but what you really want is to restrict access to that to a -set of IPs based on a whitelist. In this example, we'll create a piece -of middleware for swift that allows access to a container from only a -set of IP addresses, as determined by the container's metadata items. -Only those IP addresses that you explicitly whitelist using the -container's metadata will be able to access the container. - -.. warning:: - - This example is for illustrative purposes only. It should not be - used as a container IP whitelist solution without further - development and extensive security testing. - -When you join the screen session that ``stack.sh`` starts with -``screen -r stack``, you see a screen for each service running, which -can be a few or several, depending on how many services you configured -DevStack to run. - -The asterisk * indicates which screen window you are viewing. This -example shows we are viewing the key (for keystone) screen window: - - -.. code-block:: console - - 0$ shell 1$ key* 2$ horizon 3$ s-proxy 4$ s-object 5$ s-container 6$ s-account - -The purpose of the screen windows are as follows: - - -``shell`` - A shell where you can get some work done - -``key*`` - The keystone service - -``horizon`` - The horizon dashboard web application - -``s-{name}`` - The swift services - -**To create the middleware and plug it in through Paste configuration:** - -All of the code for OpenStack lives in ``/opt/stack``. Go to the swift -directory in the ``shell`` screen and edit your middleware module. - -#. Change to the directory where Object Storage is installed: - - .. code-block:: console - - $ cd /opt/stack/swift - -#. Create the ``ip_whitelist.py`` Python source code file: - - .. code-block:: console - - $ vim swift/common/middleware/ip_whitelist.py - -#. Copy the code as shown below into ``ip_whitelist.py``. - The following code is a middleware example that - restricts access to a container based on IP address as explained at the - beginning of the section. Middleware passes the request on to another - application. This example uses the swift "swob" library to wrap Web - Server Gateway Interface (WSGI) requests and responses into objects for - swift to interact with. When you're done, save and close the file. - - .. code-block:: python - - # vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2014 OpenStack Foundation - # All Rights Reserved. - # - # Licensed under the Apache License, Version 2.0 (the "License"); you may - # not use this file except in compliance with the License. 
You may obtain - # a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - # License for the specific language governing permissions and limitations - # under the License. - - import socket - - from swift.common.utils import get_logger - from swift.proxy.controllers.base import get_container_info - from swift.common.swob import Request, Response - - class IPWhitelistMiddleware(object): - """ - IP Whitelist Middleware - - Middleware that allows access to a container from only a set of IP - addresses as determined by the container's metadata items that start - with the prefix 'allow'. E.G. allow-dev=192.168.0.20 - """ - - def __init__(self, app, conf, logger=None): - self.app = app - - if logger: - self.logger = logger - else: - self.logger = get_logger(conf, log_route='ip_whitelist') - - self.deny_message = conf.get('deny_message', "IP Denied") - self.local_ip = socket.gethostbyname(socket.gethostname()) - - def __call__(self, env, start_response): - """ - WSGI entry point. - Wraps env in swob.Request object and passes it down. - - :param env: WSGI environment dictionary - :param start_response: WSGI callable - """ - req = Request(env) - - try: - version, account, container, obj = req.split_path(1, 4, True) - except ValueError: - return self.app(env, start_response) - - container_info = get_container_info( - req.environ, self.app, swift_source='IPWhitelistMiddleware') - - remote_ip = env['REMOTE_ADDR'] - self.logger.debug("Remote IP: %(remote_ip)s", - {'remote_ip': remote_ip}) - - meta = container_info['meta'] - allow = {k:v for k,v in meta.iteritems() if k.startswith('allow')} - allow_ips = set(allow.values()) - allow_ips.add(self.local_ip) - self.logger.debug("Allow IPs: %(allow_ips)s", - {'allow_ips': allow_ips}) - - if remote_ip in allow_ips: - return self.app(env, start_response) - else: - self.logger.debug( - "IP %(remote_ip)s denied access to Account=%(account)s " - "Container=%(container)s. Not in %(allow_ips)s", locals()) - return Response( - status=403, - body=self.deny_message, - request=req)(env, start_response) - - - def filter_factory(global_conf, **local_conf): - """ - paste.deploy app factory for creating WSGI proxy apps. - """ - conf = global_conf.copy() - conf.update(local_conf) - - def ip_whitelist(app): - return IPWhitelistMiddleware(app, conf) - return ip_whitelist - - - There is a lot of useful information in ``env`` and ``conf`` that you - can use to decide what to do with the request. To find out more about - what properties are available, you can insert the following log - statement into the ``__init__`` method: - - .. code-block:: python - - self.logger.debug("conf = %(conf)s", locals()) - - - and the following log statement into the ``__call__`` method: - - .. code-block:: python - - self.logger.debug("env = %(env)s", locals()) - -#. To plug this middleware into the swift Paste pipeline, you edit one - configuration file, ``/etc/swift/proxy-server.conf``: - - .. code-block:: console - - $ vim /etc/swift/proxy-server.conf - -#. Find the ``[filter:ratelimit]`` section in - ``/etc/swift/proxy-server.conf``, and copy in the following - configuration section after it: - - .. 
code-block:: ini - - [filter:ip_whitelist] - paste.filter_factory = swift.common.middleware.ip_whitelist:filter_factory - # You can override the default log routing for this filter here: - # set log_name = ratelimit - # set log_facility = LOG_LOCAL0 - # set log_level = INFO - # set log_headers = False - # set log_address = /dev/log - deny_message = You shall not pass! - -#. Find the ``[pipeline:main]`` section in - ``/etc/swift/proxy-server.conf``, and add ``ip_whitelist`` after - ratelimit to the list like so. When you're done, save and close the - file: - - .. code-block:: ini - - [pipeline:main] - pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk tempurl ratelimit ip_whitelist ... - -#. Restart the ``swift proxy`` service to make swift use your middleware. - Start by switching to the ``swift-proxy`` screen: - - #. Press **Ctrl+A** followed by **3**. - - #. Press **Ctrl+C** to kill the service. - - #. Press **Up Arrow** to bring up the last command. - - #. Press Enter to run it. - -#. Test your middleware with the ``swift`` CLI. Start by switching to the - shell screen and finish by switching back to the ``swift-proxy`` screen - to check the log output: - - #. Press  **Ctrl+A** followed by **0**. - - #. Make sure you're in the ``devstack`` directory: - - .. code-block:: console - - $ cd /root/devstack - - #. Source openrc to set up your environment variables for the CLI: - - .. code-block:: console - - $ . openrc - - #. Create a container called ``middleware-test``: - - .. code-block:: console - - $ swift post middleware-test - - #. Press **Ctrl+A** followed by **3** to check the log output. - -#. Among the log statements you'll see the lines: - - .. code-block:: none - - proxy-server Remote IP: my.instance.ip.address (txn: ...) - proxy-server Allow IPs: set(['my.instance.ip.address']) (txn: ...) - - These two statements are produced by our middleware and show that the - request was sent from our DevStack instance and was allowed. - -#. Test the middleware from outside DevStack on a remote machine that has - access to your DevStack instance: - - #. Install the ``keystone`` and ``swift`` clients on your local machine: - - .. code-block:: console - - # pip install python-keystoneclient python-swiftclient - - #. Attempt to list the objects in the ``middleware-test`` container: - - .. code-block:: console - - $ swift --os-auth-url=http://my.instance.ip.address:5000/v2.0/ \ - --os-region-name=RegionOne --os-username=demo:demo \ - --os-password=devstack list middleware-test - Container GET failed: http://my.instance.ip.address:8080/v1/AUTH_.../ - middleware-test?format=json 403 Forbidden   You shall not pass! - -#. Press **Ctrl+A** followed by **3** to check the log output. Look at the - swift log statements again, and among the log statements, you'll see the - lines: - - .. code-block:: console - - proxy-server Authorizing from an overriding middleware (i.e: tempurl) (txn: ...) - proxy-server ... IPWhitelistMiddleware - proxy-server Remote IP: my.local.ip.address (txn: ...) - proxy-server Allow IPs: set(['my.instance.ip.address']) (txn: ...) - proxy-server IP my.local.ip.address denied access to Account=AUTH_... \ - Container=None. Not in set(['my.instance.ip.address']) (txn: ...) - - Here we can see that the request was denied because the remote IP - address wasn't in the set of allowed IPs. - -#. Back in your DevStack instance on the shell screen, add some metadata to - your container to allow the request from the remote machine: - - #. 
Press **Ctrl+A** followed by **0**. - - #. Add metadata to the container to allow the IP: - - .. code-block:: console - - $ swift post --meta allow-dev:my.local.ip.address middleware-test - - #. Now try the command from Step 10 again and it succeeds. There are no - objects in the container, so there is nothing to list; however, there is - also no error to report. - - .. warning:: - - Functional testing like this is not a replacement for proper unit - and integration testing, but it serves to get you started. - -You can follow a similar pattern in other projects that use the Python -Paste framework. Simply create a middleware module and plug it in -through configuration. The middleware runs in sequence as part of that -project's pipeline and can call out to other services as necessary. No -project core code is touched. Look for a ``pipeline`` value in the -project's ``conf`` or ``ini`` configuration files in ``/etc/`` -to identify projects that use Paste. - -When your middleware is done, we encourage you to open source it and let -the community know on the OpenStack mailing list. Perhaps others need -the same functionality. They can use your code, provide feedback, and -possibly contribute. If enough support exists for it, perhaps you can -propose that it be added to the official swift -`middleware `_. diff --git a/doc/ops-guide/source/ops-customize-provision-instance.rst b/doc/ops-guide/source/ops-customize-provision-instance.rst deleted file mode 100644 index d25350178f..0000000000 --- a/doc/ops-guide/source/ops-customize-provision-instance.rst +++ /dev/null @@ -1,12 +0,0 @@ -===================== -Provision an instance -===================== - -To help understand how OpenStack works, this section describes the -end-to-end process and interaction of components when provisioning an instance -on OpenStack. - -**Provision an instance** - -.. figure:: figures/provision-an-instance.png - :width: 100% diff --git a/doc/ops-guide/source/ops-customize.rst b/doc/ops-guide/source/ops-customize.rst deleted file mode 100644 index a904f93487..0000000000 --- a/doc/ops-guide/source/ops-customize.rst +++ /dev/null @@ -1,45 +0,0 @@ -============= -Customization -============= - -.. toctree:: - :maxdepth: 1 - - ops-customize-provision-instance.rst - ops-customize-development.rst - ops-customize-objectstorage.rst - ops-customize-compute.rst - ops-customize-dashboard.rst - ops-customize-conclusion.rst - -OpenStack might not do everything you need it to do out of the box. To -add a new feature, you can follow different paths. - -To take the first path, you can modify the OpenStack code directly. -Learn `how to contribute -`_, -follow the `Developer's Guide -`_, make your -changes, and contribute them back to the upstream OpenStack project. -This path is recommended if the feature you need requires deep -integration with an existing project. The community is always open to -contributions and welcomes new functionality that follows the -feature-development guidelines. This path still requires you to use -DevStack for testing your feature additions, so this chapter walks you -through the DevStack environment. - -For the second path, you can write new features and plug them in using -changes to a configuration file. If the project where your feature would -need to reside uses the Python Paste framework, you can create -middleware for it and plug it in through configuration. 
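To make the Paste approach concrete, the following minimal sketch shows
the generic shape of such middleware: a WSGI callable wrapping the next
application in the pipeline, plus the ``filter_factory`` entry point
that Paste resolves from the ``.ini`` configuration. The class name is
illustrative; a real filter adds its feature logic in ``__call__``:

.. code-block:: python

   # Minimal Paste-style WSGI middleware skeleton. The class and factory
   # names are illustrative; a real filter would inspect or modify the
   # request in __call__ before or after delegating to self.app.

   class NoopMiddleware(object):
       def __init__(self, app, conf):
           self.app = app    # the next WSGI app in the pipeline
           self.conf = conf  # merged Paste configuration

       def __call__(self, environ, start_response):
           # Feature logic goes here; then delegate down the pipeline.
           return self.app(environ, start_response)

   def filter_factory(global_conf, **local_conf):
       """Entry point referenced from the Paste .ini file."""
       conf = dict(global_conf)
       conf.update(local_conf)

       def factory(app):
           return NoopMiddleware(app, conf)
       return factory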
There may also be specific ways of customizing a project, such as
creating a new scheduler driver for Compute or a custom tab for the
dashboard.

This chapter focuses on the second path for customizing OpenStack by
providing two examples for writing new features. The first example
shows how to modify Object Storage service (swift) middleware to add a
new feature, and the second example provides a new scheduler feature
for Compute service (nova). To customize OpenStack this way, you need a
development environment. The best way to get an environment up and
running quickly is to run DevStack within your cloud.

diff --git a/doc/ops-guide/source/ops-deployment-factors.rst b/doc/ops-guide/source/ops-deployment-factors.rst
deleted file mode 100644
index c4229caca9..0000000000
--- a/doc/ops-guide/source/ops-deployment-factors.rst
+++ /dev/null
@@ -1,299 +0,0 @@

.. _legal-requirements:

======================================
Factors affecting OpenStack deployment
======================================

Security requirements
~~~~~~~~~~~~~~~~~~~~~

When deployed in an enterprise as a private cloud, OpenStack usually
sits behind the firewall and within the trusted network, alongside
existing systems. Users are employees who are bound by the company's
security requirements. This tends to drive most of the security domains
towards a more trusted model. However, when deploying OpenStack in a
public-facing role, no such assumptions can be made, and the attack
vectors significantly increase.

Consider the following security implications and requirements:

* Managing the users for both public and private clouds. The Identity
  service allows for LDAP to be part of the authentication process.
  This may ease user management if integrating into existing systems.

* User authentication requests include sensitive information, including
  usernames, passwords, and authentication tokens. It is strongly
  recommended to place API services behind hardware that performs SSL
  termination.

* Negative or hostile users who would attack or compromise the security
  of your deployment regardless of firewalls or security agreements.

* Attack vectors increase further in a public-facing OpenStack
  deployment. For example, the API endpoints and the software behind
  them become vulnerable to hostile entities attempting to gain
  unauthorized access or prevent access to services. You should provide
  appropriate filtering and periodic security auditing.

.. warning::

   Be mindful of consistency when utilizing third-party clouds to
   explore authentication options.

For more information on OpenStack security, see the `OpenStack Security
Guide `_.

Security domains
----------------

A security domain comprises users, applications, servers, or networks
that share common trust requirements and expectations within a system.
Typically, they have the same authentication and authorization
requirements and users.

Security domains include:

Public security domains
    The public security domain can refer to the internet as a whole or
    networks over which you have no authority. This domain is
    considered untrusted. For example, in a hybrid cloud deployment,
    any information traversing between and beyond the clouds is in the
    public domain and untrustworthy.

Guest security domains
    The guest security domain handles compute data generated by
    instances on the cloud, but not services that support the operation
    of the cloud, such as API calls.
Public cloud providers and private cloud - providers who do not have stringent controls on instance use or who - allow unrestricted internet access to instances should consider this - domain to be untrusted. Private cloud providers may want to consider - this network as internal and therefore trusted only if they have - controls in place to assert that they trust instances and all their - tenants. - -Management security domains - The management security domain is where services interact. Sometimes - referred to as the control plane, the networks in this domain - transport confidential data such as configuration parameters, user - names, and passwords. In most deployments this domain is considered - trusted when it is behind an organization's firewall. - -Data security domains - The data security domain is primarily concerned with information - pertaining to the storage services within OpenStack. The data - that crosses this network has high integrity and confidentiality - requirements and, depending on the type of deployment, may also have - strong availability requirements. The trust level of this network is - heavily dependent on other deployment decisions. - -These security domains can be individually or collectively mapped to an -OpenStack deployment. The cloud operator should be aware of the appropriate -security concerns. Security domains should be mapped out against your specific -OpenStack deployment topology. The domains and their trust requirements depend -upon whether the cloud instance is public, private, or hybrid. - -Hypervisor security -------------------- - -The hypervisor also requires a security assessment. In a -public cloud, organizations typically do not have control -over the choice of hypervisor. Properly securing your -hypervisor is important. Attacks made upon the -unsecured hypervisor are called a **hypervisor breakout**. -Hypervisor breakout describes the event of a -compromised or malicious instance breaking out of the resource -controls of the hypervisor and gaining access to the bare -metal operating system and hardware resources. - -Hypervisor security is not an issue if the security of instances is not -important. However, enterprises can minimize vulnerability by avoiding -hardware sharing with others in a public cloud. - -Baremetal security ------------------- - -There are other services worth considering that provide a -bare metal instance instead of a cloud. In other cases, it is -possible to replicate a second private cloud by integrating -with a private Cloud-as-a-Service deployment. The -organization does not buy the hardware, but also does not share -with other tenants. It is also possible to use a provider that -hosts a bare-metal public cloud instance for which the -hardware is dedicated only to one customer, or a provider that -offers private Cloud-as-a-Service. - -.. important:: - - Each cloud implements services differently. Understand the security - requirements of every cloud that handles the organization's data or - workloads. - -Networking security -------------------- - -Consider security implications and requirements before designing the -physical and logical network topologies. Make sure that the networks are -properly segregated and traffic flows are going to the correct -destinations without crossing through locations that are undesirable. 
-Consider the following factors: - -* Firewalls -* Overlay interconnects for joining separated tenant networks -* Routing through or avoiding specific networks - -How networks attach to hypervisors can expose security -vulnerabilities. To mitigate hypervisor breakouts, separate networks -from other systems and schedule instances for the -network onto dedicated Compute nodes. This prevents attackers -from having access to the networks from a compromised instance. - -Multi-site security -------------------- - -Securing a multi-site OpenStack installation brings -several challenges. Tenants may expect a tenant-created network -to be secure. In a multi-site installation the use of a -non-private connection between sites may be required. This may -mean that traffic would be visible to third parties and, in -cases where an application requires security, this issue -requires mitigation. In these instances, install a VPN or -encrypted connection between sites to conceal sensitive traffic. - -Identity is another security consideration. Authentication -centralization provides a single authentication point for -users across the deployment, and a single administration point -for traditional create, read, update, and delete operations. -Centralized authentication is also useful for auditing purposes because -all authentication tokens originate from the same source. - -Tenants in multi-site installations need isolation -from each other. The main challenge is ensuring tenant networks -function across regions which is not currently supported in OpenStack -Networking (neutron). Therefore an external system may be required -to manage mapping. Tenant networks may contain sensitive information requiring -accurate and consistent mapping to ensure that a tenant in one site -does not connect to a different tenant in another site. - -Legal requirements -~~~~~~~~~~~~~~~~~~ - -Using remote resources for collection, processing, storage, -and retrieval provides potential benefits to businesses. -With the rapid growth of data within organizations, businesses -need to be proactive about their data storage strategies from -a compliance point of view. - -Most countries have legislative and regulatory requirements governing -the storage and management of data in cloud environments. This is -particularly relevant for public, community and hybrid cloud models, -to ensure data privacy and protection for organizations using a -third party cloud provider. - -Common areas of regulation include: - -* Data retention policies ensuring storage of persistent data - and records management to meet data archival requirements. -* Data ownership policies governing the possession and - responsibility for data. -* Data sovereignty policies governing the storage of data in - foreign countries or otherwise separate jurisdictions. -* Data compliance policies governing certain types of - information needing to reside in certain locations due to - regulatory issues - and more importantly, cannot reside in - other locations for the same reason. -* Data location policies ensuring that the services deployed - to the cloud are used according to laws and regulations in place - for the employees, foreign subsidiaries, or third parties. -* Disaster recovery policies ensuring regular data backups and - relocation of cloud applications to another supplier in scenarios - where a provider may go out of business, or their data center could - become inoperable. 
* Security breach policies governing the ways to notify individuals,
  through the cloud provider's systems or other means, if their
  personal data is compromised in any way.

* Industry standards policies governing additional requirements on what
  type of cardholder data may or may not be stored and how it is to be
  protected.

The following are examples of such legal frameworks:

Data storage regulations in Europe are currently driven by provisions
of the `Data protection framework `_. The `Financial Industry
Regulatory Authority `_ works on this in the United States.

Privacy and security are spread over different industry-specific laws
and regulations:

* Health Insurance Portability and Accountability Act (HIPAA)
* Gramm-Leach-Bliley Act (GLBA)
* Payment Card Industry Data Security Standard (PCI DSS)
* Family Educational Rights and Privacy Act (FERPA)

Cloud security architecture
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Cloud security architecture should recognize the issues that arise in
security management and address them with security controls. Cloud
security controls are put in place to safeguard any weaknesses in the
system and reduce the effect of an attack.

Security controls fall into the following categories:

Deterrent controls:
    Typically reduce the threat level by informing potential attackers
    that there will be adverse consequences for them if they proceed.

Preventive controls:
    Strengthen the system against incidents, generally by reducing, if
    not actually eliminating, vulnerabilities.

Detective controls:
    Intended to detect and react appropriately to any incidents that
    occur. System and network security monitoring, including intrusion
    detection and prevention arrangements, is typically employed to
    detect attacks on cloud systems and the supporting communications
    infrastructure.

Corrective controls:
    Reduce the consequences of an incident, normally by limiting the
    damage. They come into effect during or after an incident.
    Restoring system backups in order to rebuild a compromised system
    is an example of a corrective control.

For more information, see `NIST Special Publication 800-53 `_.

Software licensing
~~~~~~~~~~~~~~~~~~

The many different forms of license agreements for software are often
written with the use of dedicated hardware in mind. This model is
relevant for the cloud platform itself, including the hypervisor
operating system and supporting software for items such as databases,
RPC, backup, and so on. Consideration must be made when offering
Compute service instances and applications to end users of the cloud,
since the license terms for that software may need some adjustment to
be able to operate economically in the cloud.

Multi-site OpenStack deployments present additional licensing
considerations over and above regular OpenStack clouds, particularly
where site licenses are in use to provide cost-efficient access to
software licenses. The licensing for host operating systems, guest
operating systems, OpenStack distributions (if applicable),
software-defined infrastructure including network controllers and
storage systems, and even individual applications needs to be
evaluated.

Topics to consider include:

* The definition of what constitutes a site in the relevant licenses,
  as the term does not necessarily denote a geographic or otherwise
  physically isolated location.
- -* Differentiations between "hot" (active) and "cold" (inactive) sites, - where significant savings may be made in situations where one site is - a cold standby for disaster recovery purposes only. - -* Certain locations might require local vendors to provide support and - services for each site which may vary with the licensing agreement in - place. diff --git a/doc/ops-guide/source/ops-lay-of-the-land.rst b/doc/ops-guide/source/ops-lay-of-the-land.rst deleted file mode 100644 index adb6d89325..0000000000 --- a/doc/ops-guide/source/ops-lay-of-the-land.rst +++ /dev/null @@ -1,602 +0,0 @@ -=============== -Lay of the Land -=============== - -This chapter helps you set up your working environment and use it to -take a look around your cloud. - -Using the OpenStack Dashboard for Administration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -As a cloud administrative user, you can use the OpenStack dashboard to -create and manage projects, users, images, and flavors. Users are -allowed to create and manage images within specified projects and to -share images, depending on the Image service configuration. Typically, -the policy configuration allows admin users only to set quotas and -create and manage services. The dashboard provides an :guilabel:`Admin` -tab with a :guilabel:`System Panel` and an :guilabel:`Identity` tab. -These interfaces give you access to system information and usage as -well as to settings for configuring what -end users can do. Refer to the `OpenStack Administrator -Guide `__ for -detailed how-to information about using the dashboard as an admin user. - -Command-Line Tools -~~~~~~~~~~~~~~~~~~ - -We recommend using a combination of the OpenStack command-line interface -(CLI) tools and the OpenStack dashboard for administration. Some users -with a background in other cloud technologies may be using the EC2 -Compatibility API, which uses naming conventions somewhat different from -the native API. - -The pip utility is used to manage package installation from the PyPI -archive and is available in the python-pip package in most Linux -distributions. While each OpenStack project has its own client, they are -being deprecated in favour of a common OpenStack client. It is generally -recommended to install the OpenStack client. - -.. tip:: - - To perform testing and orchestration, it is usually easier to install the - OpenStack CLI tools in a dedicated VM in the cloud. We recommend - that you keep the VM installation simple. All the tools should be installed - from a single OpenStack release version. If you need to run tools from - multiple OpenStack releases, then we recommend that you run with multiple - VMs that are each running a dedicated version. - -Install OpenStack command-line clients --------------------------------------- - -For instructions on installing, upgrading, or removing command-line clients, -see the `Install the OpenStack command-line clients -`_ -section in OpenStack End User Guide. - -.. note:: - - If you support the EC2 API on your cloud, you should also install the - euca2ools package or some other EC2 API tool so that you can get the - same view your users have. Using EC2 API-based tools is mostly out of - the scope of this guide, though we discuss getting credentials for use - with it. - -Administrative Command-Line Tools ---------------------------------- - -There are also several :command:`*-manage` command-line tools. 
These are -installed with the project's services on the cloud controller and do not -need to be installed separately: - -* :command:`nova-manage` -* :command:`glance-manage` -* :command:`keystone-manage` -* :command:`cinder-manage` - -Unlike the CLI tools mentioned above, the :command:`*-manage` tools must -be run from the cloud controller, as root, because they need read access -to the config files such as ``/etc/nova/nova.conf`` and to make queries -directly against the database rather than against the OpenStack -:term:`API endpoints `. - -.. warning:: - - The existence of the ``*-manage`` tools is a legacy issue. It is a - goal of the OpenStack project to eventually migrate all of the - remaining functionality in the ``*-manage`` tools into the API-based - tools. Until that day, you need to SSH into the - :term:`cloud controller node` to perform some maintenance operations - that require one of the ``*-manage`` tools. - -Getting Credentials -------------------- - -You must have the appropriate credentials if you want to use the -command-line tools to make queries against your OpenStack cloud. By far, -the easiest way to obtain :term:`authentication` credentials to use with -command-line clients is to use the OpenStack dashboard. Select -:guilabel:`Project`, click the :guilabel:`Project` tab, and click -:guilabel:`Access & Security` on the :guilabel:`Compute` category. -On the :guilabel:`Access & Security` page, click the :guilabel:`API Access` -tab to display two buttons, :guilabel:`Download OpenStack RC File` and -:guilabel:`Download EC2 Credentials`, which let you generate files that -you can source in your shell to populate the environment variables the -command-line tools require to know where your service endpoints and your -authentication information are. The user you logged in to the dashboard -dictates the filename for the openrc file, such as ``demo-openrc.sh``. -When logged in as admin, the file is named ``admin-openrc.sh``. - -The generated file looks something like this: - -.. code-block:: bash - - #!/usr/bin/env bash - - # To use an OpenStack cloud you need to authenticate against the Identity - # service named keystone, which returns a **Token** and **Service Catalog**. - # The catalog contains the endpoints for all services the user/tenant has - # access to - such as Compute, Image Service, Identity, Object Storage, Block - # Storage, and Networking (code-named nova, glance, keystone, swift, - # cinder, and neutron). - # - # *NOTE*: Using the 3 *Identity API* does not necessarily mean any other - # OpenStack API is version 3. For example, your cloud provider may implement - # Image API v1.1, Block Storage API v2, and Compute API v2.0. OS_AUTH_URL is - # only for the Identity API served through keystone. - export OS_AUTH_URL=http://203.0.113.10:5000/v3 - - # With the addition of Keystone we have standardized on the term **project** - # as the entity that owns the resources. - export OS_PROJECT_ID=98333aba48e756fa8f629c83a818ad57 - export OS_PROJECT_NAME="test-project" - export OS_USER_DOMAIN_NAME="default" - if [ -z "$OS_USER_DOMAIN_NAME" ]; then unset OS_USER_DOMAIN_NAME; fi - - # In addition to the owning entity (tenant), OpenStack stores the entity - # performing the action as the **user**. - export OS_USERNAME="demo" - - # With Keystone you pass the keystone password. 
- echo "Please enter your OpenStack Password for project $OS_PROJECT_NAME as user $OS_USERNAME: " - read -sr OS_PASSWORD_INPUT - export OS_PASSWORD=$OS_PASSWORD_INPUT - - # If your configuration has multiple regions, we set that information here. - # OS_REGION_NAME is optional and only valid in certain environments. - export OS_REGION_NAME="RegionOne" - # Don't leave a blank variable, unset it if it was empty - if [ -z "$OS_REGION_NAME" ]; then unset OS_REGION_NAME; fi - - export OS_INTERFACE=public - export OS_IDENTITY_API_VERSION=3 - -.. warning:: - - This does not save your password in plain text, which is a good - thing. But when you source or run the script, it prompts you for - your password and then stores your response in the environment - variable ``OS_PASSWORD``. It is important to note that this does - require interactivity. It is possible to store a value directly in - the script if you require a noninteractive operation, but you then - need to be extremely cautious with the security and permissions of - this file. - -EC2 compatibility credentials can be downloaded by selecting -:guilabel:`Project`, then :guilabel:`Compute`, then -:guilabel:`Access & Security`, then :guilabel:`API Access` to display the -:guilabel:`Download EC2 Credentials` button. Click the button to generate -a ZIP file with server x509 certificates and a shell script fragment. -Create a new directory in a secure location because these are live credentials -containing all the authentication information required to access your -cloud identity, unlike the default ``user-openrc``. Extract the ZIP file -here. You should have ``cacert.pem``, ``cert.pem``, ``ec2rc.sh``, and -``pk.pem``. The ``ec2rc.sh`` is similar to this: - -.. code-block:: bash - - #!/bin/bash - - NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) ||\ - NOVARC=$(python -c 'import os,sys; \ - print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}") - NOVA_KEY_DIR=${NOVARC%/*} - export EC2_ACCESS_KEY=df7f93ec47e84ef8a347bbb3d598449a - export EC2_SECRET_KEY=ead2fff9f8a344e489956deacd47e818 - export EC2_URL=http://203.0.113.10:8773/services/Cloud - export EC2_USER_ID=42 # nova does not use user id, but bundling requires it - export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem - export EC2_CERT=${NOVA_KEY_DIR}/cert.pem - export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem - export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this - - alias ec2-bundle-image="ec2-bundle-image --cert $EC2_CERT --privatekey \ - $EC2_PRIVATE_KEY --user 42 --ec2cert $NOVA_CERT" - alias ec2-upload-bundle="ec2-upload-bundle -a $EC2_ACCESS_KEY -s \ - $EC2_SECRET_KEY --url $S3_URL --ec2cert $NOVA_CERT" - -To put the EC2 credentials into your environment, source the -``ec2rc.sh`` file. - -Inspecting API Calls --------------------- - -The command-line tools can be made to show the OpenStack API calls they -make by passing the ``--debug`` flag to them. For example: - -.. code-block:: console - - # openstack --debug server list - -This example shows the HTTP requests from the client and the responses -from the endpoints, which can be helpful in creating custom tools -written to the OpenStack API. - -Using cURL for further inspection -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Underlying the use of the command-line tools is the OpenStack API, which -is a RESTful API that runs over HTTP. There may be cases where you want -to interact with the API directly or need to use it because of a -suspected bug in one of the CLI tools. 
The best way to do this is to use a combination of `cURL `_ and
another tool, such as `jq `_, to parse the JSON from the responses.

The first thing you must do is authenticate with the cloud using your
credentials to get an :term:`authentication token`.

Your credentials are a combination of username, password, and tenant
(project). You can extract these values from the ``openrc.sh`` discussed
above. The token allows you to interact with your other service
endpoints without needing to reauthenticate for every request. Tokens
are typically good for 24 hours; when a token expires, you are alerted
with a 401 (Unauthorized) response, and you can request another token.

#. Look at your OpenStack service :term:`catalog`:

   .. code-block:: console

      $ curl -s -X POST http://203.0.113.10:35357/v2.0/tokens \
        -d '{"auth": {"passwordCredentials": {"username":"test-user", "password":"test-password"}, "tenantName":"test-project"}}' \
        -H "Content-type: application/json" | jq .

#. Read through the JSON response to get a feel for how the catalog is
   laid out.

   To make working with subsequent requests easier, store the token in
   an environment variable:

   .. code-block:: console

      $ TOKEN=`curl -s -X POST http://203.0.113.10:35357/v2.0/tokens \
        -d '{"auth": {"passwordCredentials": {"username":"test-user", "password":"test-password"}, "tenantName":"test-project"}}' \
        -H "Content-type: application/json" | jq -r .access.token.id`

   Now you can refer to your token on the command line as ``$TOKEN``.

#. Pick a service endpoint from your service catalog, such as compute.
   Try a request, for example, listing instances (servers):

   .. code-block:: console

      $ curl -s \
        -H "X-Auth-Token: $TOKEN" \
        http://203.0.113.10:8774/v2.0/98333aba48e756fa8f629c83a818ad57/servers | jq .

To discover how API requests should be structured, read the `OpenStack
API Reference `_. To chew through the responses using jq, see the `jq
Manual `_.

The ``-s`` flag used in the cURL commands above prevents the progress
meter from being shown. If you are having trouble running cURL
commands, remove it. Likewise, to help you troubleshoot cURL commands,
include the ``-v`` flag to show verbose output. There are many more
extremely useful features in cURL; refer to the man page for all the
options.

Servers and Services
--------------------

As an administrator, you have a few ways to discover what your
OpenStack cloud looks like simply by using the OpenStack tools
available. This section gives you an idea of how to get an overview of
your cloud, its shape, size, and current state.

First, you can discover what servers belong to your OpenStack cloud by
running:

.. code-block:: console

   # openstack compute service list --long

The output looks like the following:

.. code-block:: console

   +----+------------------+-------------------+------+---------+-------+----------------------------+-----------------+
   | Id | Binary           | Host              | Zone | Status  | State | Updated_at                 | Disabled Reason |
   +----+------------------+-------------------+------+---------+-------+----------------------------+-----------------+
   | 1  | nova-cert        | cloud.example.com | nova | enabled | up    | 2016-01-05T17:20:38.000000 | -               |
   | 2  | nova-compute     | c01.example.com   | nova | enabled | up    | 2016-01-05T17:20:38.000000 | -               |
   | 3  | nova-compute     | c02.example.com   | nova | enabled | up    | 2016-01-05T17:20:38.000000 | -               |
   | 4  | nova-compute     | c03.example.com   | nova | enabled | up    | 2016-01-05T17:20:38.000000 | -               |
   | 5  | nova-compute     | c04.example.com   | nova | enabled | up    | 2016-01-05T17:20:38.000000 | -               |
   | 6  | nova-compute     | c05.example.com   | nova | enabled | up    | 2016-01-05T17:20:38.000000 | -               |
   | 7  | nova-conductor   | cloud.example.com | nova | enabled | up    | 2016-01-05T17:20:38.000000 | -               |
   | 8  | nova-cert        | cloud.example.com | nova | enabled | up    | 2016-01-05T17:20:42.000000 | -               |
   | 9  | nova-scheduler   | cloud.example.com | nova | enabled | up    | 2016-01-05T17:20:38.000000 | -               |
   | 10 | nova-consoleauth | cloud.example.com | nova | enabled | up    | 2016-01-05T17:20:35.000000 | -               |
   +----+------------------+-------------------+------+---------+-------+----------------------------+-----------------+

The output shows that there are five compute nodes and one cloud
controller. You see all the services in the up state, which indicates
that the services are up and running. If a service is in a down state,
it is no longer available, which is an indication that you should
troubleshoot why the service is down.

If you are using cinder, run the following command to see a similar
listing:

.. code-block:: console

   # cinder-manage host list | sort
   host               zone
   c01.example.com    nova
   c02.example.com    nova
   c03.example.com    nova
   c04.example.com    nova
   c05.example.com    nova
   cloud.example.com  nova

With these two tables, you now have a good overview of what servers and
services make up your cloud.

You can also use the Identity service (keystone) to see what services
are available in your cloud and what endpoints have been configured for
the services.

The following command requires you to have your shell environment
configured with the proper administrative variables:

.. code-block:: console

   $ openstack catalog list
   +----------+------------+---------------------------------------------------------------------------------+
   | Name     | Type       | Endpoints                                                                       |
   +----------+------------+---------------------------------------------------------------------------------+
   | nova     | compute    | RegionOne                                                                       |
   |          |            |   public: http://192.168.122.10:8774/v2/9faa845768224258808fc17a1bb27e5e       |
   |          |            | RegionOne                                                                       |
   |          |            |   internal: http://192.168.122.10:8774/v2/9faa845768224258808fc17a1bb27e5e     |
   |          |            | RegionOne                                                                       |
   |          |            |   admin: http://192.168.122.10:8774/v2/9faa845768224258808fc17a1bb27e5e        |
   |          |            |                                                                                 |
   | cinderv2 | volumev2   | RegionOne                                                                       |
   |          |            |   public: http://192.168.122.10:8776/v2/9faa845768224258808fc17a1bb27e5e       |
   |          |            | RegionOne                                                                       |
   |          |            |   internal: http://192.168.122.10:8776/v2/9faa845768224258808fc17a1bb27e5e     |
   |          |            | RegionOne                                                                       |
   |          |            |   admin: http://192.168.122.10:8776/v2/9faa845768224258808fc17a1bb27e5e        |
   |          |            |                                                                                 |

The preceding output has been truncated to show only two services. You
will see one service entry for each service that your cloud provides.
Note how the endpoint domain can be different depending on the endpoint
type. Different endpoint domains per type are not required, but this
can be done for different reasons, such as endpoint privacy or network
traffic segregation.

You can find the version of the Compute installation by using the
OpenStack command-line client:

..
code-block:: console - - # openstack --version - -Diagnose Your Compute Nodes ---------------------------- - -You can obtain extra information about virtual machines that are -running—their CPU usage, the memory, the disk I/O or network I/O—per -instance, by running the :command:`nova diagnostics` command with a server ID: - -.. code-block:: console - - $ nova diagnostics - -The output of this command varies depending on the hypervisor because -hypervisors support different attributes. The following demonstrates -the difference between the two most popular hypervisors. -Here is example output when the hypervisor is Xen: - -.. code-block:: console - - +----------------+-----------------+ - | Property | Value | - +----------------+-----------------+ - | cpu0 | 4.3627 | - | memory | 1171088064.0000 | - | memory_target | 1171088064.0000 | - | vbd_xvda_read | 0.0 | - | vbd_xvda_write | 0.0 | - | vif_0_rx | 3223.6870 | - | vif_0_tx | 0.0 | - | vif_1_rx | 104.4955 | - | vif_1_tx | 0.0 | - +----------------+-----------------+ - -While the command should work with any hypervisor that is controlled -through libvirt (KVM, QEMU, or LXC), it has been tested only with KVM. -Here is the example output when the hypervisor is KVM: - -.. code-block:: console - - +------------------+------------+ - | Property | Value | - +------------------+------------+ - | cpu0_time | 2870000000 | - | memory | 524288 | - | vda_errors | -1 | - | vda_read | 262144 | - | vda_read_req | 112 | - | vda_write | 5606400 | - | vda_write_req | 376 | - | vnet0_rx | 63343 | - | vnet0_rx_drop | 0 | - | vnet0_rx_errors | 0 | - | vnet0_rx_packets | 431 | - | vnet0_tx | 4905 | - | vnet0_tx_drop | 0 | - | vnet0_tx_errors | 0 | - | vnet0_tx_packets | 45 | - +------------------+------------+ - -Network Inspection -~~~~~~~~~~~~~~~~~~ - -To see which fixed IP networks are configured in your cloud, you can use -the :command:`openstack` command-line client to get the IP ranges: - -.. code-block:: console - - $ openstack subnet list - +--------------------------------------+----------------+--------------------------------------+-----------------+ - | ID | Name | Network | Subnet | - +--------------------------------------+----------------+--------------------------------------+-----------------+ - | 346806ee-a53e-44fd-968a-ddb2bcd2ba96 | public_subnet | 0bf90de6-fc0f-4dba-b80d-96670dfb331a | 172.24.4.224/28 | - | f939a1e4-3dc3-4540-a9f6-053e6f04918f | private_subnet | 1f7f429e-c38e-47ba-8acf-c44e3f5e8d71 | 10.0.0.0/24 | - +--------------------------------------+----------------+--------------------------------------+-----------------+ - -The OpenStack command-line client can provide some additional details: - -.. 
code-block:: console
-
-   # openstack network list
-   +--------------------------------------+---------+--------------------------------------+
-   | ID                                   | Name    | Subnets                              |
-   +--------------------------------------+---------+--------------------------------------+
-   | 0bf90de6-fc0f-4dba-b80d-96670dfb331a | public  | 346806ee-a53e-44fd-968a-ddb2bcd2ba96 |
-   | 1f7f429e-c38e-47ba-8acf-c44e3f5e8d71 | private | f939a1e4-3dc3-4540-a9f6-053e6f04918f |
-   +--------------------------------------+---------+--------------------------------------+
-
-This output shows that two networks are configured: a public network
-with a /28 subnet and a private network with a /24 subnet. The first
-network has been assigned to a certain project, while the second
-network is still open for assignment. You can assign this network
-manually; otherwise, it is automatically assigned when a project
-launches its first instance.
-
-To find out whether any floating IPs are available in your cloud, run:
-
-.. code-block:: console
-
-   # openstack floating ip list
-   +--------------------------------------+---------------------+------------------+--------------------------------------+
-   | ID                                   | Floating IP Address | Fixed IP Address | Port                                 |
-   +--------------------------------------+---------------------+------------------+--------------------------------------+
-   | 340cb36d-6a52-4091-b256-97b6e61cbb20 | 172.24.4.227        | 10.2.1.8         | 1fec8fb8-7a8c-44c2-acd8-f10e2e6cd326 |
-   | 8b1bfc0c-7a91-4da0-b3cc-4acae26cbdec | 172.24.4.228        | None             | None                                 |
-   +--------------------------------------+---------------------+------------------+--------------------------------------+
-
-Here, two floating IPs are available. The first has been allocated to a
-project, while the other is unallocated.
-
-Users and Projects
-~~~~~~~~~~~~~~~~~~
-
-To see a list of projects that have been added to the cloud, run:
-
-.. code-block:: console
-
-   $ openstack project list
-   +----------------------------------+--------------------+
-   | ID                               | Name               |
-   +----------------------------------+--------------------+
-   | 422c17c0b26f4fbe9449f37a5621a5e6 | alt_demo           |
-   | 5dc65773519248f3a580cfe28ba7fa3f | demo               |
-   | 9faa845768224258808fc17a1bb27e5e | admin              |
-   | a733070a420c4b509784d7ea8f6884f7 | invisible_to_admin |
-   | aeb3e976e7794f3f89e4a7965db46c1e | service            |
-   +----------------------------------+--------------------+
-
-To see a list of users, run:
-
-.. code-block:: console
-
-   $ openstack user list
-   +----------------------------------+----------+
-   | ID                               | Name     |
-   +----------------------------------+----------+
-   | 5837063598694771aedd66aa4cddf0b8 | demo     |
-   | 58efd9d852b74b87acc6efafaf31b30e | cinder   |
-   | 6845d995a57a441f890abc8f55da8dfb | glance   |
-   | ac2d15a1205f46d4837d5336cd4c5f5a | alt_demo |
-   | d8f593c3ae2b47289221f17a776a218b | admin    |
-   | d959ec0a99e24df0b7cb106ff940df20 | nova     |
-   +----------------------------------+----------+
-
-.. note::
-
-   Sometimes a user and a group have a one-to-one mapping. This happens
-   for standard system accounts, such as cinder, glance, nova, and
-   swift, or when only one user is part of a group.
-
-Running Instances
-~~~~~~~~~~~~~~~~~
-
-To see a list of running instances, run:
-
-.. code-block:: console
-
-   $ openstack server list --all-projects
-   +--------------------------------------+------+--------+---------------------+------------+
-   | ID                                   | Name | Status | Networks            | Image Name |
-   +--------------------------------------+------+--------+---------------------+------------+
-   | 495b4f5e-0b12-4c5a-b4e0-4326dee17a5a | vm1  | ACTIVE | public=172.24.4.232 | cirros     |
-   | e83686f9-16e8-45e6-911d-48f75cb8c0fb | vm2  | ACTIVE | private=10.0.0.7    | cirros     |
-   +--------------------------------------+------+--------+---------------------+------------+
-
-Unfortunately, this command does not show various details about the
-running instances, such as the compute node an instance is running on
-or its flavor. You can use the following command to view details about
-an individual instance:
-
-.. code-block:: console
-
-   $ openstack server show SERVER
-
-For example:
-
-.. code-block:: console
-
-   # openstack server show e83686f9-16e8-45e6-911d-48f75cb8c0fb
-   +--------------------------------------+----------------------------------------------------------+
-   | Field                                | Value                                                    |
-   +--------------------------------------+----------------------------------------------------------+
-   | OS-DCF:diskConfig                    | AUTO                                                     |
-   | OS-EXT-AZ:availability_zone          | nova                                                     |
-   | OS-EXT-SRV-ATTR:host                 | c02.example.com                                          |
-   | OS-EXT-SRV-ATTR:hypervisor_hostname  | c02.example.com                                          |
-   | OS-EXT-SRV-ATTR:instance_name        | instance-00000001                                        |
-   | OS-EXT-STS:power_state               | Running                                                  |
-   | OS-EXT-STS:task_state                | None                                                     |
-   | OS-EXT-STS:vm_state                  | active                                                   |
-   | OS-SRV-USG:launched_at               | 2016-10-19T15:18:09.000000                               |
-   | OS-SRV-USG:terminated_at             | None                                                     |
-   | accessIPv4                           |                                                          |
-   | accessIPv6                           |                                                          |
-   | addresses                            | private=10.0.0.7                                         |
-   | config_drive                         |                                                          |
-   | created                              | 2016-10-19T15:17:46Z                                     |
-   | flavor                               | m1.tiny (1)                                              |
-   | hostId                               | 2b57e2b7a839508337fb55695b8f6e65aa881460a20449a76352040b |
-   | id                                   | e83686f9-16e8-45e6-911d-48f75cb8c0fb                     |
-   | image                                | cirros (9fef3b2d-c35d-4b61-bea8-09cc6dc41829)            |
-   | key_name                             | None                                                     |
-   | name                                 | vm2                                                      |
-   | os-extended-volumes:volumes_attached | []                                                       |
-   | progress                             | 0                                                        |
-   | project_id                           | 1eaaf6ede7a24e78859591444abf314a                         |
-   | properties                           |                                                          |
-   | security_groups                      | [{u'name': u'default'}]                                  |
-   | status                               | ACTIVE                                                   |
-   | updated                              | 2016-10-19T15:18:58Z                                     |
-   | user_id                              | 7aaa9b5573ce441b98dae857a82ecc68                         |
-   +--------------------------------------+----------------------------------------------------------+
-
-This output shows that an instance named ``vm2`` was created from a
-``cirros`` image with an ``m1.tiny`` flavor and is hosted on the
-compute node ``c02.example.com``.
-
-Summary
-~~~~~~~
-
-We hope you have enjoyed this quick tour of your working environment,
-including how to interact with your cloud and extract useful
-information. From here, you can use the `OpenStack Administrator
-Guide <https://docs.openstack.org/admin-guide/>`_ as your
-reference for all of the command-line functionality in your cloud.
diff --git a/doc/ops-guide/source/ops-logging-monitoring-summary.rst b/doc/ops-guide/source/ops-logging-monitoring-summary.rst
deleted file mode 100644
index 6234c9196a..0000000000
--- a/doc/ops-guide/source/ops-logging-monitoring-summary.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-=======
-Summary
-=======
-
-For stable operations, you want to detect failure promptly and determine
-causes efficiently. With a distributed system, it's even more important
-to track the right items to meet a service-level target. Learning where
-these logs are located in the file system or API gives you an advantage.
-This chapter also showed how to read, interpret, and manipulate -information from OpenStack services so that you can monitor effectively. diff --git a/doc/ops-guide/source/ops-logging-monitoring.rst b/doc/ops-guide/source/ops-logging-monitoring.rst deleted file mode 100644 index 5d26401855..0000000000 --- a/doc/ops-guide/source/ops-logging-monitoring.rst +++ /dev/null @@ -1,15 +0,0 @@ -====================== -Logging and Monitoring -====================== - -.. toctree:: - :maxdepth: 1 - - ops-logging.rst - ops-monitoring.rst - ops-logging-monitoring-summary.rst - -As an OpenStack cloud is composed of so many different services, there -are a large number of log files. This chapter aims to assist you in -locating and working with them and describes other ways to track the -status of your deployment. diff --git a/doc/ops-guide/source/ops-logging-rsyslog.rst b/doc/ops-guide/source/ops-logging-rsyslog.rst deleted file mode 100644 index 13cc50ef33..0000000000 --- a/doc/ops-guide/source/ops-logging-rsyslog.rst +++ /dev/null @@ -1,105 +0,0 @@ -======= -rsyslog -======= - -A number of operating systems use rsyslog as the default logging service. -Since it is natively able to send logs to a remote location, you do not -have to install anything extra to enable this feature, just modify the -configuration file. In doing this, consider running your logging over a -management network or using an encrypted VPN to avoid interception. - -rsyslog client configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To begin, configure all OpenStack components to log to the syslog log -file in addition to their standard log file location. Also, configure each -component to log to a different syslog facility. This makes it easier to -split the logs into individual components on the central server: - -``nova.conf``: - -.. code-block:: ini - - use_syslog=True - syslog_log_facility=LOG_LOCAL0 - -``glance-api.conf`` and ``glance-registry.conf``: - -.. code-block:: ini - - use_syslog=True - syslog_log_facility=LOG_LOCAL1 - -``cinder.conf``: - -.. code-block:: ini - - use_syslog=True - syslog_log_facility=LOG_LOCAL2 - -``keystone.conf``: - -.. code-block:: ini - - use_syslog=True - syslog_log_facility=LOG_LOCAL3 - -By default, Object Storage logs to syslog. - -Next, create ``/etc/rsyslog.d/client.conf`` with the following line: - -.. code-block:: none - - *.* @192.168.1.10 - -This instructs rsyslog to send all logs to the IP listed. In this -example, the IP points to the cloud controller. - -rsyslog server configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Designate a server as the central logging server. The best practice is -to choose a server that is solely dedicated to this purpose. Create a -file called ``/etc/rsyslog.d/server.conf`` with the following contents: - -.. code-block:: none - - # Enable UDP - $ModLoad imudp - # Listen on 192.168.1.10 only - $UDPServerAddress 192.168.1.10 - # Port 514 - $UDPServerRun 514 - - # Create logging templates for nova - $template NovaFile,"/var/log/rsyslog/%HOSTNAME%/nova.log" - $template NovaAll,"/var/log/rsyslog/nova.log" - - # Log everything else to syslog.log - $template DynFile,"/var/log/rsyslog/%HOSTNAME%/syslog.log" - *.* ?DynFile - - # Log various openstack components to their own individual file - local0.* ?NovaFile - local0.* ?NovaAll - & ~ - -This example configuration handles the nova service only. It first -configures rsyslog to act as a server that runs on port 514. Next, it -creates a series of logging templates. Logging templates control where -received logs are stored. 
Using the last example, a nova log from -c01.example.com goes to the following locations: - -- ``/var/log/rsyslog/c01.example.com/nova.log`` - -- ``/var/log/rsyslog/nova.log`` - -This is useful, as logs from c02.example.com go to: - -- ``/var/log/rsyslog/c02.example.com/nova.log`` - -- ``/var/log/rsyslog/nova.log`` - -This configuration will result in a separate log file for each compute -node as well as an aggregated log file that contains nova logs from all -nodes. diff --git a/doc/ops-guide/source/ops-logging.rst b/doc/ops-guide/source/ops-logging.rst deleted file mode 100644 index afa81f8559..0000000000 --- a/doc/ops-guide/source/ops-logging.rst +++ /dev/null @@ -1,257 +0,0 @@ -======= -Logging -======= - -Where Are the Logs? -~~~~~~~~~~~~~~~~~~~ - -Most services use the convention of writing their log files to -subdirectories of the ``/var/log directory``, as listed in -:ref:`table_log_locations`. - -.. _table_log_locations: - -.. list-table:: Table OpenStack log locations - :widths: 25 25 50 - :header-rows: 1 - - * - Node type - - Service - - Log location - * - Cloud controller - - ``nova-*`` - - ``/var/log/nova`` - * - Cloud controller - - ``glance-*`` - - ``/var/log/glance`` - * - Cloud controller - - ``cinder-*`` - - ``/var/log/cinder`` - * - Cloud controller - - ``keystone-*`` - - ``/var/log/keystone`` - * - Cloud controller - - ``neutron-*`` - - ``/var/log/neutron`` - * - Cloud controller - - horizon - - ``/var/log/apache2/`` - * - All nodes - - misc (swift, dnsmasq) - - ``/var/log/syslog`` - * - Compute nodes - - libvirt - - ``/var/log/libvirt/libvirtd.log`` - * - Compute nodes - - Console (boot up messages) for VM instances: - - ``/var/lib/nova/instances/instance-/console.log`` - * - Block Storage nodes - - cinder-volume - - ``/var/log/cinder/cinder-volume.log`` - -Reading the Logs -~~~~~~~~~~~~~~~~ - -OpenStack services use the standard logging levels, at increasing -severity: TRACE, DEBUG, INFO, AUDIT, WARNING, ERROR, and CRITICAL. That -is, messages only appear in the logs if they are more "severe" than the -particular log level, with DEBUG allowing all log statements through. -For example, TRACE is logged only if the software has a stack trace, -while INFO is logged for every message including those that are only for -information. - -To disable DEBUG-level logging, edit ``/etc/nova/nova.conf`` file as follows: - -.. code-block:: ini - - debug=false - -Keystone is handled a little differently. To modify the logging level, -edit the ``/etc/keystone/logging.conf`` file and look at the -``logger_root`` and ``handler_file`` sections. - -Logging for horizon is configured in -``/etc/openstack_dashboard/local_settings.py``. Because horizon is -a Django web application, it follows the `Django Logging framework -conventions `_. - -The first step in finding the source of an error is typically to search -for a CRITICAL, or ERROR message in the log starting at the -bottom of the log file. - -Here is an example of a log message with the corresponding -ERROR (Python traceback) immediately following: - -.. 
code-block:: console - - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server [req-c0b38ace-2586-48ce-9336-6233efa1f035 6c9808c2c5044e1388a83a74da9364d5 e07f5395c - 2eb428cafc41679e7deeab1 - default default] Exception during message handling - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server Traceback (most recent call last): - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server File "/openstack/venvs/cinder-14.0.0/lib/python2.7/site-packages/oslo_messaging/rpc/server.py", line 133, in _process_incoming - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server res = self.dispatcher.dispatch(message) - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server File "/openstack/venvs/cinder-14.0.0/lib/python2.7/site-packages/oslo_messaging/rpc/dispatcher.py", line 150, in dispatch - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server return self._do_dispatch(endpoint, method, ctxt, args) - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server File "/openstack/venvs/cinder-14.0.0/lib/python2.7/site-packages/oslo_messaging/rpc/dispatcher.py", line 121, in _do_dispatch - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server result = func(ctxt, **new_args) - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server File "/openstack/venvs/cinder-14.0.0/lib/python2.7/site-packages/cinder/volume/manager.py", line 4366, in create_volume - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server allow_reschedule=allow_reschedule, volume=volume) - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server File "/openstack/venvs/cinder-14.0.0/lib/python2.7/site-packages/cinder/volume/manager.py", line 634, in create_volume - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server _run_flow() - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server File "/openstack/venvs/cinder-14.0.0/lib/python2.7/site-packages/cinder/volume/manager.py", line 626, in _run_flow - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server flow_engine.run() - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server File "/openstack/venvs/cinder-14.0.0/lib/python2.7/site-packages/taskflow/engines/action_engine/engine.py", line 247, in run - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server for _state in self.run_iter(timeout=timeout): - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server File "/openstack/venvs/cinder-14.0.0/lib/python2.7/site-packages/taskflow/engines/action_engine/engine.py", line 340, in run_iter - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server failure.Failure.reraise_if_any(er_failures) - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server File "/openstack/venvs/cinder-14.0.0/lib/python2.7/site-packages/taskflow/types/failure.py", line 336, in reraise_if_any - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server failures[0].reraise() - 2017-01-18 15:54:00.467 32552 ERROR oslo_messaging.rpc.server File "/openstack/venvs/cinder-14.0.0/lib/python2.7/site-packages/taskflow/types/failure.py", line 343, in reraise - -In this example, ``cinder-volumes`` failed to start and has provided a -stack trace, since its volume back end has been unable to set up the -storage volume—probably because the LVM volume that is expected from the -configuration does not exist. - -Here is an example error log: - -.. 
code-block:: console
-
-   2013-02-25 20:26:33 6619 ERROR nova.openstack.common.rpc.common [-] AMQP server on localhost:5672 is unreachable:
-   [Errno 111] ECONNREFUSED. Trying again in 23 seconds.
-
-In this error, a nova service has failed to connect to the RabbitMQ
-server because it got a connection refused error.
-
-Tracing Instance Requests
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When an instance fails to behave properly, you will often have to trace
-activity associated with that instance across the log files of various
-``nova-*`` services and across both the cloud controller and compute
-nodes.
-
-The typical way is to trace the UUID associated with an instance across
-the service logs.
-
-Consider the following example:
-
-.. code-block:: console
-
-   $ openstack server list
-   +--------------------------------------+--------+--------+---------------------------+------------+
-   | ID                                   | Name   | Status | Networks                  | Image Name |
-   +--------------------------------------+--------+--------+---------------------------+------------+
-   | faf7ded8-4a46-413b-b113-f19590746ffe | cirros | ACTIVE | novanetwork=192.168.100.3 | cirros     |
-   +--------------------------------------+--------+--------+---------------------------+------------+
-
-Here, the ID associated with the instance is
-``faf7ded8-4a46-413b-b113-f19590746ffe``. If you search for this string
-on the cloud controller in the ``/var/log/nova-*.log`` files, it appears
-in ``nova-api.log`` and ``nova-scheduler.log``. If you search for this
-on the compute nodes in ``/var/log/nova-*.log``, it appears in
-``nova-compute.log``. If no ERROR or CRITICAL messages appear, the most
-recent log entry that reports this may provide a hint about what has
-gone wrong.
-
-Adding Custom Logging Statements
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If there is not enough information in the existing logs, you may need
-to add your own custom logging statements to the ``nova-*`` services.
-
-The source files are located in
-``/usr/lib/python2.7/dist-packages/nova``.
-
-To add logging statements, the following line should be near the top of
-the file. For most files, these should already be there:
-
-.. code-block:: python
-
-   from nova.openstack.common import log as logging
-   LOG = logging.getLogger(__name__)
-
-To add a DEBUG logging statement, you would do:
-
-.. code-block:: python
-
-   LOG.debug("This is a custom debugging statement")
-
-You may notice that all the existing logging messages are preceded by
-an underscore and surrounded by parentheses, for example:
-
-.. code-block:: python
-
-   LOG.debug(_("Logging statement appears here"))
-
-This formatting is used to support translation of logging messages into
-different languages using the
-`gettext <http://docs.python.org/2/library/gettext.html>`_
-internationalization library. You don't need to do this for your own
-custom log messages. However, if you want to contribute code back to
-the OpenStack project that includes logging statements, you must
-surround your log messages with underscores and parentheses.
-
-RabbitMQ Web Management Interface or rabbitmqctl
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Aside from connection failures, RabbitMQ log files are generally not
-useful for debugging OpenStack related issues. Instead, we recommend
-you use the RabbitMQ web management interface. Enable it on your cloud
-controller:
-
-.. code-block:: console
-
-   # /usr/lib/rabbitmq/bin/rabbitmq-plugins enable rabbitmq_management
-
-.. 
code-block:: console - - # service rabbitmq-server restart - -The RabbitMQ web management interface is accessible on your cloud -controller at *http://localhost:55672*. - -.. note:: - - Ubuntu 12.04 installs RabbitMQ version 2.7.1, which uses port 55672. - RabbitMQ versions 3.0 and above use port 15672 instead. You can - check which version of RabbitMQ you have running on your local - Ubuntu machine by doing: - - .. code-block:: console - - $ dpkg -s rabbitmq-server | grep "Version:" - Version: 2.7.1-0ubuntu4 - -An alternative to enabling the RabbitMQ web management interface is to -use the ``rabbitmqctl`` commands. For example, -:command:`rabbitmqctl list_queues| grep cinder` displays any messages left in -the queue. If there are messages, it's a possible sign that cinder -services didn't connect properly to rabbitmq and might have to be -restarted. - -Items to monitor for RabbitMQ include the number of items in each of the -queues and the processing time statistics for the server. - -Centrally Managing Logs -~~~~~~~~~~~~~~~~~~~~~~~ - -Because your cloud is most likely composed of many servers, you must -check logs on each of those servers to properly piece an event together. -A better solution is to send the logs of all servers to a central -location so that they can all be accessed from the same -area. - -The choice of central logging engine will be dependent on the operating -system in use as well as any organizational requirements for logging tools. - -Syslog choices --------------- - -There are a large number of syslogs engines available, each have differing -capabilities and configuration requirements. - -.. toctree:: - :maxdepth: 1 - - ops-logging-rsyslog.rst diff --git a/doc/ops-guide/source/ops-maintenance-complete.rst b/doc/ops-guide/source/ops-maintenance-complete.rst deleted file mode 100644 index 369173d119..0000000000 --- a/doc/ops-guide/source/ops-maintenance-complete.rst +++ /dev/null @@ -1,50 +0,0 @@ -=========================== -Handling a Complete Failure -=========================== - -A common way of dealing with the recovery from a full system failure, -such as a power outage of a data center, is to assign each service a -priority, and restore in order. -:ref:`table_example_priority` shows an example. - -.. _table_example_priority: - -.. list-table:: Table. Example service restoration priority list - :header-rows: 1 - - * - Priority - - Services - * - 1 - - Internal network connectivity - * - 2 - - Backing storage services - * - 3 - - Public network connectivity for user virtual machines - * - 4 - - ``nova-compute``, cinder hosts - * - 5 - - User virtual machines - * - 10 - - Message queue and database services - * - 15 - - Keystone services - * - 20 - - ``cinder-scheduler`` - * - 21 - - Image Catalog and Delivery services - * - 22 - - ``nova-scheduler`` services - * - 98 - - ``cinder-api`` - * - 99 - - ``nova-api`` services - * - 100 - - Dashboard node - -Use this example priority list to ensure that user-affected services are -restored as soon as possible, but not before a stable environment is in -place. Of course, despite being listed as a single-line item, each step -requires significant work. For example, just after starting the -database, you should check its integrity, or, after starting the nova -services, you should verify that the hypervisor matches the database and -fix any mismatches. 
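-
-The ordering in the priority table can be encoded directly in a small
-wrapper script. The following is only a sketch: the systemd unit names,
-the host they run on, and the pause between tiers are assumptions that
-must be adapted to your distribution and deployment.
-
-.. code-block:: bash
-
-   #!/bin/bash
-   # Start services tier by tier; unit names are deployment-specific.
-   TIERS=(
-       "mariadb rabbitmq-server"      # priority 10: database, queue
-       "apache2"                      # priority 15: keystone (WSGI)
-       "cinder-scheduler"             # priority 20
-       "glance-api glance-registry"   # priority 21
-       "nova-scheduler"               # priority 22
-       "cinder-api"                   # priority 98
-       "nova-api"                     # priority 99
-   )
-   for tier in "${TIERS[@]}"; do
-       for svc in $tier; do
-           systemctl start "$svc" || { echo "FAILED: $svc" >&2; exit 1; }
-       done
-       sleep 10   # let each tier settle before starting the next
-   done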
diff --git a/doc/ops-guide/source/ops-maintenance-compute.rst b/doc/ops-guide/source/ops-maintenance-compute.rst deleted file mode 100644 index b222ca0106..0000000000 --- a/doc/ops-guide/source/ops-maintenance-compute.rst +++ /dev/null @@ -1,638 +0,0 @@ -===================================== -Compute Node Failures and Maintenance -===================================== - -Sometimes a compute node either crashes unexpectedly or requires a -reboot for maintenance reasons. - -Planned Maintenance -~~~~~~~~~~~~~~~~~~~ - -If you need to reboot a compute node due to planned maintenance, such as -a software or hardware upgrade, perform the following steps: - -#. Disable scheduling of new VMs to the node, optionally providing a reason - comment: - - .. code-block:: console - - # openstack compute service set --disable --disable-reason \ - maintenance c01.example.com nova-compute - -#. Verify that all hosted instances have been moved off the node: - - * If your cloud is using a shared storage: - - #. Get a list of instances that need to be moved: - - .. code-block:: console - - # openstack server list --host c01.example.com --all-projects - - #. Migrate all instances one by one: - - .. code-block:: console - - # openstack server migrate --live c02.example.com - - * If your cloud is not using a shared storage, run: - - .. code-block:: console - - # openstack server migrate --live --block-migration c02.example.com - -#. Stop the ``nova-compute`` service: - - .. code-block:: console - - # stop nova-compute - - If you use a configuration-management system, such as Puppet, that - ensures the ``nova-compute`` service is always running, you can - temporarily move the ``init`` files: - - .. code-block:: console - - # mkdir /root/tmp - # mv /etc/init/nova-compute.conf /root/tmp - # mv /etc/init.d/nova-compute /root/tmp - -#. Shut down your compute node, perform the maintenance, and turn - the node back on. - -#. Start the ``nova-compute`` service: - - .. code-block:: console - - # start nova-compute - - You can re-enable the ``nova-compute`` service by undoing the commands: - - .. code-block:: console - - # mv /root/tmp/nova-compute.conf /etc/init - # mv /root/tmp/nova-compute /etc/init.d/ - -#. Enable scheduling of VMs to the node: - - .. code-block:: console - - # openstack compute service set --enable c01.example.com nova-compute - -#. Optionally, migrate the instances back to their original compute node. - -After a Compute Node Reboots -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When you reboot a compute node, first verify that it booted -successfully. This includes ensuring that the ``nova-compute`` service -is running: - -.. code-block:: console - - # ps aux | grep nova-compute - # status nova-compute - -Also ensure that it has successfully connected to the AMQP server: - -.. code-block:: console - - # grep AMQP /var/log/nova/nova-compute.log - 2013-02-26 09:51:31 12427 INFO nova.openstack.common.rpc.common [-] Connected to AMQP server on 199.116.232.36:5672 - -After the compute node is successfully running, you must deal with the -instances that are hosted on that compute node because none of them are -running. Depending on your SLA with your users or customers, you might -have to start each instance and ensure that they start correctly. - -Instances -~~~~~~~~~ - -You can create a list of instances that are hosted on the compute node -by performing the following command: - -.. 
code-block:: console - - # openstack server list --host c01.example.com --all-projects - -After you have the list, you can use the :command:`openstack` command to -start each instance: - -.. code-block:: console - - # openstack server reboot - -.. note:: - - Any time an instance shuts down unexpectedly, it might have problems - on boot. For example, the instance might require an ``fsck`` on the - root partition. If this happens, the user can use the dashboard VNC - console to fix this. - -If an instance does not boot, meaning ``virsh list`` never shows the -instance as even attempting to boot, do the following on the compute -node: - -.. code-block:: console - - # tail -f /var/log/nova/nova-compute.log - -Try executing the :command:`openstack server reboot` command again. You should -see an error message about why the instance was not able to boot. - -In most cases, the error is the result of something in libvirt's XML -file (``/etc/libvirt/qemu/instance-xxxxxxxx.xml``) that no longer -exists. You can enforce re-creation of the XML file as well as rebooting -the instance by running the following command: - -.. code-block:: console - - # openstack server reboot --hard - -Inspecting and Recovering Data from Failed Instances -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In some scenarios, instances are running but are inaccessible through -SSH and do not respond to any command. The VNC console could be -displaying a boot failure or kernel panic error messages. This could be -an indication of file system corruption on the VM itself. If you need to -recover files or inspect the content of the instance, qemu-nbd can be -used to mount the disk. - -.. warning:: - - If you access or view the user's content and data, get approval first! - -To access the instance's disk -(``/var/lib/nova/instances/instance-xxxxxx/disk``), use the following -steps: - -#. Suspend the instance using the ``virsh`` command. - -#. Connect the qemu-nbd device to the disk. - -#. Mount the qemu-nbd device. - -#. Unmount the device after inspecting. - -#. Disconnect the qemu-nbd device. - -#. Resume the instance. - -If you do not follow last three steps, OpenStack Compute cannot manage -the instance any longer. It fails to respond to any command issued by -OpenStack Compute, and it is marked as shut down. - -Once you mount the disk file, you should be able to access it and treat -it as a collection of normal directories with files and a directory -structure. However, we do not recommend that you edit or touch any files -because this could change the -:term:`access control lists (ACLs) ` that are used -to determine which accounts can perform what operations on files and -directories. Changing ACLs can make the instance unbootable if it is not -already. - -#. Suspend the instance using the :command:`virsh` command, taking note of the - internal ID: - - .. code-block:: console - - # virsh list - Id Name State - ---------------------------------- - 1 instance-00000981 running - 2 instance-000009f5 running - 30 instance-0000274a running - - # virsh suspend 30 - Domain 30 suspended - -#. Find the ID for each instance by listing the server IDs using the - following command: - - .. 
code-block:: console - - # openstack server list - +--------------------------------------+-------+---------+-----------------------------+------------+ - | ID | Name | Status | Networks | Image Name | - +--------------------------------------+-------+---------+-----------------------------+------------+ - | 2da14c5c-de6d-407d-a7d2-2dd0862b9967 | try3 | ACTIVE | finance-internal=10.10.0.4 | | - | 223f4860-722a-44a0-bac7-f73f58beec7b | try2 | ACTIVE | finance-internal=10.10.0.13 | | - +--------------------------------------+-------+---------+-----------------------------+------------+ - -#. Connect the qemu-nbd device to the disk: - - .. code-block:: console - - # cd /var/lib/nova/instances/instance-0000274a - # ls -lh - total 33M - -rw-rw---- 1 libvirt-qemu kvm 6.3K Oct 15 11:31 console.log - -rw-r--r-- 1 libvirt-qemu kvm 33M Oct 15 22:06 disk - -rw-r--r-- 1 libvirt-qemu kvm 384K Oct 15 22:06 disk.local - -rw-rw-r-- 1 nova nova 1.7K Oct 15 11:30 libvirt.xml - # qemu-nbd -c /dev/nbd0 `pwd`/disk - -#. Mount the qemu-nbd device. - - The qemu-nbd device tries to export the instance disk's different - partitions as separate devices. For example, if vda is the disk and - vda1 is the root partition, qemu-nbd exports the device as - ``/dev/nbd0`` and ``/dev/nbd0p1``, respectively: - - .. code-block:: console - - # mount /dev/nbd0p1 /mnt/ - - You can now access the contents of ``/mnt``, which correspond to the - first partition of the instance's disk. - - To examine the secondary or ephemeral disk, use an alternate mount - point if you want both primary and secondary drives mounted at the - same time: - - .. code-block:: console - - # umount /mnt - # qemu-nbd -c /dev/nbd1 `pwd`/disk.local - # mount /dev/nbd1 /mnt/ - # ls -lh /mnt/ - total 76K - lrwxrwxrwx. 1 root root 7 Oct 15 00:44 bin -> usr/bin - dr-xr-xr-x. 4 root root 4.0K Oct 15 01:07 boot - drwxr-xr-x. 2 root root 4.0K Oct 15 00:42 dev - drwxr-xr-x. 70 root root 4.0K Oct 15 11:31 etc - drwxr-xr-x. 3 root root 4.0K Oct 15 01:07 home - lrwxrwxrwx. 1 root root 7 Oct 15 00:44 lib -> usr/lib - lrwxrwxrwx. 1 root root 9 Oct 15 00:44 lib64 -> usr/lib64 - drwx------. 2 root root 16K Oct 15 00:42 lost+found - drwxr-xr-x. 2 root root 4.0K Feb 3 2012 media - drwxr-xr-x. 2 root root 4.0K Feb 3 2012 mnt - drwxr-xr-x. 2 root root 4.0K Feb 3 2012 opt - drwxr-xr-x. 2 root root 4.0K Oct 15 00:42 proc - dr-xr-x---. 3 root root 4.0K Oct 15 21:56 root - drwxr-xr-x. 14 root root 4.0K Oct 15 01:07 run - lrwxrwxrwx. 1 root root 8 Oct 15 00:44 sbin -> usr/sbin - drwxr-xr-x. 2 root root 4.0K Feb 3 2012 srv - drwxr-xr-x. 2 root root 4.0K Oct 15 00:42 sys - drwxrwxrwt. 9 root root 4.0K Oct 15 16:29 tmp - drwxr-xr-x. 13 root root 4.0K Oct 15 00:44 usr - drwxr-xr-x. 17 root root 4.0K Oct 15 00:44 var - -#. Once you have completed the inspection, unmount the mount point and - release the qemu-nbd device: - - .. code-block:: console - - # umount /mnt - # qemu-nbd -d /dev/nbd0 - /dev/nbd0 disconnected - -#. Resume the instance using :command:`virsh`: - - .. code-block:: console - - # virsh list - Id Name State - ---------------------------------- - 1 instance-00000981 running - 2 instance-000009f5 running - 30 instance-0000274a paused - - # virsh resume 30 - Domain 30 resumed - -Managing floating IP addresses between instances -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In an elastic cloud environment using the ``Public_AGILE`` network, each -instance has a publicly accessible IPv4 & IPv6 address. 
It does not support -the concept of OpenStack floating IP addresses that can easily be attached, -removed, and transferred between instances. However, there is a workaround -using neutron ports which contain the IPv4 & IPv6 address. - -**Create a port that can be reused** - -#. Create a port on the ``Public_AGILE`` network: - - .. code-block:: console - - $ openstack port create port1 --network Public_AGILE - - Created a new port: - +-----------------------+------------------------------------------------------+ - | Field | Value | - +-----------------------+------------------------------------------------------+ - | admin_state_up | UP | - | allowed_address_pairs | | - | binding_host_id | None | - | binding_profile | None | - | binding_vif_details | None | - | binding_vif_type | None | - | binding_vnic_type | normal | - | created_at | 2017-02-26T14:23:18Z | - | description | | - | device_id | | - | device_owner | | - | dns_assignment | None | - | dns_name | None | - | extra_dhcp_opts | | - | fixed_ips | ip_address='96.118.182.106', | - | | subnet_id='4279c70a-7218-4c7e-94e5-7bd4c045644e' | - | | ip_address='2001:558:fc0b:100:f816:3eff:fefb:45fb', | - | | subnet_id='11d8087b-6288-4129-95ff-42c3df0c1df0' | - | id | 3871bf29-e963-4701-a7dd-8888dbaab375 | - | ip_address | None | - | mac_address | fa:16:3e:e2:09:e0 | - | name | port1 | - | network_id | f41bd921-3a59-49c4-aa95-c2e4496a4b56 | - | option_name | None | - | option_value | None | - | port_security_enabled | True | - | project_id | 52f0574689f14c8a99e7ca22c4eb572 | - | qos_policy_id | None | - | revision_number | 6 | - | security_groups | 20d96891-0055-428a-8fa6-d5aed25f0dc6 | - | status | DOWN | - | subnet_id | None | - | updated_at | 2017-02-26T14:23:19Z | - +-----------------------+------------------------------------------------------+ - -#. If you know the fully qualified domain name (FQDN) that will be assigned to - the IP address, assign the port with the same name: - - .. code-block:: console - - $ openstack port create "example-fqdn-01.sys.example.com" --network Public_AGILE - - Created a new port: - +-----------------------+------------------------------------------------------+ - | Field | Value | - +-----------------------+------------------------------------------------------+ - | admin_state_up | UP | - | allowed_address_pairs | | - | binding_host_id | None | - | binding_profile | None | - | binding_vif_details | None | - | binding_vif_type | None | - | binding_vnic_type | normal | - | created_at | 2017-02-26T14:24:16Z | - | description | | - | device_id | | - | device_owner | | - | dns_assignment | None | - | dns_name | None | - | extra_dhcp_opts | | - | fixed_ips | ip_address='96.118.182.107', | - | | subnet_id='4279c70a-7218-4c7e-94e5-7bd4c045644e' | - | | ip_address='2001:558:fc0b:100:f816:3eff:fefb:65fc', | - | | subnet_id='11d8087b-6288-4129-95ff-42c3df0c1df0' | - | id | 731c3b28-3753-4e63-bae3-b58a52d6ccca | - | ip_address | None | - | mac_address | fa:16:3e:fb:65:fc | - | name | example-fqdn-01.sys.example.com | - | network_id | f41bd921-3a59-49c4-aa95-c2e4496a4b56 | - | option_name | None | - | option_value | None | - | port_security_enabled | True | - | project_id | 52f0574689f14c8a99e7ca22c4eb5720 | - | qos_policy_id | None | - | revision_number | 6 | - | security_groups | 20d96891-0055-428a-8fa6-d5aed25f0dc6 | - | status | DOWN | - | subnet_id | None | - | updated_at | 2017-02-26T14:24:17Z | - +-----------------------+------------------------------------------------------+ - -#. 
Use the port when creating an instance: - - .. code-block:: console - - $ openstack server create --flavor m1.medium --image ubuntu.qcow2 \ - --key-name team_key --nic port-id=PORT_ID \ - "example-fqdn-01.sys.example.com" - -#. Verify the instance has the correct IP address: - - .. code-block:: console - - +--------------------------------------+----------------------------------------------------------+ - | Field | Value | - +--------------------------------------+----------------------------------------------------------+ - | OS-DCF:diskConfig | MANUAL | - | OS-EXT-AZ:availability_zone | nova | - | OS-EXT-SRV-ATTR:host | os_compute-1 | - | OS-EXT-SRV-ATTR:hypervisor_hostname | os_compute.ece.example.com | - | OS-EXT-SRV-ATTR:instance_name | instance-00012b82 | - | OS-EXT-STS:power_state | Running | - | OS-EXT-STS:task_state | None | - | OS-EXT-STS:vm_state | active | - | OS-SRV-USG:launched_at | 2016-11-30T08:55:27.000000 | - | OS-SRV-USG:terminated_at | None | - | accessIPv4 | | - | accessIPv6 | | - | addresses | public=172.24.4.236 | - | config_drive | | - | created | 2016-11-30T08:55:14Z | - | flavor | m1.medium (103) | - | hostId | aca973d5b7981faaf8c713a0130713bbc1e64151be65c8dfb53039f7 | - | id | f91bd761-6407-46a6-b5fd-11a8a46e4983 | - | image | Example Cloud Ubuntu 14.04 x86_64 v2.5 (fb49d7e1-273b-...| - | key_name | team_key | - | name | example-fqdn-01.sys.example.com | - | os-extended-volumes:volumes_attached | [] | - | progress | 0 | - | project_id | 2daf82a578e9437cab396c888ff0ca57 | - | properties | | - | security_groups | [{u'name': u'default'}] | - | status | ACTIVE | - | updated | 2016-11-30T08:55:27Z | - | user_id | 8cbea24666ae49bbb8c1641f9b12d2d2 | - +--------------------------------------+----------------------------------------------------------+ - -#. Check the port connection using the netcat utility: - - .. code-block:: console - - $ nc -v -w 2 96.118.182.107 22 - Ncat: Version 7.00 ( https://nmap.org/ncat ) - Ncat: Connected to 96.118.182.107:22. - SSH-2.0-OpenSSH_6.6.1p1 Ubuntu-2ubuntu2.6 - -**Detach a port from an instance** - -#. Find the port corresponding to the instance. For example: - - .. code-block:: console - - $ openstack port list | grep -B1 96.118.182.107 - - | 731c3b28-3753-4e63-bae3-b58a52d6ccca | example-fqdn-01.sys.example.com | fa:16:3e:fb:65:fc | ip_address='96.118.182.107', subnet_id='4279c70a-7218-4c7e-94e5-7bd4c045644e' | - -#. Run the :command:`openstack port set` command to remove the port from - the instance: - - .. code-block:: console - - $ openstack port set 731c3b28-3753-4e63-bae3-b58a52d6ccca \ - --device "" --device-owner "" --no-binding-profile - -#. Delete the instance and create a new instance using the - ``--nic port-id`` option. - -**Retrieve an IP address when an instance is deleted before detaching -a port** - -The following procedure is a possible workaround to retrieve an IP address -when an instance has been deleted with the port still attached: - -#. Launch several neutron ports: - - .. code-block:: console - - $ for i in {0..10}; do openstack port create --network Public_AGILE \ - ip-recovery; done - -#. Check the ports for the lost IP address and update the name: - - .. code-block:: console - - $ openstack port set 731c3b28-3753-4e63-bae3-b58a52d6ccca \ - --name "don't delete" - -#. Delete the ports that are not needed: - - .. code-block:: console - - $ for port in $(openstack port list | grep -i ip-recovery | \ - awk '{print $2}'); do openstack port delete $port; done - -#. 
If you still cannot find the lost IP address, repeat these steps - again. - -.. _volumes: - -Volumes -~~~~~~~ - -If the affected instances also had attached volumes, first generate a -list of instance and volume UUIDs: - -.. code-block:: mysql - - mysql> select nova.instances.uuid as instance_uuid, - cinder.volumes.id as volume_uuid, cinder.volumes.status, - cinder.volumes.attach_status, cinder.volumes.mountpoint, - cinder.volumes.display_name from cinder.volumes - inner join nova.instances on cinder.volumes.instance_uuid=nova.instances.uuid - where nova.instances.host = 'c01.example.com'; - -You should see a result similar to the following: - -.. code-block:: mysql - - +--------------+------------+-------+--------------+-----------+--------------+ - |instance_uuid |volume_uuid |status |attach_status |mountpoint | display_name | - +--------------+------------+-------+--------------+-----------+--------------+ - |9b969a05 |1f0fbf36 |in-use |attached |/dev/vdc | test | - +--------------+------------+-------+--------------+-----------+--------------+ - 1 row in set (0.00 sec) - -Next, manually detach and reattach the volumes, where X is the proper -mount point: - -.. code-block:: console - - # openstack server remove volume - # openstack server add volume --device /dev/vdX - -Be sure that the instance has successfully booted and is at a login -screen before doing the above. - -Total Compute Node Failure -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Compute nodes can fail the same way a cloud controller can fail. A -motherboard failure or some other type of hardware failure can cause an -entire compute node to go offline. When this happens, all instances -running on that compute node will not be available. Just like with a -cloud controller failure, if your infrastructure monitoring does not -detect a failed compute node, your users will notify you because of -their lost instances. - -If a compute node fails and won't be fixed for a few hours (or at all), -you can relaunch all instances that are hosted on the failed node if you -use shared storage for ``/var/lib/nova/instances``. - -To do this, generate a list of instance UUIDs that are hosted on the -failed node by running the following query on the nova database: - -.. code-block:: mysql - - mysql> select uuid from instances - where host = 'c01.example.com' and deleted = 0; - -Next, update the nova database to indicate that all instances that used -to be hosted on c01.example.com are now hosted on c02.example.com: - -.. code-block:: mysql - - mysql> update instances set host = 'c02.example.com' - where host = 'c01.example.com' and deleted = 0; - -If you're using the Networking service ML2 plug-in, update the -Networking service database to indicate that all ports that used to be -hosted on c01.example.com are now hosted on c02.example.com: - -.. code-block:: mysql - - mysql> update ml2_port_bindings set host = 'c02.example.com' - where host = 'c01.example.com'; - mysql> update ml2_port_binding_levels set host = 'c02.example.com' - where host = 'c01.example.com'; - -After that, use the :command:`openstack` command to reboot all instances -that were on c01.example.com while regenerating their XML files at the same -time: - -.. code-block:: console - - # openstack server reboot --hard - -Finally, reattach volumes using the same method described in the section -:ref:`volumes`. - -/var/lib/nova/instances -~~~~~~~~~~~~~~~~~~~~~~~ - -It's worth mentioning this directory in the context of failed compute -nodes. 
This directory contains the libvirt KVM file-based disk images -for the instances that are hosted on that compute node. If you are not -running your cloud in a shared storage environment, this directory is -unique across all compute nodes. - -``/var/lib/nova/instances`` contains two types of directories. - -The first is the ``_base`` directory. This contains all the cached base -images from glance for each unique image that has been launched on that -compute node. Files ending in ``_20`` (or a different number) are the -ephemeral base images. - -The other directories are titled ``instance-xxxxxxxx``. These -directories correspond to instances running on that compute node. The -files inside are related to one of the files in the ``_base`` directory. -They're essentially differential-based files containing only the changes -made from the original ``_base`` directory. - -All files and directories in ``/var/lib/nova/instances`` are uniquely -named. The files in \_base are uniquely titled for the glance image that -they are based on, and the directory names ``instance-xxxxxxxx`` are -uniquely titled for that particular instance. For example, if you copy -all data from ``/var/lib/nova/instances`` on one compute node to -another, you do not overwrite any files or cause any damage to images -that have the same unique name, because they are essentially the same -file. - -Although this method is not documented or supported, you can use it when -your compute node is permanently offline but you have instances locally -stored on it. diff --git a/doc/ops-guide/source/ops-maintenance-configuration.rst b/doc/ops-guide/source/ops-maintenance-configuration.rst deleted file mode 100644 index 16c3c77d02..0000000000 --- a/doc/ops-guide/source/ops-maintenance-configuration.rst +++ /dev/null @@ -1,29 +0,0 @@ -======================== -Configuration Management -======================== - -Maintaining an OpenStack cloud requires that you manage multiple -physical servers, and this number might grow over time. Because managing -nodes manually is error prone, we strongly recommend that you use a -configuration-management tool. These tools automate the process of -ensuring that all your nodes are configured properly and encourage you -to maintain your configuration information (such as packages and -configuration options) in a version-controlled repository. - -.. note:: - - Several configuration-management tools are available, and this guide does - not recommend a specific one. The most popular ones in the OpenStack - community are: - - * `Puppet `_, with available `OpenStack - Puppet modules `_ - * `Ansible `_, with `OpenStack Ansible - `_ - * `Chef `_, with available `OpenStack Chef - recipes `_ - - Other newer configuration tools include `Juju `_ - and `Salt `_; and more mature configuration - management tools include `CFEngine `_ and `Bcfg2 - `_. diff --git a/doc/ops-guide/source/ops-maintenance-controller.rst b/doc/ops-guide/source/ops-maintenance-controller.rst deleted file mode 100644 index 83fa4e13de..0000000000 --- a/doc/ops-guide/source/ops-maintenance-controller.rst +++ /dev/null @@ -1,96 +0,0 @@ -=========================================================== -Cloud Controller and Storage Proxy Failures and Maintenance -=========================================================== - -The cloud controller and storage proxy are very similar to each other -when it comes to expected and unexpected downtime. 
One of each server -type typically runs in the cloud, which makes them very noticeable when -they are not running. - -For the cloud controller, the good news is if your cloud is using the -FlatDHCP multi-host HA network mode, existing instances and volumes -continue to operate while the cloud controller is offline. For the -storage proxy, however, no storage traffic is possible until it is back -up and running. - -Planned Maintenance -~~~~~~~~~~~~~~~~~~~ - -One way to plan for cloud controller or storage proxy maintenance is to -simply do it off-hours, such as at 1 a.m. or 2 a.m. This strategy -affects fewer users. If your cloud controller or storage proxy is too -important to have unavailable at any point in time, you must look into -high-availability options. - -Rebooting a Cloud Controller or Storage Proxy -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -All in all, just issue the :command:`reboot` command. The operating system -cleanly shuts down services and then automatically reboots. If you want -to be very thorough, run your backup jobs just before you -reboot. - -After a cloud controller reboots, ensure that all required services were -successfully started. The following commands use :command:`ps` and -:command:`grep` to determine if nova, glance, and keystone are currently -running: - -.. code-block:: console - - # ps aux | grep nova- - # ps aux | grep glance- - # ps aux | grep keystone - # ps aux | grep cinder - -Also check that all services are functioning. The following set of -commands sources the ``openrc`` file, then runs some basic glance, nova, -and openstack commands. If the commands work as expected, you can be -confident that those services are in working condition: - -.. code-block:: console - - # . openrc - # openstack image list - # openstack server list - # openstack project list - -For the storage proxy, ensure that the :term:`Object Storage service ` has resumed: - -.. code-block:: console - - # ps aux | grep swift - -Also check that it is functioning: - -.. code-block:: console - - # swift stat - -Total Cloud Controller Failure -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The cloud controller could completely fail if, for example, its -motherboard goes bad. Users will immediately notice the loss of a cloud -controller since it provides core functionality to your cloud -environment. If your infrastructure monitoring does not alert you that -your cloud controller has failed, your users definitely will. -Unfortunately, this is a rough situation. The cloud controller is an -integral part of your cloud. If you have only one controller, you will -have many missing services if it goes down. - -To avoid this situation, create a highly available cloud controller -cluster. This is outside the scope of this document, but you can read -more in the `OpenStack High Availability -Guide `_. - -The next best approach is to use a configuration-management tool, such -as Puppet, to automatically build a cloud controller. This should not -take more than 15 minutes if you have a spare server available. After -the controller rebuilds, restore any backups taken -(see :doc:`ops-backup-recovery`). - -Also, in practice, the ``nova-compute`` services on the compute nodes do -not always reconnect cleanly to rabbitmq hosted on the controller when -it comes back up after a long reboot; a restart on the nova services on -the compute nodes is required. 
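-
-Assuming SSH access from the controller and systemd unit names, both of
-which vary by deployment, a rough sketch of such a restart loop is:
-
-.. code-block:: console
-
-   # for host in c01 c02 c03 c04 c05; do ssh $host systemctl restart nova-compute; done  # adjust host list and unit name
-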
diff --git a/doc/ops-guide/source/ops-maintenance-database.rst b/doc/ops-guide/source/ops-maintenance-database.rst deleted file mode 100644 index 798ec63c63..0000000000 --- a/doc/ops-guide/source/ops-maintenance-database.rst +++ /dev/null @@ -1,51 +0,0 @@ -========= -Databases -========= - -Almost all OpenStack components have an underlying database to store -persistent information. Usually this database is MySQL. Normal MySQL -administration is applicable to these databases. OpenStack does not -configure the databases out of the ordinary. Basic administration -includes performance tweaking, high availability, backup, recovery, and -repairing. For more information, see a standard MySQL administration guide. - -You can perform a couple of tricks with the database to either more -quickly retrieve information or fix a data inconsistency error—for -example, an instance was terminated, but the status was not updated in -the database. These tricks are discussed throughout this book. - -Database Connectivity -~~~~~~~~~~~~~~~~~~~~~ - -Review the component's configuration file to see how each OpenStack component -accesses its corresponding database. Look for a ``connection`` option. The -following command uses ``grep`` to display the SQL connection string for nova, -glance, cinder, and keystone: - -.. code-block:: console - - # grep -hE "connection ?=" \ - /etc/nova/nova.conf /etc/glance/glance-*.conf \ - /etc/cinder/cinder.conf /etc/keystone/keystone.conf \ - /etc/neutron/neutron.conf - connection = mysql+pymysql://nova:password@cloud.example.com/nova - connection = mysql+pymysql://glance:password@cloud.example.com/glance - connection = mysql+pymysql://glance:password@cloud.example.com/glance - connection = mysql+pymysql://cinder:password@cloud.example.com/cinder - connection = mysql+pymysql://keystone:password@cloud.example.com/keystone - connection = mysql+pymysql://neutron:password@cloud.example.com/neutron - -The connection strings take this format: - -.. code-block:: console - - mysql+pymysql:// : @ / - -Performance and Optimizing -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -As your cloud grows, MySQL is utilized more and more. If you suspect -that MySQL might be becoming a bottleneck, you should start researching -MySQL optimization. The MySQL manual has an entire section dedicated to -this topic: `Optimization Overview -`_. diff --git a/doc/ops-guide/source/ops-maintenance-determine.rst b/doc/ops-guide/source/ops-maintenance-determine.rst deleted file mode 100644 index 331ecd5efd..0000000000 --- a/doc/ops-guide/source/ops-maintenance-determine.rst +++ /dev/null @@ -1,92 +0,0 @@ -===================================== -Determining Which Component Is Broken -===================================== - -OpenStack's collection of different components interact with each other -strongly. For example, uploading an image requires interaction from -``nova-api``, ``glance-api``, ``glance-registry``, keystone, and -potentially ``swift-proxy``. As a result, it is sometimes difficult to -determine exactly where problems lie. Assisting in this is the purpose -of this section. - -Tailing Logs -~~~~~~~~~~~~ - -The first place to look is the log file related to the command you are -trying to run. For example, if ``openstack server list`` is failing, try -tailing a nova log file and running the command again: - -Terminal 1: - -.. code-block:: console - - # tail -f /var/log/nova/nova-api.log - -Terminal 2: - -.. code-block:: console - - # openstack server list - -Look for any errors or traces in the log file. 
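If the log is noisy, it can help to filter the tail for
-higher-severity messages first; for example, using the nova-api log
-path shown above:
-
-.. code-block:: console
-
-   # tail -f /var/log/nova/nova-api.log | grep -E "WARNING|ERROR|CRITICAL"
-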
For more information, see -:doc:`ops-logging-monitoring`. - -If the error indicates that the problem is with another component, -switch to tailing that component's log file. For example, if nova cannot -access glance, look at the ``glance-api`` log: - -Terminal 1: - -.. code-block:: console - - # tail -f /var/log/glance/api.log - -Terminal 2: - -.. code-block:: console - - # openstack server list - -Wash, rinse, and repeat until you find the core cause of the problem. - -Running Daemons on the CLI -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Unfortunately, sometimes the error is not apparent from the log files. -In this case, switch tactics and use a different command; maybe run the -service directly on the command line. For example, if the ``glance-api`` -service refuses to start and stay running, try launching the daemon from -the command line: - -.. code-block:: console - - # sudo -u glance -H glance-api - -This might print the error and cause of the problem. - -.. note:: - - The ``-H`` flag is required when running the daemons with sudo - because some daemons will write files relative to the user's home - directory, and this write may fail if ``-H`` is left off. - -.. Tip:: - - **Example of Complexity** - - One morning, a compute node failed to run any instances. The log files - were a bit vague, claiming that a certain instance was unable to be - started. This ended up being a red herring because the instance was - simply the first instance in alphabetical order, so it was the first - instance that ``nova-compute`` would touch. - - Further troubleshooting showed that libvirt was not running at all. This - made more sense. If libvirt wasn't running, then no instance could be - virtualized through KVM. Upon trying to start libvirt, it would silently - die immediately. The libvirt logs did not explain why. - - Next, the ``libvirtd`` daemon was run on the command line. Finally a - helpful error message: it could not connect to d-bus. As ridiculous as - it sounds, libvirt, and thus ``nova-compute``, relies on d-bus and - somehow d-bus crashed. Simply starting d-bus set the entire chain back - on track, and soon everything was back up and running. diff --git a/doc/ops-guide/source/ops-maintenance-hardware.rst b/doc/ops-guide/source/ops-maintenance-hardware.rst deleted file mode 100644 index 64ead9f6c0..0000000000 --- a/doc/ops-guide/source/ops-maintenance-hardware.rst +++ /dev/null @@ -1,64 +0,0 @@ -===================== -Working with Hardware -===================== - -As for your initial deployment, you should ensure that all hardware is -appropriately burned in before adding it to production. Run software -that uses the hardware to its limits—maxing out RAM, CPU, disk, and -network. Many options are available, and normally double as benchmark -software, so you also get a good idea of the performance of your -system. - -Adding a Compute Node -~~~~~~~~~~~~~~~~~~~~~ - -If you find that you have reached or are reaching the capacity limit of -your computing resources, you should plan to add additional compute -nodes. Adding more nodes is quite easy. The process for adding compute -nodes is the same as when the initial compute nodes were deployed to -your cloud: use an automated deployment system to bootstrap the -bare-metal server with the operating system and then have a -configuration-management system install and configure OpenStack Compute. -Once the Compute service has been installed and configured in the same -way as the other compute nodes, it automatically attaches itself to the -cloud. 
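You can confirm that a new node has registered by listing the compute
-services and checking that its host name appears, for example:
-
-.. code-block:: console
-
-   # openstack compute service list --service nova-compute
-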
The cloud controller notices the new node(s) and begins -scheduling instances to launch there. - -If your OpenStack Block Storage nodes are separate from your compute -nodes, the same procedure still applies because the same queuing and -polling system is used in both services. - -We recommend that you use the same hardware for new compute and block -storage nodes. At the very least, ensure that the CPUs are similar in -the compute nodes to not break live migration. - -Adding an Object Storage Node -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Adding a new object storage node is different from adding compute or -block storage nodes. You still want to initially configure the server by -using your automated deployment and configuration-management systems. -After that is done, you need to add the local disks of the object -storage node into the object storage ring. The exact command to do this -is the same command that was used to add the initial disks to the ring. -Simply rerun this command on the object storage proxy server for all -disks on the new object storage node. Once this has been done, rebalance -the ring and copy the resulting ring files to the other storage nodes. - -.. note:: - - If your new object storage node has a different number of disks than - the original nodes have, the command to add the new node is - different from the original commands. These parameters vary from - environment to environment. - -Replacing Components -~~~~~~~~~~~~~~~~~~~~ - -Failures of hardware are common in large-scale deployments such as an -infrastructure cloud. Consider your processes and balance time saving -against availability. For example, an Object Storage cluster can easily -live with dead disks in it for some period of time if it has sufficient -capacity. Or, if your compute installation is not full, you could -consider live migrating instances off a host with a RAM failure until -you have time to deal with the problem. diff --git a/doc/ops-guide/source/ops-maintenance-hdmwy.rst b/doc/ops-guide/source/ops-maintenance-hdmwy.rst deleted file mode 100644 index 7651aaca93..0000000000 --- a/doc/ops-guide/source/ops-maintenance-hdmwy.rst +++ /dev/null @@ -1,54 +0,0 @@ -===== -HDWMY -===== - -Here's a quick list of various to-do items for each hour, day, week, -month, and year. Please note that these tasks are neither required nor -definitive but helpful ideas: - -Hourly -~~~~~~ - -* Check your monitoring system for alerts and act on them. -* Check your ticket queue for new tickets. - -Daily -~~~~~ - -* Check for instances in a failed or weird state and investigate why. -* Check for security patches and apply them as needed. - -Weekly -~~~~~~ - -* Check cloud usage: - - * User quotas - * Disk space - * Image usage - * Large instances - * Network usage (bandwidth and IP usage) - -* Verify your alert mechanisms are still working. - -Monthly -~~~~~~~ - -* Check usage and trends over the past month. -* Check for user accounts that should be removed. -* Check for operator accounts that should be removed. - -Quarterly -~~~~~~~~~ - -* Review usage and trends over the past quarter. -* Prepare any quarterly reports on usage and statistics. -* Review and plan any necessary cloud additions. -* Review and plan any major OpenStack upgrades. - -Semiannually -~~~~~~~~~~~~ - -* Upgrade OpenStack. -* Clean up after an OpenStack upgrade (any unused or new services to be - aware of?). 
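-Many of these checks lend themselves to simple scripted commands. A
-rough sketch for some of the usage checks above; the project name is
-only an example:
-
-.. code-block:: console
-
-   # df -h
-   # openstack quota show demo-project
-   # openstack image list | wc -l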
diff --git a/doc/ops-guide/source/ops-maintenance-rabbitmq.rst b/doc/ops-guide/source/ops-maintenance-rabbitmq.rst
deleted file mode 100644
index f82db214ee..0000000000
--- a/doc/ops-guide/source/ops-maintenance-rabbitmq.rst
+++ /dev/null
@@ -1,148 +0,0 @@
-========================
-RabbitMQ troubleshooting
-========================
-
-This section provides tips on resolving common RabbitMQ issues.
-
-RabbitMQ service hangs
-~~~~~~~~~~~~~~~~~~~~~~
-
-It is quite common for the RabbitMQ service to hang when it is
-restarted or stopped. Therefore, it is highly recommended that
-you manually restart RabbitMQ on each controller node.
-
-.. note::
-
-   The RabbitMQ service name may vary depending on your operating
-   system or the vendor who supplies your RabbitMQ service.
-
-#. Restart the RabbitMQ service on the first controller node. The
-   :command:`service rabbitmq-server restart` command may not work
-   in certain situations, so it is best to use:
-
-   .. code-block:: console
-
-      # service rabbitmq-server stop
-      # service rabbitmq-server start
-
-#. If the service refuses to stop, run the :command:`pkill` command
-   to stop it, then restart the service:
-
-   .. code-block:: console
-
-      # pkill -KILL -u rabbitmq
-      # service rabbitmq-server start
-
-#. Verify that the RabbitMQ processes are running:
-
-   .. code-block:: console
-
-      # ps -ef | grep rabbitmq
-      # rabbitmqctl list_queues
-      # rabbitmqctl list_queues 2>&1 | grep -i error
-
-#. If there are errors, run the :command:`cluster_status` command to make sure
-   there are no partitions:
-
-   .. code-block:: console
-
-      # rabbitmqctl cluster_status
-
-   For more information, see the `RabbitMQ documentation `_.
-
-#. Go back to the first step and try restarting the RabbitMQ service again. If
-   you still have errors, remove the contents of the
-   ``/var/lib/rabbitmq/mnesia/`` directory between stopping and starting the
-   RabbitMQ service.
-
-#. If there are no errors, restart the RabbitMQ service on the next controller
-   node.
-
-Since the Liberty release, OpenStack services automatically recover from
-a RabbitMQ outage. You should only consider restarting OpenStack services
-after checking whether RabbitMQ heartbeat functionality is enabled and
-whether OpenStack services are failing to pick up messages from RabbitMQ
-queues.
-
-RabbitMQ alerts
-~~~~~~~~~~~~~~~
-
-If you receive alerts for RabbitMQ, take the following steps to troubleshoot
-and resolve the issue:
-
-#. Determine which servers the RabbitMQ alarms are coming from.
-#. Attempt to boot a nova instance in the affected environment.
-#. If you cannot launch an instance, continue to troubleshoot the issue.
-#. Log in to each of the controller nodes for the affected environment, and
-   check the ``/var/log/rabbitmq`` log files for any reported issues.
-#. Look for connection issues identified in the log files.
-#. For each controller node in your environment, view the ``/etc/init.d``
-   directory to check whether it contains nova*, cinder*, neutron*, or
-   glance* services. Also check for RabbitMQ message queues that are growing
-   without being consumed; these indicate which OpenStack service is
-   affected. Restart the affected OpenStack service.
-#. For each compute node in your environment, view the ``/etc/init.d``
-   directory and check whether it contains nova*, cinder*, neutron*, or
-   glance* services. Also check for RabbitMQ message queues that are growing
-   without being consumed; these indicate which OpenStack services are
-   affected. Restart the affected OpenStack services.
-#. Open OpenStack Dashboard and launch an instance.
-   If the instance launches, the issue is resolved.
-#. If you cannot launch an instance, check the ``/var/log/rabbitmq`` log
-   files for reported connection issues.
-#. Restart the RabbitMQ service on all of the controller nodes:
-
-   .. code-block:: console
-
-      # service rabbitmq-server stop
-      # service rabbitmq-server start
-
-   .. note::
-
-      This step applies if you have already restarted only the OpenStack
-      components, and still cannot connect to the RabbitMQ service.
-
-#. Repeat steps 7-8.
-
-Excessive database management memory consumption
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Since the Liberty release, OpenStack with RabbitMQ 3.4.x or 3.6.x has an issue
-with the management database consuming the memory allocated to RabbitMQ.
-This is caused by statistics collection and processing. When a single node
-with RabbitMQ reaches its memory threshold, all exchange and queue processing
-is halted until the memory alarm recovers.
-
-To address this issue:
-
-#. Check memory consumption:
-
-   .. code-block:: console
-
-      # rabbitmqctl status
-
-#. Edit the ``/etc/rabbitmq/rabbitmq.config`` configuration file, and set
-   the ``collect_statistics_interval`` parameter to a value between 30000
-   and 60000 milliseconds. Alternatively, you can turn off statistics
-   collection by setting the ``collect_statistics`` parameter to ``none``.
-
-File descriptor limits when scaling a cloud environment
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A cloud environment that has been scaled beyond a certain size requires
-the file descriptor limits to be adjusted.
-
-Run the :command:`rabbitmqctl status` command to view the current file
-descriptor limits:
-
-.. code-block:: console
-
-   "{file_descriptors,
-    [{total_limit,3996},
-     {total_used,135},
-     {sockets_limit,3594},
-     {sockets_used,133}]},"
-
-Adjust the appropriate limits in the
-``/etc/security/limits.conf`` configuration file.
diff --git a/doc/ops-guide/source/ops-maintenance-slow.rst b/doc/ops-guide/source/ops-maintenance-slow.rst
deleted file mode 100644
index 8d65a0c22d..0000000000
--- a/doc/ops-guide/source/ops-maintenance-slow.rst
+++ /dev/null
@@ -1,92 +0,0 @@
-=========================================
-What to do when things are running slowly
-=========================================
-
-When you are getting slow responses from various services, it can be
-hard to know where to start looking. The first thing to check is the
-extent of the slowness: is it specific to a single service, or varied
-among different services? If your problem is isolated to a specific
-service, it can temporarily be fixed by restarting the service, but that
-is often only a fix for the symptom and not the actual problem.
-
-This is a collection of ideas from experienced operators on common
-things to look at that may be the cause of slowness. It is not, however,
-designed to be an exhaustive list.
-
-OpenStack Identity service
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If the OpenStack :term:`Identity service` is responding slowly, it could
-be due to the token table getting large. This can be fixed by running
-the :command:`keystone-manage token_flush` command.
-
-Additionally, for Identity-related issues, try the tips
-in :ref:`sql_backend`.
-
-OpenStack Image service
-~~~~~~~~~~~~~~~~~~~~~~~
-
-The OpenStack :term:`Image service` can be slowed down by things related
-to the Identity service, but the Image service itself can be slowed down
-if connectivity to the back-end storage in use is slow or otherwise
-problematic. For example, your back-end NFS server might have gone down.
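-A quick way to separate Identity slowness from back-end trouble is to
-time an Image service API call and then check the back-end mount
-directly. A sketch, assuming a filesystem store that keeps images under
-``/var/lib/glance/images`` (adjust the path to your back end):
-
-.. code-block:: console
-
-   # time openstack image list
-   # df -h /var/lib/glance/images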
-
-OpenStack Block Storage service
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The OpenStack :term:`Block Storage service` is similar to the Image
-service, so start by checking Identity-related services and the back-end
-storage. Additionally, both the Block Storage and Image services rely on
-AMQP and SQL functionality, so consider these when debugging.
-
-OpenStack Compute service
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Services related to OpenStack Compute are normally fairly fast and rely
-on a couple of backend services: Identity for authentication and
-authorization, and AMQP for interoperability. Any slowness in Compute
-is normally related to one of these. Also, as with all other services,
-SQL is used extensively.
-
-OpenStack Networking service
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Slowness in the OpenStack :term:`Networking service` can be caused by
-services that it relies upon, but it can also be related to either
-physical or virtual networking. For example: network namespaces that do
-not exist or are not tied to interfaces correctly; DHCP daemons that
-have hung or are not running; a cable being physically disconnected; a
-switch not being configured correctly. When debugging Networking service
-problems, begin by verifying all physical networking functionality
-(switch configuration, physical cabling, etc.). After the physical
-networking is verified, check to be sure all of the Networking services
-are running (neutron-server, neutron-dhcp-agent, etc.), then check on
-AMQP and SQL back ends.
-
-AMQP broker
-~~~~~~~~~~~
-
-Regardless of which AMQP broker you use, such as RabbitMQ, there are
-common issues which not only slow down operations, but can also cause
-real problems. Sometimes messages queued for services stay on the queues
-and are not consumed. This can be due to dead or stagnant services and
-can commonly be cleared up by either restarting the AMQP-related
-services or the OpenStack service in question.
-
-.. _sql_backend:
-
-SQL back end
-~~~~~~~~~~~~
-
-Whether you use SQLite or an RDBMS (such as MySQL), SQL interoperability
-is essential to a functioning OpenStack environment. A large or
-fragmented SQLite file can cause slowness when using files as a back
-end. A locked or long-running query can cause delays for most RDBMS
-services. In this case, do not kill the query immediately, but look into
-it to see if it is a problem with something that is hung, or something
-that is just taking a long time to run and needs to finish on its own.
-The administration of an RDBMS is outside the scope of this document,
-but it should be noted that a properly functioning RDBMS is essential to
-most OpenStack services.
diff --git a/doc/ops-guide/source/ops-maintenance-storage.rst b/doc/ops-guide/source/ops-maintenance-storage.rst
deleted file mode 100644
index 52e5b31b34..0000000000
--- a/doc/ops-guide/source/ops-maintenance-storage.rst
+++ /dev/null
@@ -1,91 +0,0 @@
-=====================================
-Storage Node Failures and Maintenance
-=====================================
-
-Because of the high redundancy of Object Storage, dealing with object
-storage node issues is a lot easier than dealing with compute node
-issues.
-
-Rebooting a Storage Node
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-If a storage node requires a reboot, simply reboot it. Requests for data
-hosted on that node are redirected to other copies while the server is
-rebooting.
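-After the node comes back up, it is worth confirming that the Object
-Storage services restarted cleanly. A minimal check, assuming the
-standard ``swift-init`` tool is installed on the node:
-
-.. code-block:: console
-
-   # swift-init all status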
-
-Shutting Down a Storage Node
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If you need to shut down a storage node for an extended period of time
-(one or more days), consider removing the node from the storage ring.
-For example:
-
-.. code-block:: console
-
-   # swift-ring-builder account.builder remove <ip address of storage node>
-   # swift-ring-builder container.builder remove <ip address of storage node>
-   # swift-ring-builder object.builder remove <ip address of storage node>
-   # swift-ring-builder account.builder rebalance
-   # swift-ring-builder container.builder rebalance
-   # swift-ring-builder object.builder rebalance
-
-Next, redistribute the ring files to the other nodes:
-
-.. code-block:: console
-
-   # for i in s01.example.com s02.example.com s03.example.com
-   > do
-   > scp *.ring.gz $i:/etc/swift
-   > done
-
-These actions effectively take the storage node out of the storage
-cluster.
-
-When the node is able to rejoin the cluster, just add it back to the
-ring. The exact syntax you use to add a node to your swift cluster with
-``swift-ring-builder`` depends heavily on the options used when you
-originally created your cluster. Please refer back to those commands.
-
-Replacing a Swift Disk
-~~~~~~~~~~~~~~~~~~~~~~
-
-If a hard drive fails in an Object Storage node, replacing it is
-relatively easy. This assumes that your Object Storage environment is
-configured correctly, so that the data stored on the failed drive is
-also replicated to other drives in the Object Storage environment.
-
-This example assumes that ``/dev/sdb`` has failed.
-
-First, unmount the disk:
-
-.. code-block:: console
-
-   # umount /dev/sdb
-
-Next, physically remove the disk from the server and replace it with a
-working disk.
-
-Ensure that the operating system has recognized the new disk:
-
-.. code-block:: console
-
-   # dmesg | tail
-
-You should see a message about ``/dev/sdb``.
-
-Because it is recommended to not use partitions on a swift disk, simply
-format the disk as a whole:
-
-.. code-block:: console
-
-   # mkfs.xfs /dev/sdb
-
-Finally, mount the disk:
-
-.. code-block:: console
-
-   # mount -a
-
-Swift should notice the new disk and that no data exists. It then begins
-replicating the data to the disk from the other existing replicas.
diff --git a/doc/ops-guide/source/ops-maintenance.rst b/doc/ops-guide/source/ops-maintenance.rst
deleted file mode 100644
index aafa314e53..0000000000
--- a/doc/ops-guide/source/ops-maintenance.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-====================================
-Maintenance, Failures, and Debugging
-====================================
-
-.. toctree::
-   :maxdepth: 2
-
-   ops-maintenance-controller.rst
-   ops-maintenance-compute.rst
-   ops-maintenance-storage.rst
-   ops-maintenance-complete.rst
-   ops-maintenance-configuration.rst
-   ops-maintenance-hardware.rst
-   ops-maintenance-database.rst
-   ops-maintenance-rabbitmq.rst
-   ops-maintenance-hdmwy.rst
-   ops-maintenance-determine.rst
-   ops-maintenance-slow.rst
-   ops-uninstall.rst
-
-Downtime, whether planned or unscheduled, is a certainty when running a
-cloud. This chapter aims to provide useful information for dealing
-proactively, or reactively, with these occurrences.
diff --git a/doc/ops-guide/source/ops-monitoring.rst b/doc/ops-guide/source/ops-monitoring.rst
deleted file mode 100644
index f92e396a09..0000000000
--- a/doc/ops-guide/source/ops-monitoring.rst
+++ /dev/null
@@ -1,437 +0,0 @@
-==========
-Monitoring
-==========
-
-There are two types of monitoring: watching for problems and watching
-usage trends.
-The former ensures that all services are up and running, creating a
-functional cloud. The latter involves monitoring resource usage over
-time in order to make informed decisions about potential bottlenecks
-and upgrades.
-
-Process Monitoring
-~~~~~~~~~~~~~~~~~~
-
-A basic type of alert monitoring is simply to check whether a required
-process is running. For example, ensure that the ``nova-api`` service
-is running on the cloud controller:
-
-.. code-block:: console
-
-   # ps aux | grep nova-api
-   nova 12786 0.0 0.0 37952 1312 ? Ss Feb11 0:00 su -s /bin/sh -c exec nova-api
-   --config-file=/etc/nova/nova.conf nova
-   nova 12787 0.0 0.1 135764 57400 ? S Feb11 0:01 /usr/bin/python
-   /usr/bin/nova-api --config-file=/etc/nova/nova.conf
-   nova 12792 0.0 0.0 96052 22856 ? S Feb11 0:01 /usr/bin/python
-   /usr/bin/nova-api --config-file=/etc/nova/nova.conf
-   nova 12793 0.0 0.3 290688 115516 ? S Feb11 1:23 /usr/bin/python
-   /usr/bin/nova-api --config-file=/etc/nova/nova.conf
-   nova 12794 0.0 0.2 248636 77068 ? S Feb11 0:04 /usr/bin/python
-   /usr/bin/nova-api --config-file=/etc/nova/nova.conf
-   root 24121 0.0 0.0 11688 912 pts/5 S+ 13:07 0:00 grep nova-api
-
-The OpenStack processes that should be monitored depend on the specific
-configuration of the environment, but can include:
-
-**Compute service (nova)**
-
-* nova-api
-* nova-scheduler
-* nova-conductor
-* nova-novncproxy
-* nova-compute
-
-**Block Storage service (cinder)**
-
-* cinder-volume
-* cinder-api
-* cinder-scheduler
-
-**Networking service (neutron)**
-
-* neutron-api
-* neutron-server
-* neutron-openvswitch-agent
-* neutron-dhcp-agent
-* neutron-l3-agent
-* neutron-metadata-agent
-
-**Image service (glance)**
-
-* glance-api
-* glance-registry
-
-**Identity service (keystone)**
-
-The keystone processes are run within Apache as WSGI applications.
-
-Resource Alerting
-~~~~~~~~~~~~~~~~~
-
-Resource alerting provides notifications when one or more resources are
-critically low. While the monitoring thresholds should be tuned to your
-specific OpenStack environment, monitoring resource usage is not
-specific to OpenStack at all—any generic type of alert will work
-fine.
-
-Some of the resources that you want to monitor include:
-
-* Disk usage
-* Server load
-* Memory usage
-* Network I/O
-* Available vCPUs
-
-Telemetry Service
-~~~~~~~~~~~~~~~~~
-
-The Telemetry service (:term:`ceilometer`) collects
-metering and event data relating to OpenStack services. Data collected
-by the Telemetry service could be used for billing. Depending on
-deployment configuration, collected data may also be accessible to
-users. The Telemetry service provides a REST API documented at
-`ceilometer V2 Web API `_. You can read more about the module
-in the `OpenStack Administrator Guide `_ or in the `developer
-documentation `_.
-
-OpenStack Specific Resources
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Resources such as memory, disk, and CPU are generic resources that all
-servers (even non-OpenStack servers) have and are important to the
-overall health of the server. When dealing with OpenStack specifically,
-these resources are important for a second reason: ensuring that enough
-are available to launch instances. There are a few ways you can see
-OpenStack resource usage. The first is through the :command:`openstack`
-command:
-
-.. code-block:: console
-
-   # openstack usage list
-
-This command displays a list of how many instances a tenant has running
-and some light usage statistics about the combined instances.
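-The same data is also available per project; the project name here is
-only illustrative:
-
-.. code-block:: console
-
-   # openstack usage show --project demo-project
-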
This -command is useful for a quick overview of your cloud, but it doesn't -really get into a lot of details. - -Next, the ``nova`` database contains three tables that store usage -information. - -The ``nova.quotas`` and ``nova.quota_usages`` tables store quota -information. If a tenant's quota is different from the default quota -settings, its quota is stored in the ``nova.quotas`` table. For example: - -.. code-block:: mysql - - mysql> select project_id, resource, hard_limit from quotas; - +----------------------------------+-----------------------------+------------+ - | project_id | resource | hard_limit | - +----------------------------------+-----------------------------+------------+ - | 628df59f091142399e0689a2696f5baa | metadata_items | 128 | - | 628df59f091142399e0689a2696f5baa | injected_file_content_bytes | 10240 | - | 628df59f091142399e0689a2696f5baa | injected_files | 5 | - | 628df59f091142399e0689a2696f5baa | gigabytes | 1000 | - | 628df59f091142399e0689a2696f5baa | ram | 51200 | - | 628df59f091142399e0689a2696f5baa | floating_ips | 10 | - | 628df59f091142399e0689a2696f5baa | instances | 10 | - | 628df59f091142399e0689a2696f5baa | volumes | 10 | - | 628df59f091142399e0689a2696f5baa | cores | 20 | - +----------------------------------+-----------------------------+------------+ - -The ``nova.quota_usages`` table keeps track of how many resources the -tenant currently has in use: - -.. code-block:: mysql - - mysql> select project_id, resource, in_use from quota_usages where project_id like '628%'; - +----------------------------------+--------------+--------+ - | project_id | resource | in_use | - +----------------------------------+--------------+--------+ - | 628df59f091142399e0689a2696f5baa | instances | 1 | - | 628df59f091142399e0689a2696f5baa | ram | 512 | - | 628df59f091142399e0689a2696f5baa | cores | 1 | - | 628df59f091142399e0689a2696f5baa | floating_ips | 1 | - | 628df59f091142399e0689a2696f5baa | volumes | 2 | - | 628df59f091142399e0689a2696f5baa | gigabytes | 12 | - | 628df59f091142399e0689a2696f5baa | images | 1 | - +----------------------------------+--------------+--------+ - -By comparing a tenant's hard limit with their current resource usage, -you can see their usage percentage. For example, if this tenant is using -1 floating IP out of 10, then they are using 10 percent of their -floating IP quota. Rather than doing the calculation manually, you can -use SQL or the scripting language of your choice and create a formatted -report: - -.. code-block:: mysql - - +----------------------------------+------------+-------------+---------------+ - | some_tenant | - +-----------------------------------+------------+------------+---------------+ - | Resource | Used | Limit | | - +-----------------------------------+------------+------------+---------------+ - | cores | 1 | 20 | 5 % | - | floating_ips | 1 | 10 | 10 % | - | gigabytes | 12 | 1000 | 1 % | - | images | 1 | 4 | 25 % | - | injected_file_content_bytes | 0 | 10240 | 0 % | - | injected_file_path_bytes | 0 | 255 | 0 % | - | injected_files | 0 | 5 | 0 % | - | instances | 1 | 10 | 10 % | - | key_pairs | 0 | 100 | 0 % | - | metadata_items | 0 | 128 | 0 % | - | ram | 512 | 51200 | 1 % | - | reservation_expire | 0 | 86400 | 0 % | - | security_group_rules | 0 | 20 | 0 % | - | security_groups | 0 | 10 | 0 % | - | volumes | 2 | 10 | 20 % | - +-----------------------------------+------------+------------+---------------+ - -The preceding information was generated by using a custom script that -can be found on -`GitHub `_. 
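-As a minimal sketch, the core of such a report can be a single join
-across the two tables shown above; ``percent_used`` is the only derived
-column:
-
-.. code-block:: mysql
-
-   mysql> select u.project_id, u.resource, u.in_use, q.hard_limit,
-          round(u.in_use / q.hard_limit * 100) as percent_used
-          from quota_usages u join quotas q
-          on u.project_id = q.project_id and u.resource = q.resource;
-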
- -.. note:: - - This script is specific to a certain OpenStack installation and must - be modified to fit your environment. However, the logic should - easily be transferable. - -Intelligent Alerting -~~~~~~~~~~~~~~~~~~~~ - -Intelligent alerting can be thought of as a form of continuous -integration for operations. For example, you can easily check to see -whether the Image service is up and running by ensuring that -the ``glance-api`` and ``glance-registry`` processes are running or by -seeing whether ``glance-api`` is responding on port 9292. - -But how can you tell whether images are being successfully uploaded to -the Image service? Maybe the disk that Image service is storing the -images on is full or the S3 back end is down. You could naturally check -this by doing a quick image upload: - -.. code-block:: bash - - #!/bin/bash - # - # assumes that reasonable credentials have been stored at - # /root/auth - - - . /root/openrc - wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img - openstack image create --name='cirros image' --public \ - --container-format=bare --disk-format=qcow2 \ - --file cirros-0.3.5-x86_64-disk.img - -By taking this script and rolling it into an alert for your monitoring -system (such as Nagios), you now have an automated way of ensuring that -image uploads to the Image Catalog are working. - -.. note:: - - You must remove the image after each test. Even better, test whether - you can successfully delete an image from the Image service. - -Intelligent alerting takes considerably more time to plan and implement -than the other alerts described in this chapter. A good outline to -implement intelligent alerting is: - -- Review common actions in your cloud. - -- Create ways to automatically test these actions. - -- Roll these tests into an alerting system. - -Some other examples for Intelligent Alerting include: - -- Can instances launch and be destroyed? - -- Can users be created? - -- Can objects be stored and deleted? - -- Can volumes be created and destroyed? - -Trending -~~~~~~~~ - -Trending can give you great insight into how your cloud is performing -day to day. You can learn, for example, if a busy day was simply a rare -occurrence or if you should start adding new compute nodes. - -Trending takes a slightly different approach than alerting. While -alerting is interested in a binary result (whether a check succeeds or -fails), trending records the current state of something at a certain -point in time. Once enough points in time have been recorded, you can -see how the value has changed over time. - -All of the alert types mentioned earlier can also be used for trend -reporting. Some other trend examples include: - -* The number of instances on each compute node -* The types of flavors in use -* The number of volumes in use -* The number of Object Storage requests each hour -* The number of ``nova-api`` requests each hour -* The I/O statistics of your storage services - -As an example, recording ``nova-api`` usage can allow you to track the -need to scale your cloud controller. By keeping an eye on ``nova-api`` -requests, you can determine whether you need to spawn more ``nova-api`` -processes or go as far as introducing an entirely new server to run -``nova-api``. To get an approximate count of the requests, look for -standard INFO messages in ``/var/log/nova/nova-api.log``: - -.. 
code-block:: console
-
-   # grep INFO /var/log/nova/nova-api.log | wc
-
-You can obtain further statistics by looking for the number of
-successful requests:
-
-.. code-block:: console
-
-   # grep " 200 " /var/log/nova/nova-api.log | wc
-
-By running this command periodically and keeping a record of the result,
-you can create a trending report over time that shows whether your
-``nova-api`` usage is increasing, decreasing, or keeping steady.
-
-A tool such as **collectd** can be used to store this information. While
-collectd is out of the scope of this book, a good starting point would
-be to use collectd to store the result as a COUNTER data type. More
-information can be found in `collectd's documentation `_.
-
-Monitoring Tools
-~~~~~~~~~~~~~~~~
-
-Nagios
-------
-
-Nagios is an open source monitoring service. It is capable of executing
-arbitrary commands to check the status of server and network services,
-remotely executing arbitrary commands directly on servers, and allowing
-servers to push notifications back in the form of passive monitoring.
-Nagios has been around since 1999. Although newer monitoring services
-are available, Nagios is a tried-and-true systems administration
-staple.
-
-You can create automated alerts for critical processes by using Nagios
-and NRPE. For example, to ensure that the ``nova-compute`` process is
-running on the compute nodes, create an alert on your Nagios server:
-
-.. code-block:: none
-
-   define service {
-       host_name c01.example.com
-       check_command check_nrpe_1arg!check_nova-compute
-       use generic-service
-       notification_period 24x7
-       contact_groups sysadmins
-       service_description nova-compute
-   }
-
-On the compute node, create the following NRPE
-configuration:
-
-.. code-block:: ini
-
-   command[check_nova-compute]=/usr/lib/nagios/plugins/check_procs -c 1: -a nova-compute
-
-Nagios checks that at least one ``nova-compute`` service is running at
-all times.
-
-For resource alerting, for example, to monitor disk capacity on a
-compute node with Nagios, add the following to your Nagios
-configuration:
-
-.. code-block:: none
-
-   define service {
-       host_name c01.example.com
-       check_command check_nrpe!check_all_disks!20% 10%
-       use generic-service
-       contact_groups sysadmins
-       service_description Disk
-   }
-
-On the compute node, add the following to your NRPE configuration:
-
-.. code-block:: none
-
-   command[check_all_disks]=/usr/lib/nagios/plugins/check_disk -w $ARG1$ -c $ARG2$ -e
-
-Nagios alerts you with a `WARNING` when any disk on the compute node is 80
-percent full and `CRITICAL` when 90 percent is full.
-
-StackTach
----------
-
-StackTach is a tool that collects and reports the notifications sent by
-nova. Notifications are essentially the same as logs but can be much
-more detailed. Nearly all OpenStack components are capable of generating
-notifications when significant events occur. Notifications are messages
-placed on the OpenStack queue (generally RabbitMQ) for consumption by
-downstream systems. An overview of notifications can be found at `System
-Usage Data `_.
-
-To enable nova to send notifications, add the following to the
-``nova.conf`` configuration file:
-
-.. code-block:: ini
-
-   notification_topics=monitor
-   notification_driver=messagingv2
-
-Once nova is sending notifications, install and configure StackTach.
-StackTach workers for queue consumption and pipeline processing are
-configured to read these notifications from RabbitMQ servers and store
-them in a database.
-Users can inquire about instances, requests, and servers
-by using the browser interface or the command-line tool,
-`Stacky `_. Since StackTach is
-relatively new and constantly changing, installation instructions
-quickly become outdated. Refer to the `StackTach Git
-repository `_ for
-instructions as well as a demonstration video. Additional details on the
-latest developments can be discovered at the `official page `_.
-
-Logstash
---------
-
-Logstash is a high-performance indexing and search engine for logs. Logs
-from Jenkins test runs are sent to logstash where they are indexed and
-stored. Logstash facilitates reviewing logs from multiple sources in a
-single test run, searching for errors or particular events within a test
-run, and searching for log event trends across test runs.
-
-A Logstash setup has four major layers:
-
-* Log Pusher
-* Log Indexer
-* ElasticSearch
-* Kibana
-
-Each layer scales horizontally. As the number of logs grows, you can add
-more log pushers, more Logstash indexers, and more ElasticSearch nodes.
-
-Logpusher is a pair of Python scripts that first listen to Jenkins
-build events and convert them into Gearman jobs. Gearman provides a
-generic application framework to farm out work to other machines or
-processes that are better suited to do the work. It allows you to do
-work in parallel, to load balance processing, and to call functions
-between languages. Later, Logpusher performs Gearman jobs to push log
-files into logstash. The Logstash indexer reads these log events, filters
-them to remove unwanted lines, collapses multiple events together, and
-parses useful information before shipping them to ElasticSearch for
-storage and indexing. Kibana is a Logstash-oriented web client for
-ElasticSearch.
diff --git a/doc/ops-guide/source/ops-network-troubleshooting.rst b/doc/ops-guide/source/ops-network-troubleshooting.rst
deleted file mode 100644
index 2cdf29ce4c..0000000000
--- a/doc/ops-guide/source/ops-network-troubleshooting.rst
+++ /dev/null
@@ -1,1095 +0,0 @@
-=======================
-Network Troubleshooting
-=======================
-
-Network troubleshooting can be challenging. A network issue may cause
-problems at any point in the cloud. Using a logical troubleshooting
-procedure can help mitigate the issue and isolate where the network
-issue is. This chapter aims to give you the information you need to
-identify any issues for ``nova-network`` or OpenStack Networking
-(neutron) with Linux Bridge or Open vSwitch.
-
-Using ip a to Check Interface States
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-On compute nodes and nodes running ``nova-network``, use the following
-command to see information about interfaces, including information about
-IPs, VLANs, and whether your interfaces are up:
-
-.. code-block:: console
-
-   # ip a
-
-If you are encountering any sort of networking difficulty, one good
-initial troubleshooting step is to make sure that your interfaces are
-up. For example:
-
-.. code-block:: console
-
-   $ ip a | grep state
-   1: lo: mtu 16436 qdisc noqueue state UNKNOWN
-   2: eth0: mtu 1500 qdisc pfifo_fast state UP qlen 1000
-   3: eth1: mtu 1500 qdisc pfifo_fast master br100 state UP qlen 1000
-   4: virbr0: mtu 1500 qdisc noqueue state DOWN
-   5: br100: mtu 1500 qdisc noqueue state UP
-
-You can safely ignore the state of ``virbr0``, which is a default bridge
-created by libvirt and not used by OpenStack.
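-If an interface that OpenStack depends on shows ``DOWN`` in this output,
-bringing it up is often the first thing to try; ``eth1`` here is only an
-example:
-
-.. code-block:: console
-
-   # ip link set eth1 up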
- -Visualizing nova-network Traffic in the Cloud -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you are logged in to an instance and ping an external host, for -example, Google, the ping packet takes the route shown in -:ref:`figure_traffic_route`. - -.. _figure_traffic_route: - -.. figure:: figures/osog_1201.png - :alt: Traffic route for ping packet - :width: 100% - - Figure. Traffic route for ping packet - -#. The instance generates a packet and places it on the virtual Network - Interface Card (NIC) inside the instance, such as ``eth0``. - -#. The packet transfers to the virtual NIC of the compute host, such as, - ``vnet1``. You can find out what vnet NIC is being used by looking at - the ``/etc/libvirt/qemu/instance-xxxxxxxx.xml`` file. - -#. From the vnet NIC, the packet transfers to a bridge on the compute - node, such as ``br100``. - - If you run FlatDHCPManager, one bridge is on the compute node. If you - run VlanManager, one bridge exists for each VLAN. - - To see which bridge the packet will use, run the command: - - .. code-block:: console - - $ brctl show - - Look for the vnet NIC. You can also reference ``nova.conf`` and look - for the ``flat_interface_bridge`` option. - -#. The packet transfers to the main NIC of the compute node. You can - also see this NIC in the :command:`brctl` output, or you can find it by - referencing the ``flat_interface`` option in ``nova.conf``. - -#. After the packet is on this NIC, it transfers to the compute node's - default gateway. The packet is now most likely out of your control at - this point. The diagram depicts an external gateway. However, in the - default configuration with multi-host, the compute host is the - gateway. - -Reverse the direction to see the path of a ping reply. From this path, -you can see that a single packet travels across four different NICs. If -a problem occurs with any of these NICs, a network issue occurs. - -Visualizing OpenStack Networking Service Traffic in the Cloud -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -OpenStack Networking has many more degrees of freedom than -``nova-network`` does because of its pluggable back end. It can be -configured with open source or vendor proprietary plug-ins that control -software defined networking (SDN) hardware or plug-ins that use Linux -native facilities on your hosts, such as Open vSwitch or Linux Bridge. - -The networking chapter of the `OpenStack Administrator -Guide `_ -shows a variety of networking scenarios and their connection paths. The -purpose of this section is to give you the tools to troubleshoot the -various components involved however they are plumbed together in your -environment. - -For this example, we will use the Open vSwitch (OVS) back end. Other -back-end plug-ins will have very different flow paths. OVS is the most -popularly deployed network driver, according to the April 2016 -OpenStack User Survey. We'll describe each step in turn, with -:ref:`network_paths` for reference. - -#. The instance generates a packet and places it on the virtual NIC - inside the instance, such as eth0. - -#. The packet transfers to a Test Access Point (TAP) device on the - compute host, such as tap690466bc-92. You can find out what TAP is - being used by looking at the - ``/etc/libvirt/qemu/instance-xxxxxxxx.xml`` file. - - The TAP device name is constructed using the first 11 characters of - the port ID (10 hex digits plus an included '-'), so another means of - finding the device name is to use the :command:`neutron` command. 
This - returns a pipe-delimited list, the first item of which is the port - ID. For example, to get the port ID associated with IP address - 10.0.0.10, do this: - - .. code-block:: console - - # openstack port list | grep 10.0.0.10 | cut -d \| -f 2 - ff387e54-9e54-442b-94a3-aa4481764f1d - - Taking the first 11 characters, we can construct a device name of - tapff387e54-9e from this output. - - .. _network_paths: - - .. figure:: figures/osog_1202.png - :alt: Neutron network paths - :width: 100% - - Figure. Neutron network paths - -#. The TAP device is connected to the integration bridge, ``br-int``. - This bridge connects all the instance TAP devices and any other - bridges on the system. In this example, we have ``int-br-eth1`` and - ``patch-tun``. ``int-br-eth1`` is one half of a veth pair connecting - to the bridge ``br-eth1``, which handles VLAN networks trunked over - the physical Ethernet device ``eth1``. ``patch-tun`` is an Open - vSwitch internal port that connects to the ``br-tun`` bridge for GRE - networks. - - The TAP devices and veth devices are normal Linux network devices and - may be inspected with the usual tools, such as :command:`ip` and - :command:`tcpdump`. Open vSwitch internal devices, such as ``patch-tun``, - are only visible within the Open vSwitch environment. If you try to - run :command:`tcpdump -i patch-tun`, it will raise an error, saying that - the device does not exist. - - It is possible to watch packets on internal interfaces, but it does - take a little bit of networking gymnastics. First you need to create - a dummy network device that normal Linux tools can see. Then you need - to add it to the bridge containing the internal interface you want to - snoop on. Finally, you need to tell Open vSwitch to mirror all - traffic to or from the internal port onto this dummy port. After all - this, you can then run :command:`tcpdump` on the dummy interface and see - the traffic on the internal port. - - **To capture packets from the patch-tun internal interface on integration - bridge, br-int:** - - #. Create and bring up a dummy interface, ``snooper0``: - - .. code-block:: console - - # ip link add name snooper0 type dummy - # ip link set dev snooper0 up - - #. Add device ``snooper0`` to bridge ``br-int``: - - .. code-block:: console - - # ovs-vsctl add-port br-int snooper0 - - #. Create mirror of ``patch-tun`` to ``snooper0`` (returns UUID of - mirror port): - - .. code-block:: console - - # ovs-vsctl -- set Bridge br-int mirrors=@m -- --id=@snooper0 \ - get Port snooper0 -- --id=@patch-tun get Port patch-tun \ - -- --id=@m create Mirror name=mymirror select-dst-port=@patch-tun \ - select-src-port=@patch-tun output-port=@snooper0 select_all=1 - - #. Profit. You can now see traffic on ``patch-tun`` by running - :command:`tcpdump -i snooper0`. - - #. Clean up by clearing all mirrors on ``br-int`` and deleting the dummy - interface: - - .. code-block:: console - - # ovs-vsctl clear Bridge br-int mirrors - # ovs-vsctl del-port br-int snooper0 - # ip link delete dev snooper0 - - On the integration bridge, networks are distinguished using internal - VLANs regardless of how the networking service defines them. This - allows instances on the same host to communicate directly without - transiting the rest of the virtual, or physical, network. These - internal VLAN IDs are based on the order they are created on the node - and may vary between nodes. These IDs are in no way related to the - segmentation IDs used in the network definition and on the physical - wire. 
- - VLAN tags are translated between the external tag defined in the - network settings, and internal tags in several places. On the - ``br-int``, incoming packets from the ``int-br-eth1`` are translated - from external tags to internal tags. Other translations also happen - on the other bridges and will be discussed in those sections. - - **To discover which internal VLAN tag is in use for a given external VLAN - by using the ovs-ofctl command** - - #. Find the external VLAN tag of the network you're interested in. This - is the ``provider:segmentation_id`` as returned by the networking - service: - - .. code-block:: console - - # neutron net-show --fields provider:segmentation_id - +---------------------------+--------------------------------------+ - | Field | Value | - +---------------------------+--------------------------------------+ - | provider:network_type | vlan | - | provider:segmentation_id | 2113 | - +---------------------------+--------------------------------------+ - - #. Grep for the ``provider:segmentation_id``, 2113 in this case, in the - output of :command:`ovs-ofctl dump-flows br-int`: - - .. code-block:: console - - # ovs-ofctl dump-flows br-int | grep vlan=2113 - cookie=0x0, duration=173615.481s, table=0, n_packets=7676140, - n_bytes=444818637, idle_age=0, hard_age=65534, priority=3, - in_port=1,dl_vlan=2113 actions=mod_vlan_vid:7,NORMAL - - Here you can see packets received on port ID 1 with the VLAN tag 2113 - are modified to have the internal VLAN tag 7. Digging a little - deeper, you can confirm that port 1 is in fact ``int-br-eth1``: - - .. code-block:: console - - # ovs-ofctl show br-int - OFPT_FEATURES_REPLY (xid=0x2): dpid:000022bc45e1914b - n_tables:254, n_buffers:256 - capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS - ARP_MATCH_IP - actions: OUTPUT SET_VLAN_VID SET_VLAN_PCP STRIP_VLAN SET_DL_SRC - SET_DL_DST SET_NW_SRC SET_NW_DST SET_NW_TOS SET_TP_SRC - SET_TP_DST ENQUEUE - 1(int-br-eth1): addr:c2:72:74:7f:86:08 - config: 0 - state: 0 - current: 10GB-FD COPPER - speed: 10000 Mbps now, 0 Mbps max - 2(patch-tun): addr:fa:24:73:75:ad:cd - config: 0 - state: 0 - speed: 0 Mbps now, 0 Mbps max - 3(tap9be586e6-79): addr:fe:16:3e:e6:98:56 - config: 0 - state: 0 - current: 10MB-FD COPPER - speed: 10 Mbps now, 0 Mbps max - LOCAL(br-int): addr:22:bc:45:e1:91:4b - config: 0 - state: 0 - speed: 0 Mbps now, 0 Mbps max - OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0 - -#. The next step depends on whether the virtual network is configured to - use 802.1q VLAN tags or GRE: - - #. VLAN-based networks exit the integration bridge via veth interface - ``int-br-eth1`` and arrive on the bridge ``br-eth1`` on the other - member of the veth pair ``phy-br-eth1``. Packets on this interface - arrive with internal VLAN tags and are translated to external tags - in the reverse of the process described above: - - .. code-block:: console - - # ovs-ofctl dump-flows br-eth1 | grep 2113 - cookie=0x0, duration=184168.225s, table=0, n_packets=0, n_bytes=0, - idle_age=65534, hard_age=65534, priority=4,in_port=1,dl_vlan=7 - actions=mod_vlan_vid:2113,NORMAL - - Packets, now tagged with the external VLAN tag, then exit onto the - physical network via ``eth1``. The Layer2 switch this interface is - connected to must be configured to accept traffic with the VLAN ID - used. The next hop for this packet must also be on the same - layer-2 network. - - #. GRE-based networks are passed with ``patch-tun`` to the tunnel - bridge ``br-tun`` on interface ``patch-int``. 
This bridge also - contains one port for each GRE tunnel peer, so one for each - compute node and network node in your network. The ports are named - sequentially from ``gre-1`` onward. - - Matching ``gre-`` interfaces to tunnel endpoints is possible by - looking at the Open vSwitch state: - - .. code-block:: console - - # ovs-vsctl show | grep -A 3 -e Port\ \"gre- - Port "gre-1" - Interface "gre-1" - type: gre - options: {in_key=flow, local_ip="10.10.128.21", - out_key=flow, remote_ip="10.10.128.16"} - - - In this case, ``gre-1`` is a tunnel from IP 10.10.128.21, which - should match a local interface on this node, to IP 10.10.128.16 on - the remote side. - - These tunnels use the regular routing tables on the host to route - the resulting GRE packet, so there is no requirement that GRE - endpoints are all on the same layer-2 network, unlike VLAN - encapsulation. - - All interfaces on the ``br-tun`` are internal to Open vSwitch. To - monitor traffic on them, you need to set up a mirror port as - described above for ``patch-tun`` in the ``br-int`` bridge. - - All translation of GRE tunnels to and from internal VLANs happens - on this bridge. - - **To discover which internal VLAN tag is in use for a GRE tunnel by using - the ovs-ofctl command** - - #. Find the ``provider:segmentation_id`` of the network you're - interested in. This is the same field used for the VLAN ID in - VLAN-based networks: - - .. code-block:: console - - # neutron net-show --fields provider:segmentation_id - +--------------------------+-------+ - | Field | Value | - +--------------------------+-------+ - | provider:network_type | gre | - | provider:segmentation_id | 3 | - +--------------------------+-------+ - - #. Grep for 0x<``provider:segmentation_id``>, 0x3 in this case, in the - output of ``ovs-ofctl dump-flows br-tun``: - - .. code-block:: console - - # ovs-ofctl dump-flows br-tun|grep 0x3 - cookie=0x0, duration=380575.724s, table=2, n_packets=1800, - n_bytes=286104, priority=1,tun_id=0x3 - actions=mod_vlan_vid:1,resubmit(,10) - cookie=0x0, duration=715.529s, table=20, n_packets=5, - n_bytes=830, hard_timeout=300,priority=1, - vlan_tci=0x0001/0x0fff,dl_dst=fa:16:3e:a6:48:24 - actions=load:0->NXM_OF_VLAN_TCI[], - load:0x3->NXM_NX_TUN_ID[],output:53 - cookie=0x0, duration=193729.242s, table=21, n_packets=58761, - n_bytes=2618498, dl_vlan=1 actions=strip_vlan,set_tunnel:0x3, - output:4,output:58,output:56,output:11,output:12,output:47, - output:13,output:48,output:49,output:44,output:43,output:45, - output:46,output:30,output:31,output:29,output:28,output:26, - output:27,output:24,output:25,output:32,output:19,output:21, - output:59,output:60,output:57,output:6,output:5,output:20, - output:18,output:17,output:16,output:15,output:14,output:7, - output:9,output:8,output:53,output:10,output:3,output:2, - output:38,output:37,output:39,output:40,output:34,output:23, - output:36,output:35,output:22,output:42,output:41,output:54, - output:52,output:51,output:50,output:55,output:33 - - Here, you see three flows related to this GRE tunnel. The first is - the translation from inbound packets with this tunnel ID to internal - VLAN ID 1. The second shows a unicast flow to output port 53 for - packets destined for MAC address fa:16:3e:a6:48:24. The third shows - the translation from the internal VLAN representation to the GRE - tunnel ID flooded to all output ports. For further details of the - flow descriptions, see the man page for ``ovs-ofctl``. 
As in the - previous VLAN example, numeric port IDs can be matched with their - named representations by examining the output of ``ovs-ofctl show br-tun``. - -#. The packet is then received on the network node. Note that any - traffic to the l3-agent or dhcp-agent will be visible only within - their network namespace. Watching any interfaces outside those - namespaces, even those that carry the network traffic, will only show - broadcast packets like Address Resolution Protocols (ARPs), but - unicast traffic to the router or DHCP address will not be seen. See - :ref:`dealing_with_network_namespaces` - for detail on how to run commands within these namespaces. - - Alternatively, it is possible to configure VLAN-based networks to use - external routers rather than the l3-agent shown here, so long as the - external router is on the same VLAN: - - #. VLAN-based networks are received as tagged packets on a physical - network interface, ``eth1`` in this example. Just as on the - compute node, this interface is a member of the ``br-eth1`` - bridge. - - #. GRE-based networks will be passed to the tunnel bridge ``br-tun``, - which behaves just like the GRE interfaces on the compute node. - -#. Next, the packets from either input go through the integration - bridge, again just as on the compute node. - -#. The packet then makes it to the l3-agent. This is actually another - TAP device within the router's network namespace. Router namespaces - are named in the form ``qrouter-``. Running :command:`ip a` - within the namespace will show the TAP device name, - qr-e6256f7d-31 in this example: - - .. code-block:: console - - # ip netns exec qrouter-e521f9d0-a1bd-4ff4-bc81-78a60dd88fe5 ip a | grep state - 10: qr-e6256f7d-31: mtu 1500 qdisc noqueue - state UNKNOWN - 11: qg-35916e1f-36: mtu 1500 - qdisc pfifo_fast state UNKNOWN qlen 500 - 28: lo: mtu 16436 qdisc noqueue state UNKNOWN - -#. The ``qg-`` interface in the l3-agent router namespace sends the - packet on to its next hop through device ``eth2`` on the external - bridge ``br-ex``. This bridge is constructed similarly to ``br-eth1`` - and may be inspected in the same way. - -#. This external bridge also includes a physical network interface, - ``eth2`` in this example, which finally lands the packet on the - external network destined for an external router or destination. - -#. DHCP agents running on OpenStack networks run in namespaces similar - to the l3-agents. DHCP namespaces are named ``qdhcp-`` and have - a TAP device on the integration bridge. Debugging of DHCP issues - usually involves working inside this network namespace. - -Finding a Failure in the Path -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Use ping to quickly find where a failure exists in the network path. In -an instance, first see whether you can ping an external host, such as -google.com. If you can, then there shouldn't be a network problem at -all. - -If you can't, try pinging the IP address of the compute node where the -instance is hosted. If you can ping this IP, then the problem is -somewhere between the compute node and that compute node's gateway. - -If you can't ping the IP address of the compute node, the problem is -between the instance and the compute node. This includes the bridge -connecting the compute node's main NIC with the vnet NIC of the -instance. - -One last test is to launch a second instance and see whether the two -instances can ping each other. If they can, the issue might be related -to the firewall on the compute node. 
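-These checks can be scripted so that they are easy to repeat after each
-change. A rough sketch, with purely illustrative addresses for the
-external host, the compute node, and its gateway:
-
-.. code-block:: console
-
-   $ for target in openstack.org 10.0.0.42 10.0.0.1
-   > do
-   >   ping -c 3 $target && echo "$target reachable" || echo "$target FAILED"
-   > done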
-
-tcpdump
-~~~~~~~
-
-One great, although very in-depth, way of troubleshooting network issues
-is to use ``tcpdump``. We recommend using ``tcpdump`` at several points
-along the network path to correlate where a problem might be. If you
-prefer working with a GUI, either live or by using a ``tcpdump``
-capture, check out `Wireshark `_.
-
-For example, run the following command:
-
-.. code-block:: console
-
-   # tcpdump -i any -n -v 'icmp[icmptype] = icmp-echoreply or icmp[icmptype] = icmp-echo'
-
-Run this on the command line in the following locations:
-
-#. An external server outside of the cloud
-
-#. A compute node
-
-#. An instance running on that compute node
-
-In this example, these locations have the following IP addresses:
-
-.. code-block:: console
-
-   Instance
-      10.0.2.24
-      203.0.113.30
-   Compute Node
-      10.0.0.42
-      203.0.113.34
-   External Server
-      1.2.3.4
-
-Next, open a new shell to the instance and then ping the external host
-where ``tcpdump`` is running. If the network path to the external server
-and back is fully functional, you see something like the following:
-
-On the external server:
-
-.. code-block:: console
-
-   12:51:42.020227 IP (tos 0x0, ttl 61, id 0, offset 0, flags [DF],
-       proto ICMP (1), length 84)
-       203.0.113.30 > 1.2.3.4: ICMP echo request, id 24895, seq 1, length 64
-   12:51:42.020255 IP (tos 0x0, ttl 64, id 8137, offset 0, flags [none],
-       proto ICMP (1), length 84)
-       1.2.3.4 > 203.0.113.30: ICMP echo reply, id 24895, seq 1, length 64
-
-On the compute node:
-
-.. code-block:: console
-
-   12:51:42.019519 IP (tos 0x0, ttl 64, id 0, offset 0, flags [DF],
-       proto ICMP (1), length 84)
-       10.0.2.24 > 1.2.3.4: ICMP echo request, id 24895, seq 1, length 64
-   12:51:42.019519 IP (tos 0x0, ttl 64, id 0, offset 0, flags [DF],
-       proto ICMP (1), length 84)
-       10.0.2.24 > 1.2.3.4: ICMP echo request, id 24895, seq 1, length 64
-   12:51:42.019545 IP (tos 0x0, ttl 63, id 0, offset 0, flags [DF],
-       proto ICMP (1), length 84)
-       203.0.113.30 > 1.2.3.4: ICMP echo request, id 24895, seq 1, length 64
-   12:51:42.019780 IP (tos 0x0, ttl 62, id 8137, offset 0, flags [none],
-       proto ICMP (1), length 84)
-       1.2.3.4 > 203.0.113.30: ICMP echo reply, id 24895, seq 1, length 64
-   12:51:42.019801 IP (tos 0x0, ttl 61, id 8137, offset 0, flags [none],
-       proto ICMP (1), length 84)
-       1.2.3.4 > 10.0.2.24: ICMP echo reply, id 24895, seq 1, length 64
-   12:51:42.019807 IP (tos 0x0, ttl 61, id 8137, offset 0, flags [none],
-       proto ICMP (1), length 84)
-       1.2.3.4 > 10.0.2.24: ICMP echo reply, id 24895, seq 1, length 64
-
-On the instance:
-
-.. code-block:: console
-
-   12:51:42.020974 IP (tos 0x0, ttl 61, id 8137, offset 0, flags [none],
-       proto ICMP (1), length 84)
-       1.2.3.4 > 10.0.2.24: ICMP echo reply, id 24895, seq 1, length 64
-
-Here, the external server received the ping request and sent a ping
-reply. On the compute node, you can see that both the ping and ping
-reply successfully passed through. You might also see duplicate packets
-on the compute node, as seen above, because ``tcpdump`` captured the
-packet on both the bridge and the outgoing interface.
-
-iptables
-~~~~~~~~
-
-Through ``nova-network`` or ``neutron``, OpenStack Compute automatically
-manages iptables, including forwarding packets to and from instances on
-a compute node, forwarding floating IP traffic, and managing security
-group rules. In addition to managing the rules, comments (if supported)
-will be inserted in the rules to help indicate the purpose of the rule.
-
-The following comments are added to the rule set as appropriate:
-
-* Perform source NAT on outgoing traffic.
-* Default drop rule for unmatched traffic.
-* Direct traffic from the VM interface to the security group chain.
-* Jump to the VM specific chain.
-* Direct incoming traffic from VM to the security group chain.
-* Allow traffic from defined IP/MAC pairs.
-* Drop traffic without an IP/MAC allow rule.
-* Allow DHCP client traffic.
-* Prevent DHCP Spoofing by VM.
-* Send unmatched traffic to the fallback chain.
-* Drop packets that are not associated with a state.
-* Direct packets associated with a known session to the RETURN chain.
-* Allow IPv6 ICMP traffic to allow RA packets.
-
-Run the following command to view the current iptables configuration:
-
-.. code-block:: console
-
-   # iptables-save
-
-.. note::
-
-   If you modify the configuration, it reverts the next time you
-   restart ``nova-network`` or ``neutron-server``. You must use
-   OpenStack to manage iptables.
-
-Network Configuration in the Database for nova-network
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-With ``nova-network``, the nova database contains a few tables with
-networking information:
-
-``fixed_ips``
-   Contains each possible IP address for the subnet(s) added to
-   Compute. This table is related to the ``instances`` table by way of
-   the ``fixed_ips.instance_uuid`` column.
-
-``floating_ips``
-   Contains each floating IP address that was added to Compute. This
-   table is related to the ``fixed_ips`` table by way of the
-   ``floating_ips.fixed_ip_id`` column.
-
-``instances``
-   Not entirely network specific, but it contains information about the
-   instance that is utilizing the ``fixed_ip`` and optional
-   ``floating_ip``.
-
-From these tables, you can see that a floating IP is technically never
-directly related to an instance; it must always go through a fixed IP.
-
-Manually Disassociating a Floating IP
--------------------------------------
-
-Sometimes an instance is terminated but the floating IP was not
-correctly disassociated from that instance. Because the database is in
-an inconsistent state, the usual tools to disassociate the IP no longer
-work. To fix this, you must manually update the database.
-
-First, find the UUID of the instance in question:
-
-.. code-block:: mysql
-
-   mysql> select uuid from instances where hostname = 'hostname';
-
-Next, find the fixed IP entry for that UUID:
-
-.. code-block:: mysql
-
-   mysql> select * from fixed_ips where instance_uuid = '<uuid>';
-
-You can now get the related floating IP entry:
-
-.. code-block:: mysql
-
-   mysql> select * from floating_ips where fixed_ip_id = '<fixed_ip_id>';
-
-And finally, you can disassociate the floating IP:
-
-.. code-block:: mysql
-
-   mysql> update floating_ips set fixed_ip_id = NULL, host = NULL where
-          fixed_ip_id = '<fixed_ip_id>';
-
-You can optionally also deallocate the IP from the user's pool:
-
-.. code-block:: mysql
-
-   mysql> update floating_ips set project_id = NULL where
-          fixed_ip_id = '<fixed_ip_id>';
-
-Debugging DHCP Issues with nova-network
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-One common networking problem is that an instance boots successfully but
-is not reachable because it failed to obtain an IP address from dnsmasq,
-which is the DHCP server that is launched by the ``nova-network``
-service.
-
-The simplest way to identify that this is the problem with your instance
-is to look at the console output of your instance. If DHCP failed, you
-can retrieve the console log by doing:
-
-..
code-block:: console - - $ openstack console log show - -If your instance failed to obtain an IP through DHCP, some messages -should appear in the console. For example, for the Cirros image, you see -output that looks like the following: - -.. code-block:: console - - udhcpc (v1.17.2) started - Sending discover... - Sending discover... - Sending discover... - No lease, forking to background - starting DHCP forEthernet interface eth0 [ [1;32mOK[0;39m ] - cloud-setup: checking http://169.254.169.254/2009-04-04/meta-data/instance-id - wget: can't connect to remote host (169.254.169.254): Network is - unreachable - -After you establish that the instance booted properly, the task is to -figure out where the failure is. - -A DHCP problem might be caused by a misbehaving dnsmasq process. First, -debug by checking logs and then restart the dnsmasq processes only for -that project (tenant). In VLAN mode, there is a dnsmasq process for each -tenant. Once you have restarted targeted dnsmasq processes, the simplest -way to rule out dnsmasq causes is to kill all of the dnsmasq processes -on the machine and restart ``nova-network``. As a last resort, do this -as root: - -.. code-block:: console - - # killall dnsmasq - # restart nova-network - -.. note:: - - Use ``openstack-nova-network`` on RHEL/CentOS/Fedora but - ``nova-network`` on Ubuntu/Debian. - -Several minutes after ``nova-network`` is restarted, you should see new -dnsmasq processes running: - -.. code-block:: console - - # ps aux | grep dnsmasq - nobody 3735 0.0 0.0 27540 1044 ? S 15:40 0:00 /usr/sbin/dnsmasq --strict-order - --bind-interfaces --conf-file= - --domain=novalocal --pid-file=/var/lib/nova/networks/nova-br100.pid - --listen-address=192.168.100.1 --except-interface=lo - --dhcp-range=set:'novanetwork',192.168.100.2,static,120s - --dhcp-lease-max=256 - --dhcp-hostsfile=/var/lib/nova/networks/nova-br100.conf - --dhcp-script=/usr/bin/nova-dhcpbridge --leasefile-ro - root 3736 0.0 0.0 27512 444 ? S 15:40 0:00 /usr/sbin/dnsmasq --strict-order - --bind-interfaces --conf-file= - --domain=novalocal --pid-file=/var/lib/nova/networks/nova-br100.pid - --listen-address=192.168.100.1 --except-interface=lo - --dhcp-range=set:'novanetwork',192.168.100.2,static,120s - --dhcp-lease-max=256 - --dhcp-hostsfile=/var/lib/nova/networks/nova-br100.conf - --dhcp-script=/usr/bin/nova-dhcpbridge --leasefile-ro - -If your instances are still not able to obtain IP addresses, the next -thing to check is whether dnsmasq is seeing the DHCP requests from the -instance. On the machine that is running the dnsmasq process, which is -the compute host if running in multi-host mode, look at -``/var/log/syslog`` to see the dnsmasq output. If dnsmasq is seeing the -request properly and handing out an IP, the output looks like this: - -.. code-block:: console - - Feb 27 22:01:36 mynode dnsmasq-dhcp[2438]: DHCPDISCOVER(br100) fa:16:3e:56:0b:6f - Feb 27 22:01:36 mynode dnsmasq-dhcp[2438]: DHCPOFFER(br100) 192.168.100.3 - fa:16:3e:56:0b:6f - Feb 27 22:01:36 mynode dnsmasq-dhcp[2438]: DHCPREQUEST(br100) 192.168.100.3 - fa:16:3e:56:0b:6f - Feb 27 22:01:36 mynode dnsmasq-dhcp[2438]: DHCPACK(br100) 192.168.100.3 - fa:16:3e:56:0b:6f test - -If you do not see the ``DHCPDISCOVER``, a problem exists with the packet -getting from the instance to the machine running dnsmasq. 
If you see all
-of the preceding output and your instances are still not able to obtain
-IP addresses, then the packet is able to get from the instance to the
-host running dnsmasq, but it is not able to make the return trip.
-
-You might also see a message such as this:
-
-.. code-block:: console
-
-   Feb 27 22:01:36 mynode dnsmasq-dhcp[25435]: DHCPDISCOVER(br100)
-   fa:16:3e:78:44:84 no address available
-
-This may be a dnsmasq and/or ``nova-network`` related issue. (For the
-preceding example, the problem happened to be that dnsmasq did not have
-any more IP addresses to give away because there were no more fixed IPs
-available in the OpenStack Compute database.)
-
-If there's a suspicious-looking dnsmasq log message, take a look at the
-command-line arguments to the dnsmasq processes to see if they look
-correct:
-
-.. code-block:: console
-
-   $ ps aux | grep dnsmasq
-
-The output looks something like the following:
-
-.. code-block:: console
-
-   108 1695 0.0 0.0 25972 1000 ? S Feb26 0:00 /usr/sbin/dnsmasq
-    -u libvirt-dnsmasq
-    --strict-order --bind-interfaces
-    --pid-file=/var/run/libvirt/network/default.pid --conf-file=
-    --except-interface lo --listen-address 192.168.122.1
-    --dhcp-range 192.168.122.2,192.168.122.254
-    --dhcp-leasefile=/var/lib/libvirt/dnsmasq/default.leases
-    --dhcp-lease-max=253 --dhcp-no-override
-   nobody 2438 0.0 0.0 27540 1096 ? S Feb26 0:00 /usr/sbin/dnsmasq
-    --strict-order --bind-interfaces --conf-file=
-    --domain=novalocal --pid-file=/var/lib/nova/networks/nova-br100.pid
-    --listen-address=192.168.100.1
-    --except-interface=lo
-    --dhcp-range=set:'novanetwork',192.168.100.2,static,120s
-    --dhcp-lease-max=256
-    --dhcp-hostsfile=/var/lib/nova/networks/nova-br100.conf
-    --dhcp-script=/usr/bin/nova-dhcpbridge --leasefile-ro
-   root 2439 0.0 0.0 27512 472 ? S Feb26 0:00 /usr/sbin/dnsmasq --strict-order
-    --bind-interfaces --conf-file=
-    --domain=novalocal --pid-file=/var/lib/nova/networks/nova-br100.pid
-    --listen-address=192.168.100.1
-    --except-interface=lo
-    --dhcp-range=set:'novanetwork',192.168.100.2,static,120s
-    --dhcp-lease-max=256
-    --dhcp-hostsfile=/var/lib/nova/networks/nova-br100.conf
-    --dhcp-script=/usr/bin/nova-dhcpbridge --leasefile-ro
-
-The output shows three different dnsmasq processes. The dnsmasq process
-that has the DHCP subnet range of 192.168.122.0 belongs to libvirt and
-can be ignored. The other two dnsmasq processes belong to
-``nova-network``. The two processes are actually related—one is simply
-the parent process of the other. The arguments of the dnsmasq processes
-should correspond to the details you configured ``nova-network`` with.
-
-If the problem does not seem to be related to dnsmasq itself, at this
-point use ``tcpdump`` on the interfaces to determine where the packets
-are getting lost.
-
-DHCP traffic uses UDP. The client sends from port 68 to port 67 on the
-server. Try to boot a new instance and then systematically listen on the
-NICs until you identify the one that isn't seeing the traffic. To use
-``tcpdump`` to listen to ports 67 and 68 on br100, you would do:
-
-.. code-block:: console
-
-   # tcpdump -i br100 -n port 67 or port 68
-
-You should be doing sanity checks on the interfaces using commands such
-as :command:`ip a` and :command:`brctl show` to ensure that the interfaces are
-actually up and configured the way that you think that they are.
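-
-For example, a minimal sanity check on the bridge used throughout this
-section might look like the following (``br100`` is the bridge name
-used in these examples; substitute your own):
-
-.. code-block:: console
-
-   # ip a show br100
-   # brctl show br100
-
-Confirm that the bridge is up and that the expected member interfaces
-are attached to it before digging deeper with ``tcpdump``.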
- -Debugging DNS Issues -~~~~~~~~~~~~~~~~~~~~ - -If you are able to use :term:`SSH ` to log into an -instance, but it takes a very long time (on the order of a minute) to get -a prompt, then you might have a DNS issue. The reason a DNS issue can cause -this problem is that the SSH server does a reverse DNS lookup on the -IP address that you are connecting from. If DNS lookup isn't working on your -instances, then you must wait for the DNS reverse lookup timeout to occur for -the SSH login process to complete. - -When debugging DNS issues, start by making sure that the host where the -dnsmasq process for that instance runs is able to correctly resolve. If -the host cannot resolve, then the instances won't be able to either. - -A quick way to check whether DNS is working is to resolve a hostname -inside your instance by using the :command:`host` command. If DNS is working, -you should see: - -.. code-block:: console - - $ host openstack.org - openstack.org has address 174.143.194.225 - openstack.org mail is handled by 10 mx1.emailsrvr.com. - openstack.org mail is handled by 20 mx2.emailsrvr.com. - -If you're running the Cirros image, it doesn't have the "host" program -installed, in which case you can use ping to try to access a machine by -hostname to see whether it resolves. If DNS is working, the first line -of ping would be: - -.. code-block:: console - - $ ping openstack.org - PING openstack.org (174.143.194.225): 56 data bytes - -If the instance fails to resolve the hostname, you have a DNS problem. -For example: - -.. code-block:: console - - $ ping openstack.org - ping: bad address 'openstack.org' - -In an OpenStack cloud, the dnsmasq process acts as the DNS server for -the instances in addition to acting as the DHCP server. A misbehaving -dnsmasq process may be the source of DNS-related issues inside the -instance. As mentioned in the previous section, the simplest way to rule -out a misbehaving dnsmasq process is to kill all the dnsmasq processes -on the machine and restart ``nova-network``. However, be aware that this -command affects everyone running instances on this node, including -tenants that have not seen the issue. As a last resort, as root: - -.. code-block:: console - - # killall dnsmasq - # restart nova-network - -After the dnsmasq processes start again, check whether DNS is working. - -If restarting the dnsmasq process doesn't fix the issue, you might need -to use ``tcpdump`` to look at the packets to trace where the failure is. -The DNS server listens on UDP port 53. You should see the DNS request on -the bridge (such as, br100) of your compute node. Let's say you start -listening with ``tcpdump`` on the compute node: - -.. code-block:: console - - # tcpdump -i br100 -n -v udp port 53 - tcpdump: listening on br100, link-type EN10MB (Ethernet), capture size 65535 bytes - -Then, if you use SSH to log into your instance and try ``ping openstack.org``, -you should see something like: - -.. code-block:: console - - 16:36:18.807518 IP (tos 0x0, ttl 64, id 56057, offset 0, flags [DF], - proto UDP (17), length 59) - 192.168.100.4.54244 > 192.168.100.1.53: 2+ A? openstack.org. (31) - 16:36:18.808285 IP (tos 0x0, ttl 64, id 0, offset 0, flags [DF], - proto UDP (17), length 75) - 192.168.100.1.53 > 192.168.100.4.54244: 2 1/0/0 openstack.org. 
A
-       174.143.194.225 (47)
-
-Troubleshooting Open vSwitch
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Open vSwitch, as used in the previous OpenStack Networking examples, is
-a full-featured multilayer virtual switch licensed under the open source
-Apache 2.0 license. Full documentation can be found at `the project's
-website `_. In practice, given the preceding
-configuration, the most common issues are being sure that the required
-bridges (``br-int``, ``br-tun``, and ``br-ex``) exist and have the
-proper ports connected to them.
-
-The Open vSwitch driver should and usually does manage this
-automatically, but it is useful to know how to do this by hand with the
-:command:`ovs-vsctl` command. This command has many more subcommands than we
-will use here; see the man page or use :command:`ovs-vsctl --help` for the
-full listing.
-
-To list the bridges on a system, use :command:`ovs-vsctl list-br`.
-This example shows a compute node that has an internal
-bridge and a tunnel bridge. VLAN networks are trunked through the
-``eth1`` network interface:
-
-.. code-block:: console
-
-   # ovs-vsctl list-br
-   br-int
-   br-tun
-   eth1-br
-
-Working from the physical interface inwards, we can see the chain of
-ports and bridges. First, the bridge ``eth1-br``, which contains the
-physical network interface ``eth1`` and the virtual interface
-``phy-eth1-br``:
-
-.. code-block:: console
-
-   # ovs-vsctl list-ports eth1-br
-   eth1
-   phy-eth1-br
-
-Next, the internal bridge, ``br-int``, contains ``int-eth1-br``, which
-pairs with ``phy-eth1-br`` to connect to the physical network shown in
-the previous bridge; ``patch-tun``, which is used to connect to the GRE
-tunnel bridge; and the TAP devices that connect to the instances
-currently running on the system:
-
-.. code-block:: console
-
-   # ovs-vsctl list-ports br-int
-   int-eth1-br
-   patch-tun
-   tap2d782834-d1
-   tap690466bc-92
-   tap8a864970-2d
-
-The tunnel bridge, ``br-tun``, contains the ``patch-int`` interface and
-``gre-`` interfaces for each peer it connects to via GRE, one for
-each compute and network node in your cluster:
-
-.. code-block:: console
-
-   # ovs-vsctl list-ports br-tun
-   patch-int
-   gre-1
-   .
-   .
-   .
-   gre-
-
-If any of these links are missing or incorrect, it suggests a
-configuration error. Bridges can be added with ``ovs-vsctl add-br``,
-and ports can be added to bridges with
-``ovs-vsctl add-port``. While running these by hand can be useful for
-debugging, it is imperative that manual changes that you intend to keep
-be reflected back into your configuration files.
-
-.. _dealing_with_network_namespaces:
-
-Dealing with Network Namespaces
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Linux network namespaces are a kernel feature the networking service
-uses to support multiple isolated layer-2 networks with overlapping IP
-address ranges. The support may be disabled, but it is on by default. If
-it is enabled in your environment, your network nodes will run their
-dhcp-agents and l3-agents in isolated namespaces. Network interfaces and
-traffic on those interfaces will not be visible in the default
-namespace.
-
-To see whether you are using namespaces, run :command:`ip netns`:
-
-.. code-block:: console
-
-   # ip netns
-   qdhcp-e521f9d0-a1bd-4ff4-bc81-78a60dd88fe5
-   qdhcp-a4d00c60-f005-400e-a24c-1bf8b8308f98
-   qdhcp-fe178706-9942-4600-9224-b2ae7c61db71
-   qdhcp-0a1d0a27-cffa-4de3-92c5-9d3fd3f2e74d
-   qrouter-8a4ce760-ab55-4f2f-8ec5-a2e858ce0d39
-
-L3-agent router namespaces are named ``qrouter-``, and
-dhcp-agent namespaces are named ``qdhcp-``. 
This output
-shows a network node with four networks running dhcp-agents, one of
-which is also running an l3-agent router. It's important to know which
-network you need to be working in. A list of existing networks and their
-UUIDs can be obtained by running ``openstack network list`` with
-administrative credentials.
-
-Once you've determined which namespace you need to work in, you can use
-any of the debugging tools mentioned earlier by prefixing the command with
-``ip netns exec ``. For example, to see what network
-interfaces exist in the first qdhcp namespace returned above, do this:
-
-.. code-block:: console
-
-   # ip netns exec qdhcp-e521f9d0-a1bd-4ff4-bc81-78a60dd88fe5 ip a
-   10: tape6256f7d-31: mtu 1500 qdisc noqueue state UNKNOWN
-       link/ether fa:16:3e:aa:f7:a1 brd ff:ff:ff:ff:ff:ff
-       inet 10.0.1.100/24 brd 10.0.1.255 scope global tape6256f7d-31
-       inet 169.254.169.254/16 brd 169.254.255.255 scope global tape6256f7d-31
-       inet6 fe80::f816:3eff:feaa:f7a1/64 scope link
-       valid_lft forever preferred_lft forever
-   28: lo: mtu 16436 qdisc noqueue state UNKNOWN
-       link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
-       inet 127.0.0.1/8 scope host lo
-       inet6 ::1/128 scope host
-       valid_lft forever preferred_lft forever
-
-From this you see that the DHCP server on that network is using the
-``tape6256f7d-31`` device and has an IP address of ``10.0.1.100``.
-Seeing the address ``169.254.169.254``, you can also see that the
-dhcp-agent is running a metadata-proxy service. Any of the commands
-mentioned previously in this chapter can be run in the same way.
-It is also possible to run a shell, such as ``bash``, and have an
-interactive session within the namespace. In the latter case,
-exiting the shell returns you to the top-level default namespace.
-
-Assign a lost IPv4 address back to a project
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#. Using administrator credentials, confirm the lost IP address is still
-   available:
-
-   .. code-block:: console
-
-      # openstack server list --all-projects | grep 'IP-ADDRESS'
-
-#. Create a port:
-
-   .. code-block:: console
-
-      $ openstack port create --network NETWORK_ID PORT_NAME
-
-#. Update the new port with the IPv4 address:
-
-   .. code-block:: console
-
-      # openstack subnet list
-      # neutron port-update PORT_NAME --request-format=json --fixed-ips \
-        type=dict list=true subnet_id=NETWORK_ID_IPv4_SUBNET_ID \
-        ip_address=IP_ADDRESS subnet_id=NETWORK_ID_IPv6_SUBNET_ID
-      # openstack port show PORT-NAME
-
-Tools for automated neutron diagnosis
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-`easyOVS `_ is a useful tool when it comes
-to operating your OpenvSwitch bridges and iptables on your OpenStack platform.
-It automatically associates the virtual ports with the VM MAC/IP, VLAN tag
-and namespace information, as well as the iptables rules for VMs.
-
-`Don `_ is another convenient network
-analysis and diagnostic system that provides a completely automated service
-for verifying and diagnosing the networking functionality provided by OVS.
-
-Additionally, you can refer to
-`neutron debug `_
-for more options.
diff --git a/doc/ops-guide/source/ops-planning.rst b/doc/ops-guide/source/ops-planning.rst deleted file mode 100644 index 239d1a64bb..0000000000 --- a/doc/ops-guide/source/ops-planning.rst +++ /dev/null @@ -1,252 +0,0 @@ -================================================= -Planning for deploying and provisioning OpenStack -================================================= - -The decisions you make with respect to provisioning and deployment will -affect your maintenance of the cloud. Your configuration management will be -able to evolve over time. However, more thought and design need to be done -for upfront choices about deployment, disk partitioning, and network -configuration. - -A critical part of a cloud's scalability is the amount of effort that it -takes to run your cloud. To minimize the operational cost of running -your cloud, set up and use an automated deployment and configuration -infrastructure with a configuration management system, such as :term:`Puppet` -or :term:`Chef`. Combined, these systems greatly reduce manual effort and the -chance for operator error. - -This infrastructure includes systems to automatically install the -operating system's initial configuration and later coordinate the -configuration of all services automatically and centrally, which reduces -both manual effort and the chance for error. Examples include Ansible, -CFEngine, Chef, Puppet, and Salt. You can even use OpenStack to deploy -OpenStack, named TripleO (OpenStack On OpenStack). - -Automated deployment -~~~~~~~~~~~~~~~~~~~~ - -An automated deployment system installs and configures operating systems -on new servers, without intervention, after the absolute minimum amount -of manual work, including physical racking, MAC-to-IP assignment, and -power configuration. Typically, solutions rely on wrappers around PXE -boot and TFTP servers for the basic operating system install and then -hand off to an automated configuration management system. - -Both Ubuntu and Red Hat Enterprise Linux include mechanisms for -configuring the operating system, including preseed and kickstart, that -you can use after a network boot. Typically, these are used to bootstrap -an automated configuration system. Alternatively, you can use an -image-based approach for deploying the operating system, such as -systemimager. You can use both approaches with a virtualized -infrastructure, such as when you run VMs to separate your control -services and physical infrastructure. - -When you create a deployment plan, focus on a few vital areas because -they are very hard to modify post deployment. The next two sections talk -about configurations for: - -- Disk partitioning and disk array setup for scalability - -- Networking configuration just for PXE booting - -Disk partitioning and RAID --------------------------- - -At the very base of any operating system are the hard drives on which -the operating system (OS) is installed. - -You must complete the following configurations on the server's hard -drives: - -- Partitioning, which provides greater flexibility for layout of - operating system and swap space, as described below. - -- Adding to a RAID array (RAID stands for redundant array of - independent disks), based on the number of disks you have available, - so that you can add capacity as your cloud grows. Some options are - described in more detail below. 
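-
-Before choosing a layout, it can help to inventory the disks and any
-existing software RAID on a candidate server. As a rough sketch (these
-are generic Linux commands, not specific to OpenStack):
-
-.. code-block:: console
-
-   # lsblk
-   # cat /proc/mdstat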
-
-The simplest option to get started is to use one hard drive with two
-partitions:
-
-- File system to store files and directories, where all the data lives,
-  including the root partition that starts and runs the system.
-
-- Swap space to free up memory for processes, as an independent area of
-  the physical disk used only for swapping and nothing else.
-
-RAID is not used in this simplistic one-drive setup because generally
-for production clouds, you want to ensure that if one disk fails,
-another can take its place. Instead, for production, use more than one
-disk. The number of disks determines what types of RAID arrays to build.
-
-We recommend that you choose one of the following multiple disk options:
-
-Option 1
-   Partition all drives in the same way in a horizontal fashion, as
-   shown in :ref:`partition_setup`.
-
-   With this option, you can assign different partitions to different
-   RAID arrays. You can allocate partition 1 of disk one and two to the
-   ``/boot`` partition mirror. You can make partition 2 of all disks
-   the root partition mirror. You can use partition 3 of all disks for
-   a ``cinder-volumes`` LVM partition running on a RAID 10 array.
-
-   .. _partition_setup:
-
-   .. figure:: figures/osog_0201.png
-
-      Partition setup of drives
-
-   While you might end up with unused partitions, such as partition 1
-   in disk three and four of this example, this option allows for
-   maximum utilization of disk space. I/O performance might be an issue
-   as a result of all disks being used for all tasks.
-
-Option 2
-   Add all raw disks to one large RAID array, either hardware or
-   software based. You can partition this large array with the boot,
-   root, swap, and LVM areas. This option is simple to implement and
-   uses all partitions. However, disk I/O might suffer.
-
-Option 3
-   Dedicate entire disks to certain partitions. For example, you could
-   allocate disk one and two entirely to the boot, root, and swap
-   partitions under a RAID 1 mirror. Then, allocate disk three and four
-   entirely to the LVM partition, also under a RAID 1 mirror. Disk I/O
-   should be better because I/O is focused on dedicated tasks. However,
-   the LVM partition is much smaller.
-
-.. tip::
-
-   You may find that you can automate the partitioning itself. For
-   example, MIT uses `Fully Automatic Installation
-   (FAI) `_ to do the initial PXE-based
-   partition and then install using a combination of min/max and
-   percentage-based partitioning.
-
-As with most architecture choices, the right answer depends on your
-environment. If you are using existing hardware, you know the disk
-density of your servers and can determine some decisions based on the
-options above. If you are going through a procurement process, your
-users' requirements also help you determine hardware purchases. Here are
-some examples from a private cloud providing web developers custom
-environments at AT&T. This example is from a specific deployment, so
-your existing hardware or procurement opportunity may vary from this.
-AT&T uses three types of hardware in its deployment:
-
-- Hardware for controller nodes, used for all stateless OpenStack API
-  services. About 32–64 GB memory, small attached disk, one processor,
-  varied number of cores, such as 6–12.
-
-- Hardware for compute nodes. Typically 256 or 144 GB memory, two
-  processors, 24 cores. 4–6 TB direct attached storage, typically in a
-  RAID 5 configuration.
-
-- Hardware for storage nodes. 
Typically for these, the disk space is - optimized for the lowest cost per GB of storage while maintaining - rack-space efficiency. - -Again, the right answer depends on your environment. You have to make -your decision based on the trade-offs between space utilization, -simplicity, and I/O performance. - -Network configuration ---------------------- - -.. TODO Reference to networking sections in the following paragraph. - -Network configuration is a very large topic that spans multiple areas of -this book. For now, make sure that your servers can PXE boot and -successfully communicate with the deployment server. - -For example, you usually cannot configure NICs for VLANs when PXE -booting. Additionally, you usually cannot PXE boot with bonded NICs. If -you run into this scenario, consider using a simple 1 GB switch in a -private network on which only your cloud communicates. - -Automated configuration -~~~~~~~~~~~~~~~~~~~~~~~ - -The purpose of automatic configuration management is to establish and -maintain the consistency of a system without using human intervention. -You want to maintain consistency in your deployments so that you can -have the same cloud every time, repeatably. Proper use of automatic -configuration-management tools ensures that components of the cloud -systems are in particular states, in addition to simplifying deployment, -and configuration change propagation. - -These tools also make it possible to test and roll back changes, as they -are fully repeatable. Conveniently, a large body of work has been done -by the OpenStack community in this space. Puppet, a configuration -management tool, even provides official modules for OpenStack projects -in an OpenStack infrastructure system known as `Puppet -OpenStack `_. Chef -configuration management is provided within -https://git.openstack.org/cgit/openstack/openstack-chef-repo. Additional -configuration management systems include Juju, Ansible, and Salt. Also, -PackStack is a command-line utility for Red Hat Enterprise Linux and -derivatives that uses Puppet modules to support rapid deployment of -OpenStack on existing servers over an SSH connection. - -An integral part of a configuration-management system is the item that -it controls. You should carefully consider all of the items that you -want, or do not want, to be automatically managed. For example, you may -not want to automatically format hard drives with user data. - -Remote management -~~~~~~~~~~~~~~~~~ - -In our experience, most operators don't sit right next to the servers -running the cloud, and many don't necessarily enjoy visiting the data -center. OpenStack should be entirely remotely configurable, but -sometimes not everything goes according to plan. - -In this instance, having an out-of-band access into nodes running -OpenStack components is a boon. The IPMI protocol is the de facto -standard here, and acquiring hardware that supports it is highly -recommended to achieve that lights-out data center aim. - -In addition, consider remote power control as well. While IPMI usually -controls the server's power state, having remote access to the PDU that -the server is plugged into can really be useful for situations when -everything seems wedged. - -Other considerations -~~~~~~~~~~~~~~~~~~~~ - -.. TODO In the first paragraph, reference to use case sections. - -You can save time by understanding the use cases for the cloud you want -to create. Use cases for OpenStack are varied. 
Some include object
-storage only; others require preconfigured compute resources to speed
-development-environment setup; and others need fast provisioning of
-compute resources that are already secured per tenant with private
-networks. Your users may need highly redundant servers to make
-sure their legacy applications continue to run. Perhaps a goal would be
-to architect these legacy applications so that they run on multiple
-instances in a cloudy, fault-tolerant way, but not make it a goal to add
-to those clusters over time. Your users may indicate that they need
-scaling considerations because of heavy Windows server use.
-
-You can save resources by looking at the best fit for the hardware you
-have in place already. You might have some high-density storage hardware
-available. You could format and repurpose those servers for OpenStack
-Object Storage. All of these considerations and input from users help
-you build your use case and your deployment plan.
-
-.. tip::
-
-   For further research about OpenStack deployment, investigate the
-   supported and documented preconfigured, prepackaged installers for
-   OpenStack from companies such as
-   `Canonical `_,
-   `Cisco `_,
-   `Cloudscaling `_,
-   `IBM `_,
-   `Metacloud `_,
-   `Mirantis `_,
-   `Rackspace `_,
-   `Red Hat `_,
-   `SUSE `_,
-   and `SwiftStack `_.
diff --git a/doc/ops-guide/source/ops-projects-users-summary.rst b/doc/ops-guide/source/ops-projects-users-summary.rst
deleted file mode 100644
index 8c142ca605..0000000000
--- a/doc/ops-guide/source/ops-projects-users-summary.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-=======
-Summary
-=======
-
-One key element of systems administration that is often overlooked is
-that end users are the reason systems administrators exist. Don't go the
-BOFH route and terminate every user who causes an alert to go off. Work
-with users to understand what they're trying to accomplish and see how
-your environment can better assist them in achieving their goals. Meet
-your users' needs by organizing your users into projects, applying
-policies, managing quotas, and working with them.
diff --git a/doc/ops-guide/source/ops-projects-users.rst b/doc/ops-guide/source/ops-projects-users.rst
deleted file mode 100644
index 25c7aa7de1..0000000000
--- a/doc/ops-guide/source/ops-projects-users.rst
+++ /dev/null
@@ -1,33 +0,0 @@
-===========================
-Managing Projects and Users
-===========================
-
-.. toctree::
-
-   ops-projects.rst
-   ops-quotas.rst
-   ops-users.rst
-   ops-projects-users-summary.rst
-
-An OpenStack cloud does not have much value without users. This chapter
-covers topics that relate to managing users, projects, and quotas. This
-chapter describes users and projects as defined by version 2 of the
-OpenStack Identity API.
-
-Projects or Tenants?
-~~~~~~~~~~~~~~~~~~~~
-
-In OpenStack user interfaces and documentation, a group of users is
-referred to as a :term:`project` or :term:`tenant`.
-These terms are interchangeable.
-
-The initial implementation of OpenStack Compute had its own
-authentication system and used the term ``project``. When authentication
-moved into the OpenStack Identity (keystone) project, it used the term
-``tenant`` to refer to a group of users. Because of this legacy, some of
-the OpenStack tools refer to projects and some refer to tenants.
-
-.. tip::
-
-   This guide uses the term ``project``, unless an example shows
-   interaction with a tool that uses the term ``tenant``.
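-
-For example, the current ``openstack`` command-line client follows the
-``project`` convention throughout, as a quick listing shows:
-
-.. code-block:: console
-
-   $ openstack project list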
diff --git a/doc/ops-guide/source/ops-projects.rst b/doc/ops-guide/source/ops-projects.rst deleted file mode 100644 index 600a584626..0000000000 --- a/doc/ops-guide/source/ops-projects.rst +++ /dev/null @@ -1,44 +0,0 @@ -================= -Managing Projects -================= - -Users must be associated with at least one project, though they may -belong to many. Therefore, you should add at least one project before -adding users. - -Adding Projects -~~~~~~~~~~~~~~~ - -To create a project through the OpenStack dashboard: - -#. Log in as an administrative user. - -#. Select the :guilabel:`Identity` tab in the left navigation bar. - -#. Under Identity tab, click :guilabel:`Projects`. - -#. Click the :guilabel:`Create Project` button. - -You are prompted for a project name and an optional, but recommended, -description. Select the check box at the bottom of the form to enable -this project. By default, it is enabled, as shown below: - -.. figure:: figures/create_project.png - :alt: Create Project form - -It is also possible to add project members and adjust the project -quotas. We'll discuss those actions later, but in practice, it can be -quite convenient to deal with all these operations at one time. - -To add a project through the command line, you must use the OpenStack -command line client. - -.. code-block:: console - - # openstack project create demo --domain default - -This command creates a project named ``demo``. Optionally, you can add a -description string by appending ``--description PROJECT_DESCRIPTION``, -which can be very useful. You can also -create a project in a disabled state by appending ``--disable`` to the -command. By default, projects are created in an enabled state. diff --git a/doc/ops-guide/source/ops-quotas.rst b/doc/ops-guide/source/ops-quotas.rst deleted file mode 100644 index 5bee3a1423..0000000000 --- a/doc/ops-guide/source/ops-quotas.rst +++ /dev/null @@ -1,451 +0,0 @@ -====== -Quotas -====== - -To prevent system capacities from being exhausted without notification, -you can set up :term:`quotas `. Quotas are operational limits. For example, -the number of gigabytes allowed per tenant can be controlled to ensure that -a single tenant cannot consume all of the disk space. Quotas are -currently enforced at the tenant (or project) level, rather than the -user level. - -.. warning:: - - Because without sensible quotas a single tenant could use up all the - available resources, default quotas are shipped with OpenStack. You - should pay attention to which quota settings make sense for your - hardware capabilities. - -Using the command-line interface, you can manage quotas for the -OpenStack Compute service and the Block Storage service. - -Typically, default values are changed because a tenant requires more -than the OpenStack default of 10 volumes per tenant, or more than the -OpenStack default of 1 TB of disk space on a compute node. - -.. note:: - - To view all tenants, run: - - .. code-block:: console - - $ openstack project list - +---------------------------------+----------+ - | ID | Name | - +---------------------------------+----------+ - | a981642d22c94e159a4a6540f70f9f8 | admin | - | 934b662357674c7b9f5e4ec6ded4d0e | tenant01 | - | 7bc1dbfd7d284ec4a856ea1eb82dca8 | tenant02 | - | 9c554aaef7804ba49e1b21cbd97d218 | services | - +---------------------------------+----------+ - -Set Image Quotas -~~~~~~~~~~~~~~~~ - -You can restrict a project's image storage by total number of bytes. 
-Currently, this quota is applied cloud-wide, so if you were to set an -Image quota limit of 5 GB, then all projects in your cloud will be able -to store only 5 GB of images and snapshots. - -To enable this feature, edit the ``/etc/glance/glance-api.conf`` file, -and under the ``[DEFAULT]`` section, add: - -.. code-block:: ini - - user_storage_quota = - -For example, to restrict a project's image storage to 5 GB, do this: - -.. code-block:: ini - - user_storage_quota = 5368709120 - -.. note:: - - There is a configuration option in ``/etc/glance/glance-api.conf`` that limits - the number of members allowed per image, called - ``image_member_quota``, set to 128 by default. That setting is a - different quota from the storage quota. - -Set Compute Service Quotas -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -As an administrative user, you can update the Compute service quotas for -an existing tenant, as well as update the quota defaults for a new -tenant. See :ref:`table_compute_quota`. - -.. _table_compute_quota: - -.. list-table:: Compute quota descriptions - :widths: 30 40 30 - :header-rows: 1 - - * - Quota - - Description - - Property name - * - Fixed IPs - - Number of fixed IP addresses allowed per project. - This number must be equal to or greater than the number - of allowed instances. - - ``fixed-ips`` - * - Floating IPs - - Number of floating IP addresses allowed per project. - - ``floating-ips`` - * - Injected file content bytes - - Number of content bytes allowed per injected file. - - ``injected-file-content-bytes`` - * - Injected file path bytes - - Number of bytes allowed per injected file path. - - ``injected-file-path-bytes`` - * - Injected files - - Number of injected files allowed per project. - - ``injected-files`` - * - Instances - - Number of instances allowed per project. - - ``instances`` - * - Key pairs - - Number of key pairs allowed per user. - - ``key-pairs`` - * - Metadata items - - Number of metadata items allowed per instance. - - ``metadata-items`` - * - RAM - - Megabytes of instance RAM allowed per project. - - ``ram`` - * - Security group rules - - Number of security group rules per project. - - ``security-group-rules`` - * - Security groups - - Number of security groups per project. - - ``security-groups`` - * - VCPUs - - Number of instance cores allowed per project. - - ``cores`` - * - Server Groups - - Number of server groups per project. - - ``server_groups`` - * - Server Group Members - - Number of servers per server group. - - ``server_group_members`` - -View and update compute quotas for a tenant (project) ------------------------------------------------------ - -As an administrative user, you can use the :command:`nova quota-*` -commands, which are provided by the -``python-novaclient`` package, to view and update tenant quotas. - -**To view and update default quota values** - -#. List all default quotas for all tenants, as follows: - - .. code-block:: console - - $ nova quota-defaults - - For example: - - .. code-block:: console - - $ nova quota-defaults - +-----------------------------+-------+ - | Quota | Limit | - +-----------------------------+-------+ - | instances | 10 | - | cores | 20 | - | ram | 51200 | - | floating_ips | 10 | - | fixed_ips | -1 | - | metadata_items | 128 | - | injected_files | 5 | - | injected_file_content_bytes | 10240 | - | injected_file_path_bytes | 255 | - | key_pairs | 100 | - | security_groups | 10 | - | security_group_rules | 20 | - | server_groups | 10 | - | server_group_members | 10 | - +-----------------------------+-------+ - -#. 
Update a default value for a new tenant, as follows: - - .. code-block:: console - - $ nova quota-class-update default key value - - For example: - - .. code-block:: console - - $ nova quota-class-update default --instances 15 - -**To view quota values for a tenant (project)** - -#. Place the tenant ID in a variable: - - .. code-block:: console - - $ tenant=$(openstack project list | awk '/tenantName/ {print $2}') - -#. List the currently set quota values for a tenant, as follows: - - .. code-block:: console - - $ nova quota-show --tenant $tenant - - For example: - - .. code-block:: console - - $ nova quota-show --tenant $tenant - +-----------------------------+-------+ - | Quota | Limit | - +-----------------------------+-------+ - | instances | 10 | - | cores | 20 | - | ram | 51200 | - | floating_ips | 10 | - | fixed_ips | -1 | - | metadata_items | 128 | - | injected_files | 5 | - | injected_file_content_bytes | 10240 | - | injected_file_path_bytes | 255 | - | key_pairs | 100 | - | security_groups | 10 | - | security_group_rules | 20 | - | server_groups | 10 | - | server_group_members | 10 | - +-----------------------------+-------+ - -**To update quota values for a tenant (project)** - -#. Obtain the tenant ID, as follows: - - .. code-block:: console - - $ tenant=$(openstack project list | awk '/tenantName/ {print $2}') - -#. Update a particular quota value, as follows: - - .. code-block:: console - - # nova quota-update --quotaName quotaValue tenantID - - For example: - - .. code-block:: console - - # nova quota-update --floating-ips 20 $tenant - # nova quota-show --tenant $tenant - +-----------------------------+-------+ - | Quota | Limit | - +-----------------------------+-------+ - | instances | 10 | - | cores | 20 | - | ram | 51200 | - | floating_ips | 20 | - | fixed_ips | -1 | - | metadata_items | 128 | - | injected_files | 5 | - | injected_file_content_bytes | 10240 | - | injected_file_path_bytes | 255 | - | key_pairs | 100 | - | security_groups | 10 | - | security_group_rules | 20 | - | server_groups | 10 | - | server_group_members | 10 | - +-----------------------------+-------+ - - .. note:: - - To view a list of options for the ``nova quota-update`` command, run: - - .. code-block:: console - - $ nova help quota-update - -Set Object Storage Quotas -~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are currently two categories of quotas for Object Storage: - -Container quotas - Limit the total size (in bytes) or number of objects that can be - stored in a single container. - -Account quotas - Limit the total size (in bytes) that a user has available in the - Object Storage service. - -To take advantage of either container quotas or account quotas, your -Object Storage proxy server must have ``container_quotas`` or -``account_quotas`` (or both) added to the ``[pipeline:main]`` pipeline. -Each quota type also requires its own section in the -``proxy-server.conf`` file: - -.. code-block:: ini - - [pipeline:main] - pipeline = catch_errors [...] slo dlo account_quotas proxy-server - - [filter:account_quotas] - use = egg:swift#account_quotas - - [filter:container_quotas] - use = egg:swift#container_quotas - -To view and update Object Storage quotas, use the :command:`swift` command -provided by the ``python-swiftclient`` package. Any user included in the -project can view the quotas placed on their project. To update Object -Storage quotas on a project, you must have the role of ResellerAdmin in -the project that the quota is being applied to. - -To view account quotas placed on a project: - -.. 
code-block:: console - - $ swift stat - Account: AUTH_b36ed2d326034beba0a9dd1fb19b70f9 - Containers: 0 - Objects: 0 - Bytes: 0 - Meta Quota-Bytes: 214748364800 - X-Timestamp: 1351050521.29419 - Content-Type: text/plain; charset=utf-8 - Accept-Ranges: bytes - -To apply or update account quotas on a project: - -.. code-block:: console - - $ swift post -m quota-bytes: - - -For example, to place a 5 GB quota on an account: - -.. code-block:: console - - $ swift post -m quota-bytes: - 5368709120 - -To verify the quota, run the :command:`swift stat` command again: - -.. code-block:: console - - $ swift stat - Account: AUTH_b36ed2d326034beba0a9dd1fb19b70f9 - Containers: 0 - Objects: 0 - Bytes: 0 - Meta Quota-Bytes: 5368709120 - X-Timestamp: 1351541410.38328 - Content-Type: text/plain; charset=utf-8 - Accept-Ranges: bytes - -Set Block Storage Quotas -~~~~~~~~~~~~~~~~~~~~~~~~ - -As an administrative user, you can update the Block Storage service -quotas for a tenant, as well as update the quota defaults for a new -tenant. See :ref:`table_block_storage_quota`. - -.. _table_block_storage_quota: - -.. list-table:: Table: Block Storage quota descriptions - :widths: 50 50 - :header-rows: 1 - - * - Property name - - Description - * - gigabytes - - Number of volume gigabytes allowed per tenant - * - snapshots - - Number of Block Storage snapshots allowed per tenant. - * - volumes - - Number of Block Storage volumes allowed per tenant - -View and update Block Storage quotas for a tenant (project) ------------------------------------------------------------ - -As an administrative user, you can use the :command:`cinder quota-*` -commands, which are provided by the -``python-cinderclient`` package, to view and update tenant quotas. - -**To view and update default Block Storage quota values** - -#. List all default quotas for all tenants, as follows: - - .. code-block:: console - - $ cinder quota-defaults tenantID - -#. Obtain the tenant ID, as follows: - - .. code-block:: console - - $ tenant=$(openstack project list | awk '/tenantName/ {print $2}') - - For example: - - .. code-block:: console - - $ cinder quota-defaults $tenant - +-----------+-------+ - | Property | Value | - +-----------+-------+ - | gigabytes | 1000 | - | snapshots | 10 | - | volumes | 10 | - +-----------+-------+ - -#. To update a default value for a new tenant, update the property in the - ``/etc/cinder/cinder.conf`` file. - -**To view Block Storage quotas for a tenant (project)** - -#. View quotas for the tenant, as follows: - - .. code-block:: console - - # cinder quota-show tenantID - - For example: - - .. code-block:: console - - # cinder quota-show $tenant - +-----------+-------+ - | Property | Value | - +-----------+-------+ - | gigabytes | 1000 | - | snapshots | 10 | - | volumes | 10 | - +-----------+-------+ - -**To update Block Storage quotas for a tenant (project)** - -#. Place the tenant ID in a variable: - - .. code-block:: console - - $ tenant=$(openstack project list | awk '/tenantName/ {print $2}') - -#. Update a particular quota value, as follows: - - .. code-block:: console - - # cinder quota-update --quotaName NewValue tenantID - - For example: - - .. 
code-block:: console - - # cinder quota-update --volumes 15 $tenant - # cinder quota-show $tenant - +-----------+-------+ - | Property | Value | - +-----------+-------+ - | gigabytes | 1000 | - | snapshots | 10 | - | volumes | 15 | - +-----------+-------+ diff --git a/doc/ops-guide/source/ops-uninstall.rst b/doc/ops-guide/source/ops-uninstall.rst deleted file mode 100644 index 792023cf6d..0000000000 --- a/doc/ops-guide/source/ops-uninstall.rst +++ /dev/null @@ -1,18 +0,0 @@ -============ -Uninstalling -============ - -While we'd always recommend using your automated deployment system to -reinstall systems from scratch, sometimes you do need to remove -OpenStack from a system the hard way. Here's how: - -* Remove all packages. -* Remove remaining files. -* Remove databases. - -These steps depend on your underlying distribution, but in general you -should be looking for :command:`purge` commands in your package manager, like -:command:`aptitude purge ~c $package`. Following this, you can look for -orphaned files in the directories referenced throughout this guide. To -uninstall the database properly, refer to the manual appropriate for the -product in use. diff --git a/doc/ops-guide/source/ops-upgrades.rst b/doc/ops-guide/source/ops-upgrades.rst deleted file mode 100644 index 0fbb033cfd..0000000000 --- a/doc/ops-guide/source/ops-upgrades.rst +++ /dev/null @@ -1,553 +0,0 @@ -======== -Upgrades -======== - -With the exception of Object Storage, upgrading from one version of -OpenStack to another can take a great deal of effort. This chapter -provides some guidance on the operational aspects that you should -consider for performing an upgrade for an OpenStack environment. - -Pre-upgrade considerations -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Upgrade planning ----------------- - -- Thoroughly review the `release - notes `_ to learn - about new, updated, and deprecated features. Find incompatibilities - between versions. - -- Consider the impact of an upgrade to users. The upgrade process - interrupts management of your environment including the dashboard. If - you properly prepare for the upgrade, existing instances, networking, - and storage should continue to operate. However, instances might - experience intermittent network interruptions. - -- Consider the approach to upgrading your environment. You can perform - an upgrade with operational instances, but this is a dangerous - approach. You might consider using live migration to temporarily - relocate instances to other compute nodes while performing upgrades. - However, you must ensure database consistency throughout the process; - otherwise your environment might become unstable. Also, don't forget - to provide sufficient notice to your users, including giving them - plenty of time to perform their own backups. - -- Consider adopting structure and options from the service - configuration files and merging them with existing configuration - files. The `OpenStack Configuration - Reference `_ - contains new, updated, and deprecated options for most services. - -- Like all major system upgrades, your upgrade could fail for one or - more reasons. You can prepare for this situation by having the - ability to roll back your environment to the previous release, - including databases, configuration files, and packages. We provide an - example process for rolling back your environment in - :ref:`rolling_back_a_failed_upgrade`. - -- Develop an upgrade procedure and assess it thoroughly by using a test - environment similar to your production environment. 
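-
-To act on the configuration-merge consideration above, one lightweight
-approach is to diff your running configuration against the new release's
-sample file before merging (the paths here are illustrative; sample file
-locations vary by distribution and packaging):
-
-.. code-block:: console
-
-   $ diff -u /etc/nova/nova.conf nova.conf.sample | less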
- -Pre-upgrade testing environment -------------------------------- - -The most important step is the pre-upgrade testing. If you are upgrading -immediately after release of a new version, undiscovered bugs might -hinder your progress. Some deployers prefer to wait until the first -point release is announced. However, if you have a significant -deployment, you might follow the development and testing of the release -to ensure that bugs for your use cases are fixed. - -Each OpenStack cloud is different even if you have a near-identical -architecture as described in this guide. As a result, you must still -test upgrades between versions in your environment using an approximate -clone of your environment. - -However, that is not to say that it needs to be the same size or use -identical hardware as the production environment. It is important to -consider the hardware and scale of the cloud that you are upgrading. The -following tips can help you minimise the cost: - -Use your own cloud - The simplest place to start testing the next version of OpenStack is - by setting up a new environment inside your own cloud. This might - seem odd, especially the double virtualization used in running - compute nodes. But it is a sure way to very quickly test your - configuration. - -Use a public cloud - Consider using a public cloud to test the scalability limits of your - cloud controller configuration. Most public clouds bill by the hour, - which means it can be inexpensive to perform even a test with many - nodes. - -Make another storage endpoint on the same system - If you use an external storage plug-in or shared file system with - your cloud, you can test whether it works by creating a second share - or endpoint. This allows you to test the system before entrusting - the new version on to your storage. - -Watch the network - Even at smaller-scale testing, look for excess network packets to - determine whether something is going horribly wrong in - inter-component communication. - -To set up the test environment, you can use one of several methods: - -- Do a full manual install by using the `Installation Tutorials and Guides - `_ for - your platform. Review the final configuration files and installed - packages. - -- Create a clone of your automated configuration infrastructure with - changed package repository URLs. - - Alter the configuration until it works. - -Either approach is valid. Use the approach that matches your experience. - -An upgrade pre-testing system is excellent for getting the configuration -to work. However, it is important to note that the historical use of the -system and differences in user interaction can affect the success of -upgrades. - -If possible, we highly recommend that you dump your production database -tables and test the upgrade in your development environment using this -data. Several MySQL bugs have been uncovered during database migrations -because of slight table differences between a fresh installation and -tables that migrated from one version to another. This will have impact -on large real datasets, which you do not want to encounter during a -production outage. - -Artificial scale testing can go only so far. After your cloud is -upgraded, you must pay careful attention to the performance aspects of -your cloud. - -Upgrade Levels --------------- - -Upgrade levels are a feature added to OpenStack Compute since the -Grizzly release to provide version locking on the RPC (Message Queue) -communications between the various Compute services. 
-
-This functionality is an important piece of the puzzle when it comes to
-live upgrades and is conceptually similar to the existing API versioning
-that allows OpenStack services of different versions to communicate
-without issue.
-
-Without upgrade levels, an X+1 version Compute service can receive and
-understand X version RPC messages, but it can only send out X+1 version
-RPC messages. For example, if a nova-conductor process has been upgraded
-to X+1 version, then the conductor service will be able to understand
-messages from X version nova-compute processes, but those compute
-services will not be able to understand messages sent by the conductor
-service.
-
-During an upgrade, operators can add configuration options to
-``nova.conf`` which lock the version of RPC messages and allow live
-upgrading of the services without interruption caused by version
-mismatch. The configuration options allow the specification of RPC
-version numbers if desired, but release name aliases are also supported.
-For example:
-
-.. code-block:: ini
-
-   [upgrade_levels]
-   compute=X+1
-   conductor=X+1
-   scheduler=X+1
-
-will keep the RPC version locked across the specified services to the
-RPC version used in X+1. As all instances of a particular service are
-upgraded to the newer version, the corresponding line can be removed
-from ``nova.conf``.
-
-Using this functionality, ideally one would lock the RPC version to the
-OpenStack version being upgraded from on nova-compute nodes, to ensure
-that, for example, X+1 version nova-compute processes will continue to
-work with X version nova-conductor processes while the upgrade
-completes. Once the upgrade of nova-compute processes is complete, the
-operator can move on to upgrading nova-conductor and remove the version
-locking for nova-compute in ``nova.conf``.
-
-Upgrade process
-~~~~~~~~~~~~~~~
-
-This section describes the process to upgrade a basic OpenStack
-deployment based on the basic two-node architecture in the `Installation
-Tutorials and Guides
-`_. All
-nodes must run a supported distribution of Linux with a recent kernel
-and the current release packages.
-
-Service specific upgrade instructions
--------------------------------------
-
-Refer to the following upgrade notes for information on upgrading specific
-OpenStack services:
-
-* `Networking service (neutron) upgrades
-  `_
-* `Compute service (nova) upgrades
-  `_
-* `Identity service (keystone) upgrades
-  `_
-* `Block Storage service (cinder) upgrades
-  `_
-* `Image service (glance) zero downtime database upgrades
-  `_
-* `Image service (glance) rolling upgrades
-  `_
-* `Bare Metal service (ironic) upgrades
-  `_
-* `Object Storage service (swift) upgrades
-  `_
-* `Telemetry service (ceilometer) upgrades
-  `_
-
-Prerequisites
--------------
-
-- Perform some cleaning of the environment prior to starting the
-  upgrade process to ensure a consistent state. For example, instances
-  not fully purged from the system after deletion might cause
-  indeterminate behavior.
-
-- For environments using the OpenStack Networking service (neutron),
-  verify the release version of the database. For example:
-
-  .. code-block:: console
-
-     # su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
-       --config-file /etc/neutron/plugins/ml2/ml2_conf.ini current" neutron
-
-Perform a backup
-----------------
-
-#. Save the configuration files on all nodes. For example:
-
-   .. 
code-block:: console - - # for i in keystone glance nova neutron openstack-dashboard cinder heat ceilometer; \ - do mkdir $i-RELEASE_NAME; \ - done - # for i in keystone glance nova neutron openstack-dashboard cinder heat ceilometer; \ - do cp -r /etc/$i/* $i-RELEASE_NAME/; \ - done - - .. note:: - - You can modify this example script on each node to handle different - services. - -#. Make a full database backup of your production data. Since the Kilo release, - database downgrades are not supported, and restoring from backup is the only - method available to retrieve a previous database version. - - .. code-block:: console - - # mysqldump -u root -p --opt --add-drop-database --all-databases > RELEASE_NAME-db-backup.sql - - .. note:: - - Consider updating your SQL server configuration as described in the - `Installation Tutorials and Guides - `_. - -Manage repositories -------------------- - -On all nodes: - -#. Remove the repository for the previous release packages. - -#. Add the repository for the new release packages. - -#. Update the repository database. - -Upgrade packages on each node ------------------------------ - -Depending on your specific configuration, upgrading all packages might -restart or break services supplemental to your OpenStack environment. -For example, if you use the TGT iSCSI framework for Block Storage -volumes and the upgrade includes new packages for it, the package -manager might restart the TGT iSCSI services and impact connectivity to -volumes. - -If the package manager prompts you to update configuration files, reject -the changes. The package manager appends a suffix to newer versions of -configuration files. Consider reviewing and adopting content from these -files. - -.. note:: - - You may need to explicitly install the ``ipset`` package if your - distribution does not install it as a dependency. - -Update services ---------------- - -To update a service on each node, you generally modify one or more -configuration files, stop the service, synchronize the database schema, -and start the service. Some services require different steps. We -recommend verifying operation of each service before proceeding to the -next service. - -The order you should upgrade services, and any changes from the general -upgrade process is described below: - -**Controller node** - -#. Identity service - Clear any expired tokens before synchronizing - the database. - -#. Image service - -#. Compute service, including networking components. - -#. Networking service - -#. Block Storage service - -#. Dashboard - In typical environments, updating Dashboard only - requires restarting the Apache HTTP service. - -#. Orchestration service - -#. Telemetry service - In typical environments, updating the - Telemetry service only requires restarting the service. - -#. Compute service - Edit the configuration file and restart the service. - -#. Networking service - Edit the configuration file and restart the service. - -**Storage nodes** - -* Block Storage service - Updating the Block Storage service only requires - restarting the service. - -**Compute nodes** - -* Networking service - Edit the configuration file and restart the service. - -Final steps ------------ - -On all distributions, you must perform some final tasks to complete the -upgrade process. - -#. Decrease DHCP timeouts by modifying the :file:`/etc/nova/nova.conf` file on - the compute nodes back to the original value for your environment. - -#. 
-
-The order in which you should upgrade services, and any deviations from
-the general upgrade process, are described below:
-
-**Controller node**
-
-#. Identity service - Clear any expired tokens before synchronizing
-   the database.
-
-#. Image service
-
-#. Compute service, including networking components.
-
-#. Networking service
-
-#. Block Storage service
-
-#. Dashboard - In typical environments, updating Dashboard only
-   requires restarting the Apache HTTP service.
-
-#. Orchestration service
-
-#. Telemetry service - In typical environments, updating the
-   Telemetry service only requires restarting the service.
-
-#. Compute service - Edit the configuration file and restart the service.
-
-#. Networking service - Edit the configuration file and restart the service.
-
-**Storage nodes**
-
-* Block Storage service - Updating the Block Storage service only requires
-  restarting the service.
-
-**Compute nodes**
-
-* Networking service - Edit the configuration file and restart the service.
-
-Final steps
------------
-
-On all distributions, you must perform some final tasks to complete the
-upgrade process.
-
-#. Decrease DHCP timeouts by modifying the :file:`/etc/nova/nova.conf` file on
-   the compute nodes back to the original value for your environment.
-
-#. Update all ``.ini`` files to match passwords and pipelines as required
-   for the OpenStack release in your environment.
-
-#. After migration, users see different results from
-   :command:`openstack image list` and :command:`glance image-list`. To ensure
-   users see the same images in the list commands, edit the
-   :file:`/etc/glance/policy.json` file and :file:`/etc/nova/policy.json` file
-   to contain ``"context_is_admin": "role:admin"``, which limits access to
-   private images for projects.
-
-#. Verify proper operation of your environment. Then, notify your users
-   that their cloud is operating normally again.
-
-.. _rolling_back_a_failed_upgrade:
-
-Rolling back a failed upgrade
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This section provides guidance for rolling back to a previous release of
-OpenStack. All distributions follow a similar procedure.
-
-.. warning::
-
-   Rolling back your environment should be the final course of action
-   since you are likely to lose any data added since the backup.
-
-A common scenario is to take down production management services in
-preparation for an upgrade, complete part of the upgrade process, and
-then discover one or more problems not encountered during testing. As a
-consequence, you must roll back your environment to the original "known
-good" state. The scenario assumes that you did not make any state
-changes after attempting the upgrade process: no new instances,
-networks, storage volumes, and so on. Any of these new resources will
-be in a frozen state after the databases are restored from backup.
-
-Within this scope, you must complete these steps to successfully roll
-back your environment:
-
-#. Roll back configuration files.
-
-#. Restore databases from backup.
-
-#. Roll back packages.
-
-You should verify that you have the requisite backups to restore.
-Rolling back upgrades is a tricky process because distributions tend to
-put much more effort into testing upgrades than downgrades. Broken
-downgrades take significantly more effort to troubleshoot and resolve
-than broken upgrades. Only you can weigh the risks of trying to push a
-failed upgrade forward versus rolling it back. Generally, consider
-rolling back as the very last option.
-
-The following steps, described for Ubuntu, have worked on at least one
-production environment, but they might not work for all environments.
-
-**To perform a rollback**
-
-#. Stop all OpenStack services.
-
-#. Copy the contents of the configuration backup directories that you
-   created during the upgrade process back to the ``/etc/`` directory.
-
-#. Restore databases from the ``RELEASE_NAME-db-backup.sql`` backup file
-   that you created with the :command:`mysqldump` command during the upgrade
-   process:
-
-   .. code-block:: console
-
-      # mysql -u root -p < RELEASE_NAME-db-backup.sql
-
-#. Downgrade OpenStack packages.
-
-   .. warning::
-
-      Downgrading packages is by far the most complicated step; it is
-      highly dependent on the distribution and the overall administration
-      of the system.
-
-   #. Determine which OpenStack packages are installed on your system. Use the
-      :command:`dpkg --get-selections` command. Filter for OpenStack
-      packages, filter again to omit packages explicitly marked in the
-      ``deinstall`` state, and save the final output to a file. For example,
-      the following command covers a controller node with keystone, glance,
-      nova, neutron, and cinder:
-
-      .. 
code-block:: console - - # dpkg --get-selections | grep -e keystone -e glance -e nova -e neutron \ - -e cinder | grep -v deinstall | tee openstack-selections - cinder-api install - cinder-common install - cinder-scheduler install - cinder-volume install - glance install - glance-api install - glance-common install - glance-registry install - neutron-common install - neutron-dhcp-agent install - neutron-l3-agent install - neutron-lbaas-agent install - neutron-metadata-agent install - neutron-plugin-openvswitch install - neutron-plugin-openvswitch-agent install - neutron-server install - nova-api install - nova-common install - nova-conductor install - nova-consoleauth install - nova-novncproxy install - nova-objectstore install - nova-scheduler install - python-cinder install - python-cinderclient install - python-glance install - python-glanceclient install - python-keystone install - python-keystoneclient install - python-neutron install - python-neutronclient install - python-nova install - python-novaclient install - - .. note:: - - Depending on the type of server, the contents and order of your - package list might vary from this example. - - #. You can determine the package versions available for reversion by using - the ``apt-cache policy`` command. For example: - - .. code-block:: console - - # apt-cache policy nova-common - - nova-common: - Installed: 2:14.0.1-0ubuntu1~cloud0 - Candidate: 2:14.0.1-0ubuntu1~cloud0 - Version table: - *** 2:14.0.1-0ubuntu1~cloud0 500 - 500 http://ubuntu-cloud.archive.canonical.com/ubuntu xenial-updates/newton/main amd64 Packages - 100 /var/lib/dpkg/status - 2:13.1.2-0ubuntu2 500 - 500 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 Packages - 2:13.0.0-0ubuntu2 500 - 500 http://archive.ubuntu.com/ubuntu xenial/main amd64 Packages - - .. note:: - - If you removed the release repositories, you must first reinstall - them and run the :command:`apt-get update` command. - - The command output lists the currently installed version of the package, - newest candidate version, and all versions along with the repository that - contains each version. Look for the appropriate release - version— ``2:14.0.1-0ubuntu1~cloud0`` in this case. The process of - manually picking through this list of packages is rather tedious and - prone to errors. You should consider using a script to help - with this process. For example: - - .. 
code-block:: console - - # for i in `cut -f 1 openstack-selections | sed 's/neutron/;'`; - do echo -n $i ;apt-cache policy $i | grep -B 1 RELEASE_NAME | - grep -v Packages | awk '{print "="$1}';done | tr '\n' ' ' | - tee openstack-RELEASE_NAME-versions - cinder-api=2:9.0.0-0ubuntu1~cloud0 - cinder-common=2:9.0.0-0ubuntu1~cloud0 - cinder-scheduler=2:9.0.0-0ubuntu1~cloud0 - cinder-volume=2:9.0.0-0ubuntu1~cloud0 - glance=2:13.0.0-0ubuntu1~cloud0 - glance-api=2:13.0.0-0ubuntu1~cloud0 500 - glance-common=2:13.0.0-0ubuntu1~cloud0 500 - glance-registry=2:13.0.0-0ubuntu1~cloud0 500 - neutron-common=2:9.0.0-0ubuntu1~cloud0 - neutron-dhcp-agent=2:9.0.0-0ubuntu1~cloud0 - neutron-l3-agent=2:9.0.0-0ubuntu1~cloud0 - neutron-lbaas-agent=2:9.0.0-0ubuntu1~cloud0 - neutron-metadata-agent=2:9.0.0-0ubuntu1~cloud0 - neutron-server=2:9.0.0-0ubuntu1~cloud0 - nova-api=2:14.0.1-0ubuntu1~cloud0 - nova-common=2:14.0.1-0ubuntu1~cloud0 - nova-conductor=2:14.0.1-0ubuntu1~cloud0 - nova-consoleauth=2:14.0.1-0ubuntu1~cloud0 - nova-novncproxy=2:14.0.1-0ubuntu1~cloud0 - nova-objectstore=2:14.0.1-0ubuntu1~cloud0 - nova-scheduler=2:14.0.1-0ubuntu1~cloud0 - python-cinder=2:9.0.0-0ubuntu1~cloud0 - python-cinderclient=1:1.9.0-0ubuntu1~cloud0 - python-glance=2:13.0.0-0ubuntu1~cloud0 - python-glanceclient=1:2.5.0-0ubuntu1~cloud0 - python-neutron=2:9.0.0-0ubuntu1~cloud0 - python-neutronclient=1:6.0.0-0ubuntu1~cloud0 - python-nova=2:14.0.1-0ubuntu1~cloud0 - python-novaclient=2:6.0.0-0ubuntu1~cloud0 - python-openstackclient=3.2.0-0ubuntu2~cloud0 - - #. Use the :command:`apt-get install` command to install specific versions - of each package by specifying ``=``. The script in - the previous step conveniently created a list of ``package=version`` - pairs for you: - - .. code-block:: console - - # apt-get install `cat openstack-RELEASE_NAME-versions` - - This step completes the rollback procedure. You should remove the - upgrade release repository and run :command:`apt-get update` to prevent - accidental upgrades until you solve whatever issue caused you to roll - back your environment. diff --git a/doc/ops-guide/source/ops-user-facing-operations.rst b/doc/ops-guide/source/ops-user-facing-operations.rst deleted file mode 100644 index d1e41baff5..0000000000 --- a/doc/ops-guide/source/ops-user-facing-operations.rst +++ /dev/null @@ -1,2295 +0,0 @@ -====================== -User-Facing Operations -====================== - -This guide is for OpenStack operators and does not seek to be an -exhaustive reference for users, but as an operator, you should have a -basic understanding of how to use the cloud facilities. This chapter -looks at OpenStack from a basic user perspective, which helps you -understand your users' needs and determine, when you get a trouble -ticket, whether it is a user issue or a service issue. The main concepts -covered are images, flavors, security groups, block storage, shared file -system storage, and instances. - -Images -~~~~~~ - -OpenStack images can often be thought of as "virtual machine templates." -Images can also be standard installation media such as ISO images. -Essentially, they contain bootable file systems that are used to launch -instances. - -Adding Images -------------- - -Several pre-made images exist and can easily be imported into the Image -service. A common image to add is the CirrOS image, which is very small -and used for testing purposes. To add this image, simply do: - -.. 
code-block:: console - - $ wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img - $ openstack image create --file cirros-0.3.5-x86_64-disk.img \ - --public --container-format bare \ - --disk-format qcow2 "cirros image" - -The :command:`openstack image create` command provides a large set of options -for working with your image. For example, the ``--min-disk`` option is -useful for images that require root disks of a certain size (for example, -large Windows images). To view these options, run: - -.. code-block:: console - - $ openstack help image create - -Run the following command to view the properties of existing images: - -.. code-block:: console - - $ openstack image show IMAGE_NAME_OR_UUID - -Adding Signed Images --------------------- - -To provide a chain of trust from an end user to the Image service, -and the Image service to Compute, an end user can import signed images -that can be initially verified in the Image service, and later verified -in the Compute service. Appropriate Image service properties need -to be set to enable this signature feature. - -.. note:: - - Prior to the steps below, an asymmetric keypair and certificate must - be generated. In this example, these are called private_key.pem and - new_cert.crt, respectively, and both reside in the current - directory. Also note that the image in this example is - cirros-0.3.5-x86_64-disk.img, but any image can be used. - -The following are steps needed to create the signature used for the -signed images: - -#. Retrieve image for upload - - .. code-block:: console - - $ wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img - -#. Use private key to create a signature of the image - - .. note:: - - The following implicit values are being used to create the signature - in this example: - - - Signature hash method = SHA-256 - - - Signature key type = RSA-PSS - - .. note:: - - The following options are currently supported: - - - Signature hash methods: SHA-224, SHA-256, SHA-384, and SHA-512 - - - Signature key types: DSA, ECC_SECT571K1, ECC_SECT409K1, - ECC_SECT571R1, ECC_SECT409R1, ECC_SECP521R1, ECC_SECP384R1, - and RSA-PSS - - Generate signature of image and convert it to a base64 representation: - - .. code-block:: console - - $ openssl dgst -sha256 -sign private_key.pem -sigopt rsa_padding_mode:pss \ - -out image-file.signature cirros-0.3.5-x86_64-disk.img - $ base64 -w 0 image-file.signature > signature_64 - $ cat signature_64 - 'c4br5f3FYQV6Nu20cRUSnx75R/VcW3diQdsUN2nhPw+UcQRDoGx92hwMgRxzFYeUyydRTWCcUS2ZLudPR9X7rM - THFInA54Zj1TwEIbJTkHwlqbWBMU4+k5IUIjXxHO6RuH3Z5f/SlSt7ajsNVXaIclWqIw5YvEkgXTIEuDPE+C4=' - - .. note:: - - - Using Image API v1 requires '-w 0' above, since multiline image - properties are not supported. - - Image API v2 supports multiline properties, so this option is not - required for v2 but it can still be used. - - -#. Create context - - .. code-block:: console - - $ python - >>> from keystoneclient.v3 import client - >>> keystone_client = client.Client(username='demo', - user_domain_name='Default', - password='password', - project_name='demo', - auth_url='http://localhost:5000/v3') - - >>> from oslo_context import context - >>> context = context.RequestContext(auth_token=keystone_client.auth_token, - tenant=keystone_client.project_id) - -#. Encode certificate in DER format - - .. 
code-block:: python
-
-      >>> from cryptography import x509 as cryptography_x509
-      >>> from cryptography.hazmat import backends
-      >>> from cryptography.hazmat.primitives import serialization
-      >>> with open("new_cert.crt", "rb") as cert_file:
-      ...     cert = cryptography_x509.load_pem_x509_certificate(
-      ...         cert_file.read(),
-      ...         backend=backends.default_backend()
-      ...     )
-      >>> certificate_der = cert.public_bytes(encoding=serialization.Encoding.DER)
-
-#. Upload Certificate in DER format to Castellan
-
-   .. code-block:: python
-
-      >>> from castellan.common.objects import x_509
-      >>> from castellan import key_manager
-      >>> castellan_cert = x_509.X509(certificate_der)
-      >>> key_API = key_manager.API()
-      >>> cert_uuid = key_API.store(context, castellan_cert)
-      >>> cert_uuid
-      u'62a33f41-f061-44ba-9a69-4fc247d3bfce'
-
-#. Upload Image to Image service, with Signature Metadata
-
-   .. note::
-
-      The following signature properties are used:
-
-      - img_signature uses the signature called signature_64
-
-      - img_signature_certificate_uuid uses the value from cert_uuid
-        in section 5 above
-
-      - img_signature_hash_method matches 'SHA-256' in section 2 above
-
-      - img_signature_key_type matches 'RSA-PSS' in section 2 above
-
-   .. code-block:: console
-
-      $ . openrc demo
-      $ export OS_IMAGE_API_VERSION=2
-      $ openstack image create --property name=cirrosSignedImage_goodSignature \
-        --property is-public=true --container-format bare --disk-format qcow2 \
-        --property img_signature='c4br5f3FYQV6Nu20cRUSnx75R/VcW3diQdsUN2nhPw+UcQRDoGx92hwMgRxzFYeUyydRTWCcUS2ZLudPR9X7rMTHFInA54Zj1TwEIbJTkHwlqbWBMU4+k5IUIjXxHO6RuH3Z5fSlSt7ajsNVXaIclWqIw5YvEkgXTIEuDPE+C4=' \
-        --property img_signature_certificate_uuid='62a33f41-f061-44ba-9a69-4fc247d3bfce' \
-        --property img_signature_hash_method='SHA-256' \
-        --property img_signature_key_type='RSA-PSS' < ~/cirros-0.3.5-x86_64-disk.img
-
-   .. note::
-
-      The maximum image signature character limit is 255.
-
-#. Verify the Keystone URL
-
-   .. note::
-
-      The default Keystone configuration assumes that Keystone is
-      on the local host, and it uses ``http://localhost:5000/v3``
-      as the endpoint URL, which is specified in the ``glance-api.conf``
-      and ``nova-api.conf`` files:
-
-      .. code-block:: ini
-
-         [barbican]
-         auth_endpoint = http://localhost:5000/v3
-
-   .. note::
-
-      If Keystone is located remotely instead, edit the
-      ``glance-api.conf`` and ``nova.conf`` files. In the ``[barbican]``
-      section, configure the ``auth_endpoint`` option:
-
-      .. code-block:: ini
-
-         [barbican]
-         auth_endpoint = https://192.168.245.9:5000/v3
-
-#. Signature verification will occur when Compute boots the signed image
-
-   .. note::
-
-      nova-compute servers first need to be updated by the following steps:
-
-      - Ensure that cryptsetup is installed, and ensure that the
-        ``python-barbicanclient`` Python package is installed
-      - Set up the Key Manager service by editing /etc/nova/nova.conf and
-        adding the entries in the code block below
-      - The ``verify_glance_signatures`` flag enables Compute to
-        automatically validate signed images prior to instance launch.
-        This validation feature is enabled when the value is set to TRUE
-
-      .. code-block:: ini
-
-         [key_manager]
-         api_class = castellan.key_manager.barbican_key_manager.BarbicanKeyManager
-         [glance]
-         verify_glance_signatures = TRUE
-
-      .. note::
-
-         The ``api_class`` option under ``[keymgr]`` is deprecated as of
-         Newton, so it should not be used in that release or beyond.
-
-   .. note::
-
-      Restart nova-compute to apply the configuration changes.
-
-
-Sharing Images Between Projects
--------------------------------
-
-In a multi-tenant cloud environment, users sometimes want to share their
-personal images or snapshots with other projects. This can be done on
-the command line with the ``glance`` tool by the owner of the image.
-
-To share an image or snapshot with another project, do the following:
-
-#. Obtain the UUID of the image:
-
-   .. code-block:: console
-
-      $ openstack image list
-
-#. Obtain the UUID of the project with which you want to share your image,
-   let's call it the target project.
-   Unfortunately, non-admin users are unable to use the :command:`openstack`
-   command to do this. The easiest solution is to obtain the UUID either
-   from an administrator of the cloud or from a user located in the
-   target project.
-
-#. Once you have both pieces of information, run
-   the :command:`openstack image add project` command:
-
-   .. code-block:: console
-
-      $ openstack image add project IMAGE_NAME_OR_UUID PROJECT_NAME_OR_UUID
-
-   For example:
-
-   .. code-block:: console
-
-      $ openstack image add project 733d1c44-a2ea-414b-aca7-69decf20d810 \
-      771ed149ef7e4b2b88665cc1c98f77ca
-
-#. You now need to act in the target project scope.
-
-   .. note::
-
-      The target project cannot see the shared image yet; the sharing
-      must first be accepted.
-
-   To accept the sharing, you need to update the member status:
-
-   .. code-block:: console
-
-      $ glance member-update IMAGE_UUID PROJECT_UUID accepted
-
-   For example:
-
-   .. code-block:: console
-
-      $ glance member-update 733d1c44-a2ea-414b-aca7-69decf20d810 \
-        771ed149ef7e4b2b88665cc1c98f77ca accepted
-
-   Project ``771ed149ef7e4b2b88665cc1c98f77ca`` will now have access to image
-   ``733d1c44-a2ea-414b-aca7-69decf20d810``.
-
-   .. tip::
-
-      You can explicitly ask for pending member status to view shared images
-      not yet accepted:
-
-      .. code-block:: console
-
-         $ glance image-list --member-status pending
-
-
-Deleting Images
----------------
-
-To delete an image, just execute:
-
-.. code-block:: console
-
-   $ openstack image delete IMAGE_NAME_OR_UUID
-
-.. caution::
-
-   Generally, deleting an image does not affect instances or snapshots that were
-   based on the image. However, some drivers may require the original image to be
-   present to perform a migration. For example, XenAPI live-migrate will work
-   fine if the image is deleted, but libvirt will fail.
-
-Other CLI Options
------------------
-
-A full set of options can be found using:
-
-.. code-block:: console
-
-   $ glance help
-
-or the `Command-Line Interface
-Reference `__.
-
-The Image service and the Database
-----------------------------------
-
-The only thing the Image service does not store in a database is
-the image itself. The Image service database has two main
-tables:
-
-* ``images``
-* ``image_properties``
-
-Working directly with the database and SQL queries can provide you with
-custom lists and reports of images. Technically, you can update
-properties about images through the database, although this is not
-generally recommended.
-
-Example Image service Database Queries
---------------------------------------
-
-One interesting example is modifying the table of images and the owner
-of that image. This can be easily done if you simply display the unique
-ID of the owner. This example goes one
-step further and displays the readable name of the owner:
-
-.. 
code-block:: mysql - - mysql> select glance.images.id, - glance.images.name, keystone.tenant.name, is_public from - glance.images inner join keystone.tenant on - glance.images.owner=keystone.tenant.id; - -Another example is displaying all properties for a certain image: - -.. code-block:: mysql - - mysql> select name, value from - image_properties where id = - -Flavors -~~~~~~~ - -Virtual hardware templates are called "flavors" in OpenStack, defining -sizes for RAM, disk, number of cores, and so on. The default install -provides five flavors. - -These are configurable by admin users (the rights may also be delegated -to other users by redefining the access controls for -``compute_extension:flavormanage`` in ``/etc/nova/policy.json`` on the -``nova-api`` server). To get the list of available flavors on your -system, run: - -.. code-block:: console - - $ openstack flavor list - +----+-----------+-------+------+-----------+-------+-----------+ - | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public | - +----+-----------+-------+------+-----------+-------+-----------+ - | 1 | m1.tiny | 512 | 1 | 0 | 1 | True | - | 2 | m1.small | 2048 | 20 | 0 | 1 | True | - | 3 | m1.medium | 4096 | 40 | 0 | 2 | True | - | 4 | m1.large | 8192 | 80 | 0 | 4 | True | - | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True | - +----+-----------+-------+------+-----------+-------+-----------+ - -The :command:`openstack flavor create` command allows authorized users to -create new flavors. Additional flavor manipulation commands can be shown with -the following command: - -.. code-block:: console - - $ openstack help | grep flavor - -Flavors define a number of parameters, resulting in the user having a -choice of what type of virtual machine to run—just like they would have -if they were purchasing a physical server. -:ref:`table_flavor_params` lists the elements that can be set. -Note in particular ``extra_specs``, which can be used to -define free-form characteristics, giving a lot of flexibility beyond just the -size of RAM, CPU, and Disk. - -.. _table_flavor_params: - -.. list-table:: Table. Flavor parameters - :widths: 25 75 - :header-rows: 1 - - * - **Column** - - **Description** - * - ID - - Unique ID (integer or UUID) for the flavor. - * - Name - - A descriptive name, such as xx.size\_name, is conventional but not required, though some third-party tools may rely on it. - * - Memory\_MB - - Virtual machine memory in megabytes. - * - Disk - - Virtual root disk size in gigabytes. This is an ephemeral disk the base image is copied into. You don't use it when you boot from a persistent volume. The "0" size is a special case that uses the native base image size as the size of the ephemeral root volume. - * - Ephemeral - - Specifies the size of a secondary ephemeral data disk. This is an empty, unformatted disk and exists only for the life of the instance. - * - Swap - - Optional swap space allocation for the instance. - * - VCPUs - - Number of virtual CPUs presented to the instance. - * - RXTX_Factor - - Optional property that allows created servers to have a different - bandwidth cap from that defined in the network - they are attached to. This factor is multiplied by the rxtx\_base - property of the network. - Default value is 1.0 (that is, the same as the attached network). - * - Is_Public - - Boolean value that indicates whether the flavor is available to - all users or private. Private flavors do not get the current - tenant assigned to them. Defaults to ``True``. 
-   * - extra_specs
-     - Additional optional restrictions on which compute nodes the
-       flavor can run on. This is implemented as key-value pairs that must
-       match against the corresponding key-value pairs on compute nodes.
-       Can be used to implement things like special resources (such as
-       flavors that can run only on compute nodes with GPU hardware).
-
-
-Private Flavors
----------------
-
-A user might need a custom flavor that is uniquely tuned for a project
-she is working on. For example, the user might require 128 GB of memory.
-If you create a new flavor as described above, the user would have
-access to the custom flavor, but so would all other tenants in your
-cloud. Sometimes this sharing isn't desirable. In this scenario,
-allowing all users to have access to a flavor with 128 GB of memory
-might cause your cloud to reach full capacity very quickly. To prevent
-this, you can restrict access to the custom flavor using the
-:command:`nova flavor-access-add` command:
-
-.. code-block:: console
-
-   $ nova flavor-access-add FLAVOR_ID PROJECT_ID
-
-To view a flavor's access list, do the following:
-
-.. code-block:: console
-
-   $ nova flavor-access-list [--flavor FLAVOR_ID]
-
-.. tip::
-
-   Once access to a flavor has been restricted, no other projects
-   besides the ones granted explicit access will be able to see the
-   flavor. This includes the admin project. Make sure to add the admin
-   project in addition to the original project.
-
-   It's also helpful to allocate a specific numeric range for custom
-   and private flavors. On UNIX-based systems, nonsystem accounts
-   usually have a UID starting at 500. A similar approach can be taken
-   with custom flavors. This helps you easily identify which flavors
-   are custom, private, and public for the entire cloud.
-
-How Do I Modify an Existing Flavor?
------------------------------------
-
-The OpenStack dashboard simulates the ability to modify a flavor by
-deleting an existing flavor and creating a new one with the same name.
-
-Security Groups
-~~~~~~~~~~~~~~~
-
-A common new-user issue with OpenStack is failing to set an appropriate
-security group when launching an instance. As a result, the user is
-unable to contact the instance on the network.
-
-Security groups are sets of IP filter rules that are applied to an
-instance's networking. They are project specific, and project members
-can edit the default rules for their group and add new rule sets. All
-projects have a "default" security group, which is applied to instances
-that have no other security group defined. Unless changed, this security
-group denies all incoming traffic.
-
-.. tip::
-
-   As noted in the previous chapter, the number of rules per security
-   group is controlled by the ``quota_security_group_rules`` quota, and
-   the number of allowed security groups per project is controlled by
-   the ``quota_security_groups`` quota.
-
-End-User Configuration of Security Groups
------------------------------------------
-
-Security groups for the current project can be found on the OpenStack
-dashboard under :guilabel:`Access & Security`. To see details of an
-existing group, select the :guilabel:`Edit Security Group` action for that
-security group. Modifying existing groups can be done from this
-edit interface. There is a :guilabel:`Create Security Group` button on the
-main :guilabel:`Access & Security` page for creating new groups.
-We discuss the terms used in these fields when we explain the
-command-line equivalents.
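-
-Before turning to the command line, a quick check that often resolves
-the "cannot reach the instance" complaint described above is to confirm
-which security groups a server actually uses (a sketch; the instance
-name is illustrative):
-
-.. code-block:: console
-
-   $ openstack server show MY_INSTANCE -c security_groups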
-
-**Setting with openstack command**
-
-If your environment is using Neutron, you can configure security group
-settings using the :command:`openstack` command. To get a list of
-security groups for the project you are acting in, use the following
-command:
-
-.. code-block:: console
-
-   $ openstack security group list
-   +------------------------+---------+------------------------+-------------------------+
-   | ID                     | Name    | Description            | Project                 |
-   +------------------------+---------+------------------------+-------------------------+
-   | 3bef30ed-442d-4cf1     | default | Default security group | 35e3820f7490493ca9e3a5e |
-   | -b84d-2ba50a395599     |         |                        | 685393298               |
-   | aaf1d0b7-98a0-41a3-ae1 | default | Default security group | 32e9707393c34364923edf8 |
-   | 6-a58b94503289         |         |                        | f5029cbfe               |
-   +------------------------+---------+------------------------+-------------------------+
-
-
-To view the details of a security group:
-
-.. code-block:: console
-
-   $ openstack security group show 3bef30ed-442d-4cf1-b84d-2ba50a395599
-   +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------+
-   | Field           | Value |
-   +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------+
-   | created_at      | 2016-11-08T21:55:19Z |
-   | description     | Default security group |
-   | id              | 3bef30ed-442d-4cf1-b84d-2ba50a395599 |
-   | name            | default |
-   | project_id      | 35e3820f7490493ca9e3a5e685393298 |
-   | project_id      | 35e3820f7490493ca9e3a5e685393298 |
-   | revision_number | 1 |
-   | rules           | created_at='2016-11-08T21:55:19Z', direction='egress', ethertype='IPv6', id='1dca4cac-d4f2-46f5-b757-d53c01a87bdf', project_id='35e3820f7490493ca9e3a5e685393298', |
-   |                 | revision_number='1', updated_at='2016-11-08T21:55:19Z' |
-   |                 | created_at='2016-11-08T21:55:19Z', direction='egress', ethertype='IPv4', id='2d83d6f2-424e-4b7c-b9c4-1ede89c00aab', project_id='35e3820f7490493ca9e3a5e685393298', |
-   |                 | revision_number='1', updated_at='2016-11-08T21:55:19Z' |
-   |                 | created_at='2016-11-08T21:55:19Z', direction='ingress', ethertype='IPv4', id='62b7d1eb-b98d-4707-a29f-6df379afdbaa', project_id='35e3820f7490493ca9e3a5e685393298', remote_group_id |
-   |                 | ='3bef30ed-442d-4cf1-b84d-2ba50a395599', revision_number='1', updated_at='2016-11-08T21:55:19Z' |
-   |                 | created_at='2016-11-08T21:55:19Z', direction='ingress', ethertype='IPv6', id='f0d4b8d6-32d4-4f93-813d-3ede9d698fbb', project_id='35e3820f7490493ca9e3a5e685393298', remote_group_id |
-   |                 | ='3bef30ed-442d-4cf1-b84d-2ba50a395599', revision_number='1', updated_at='2016-11-08T21:55:19Z' |
-   | updated_at      | 2016-11-08T21:55:19Z |
-   +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------+
-
-These rules are all "allow" type rules, as the default is deny. This
-example shows the full port range for all protocols allowed from all
-IPs. This section describes the most common security group rule
-parameters:
-
-direction
-    The direction in which the security group rule is applied. Valid
-    values are ``ingress`` or ``egress``.
-
-remote_ip_prefix
-    This attribute value matches the specified IP prefix as the source
-    IP address of the IP packet.
-
-protocol
-    The protocol that is matched by the security group rule. Valid
-    values are ``null``, ``tcp``, ``udp``, ``icmp``, and ``icmpv6``.
-
-port_range_min
-    The minimum port number in the range that is matched by the security
-    group rule. If the protocol is TCP or UDP, this value must be less
-    than or equal to the ``port_range_max`` attribute value. If the
-    protocol is ICMP or ICMPv6, this value must be an ICMP or ICMPv6
-    type, respectively.
-
-port_range_max
-    The maximum port number in the range that is matched by the security
-    group rule. The ``port_range_min`` attribute constrains the
-    ``port_range_max`` attribute. If the protocol is ICMP or ICMPv6,
-    this value must be an ICMP or ICMPv6 type, respectively.
-
-ethertype
-    Must be ``IPv4`` or ``IPv6``, and addresses represented in CIDR must
-    match the ingress or egress rules.
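-
-As a small illustration of these parameters, a rule permitting inbound
-ICMP echo traffic, so that instances in the group answer ping, might
-look like this (the group name is illustrative):
-
-.. code-block:: console
-
-   $ openstack security group rule create --ingress --ethertype IPv4 \
-     --protocol icmp --remote-ip 0.0.0.0/0 my_secgroup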
-
-When adding a new security group, you should pick a descriptive but
-brief name. This name shows up in brief descriptions of the instances
-that use it where the longer description field often does not. Seeing
-that an instance is using security group ``http`` is much easier to
-understand than ``bobs_group`` or ``secgrp1``.
-
-This example creates a security group that allows web traffic anywhere
-on the Internet. We'll call this group ``global_http``, which is clear
-and reasonably concise, encapsulating what is allowed and from where.
-From the command line, do:
-
-.. code-block:: console
-
-   $ openstack security group create global_http --description "allow web traffic from the Internet"
-   Created a new security_group:
-   +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------+
-   | Field           | Value |
-   +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------+
-   | created_at      | 2016-11-10T16:09:18Z |
-   | description     | allow web traffic from the Internet |
-   | headers         | |
-   | id              | 70675447-1b92-4102-a7ea-6a3ca99d2290 |
-   | name            | global_http |
-   | project_id      | 32e9707393c34364923edf8f5029cbfe |
-   | project_id      | 32e9707393c34364923edf8f5029cbfe |
-   | revision_number | 1 |
-   | rules           | created_at='2016-11-10T16:09:18Z', direction='egress', ethertype='IPv4', id='e440b13a-e74f-4700-a36f-9ecc0de76612', project_id='32e9707393c34364923edf8f5029cbfe', |
-   |                 | revision_number='1', updated_at='2016-11-10T16:09:18Z' |
-   |                 | created_at='2016-11-10T16:09:18Z', direction='egress', ethertype='IPv6', id='0debf8cb-9f1d-45e5-98db-ee169c0715fe', project_id='32e9707393c34364923edf8f5029cbfe', |
-   |                 | revision_number='1', updated_at='2016-11-10T16:09:18Z' |
-   | updated_at      | 2016-11-10T16:09:18Z |
-   +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------+
-
-Immediately after creation, the security group has only an allow egress
-rule. To make it do what we want, we need to add some rules:
-
-.. code-block:: console
-
-   $ openstack security group rule create --help
-   usage: openstack security group rule create [-h]
-               [-f {json,shell,table,value,yaml}]
-               [-c COLUMN]
-               [--max-width ]
-               [--noindent] [--prefix PREFIX]
-               [--remote-ip  | --remote-group ]
-               [--dst-port ]
-               [--icmp-type ]
-               [--icmp-code ]
-               [--protocol ]
-               [--ingress | --egress]
-               [--ethertype ]
-               [--project ]
-               [--project-domain ]
-               
-
-   $ openstack security group rule create --ingress --ethertype IPv4 \
-     --protocol tcp --remote-ip 0.0.0.0/0 global_http
-
-   Created a new security group rule:
-   +-------------------+--------------------------------------+
-   | Field             | Value                                |
-   +-------------------+--------------------------------------+
-   | created_at        | 2016-11-10T16:12:27Z                 |
-   | description       |                                      |
-   | direction         | ingress                              |
-   | ethertype         | IPv4                                 |
-   | headers           |                                      |
-   | id                | 694d30b1-1c4d-4bb8-acbe-7f1b3de2b20f |
-   | port_range_max    | None                                 |
-   | port_range_min    | None                                 |
-   | project_id        | 32e9707393c34364923edf8f5029cbfe     |
-   | project_id        | 32e9707393c34364923edf8f5029cbfe     |
-   | protocol          | tcp                                  |
-   | remote_group_id   | None                                 |
-   | remote_ip_prefix  | 0.0.0.0/0                            |
-   | revision_number   | 1                                    |
-   | security_group_id | 70675447-1b92-4102-a7ea-6a3ca99d2290 |
-   | updated_at        | 2016-11-10T16:12:27Z                 |
-   +-------------------+--------------------------------------+
-
-Despite only outputting the newly added rule, this operation is
-additive:
-
-.. code-block:: console
-
-   $ openstack security group show global_http
-   +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------+
-   | Field           | Value |
-   +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------+
-   | created_at      | 2016-11-10T16:09:18Z |
-   | description     | allow web traffic from the Internet |
-   | id              | 70675447-1b92-4102-a7ea-6a3ca99d2290 |
-   | name            | global_http |
-   | project_id      | 32e9707393c34364923edf8f5029cbfe |
-   | project_id      | 32e9707393c34364923edf8f5029cbfe |
-   | revision_number | 2 |
-   | rules           | created_at='2016-11-10T16:09:18Z', direction='egress', ethertype='IPv6', id='0debf8cb-9f1d-45e5-98db-ee169c0715fe', project_id='32e9707393c34364923edf8f5029cbfe', |
-   |                 | revision_number='1', updated_at='2016-11-10T16:09:18Z' |
-   |                 | created_at='2016-11-10T16:12:27Z', direction='ingress', ethertype='IPv4', id='694d30b1-1c4d-4bb8-acbe-7f1b3de2b20f', project_id='32e9707393c34364923edf8f5029cbfe', protocol='tcp', |
-   |                 | remote_ip_prefix='0.0.0.0/0', revision_number='1', updated_at='2016-11-10T16:12:27Z' |
-   |                 | created_at='2016-11-10T16:09:18Z', direction='egress', ethertype='IPv4', id='e440b13a-e74f-4700-a36f-9ecc0de76612', project_id='32e9707393c34364923edf8f5029cbfe', |
-   |                 | revision_number='1', updated_at='2016-11-10T16:09:18Z' |
-   | updated_at      | 2016-11-10T16:12:27Z |
-   +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------+
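-
-Note that the rule just added matches every TCP port, since
-``port_range_min`` and ``port_range_max`` are unset. For a group named
-``global_http`` you would more likely scope the rule to the web ports
-with ``--dst-port``; a sketch:
-
-.. code-block:: console
-
-   $ openstack security group rule create --ingress --ethertype IPv4 \
-     --protocol tcp --remote-ip 0.0.0.0/0 --dst-port 80:80 global_http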
-
-The inverse operation is called
-:command:`openstack security group rule delete`,
-specifying the security group rule ID. Whole security groups can be
-removed with :command:`openstack security group delete`.
-
-To create security group rules for a cluster of instances, use
-RemoteGroups.
-
-RemoteGroups are a dynamic way of defining the CIDR of allowed sources.
-The user specifies a RemoteGroup (security group name) and then all the
-users' other instances using the specified RemoteGroup are selected
-dynamically. This dynamic selection alleviates the need for individual
-rules to allow each new member of the cluster.
-
-The code is similar to the above example of
-:command:`openstack security group rule create`. To use RemoteGroup, specify
-``--remote-group`` instead of ``--remote-ip``.
-For example:
-
-.. code-block:: console
-
-   $ openstack security group rule create --ingress \
-     --ethertype IPv4 --protocol tcp \
-     --remote-group global_http cluster
-
-The "cluster" rule allows TCP access, including SSH, from any other
-instance that uses the ``global_http`` group.
-
-Block Storage
-~~~~~~~~~~~~~
-
-OpenStack volumes are persistent block-storage devices that may be
-attached to and detached from instances, but they can be attached to
-only one instance at a time. Similar to an external hard drive, they do
-not provide shared storage in the way a network file system or object
-store does. It is left to the operating system in the instance to put a
-file system on the block device and mount it, or not.
-
-As with other removable disk technology, it is important that the
-operating system is not trying to make use of the disk before removing
-it. On Linux instances, this typically involves unmounting any file
-systems mounted from the volume. The OpenStack volume service cannot
-tell whether it is safe to remove volumes from an instance, so it does
-what it is told. If a user tells the volume service to detach a volume
-from an instance while it is being written to, you can expect some level
-of file system corruption as well as faults from whatever process within
-the instance was using the device.
-
-There is nothing OpenStack-specific in being aware of the steps needed
-to access block devices from within the instance operating system,
-potentially formatting them for first use and being cautious when
-removing them. What is specific is how to create new volumes and attach
-and detach them from instances. These operations can all be done from
-the :guilabel:`Volumes` page of the dashboard or by using the ``openstack``
-command-line client.
-
-To add new volumes, you need only a volume size in gigabytes.
-Either put these into the :guilabel:`Create Volume` web form or use the command
-line:
-
-.. code-block:: console
-
-   $ openstack volume create volume1 --size 10
-
-This creates a 10 GB volume. To list existing
-volumes and the instances they are connected to, if any:
-
-.. code-block:: console
-
-   $ openstack volume list
-   +--------------------------------------+--------------+--------+------+-------------+
-   | ID                                   | Display Name | Status | Size | Attached to |
-   +--------------------------------------+--------------+--------+------+-------------+
-   | 6cf4114a-56b2-476b-acf7-7359d8334aa2 | volume1      | error  | 10   |             |
-   +--------------------------------------+--------------+--------+------+-------------+
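-
-Once a volume reaches ``available`` status, attaching it to an instance
-can also be done from the command line; a sketch with illustrative
-names:
-
-.. code-block:: console
-
-   $ openstack server add volume MY_INSTANCE volume1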
-
-OpenStack Block Storage also allows creating snapshots of volumes.
-Remember that this is a block-level snapshot that is crash consistent,
-so it is best if the volume is not connected to an instance when the
-snapshot is taken and second best if the volume is not in use on the
-instance it is attached to. If the volume is under heavy use, the
-snapshot may have an inconsistent file system. In fact, by default, the
-volume service does not take a snapshot of a volume that is attached to
-an instance, though it can be forced to. To take a volume snapshot,
-either select :guilabel:`Create Snapshot` from the actions column
-next to the volume name on the dashboard :guilabel:`Volumes` page,
-or run this from the command line:
-
-.. code-block:: console
-
-   $ openstack help snapshot create
-   usage: openstack snapshot create [-h] [-f {json,shell,table,value,yaml}]
-                                    [-c COLUMN] [--max-width ]
-                                    [--noindent] [--prefix PREFIX]
-                                    [--name ] [--description ]
-                                    [--force] [--property ]
-                                    
-
-   Create new snapshot
-
-   positional arguments:
-          Volume to snapshot (name or ID)
-
-   optional arguments:
-     -h, --help            show this help message and exit
-     --name         Name of the snapshot
-     --description 
-                           Description of the snapshot
-     --force               Create a snapshot attached to an instance. Default is
-                           False
-     --property 
-                           Set a property to this snapshot (repeat option to set
-                           multiple properties)
-
-   output formatters:
-     output formatter options
-
-     -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml}
-                           the output format, defaults to table
-     -c COLUMN, --column COLUMN
-                           specify the column(s) to include, can be repeated
-
-   table formatter:
-     --max-width 
-                           Maximum display width, <1 to disable. You can also use
-                           the CLIFF_MAX_TERM_WIDTH environment variable, but the
-                           parameter takes precedence.
-
-   json formatter:
-     --noindent            whether to disable indenting the JSON
-
-   shell formatter:
-     a format a UNIX shell can parse (variable="value")
-
-     --prefix PREFIX       add a prefix to all variable names
-
-.. note::
-
-   For more information about updating Block Storage volumes (for
-   example, resizing or transferring), see the `OpenStack End User
-   Guide `__.
-
-Block Storage Creation Failures
--------------------------------
-
-If a user tries to create a volume and the volume immediately goes into
-an error state, the best way to troubleshoot is to grep the cinder log
-files for the volume's UUID. First try the log files on the cloud
-controller, and then try the storage node where the volume was attempted
-to be created:
-
-.. code-block:: console
-
-   # grep 903b85d0-bacc-4855-a261-10843fc2d65b /var/log/cinder/*.log
-
-Shared File Systems Service
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Similar to Block Storage, the Shared File Systems service provides
-persistent storage, called a share, that can be used in multi-tenant
-environments. Users create and mount a share as a remote file system on
-any machine that allows mounting shares and has network access to the
-share exporter. This share can then be used for storing, sharing, and
-exchanging files. The default configuration of the Shared File Systems
-service depends on the back-end driver the admin chooses when starting
-the Shared File Systems service. For more information about existing
-back-end drivers, see `Share Backends
-`__
-of the Shared File Systems service Developer Guide. For example, when an
-OpenStack Block Storage based back end is used, the Shared File Systems
-service takes care of everything, including VMs, networking, keypairs,
-and security groups. Other configurations require more detailed
-knowledge of share functionality to set up and tune specific parameters
-and modes of share operation.
-
-Shares are a remote mountable file system, so users can mount a share to
-multiple hosts, and have it accessed from multiple hosts by multiple
-users at a time.
-With the Shared File Systems service, you can perform a
-large number of operations with shares:
-
-* Create, update, delete, and force-delete shares
-* Change access rules for shares, reset share state
-* Specify quotas for existing users or tenants
-* Create share networks
-* Define new share types
-* Perform operations with share snapshots:
-  create, change name, create a share from a snapshot, delete
-* Operate with consistency groups
-* Use security services
-
-For more information on share management see `Share management
-`__
-of chapter “Shared File Systems” in the OpenStack Administrator Guide.
-As for security services, remember that different drivers support
-different authentication methods, while the generic driver does not
-support security services at all (see section `Security services
-`__
-of chapter “Shared File Systems” in the OpenStack Administrator Guide).
-
-You can create a share in a network, list shares, and show information
-for, update, and delete a specified share. You can also create snapshots
-of shares (see `Share snapshots
-`__
-of chapter “Shared File Systems” in the OpenStack Administrator Guide).
-
-There are default and specific share types that allow you to filter or
-choose back ends before you create a share. The functions and behaviour
-of share types are similar to Block Storage volume types (see `Share types
-`__
-of chapter “Shared File Systems” in the OpenStack Administrator Guide).
-
-To help users keep and restore their data, the Shared File Systems
-service provides a mechanism to create and operate snapshots (see
-`Share snapshots
-`__
-of chapter “Shared File Systems” in the OpenStack Administrator Guide).
-
-A security service stores configuration information for clients for
-authentication and authorization. Inside Manila, a share network can be
-associated with up to three security types (for detailed information see
-`Security services
-`__
-of chapter “Shared File Systems” in the OpenStack Administrator Guide):
-
-* LDAP
-* Kerberos
-* Microsoft Active Directory
-
-The Shared File Systems service differs from the principles implemented
-in Block Storage. The Shared File Systems service can work in two modes:
-
-* Without interaction with share networks, in so called "no share
-  servers" mode.
-* Interacting with share networks.
-
-The Networking service is used by the Shared File Systems service to
-directly operate with share servers. To switch interaction with the
-Networking service on, create a share specifying a share network. To use
-the "share servers" mode even outside of OpenStack, a network plugin
-called StandaloneNetworkPlugin is used. In this case, provide network
-information in the configuration: IP range, network type, and
-segmentation ID. You can also add security services to a share network
-(see section
-`“Networking” `__
-of chapter “Shared File Systems” in the OpenStack Administrator Guide).
-
-
-The main idea of consistency groups is to enable you to create snapshots
-at the exact same point in time from multiple file system shares. Those
-snapshots can then be used for restoring all shares that were associated
-with the consistency group (see section `“Consistency
-groups” `__
-of chapter “Shared File Systems” in the OpenStack Administrator Guide).
-
-Shared File System storage allows administrators to set limits and
-quotas for specific tenants and users. Limits are the resource
-limitations that are allowed for each tenant or user.
-Limits consist of:
-
-* Rate limits
-* Absolute limits
-
-Rate limits control the frequency at which users can issue specific API
-requests. Rate limits are configured by administrators in a config file.
-Administrators can also specify quotas, that is, maximum values of
-absolute limits, per tenant, whereas users can see only the amount of
-resources they have consumed. An administrator can specify rate limits
-or quotas for the following resources:
-
-* Max amount of space available for all shares
-* Max number of shares
-* Max number of shared networks
-* Max number of share snapshots
-* Max total amount of all snapshots
-* Type and number of API calls that can be made in a specific time interval
-
-Users can see their rate limits and absolute limits by running the
-:command:`manila rate-limits` and :command:`manila absolute-limits`
-commands, respectively. For more details on limits and quotas see
-`Quotas and limits
-`__
-of the "Share management" section of the OpenStack Administrator Guide
-document.
-
-This section lists several of the most important use cases that
-demonstrate the main functions and abilities of the Shared File Systems
-service:
-
-* Create share
-* Operating with a share
-* Manage access to shares
-* Create snapshots
-* Create a share network
-* Manage a share network
-
-.. note::
-
-   The Shared File Systems service cannot warn you beforehand if it is
-   safe to write a specific large amount of data onto a certain share or
-   to remove a consistency group if it has a number of shares assigned
-   to it. In such potentially erroneous situations, if a mistake
-   happens, you can expect an error message or even shares or
-   consistency groups ending up in an incorrect status. You can also
-   expect some level of system corruption if a user tries to unmount an
-   unmanaged share while a process is using it for data transfer.
-
-
-.. _create_share:
-
-Create Share
-------------
-
-In this section, we examine the process of creating a simple share. It
-consists of several steps:
-
-- Check if there is an appropriate share type defined in the Shared
-  File Systems service
-
-- If such a share type does not exist, an Admin should create it using
-  the :command:`manila type-create` command before other users are able
-  to use it
-
-- Using a share network is optional. However, if you need one, check if
-  there is an appropriate network defined in the Shared File Systems
-  service by using the :command:`manila share-network-list` command. For
-  the information on creating a share network, see
-  :ref:`create_a_share_network` below in this chapter.
-
-- Create a public share using :command:`manila create`.
-
-- Make sure that the share has been created successfully and is ready
-  to use (check the share status and see the share export location)
-
-The same procedure is described below, step by step and in more detail.
-
-.. note::
-
-   Before you start, make sure that the Shared File Systems service is
-   installed on your OpenStack cluster and is ready to use.
-
-By default, there are no share types defined in the Shared File Systems
-service, so you can check whether a required one has already been
-created:
-
-.. code-block:: console
-
-   $ manila type-list
-   +------+--------+-----------+-----------+----------------------------------+----------------------+
-   | ID   | Name   | Visibility| is_default| required_extra_specs             | optional_extra_specs |
-   +------+--------+-----------+-----------+----------------------------------+----------------------+
-   | c0...| default| public    | YES       | driver_handles_share_servers:True| snapshot_support:True|
-   +------+--------+-----------+-----------+----------------------------------+----------------------+
-
-If the share types list is empty or does not contain a type you need,
-create the required share type using this command:
-
-.. code-block:: console
-
-   $ manila type-create netapp1 False --is_public True
-
-This command will create a public share type with the following
-parameters: ``name = netapp1``, ``spec_driver_handles_share_servers = False``
-
-You can now create a public share with the my_share_net network, the
-default share type, the NFS shared file systems protocol, and 1 GB size:
-
-.. code-block:: console
-
-   $ manila create nfs 1 --name "Share1" --description "My first share" \
-     --share-type default --share-network my_share_net --metadata aim=testing --public
-   +-----------------------------+--------------------------------------+
-   | Property                    | Value                                |
-   +-----------------------------+--------------------------------------+
-   | status                      | creating                             |
-   | share_type_name             | default                              |
-   | description                 | My first share                       |
-   | availability_zone           | None                                 |
-   | share_network_id            | 9c187d23-7e1d-4d91-92d0-77ea4b9b9496 |
-   | share_server_id             | None                                 |
-   | host                        |                                      |
-   | access_rules_status         | active                               |
-   | snapshot_id                 | None                                 |
-   | is_public                   | True                                 |
-   | task_state                  | None                                 |
-   | snapshot_support            | True                                 |
-   | id                          | edd82179-587e-4a87-9601-f34b2ca47e5b |
-   | size                        | 1                                    |
-   | name                        | Share1                               |
-   | share_type                  | e031d5e9-f113-491a-843f-607128a5c649 |
-   | has_replicas                | False                                |
-   | replication_type            | None                                 |
-   | created_at                  | 2016-03-20T00:00:00.000000           |
-   | share_proto                 | NFS                                  |
-   | consistency_group_id        | None                                 |
-   | source_cgsnapshot_member_id | None                                 |
-   | project_id                  | e81908b1bfe8468abb4791eae0ef6dd9     |
-   | metadata                    | {u'aim': u'testing'}                 |
-   +-----------------------------+--------------------------------------+
-
-To confirm that creation has been successful, see the share in the share
-list:
-
-.. code-block:: console
-
-   $ manila list
-   +----+-------+-----+------------+-----------+-------------------------------+----------------------+
-   | ID | Name  | Size| Share Proto| Share Type| Export location               | Host                 |
-   +----+-------+-----+------------+-----------+-------------------------------+----------------------+
-   | a..| Share1| 1   | NFS        | c0086...  | 10.254.0.3:/shares/share-2d5..| manila@generic1#GEN..|
-   +----+-------+-----+------------+-----------+-------------------------------+----------------------+
-
-Check the share status and see the share export location. After
-creation, the share status should become ``available``:
-
-.. code-block:: console
-
-   $ manila show Share1
-   +-----------------------------+----------------------------------------------------------------------+
-   | Property                    | Value                                                                |
-   +-----------------------------+----------------------------------------------------------------------+
-   | status                      | available                                                            |
-   | share_type_name             | default                                                              |
-   | description                 | My first share                                                       |
-   | availability_zone           | nova                                                                 |
-   | share_network_id            | 9c187d23-7e1d-4d91-92d0-77ea4b9b9496                                 |
-   | export_locations            |                                                                      |
-   |                             | path = 10.254.0.3:/shares/share-18cb05be-eb69-4cb2-810f-91c75ef30f90 |
-   |                             | preferred = False                                                    |
-   |                             | is_admin_only = False                                                |
-   |                             | id = d6a82c0d-36b0-438b-bf34-63f3932ddf4e                            |
-   |                             | share_instance_id = 18cb05be-eb69-4cb2-810f-91c75ef30f90             |
-   |                             | path = 10.0.0.3:/shares/share-18cb05be-eb69-4cb2-810f-91c75ef30f90   |
-   |                             | preferred = False                                                    |
-   |                             | is_admin_only = True                                                 |
-   |                             | id = 51672666-06b8-4741-99ea-64f2286f52e2                            |
-   |                             | share_instance_id = 18cb05be-eb69-4cb2-810f-91c75ef30f90             |
-   | share_server_id             | ea8b3a93-ab41-475e-9df1-0f7d49b8fa54                                 |
-   | host                        | manila@generic1#GENERIC1                                             |
-   | access_rules_status         | active                                                               |
-   | snapshot_id                 | None                                                                 |
-   | is_public                   | True                                                                 |
-   | task_state                  | None                                                                 |
-   | snapshot_support            | True                                                                 |
-   | id                          | e7364bcc-3821-49bf-82d6-0c9f0276d4ce                                 |
-   | size                        | 1                                                                    |
-   | name                        | Share1                                                               |
-   | share_type                  | e031d5e9-f113-491a-843f-607128a5c649                                 |
-   | has_replicas                | False                                                                |
-   | replication_type            | None                                                                 |
-   | created_at                  | 2016-03-20T00:00:00.000000                                           |
-   | share_proto                 | NFS                                                                  |
-   | consistency_group_id        | None                                                                 |
-   | source_cgsnapshot_member_id | None                                                                 |
-   | project_id                  | e81908b1bfe8468abb4791eae0ef6dd9                                     |
-   | metadata                    | {u'aim': u'testing'}                                                 |
-   +-----------------------------+----------------------------------------------------------------------+
-
-The value ``is_public`` defines the level of visibility for the share:
-whether other tenants can or cannot see the share. By default, the share
-is private. Now you can mount the created share like a remote file
-system and use it for your purposes.
-
-.. note::
-
-   See `Share Management
-   `__
-   of the “Shared File Systems” section of the OpenStack Administrator
-   Guide document for the details on share management operations.
-
-Manage Access To Shares
------------------------
-
-Currently, you have a share and would like to control access to this
-share for other users. For this, you have to perform a number of steps
-and operations. Before getting to manage access to the share, pay
-attention to the following important parameters. To grant or deny access
-to a share, specify one of these supported share access levels:
-
-- ``rw``: read and write (RW) access. This is the default value.
-
-- ``ro``: read-only (RO) access.
-
-Additionally, you should also specify one of these supported
-authentication methods:
-
-- ``ip``: authenticates an instance through its IP address. A valid
-  format is XX.XX.XX.XX or XX.XX.XX.XX/XX. For example 0.0.0.0/0.
-
-- ``cert``: authenticates an instance through a TLS certificate.
-  Specify the TLS identity as the IDENTKEY. A valid value is any string
-  up to 64 characters long in the common name (CN) of the certificate.
-  The meaning of a string depends on its interpretation.
-
-- ``user``: authenticates by a specified user or group name. A valid
-  value is an alphanumeric string that can contain some special
-  characters and is from 4 to 32 characters long.
-
-.. note::
-
-   Do not mount a share without an access rule! This can lead to an
-   exception.
-
-Allow access to the share with IP access type and 10.254.0.4 IP address:
-
-.. code-block:: console
-
-   $ manila access-allow Share1 ip 10.254.0.4 --access-level rw
-   +--------------+--------------------------------------+
-   | Property     | Value                                |
-   +--------------+--------------------------------------+
-   | share_id     | 7bcd888b-681b-4836-ac9c-c3add4e62537 |
-   | access_type  | ip                                   |
-   | access_to    | 10.254.0.4                           |
-   | access_level | rw                                   |
-   | state        | new                                  |
-   | id           | de715226-da00-4cfc-b1ab-c11f3393745e |
-   +--------------+--------------------------------------+
-
-Mount the Share:
-
-.. code-block:: console
-
-   $ sudo mount -v -t nfs 10.254.0.5:/shares/share-5789ddcf-35c9-4b64-a28a-7f6a4a574b6a /mnt/
-
-Then check that the share mounted successfully and according to the
-specified access rules:
-
-.. code-block:: console
-
-   $ manila access-list Share1
-   +--------------------------------------+-------------+------------+--------------+--------+
-   | id                                   | access type | access to  | access level | state  |
-   +--------------------------------------+-------------+------------+--------------+--------+
-   | 4f391c6b-fb4f-47f5-8b4b-88c5ec9d568a | user        | demo       | rw           | error  |
-   | de715226-da00-4cfc-b1ab-c11f3393745e | ip          | 10.254.0.4 | rw           | active |
-   +--------------------------------------+-------------+------------+--------------+--------+
-
-.. note::
-
-   Different share features are supported by different share drivers.
-   In these examples, the generic (Cinder as a back end) driver was
-   used, which does not support the ``user`` and ``cert`` authentication
-   methods.
-
-.. tip::
-
-   For the details of features supported by different drivers see
-   `Manila share features support mapping
-   `__
-   of the Manila Developer Guide document.
-
-Manage Shares
--------------
-
-There are several other useful operations you can perform when working
-with shares.
-
-Update Share
-------------
-
-To change the name of a share, update its description, or change its
-level of visibility for other tenants, use this command:
-
-.. code-block:: console
-
-   $ manila update Share1 --description "My first share. Updated" --is-public False
-
-Check the attributes of the updated Share1:
-
-.. code-block:: console
-
-   $ manila show Share1
-   +-----------------------------+----------------------------------------------------------------------+
-   | Property                    | Value                                                                |
-   +-----------------------------+----------------------------------------------------------------------+
-   | status                      | available                                                            |
-   | share_type_name             | default                                                              |
-   | description                 | My first share. Updated                                              |
-
-Manage Shares
--------------
-
-There are several other useful operations you can perform when working
-with shares.
-
-Update Share
-------------
-
-To change the name of a share, update its description, or change its
-level of visibility for other tenants, use this command:
-
-.. code-block:: console
-
-   $ manila update Share1 --description "My first share. Updated" --is-public False
-
-Check the attributes of the updated Share1:
-
-.. code-block:: console
-
-   $ manila show Share1
-   +-----------------------------+----------------------------------------------------------------------+
-   | Property                    | Value                                                                |
-   +-----------------------------+----------------------------------------------------------------------+
-   | status                      | available                                                            |
-   | share_type_name             | default                                                              |
-   | description                 | My first share. Updated                                              |
-   | availability_zone           | nova                                                                 |
-   | share_network_id            | 9c187d23-7e1d-4d91-92d0-77ea4b9b9496                                 |
-   | export_locations            |                                                                      |
-   |                             | path = 10.254.0.3:/shares/share-18cb05be-eb69-4cb2-810f-91c75ef30f90 |
-   |                             | preferred = False                                                    |
-   |                             | is_admin_only = False                                                |
-   |                             | id = d6a82c0d-36b0-438b-bf34-63f3932ddf4e                            |
-   |                             | share_instance_id = 18cb05be-eb69-4cb2-810f-91c75ef30f90             |
-   |                             | path = 10.0.0.3:/shares/share-18cb05be-eb69-4cb2-810f-91c75ef30f90   |
-   |                             | preferred = False                                                    |
-   |                             | is_admin_only = True                                                 |
-   |                             | id = 51672666-06b8-4741-99ea-64f2286f52e2                            |
-   |                             | share_instance_id = 18cb05be-eb69-4cb2-810f-91c75ef30f90             |
-   | share_server_id             | ea8b3a93-ab41-475e-9df1-0f7d49b8fa54                                 |
-   | host                        | manila@generic1#GENERIC1                                             |
-   | access_rules_status         | active                                                               |
-   | snapshot_id                 | None                                                                 |
-   | is_public                   | False                                                                |
-   | task_state                  | None                                                                 |
-   | snapshot_support            | True                                                                 |
-   | id                          | e7364bcc-3821-49bf-82d6-0c9f0276d4ce                                 |
-   | size                        | 1                                                                    |
-   | name                        | Share1                                                               |
-   | share_type                  | e031d5e9-f113-491a-843f-607128a5c649                                 |
-   | has_replicas                | False                                                                |
-   | replication_type            | None                                                                 |
-   | created_at                  | 2016-03-20T00:00:00.000000                                           |
-   | share_proto                 | NFS                                                                  |
-   | consistency_group_id        | None                                                                 |
-   | source_cgsnapshot_member_id | None                                                                 |
-   | project_id                  | e81908b1bfe8468abb4791eae0ef6dd9                                     |
-   | metadata                    | {u'aim': u'testing'}                                                 |
-   +-----------------------------+----------------------------------------------------------------------+
-
-Reset Share State
------------------
-
-Sometimes a share may get stuck in an erroneous or a transitional
-state. Unprivileged users do not have the access rights needed to
-correct this, but with cloud administrator permissions you can reset
-the share's state:
-
-.. code-block:: console
-
-   $ manila reset-state [--state <state>] <share_name_or_ID>
-
-where ``state`` indicates the state to assign to the share. Options
-include the ``available``, ``error``, ``creating``, ``deleting``, and
-``error_deleting`` states.
-
-After running
-
-.. code-block:: console
-
-   $ manila reset-state Share2 --state deleting
-
-check the share's status:
-
-.. code-block:: console
-
-   $ manila show Share2
-   +-----------------------------+-------------------------------------------+
-   | Property                    | Value                                     |
-   +-----------------------------+-------------------------------------------+
-   | status                      | deleting                                  |
-   | share_type_name             | default                                   |
-   | description                 | share from a snapshot.                    |
-   | availability_zone           | nova                                      |
-   | share_network_id            | 5c3cbabb-f4da-465f-bc7f-fadbe047b85a      |
-   | export_locations            | []                                        |
-   | share_server_id             | 41b7829d-7f6b-4c96-aea5-d106c2959961      |
-   | host                        | manila@generic1#GENERIC1                  |
-   | snapshot_id                 | 962e8126-35c3-47bb-8c00-f0ee37f42ddd      |
-   | is_public                   | False                                     |
-   | task_state                  | None                                      |
-   | snapshot_support            | True                                      |
-   | id                          | b6b0617c-ea51-4450-848e-e7cff69238c7      |
-   | size                        | 1                                         |
-   | name                        | Share2                                    |
-   | share_type                  | c0086582-30a6-4060-b096-a42ec9d66b86      |
-   | created_at                  | 2015-09-25T06:25:50.000000                |
-   | export_location             | 10.254.0.3:/shares/share-1dc2a471-3d47-...|
-   | share_proto                 | NFS                                       |
-   | consistency_group_id        | None                                      |
-   | source_cgsnapshot_member_id | None                                      |
-   | project_id                  | 20787a7ba11946adad976463b57d8a2f          |
-   | metadata                    | {u'source': u'snapshot'}                  |
-   +-----------------------------+-------------------------------------------+
-
-Delete Share
-------------
-
-If you no longer need a share, you can delete it using the
-:command:`manila delete share_name_or_ID` command:
-
-.. code-block:: console
-
-   $ manila delete Share2
-
-.. note::
-
-   If you specified a consistency group when creating the share, you
-   must provide the ``--consistency-group`` parameter to delete it:
-
-.. code-block:: console
-
-   $ manila delete ba52454e-2ea3-47fa-a683-3176a01295e6 --consistency-group \
-     ffee08d9-c86c-45e5-861e-175c731daca2
-
-Sometimes a share hangs in one of the transitional states (that is,
-``creating``, ``deleting``, ``managing``, ``unmanaging``,
-``extending``, or ``shrinking``). In that case, to delete it, use the
-:command:`manila force-delete share_name_or_ID` command, which
-requires administrative permissions to run:
-
-.. code-block:: console
-
-   $ manila force-delete b6b0617c-ea51-4450-848e-e7cff69238c7
-
-.. tip::
-
-   For more details and additional information about other cases,
-   features, API commands, and so on, see the `Share Management
-   `__
-   part of the “Shared File Systems” section of the OpenStack
-   Administrator Guide.
-
-Create Snapshots
-----------------
-
-The Shared File Systems service provides snapshots to help users
-restore their own data. To create a snapshot, use the
-:command:`manila snapshot-create` command:
-
-.. code-block:: console
-
-   $ manila snapshot-create Share1 --name Snapshot1 --description "Snapshot of Share1"
-   +-------------------+--------------------------------------+
-   | Property          | Value                                |
-   +-------------------+--------------------------------------+
-   | status            | creating                             |
-   | share_id          | e7364bcc-3821-49bf-82d6-0c9f0276d4ce |
-   | description       | Snapshot of Share1                   |
-   | created_at        | 2016-03-20T00:00:00.000000           |
-   | share_proto       | NFS                                  |
-   | provider_location | None                                 |
-   | id                | a96cf025-92d1-4012-abdd-bb0f29e5aa8f |
-   | size              | 1                                    |
-   | share_size        | 1                                    |
-   | name              | Snapshot1                            |
-   +-------------------+--------------------------------------+
-
-Then, if needed, update the name and description of the created
-snapshot:
-
-.. code-block:: console
-
-   $ manila snapshot-rename Snapshot1 Snapshot_1 --description "Snapshot of Share1. Updated."
-
-To make sure that the snapshot is available, run:
-
-.. code-block:: console
-
-   $ manila snapshot-show Snapshot1
-   +-------------------+--------------------------------------+
-   | Property          | Value                                |
-   +-------------------+--------------------------------------+
-   | status            | available                            |
-   | share_id          | e7364bcc-3821-49bf-82d6-0c9f0276d4ce |
-   | description       | Snapshot of Share1                   |
-   | created_at        | 2016-03-30T10:53:19.000000           |
-   | share_proto       | NFS                                  |
-   | provider_location | 3ca7a3b2-9f9f-46af-906f-6a565bf8ee37 |
-   | id                | a96cf025-92d1-4012-abdd-bb0f29e5aa8f |
-   | size              | 1                                    |
-   | share_size        | 1                                    |
-   | name              | Snapshot1                            |
-   +-------------------+--------------------------------------+
-
-.. tip::
-
-   For more details and additional information on snapshots, see the
-   `Share Snapshots
-   `__
-   part of the “Shared File Systems” section of the OpenStack
-   Administrator Guide.
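-
-A snapshot can also be used to create a new share, which is one way to
-restore the data it holds (Share2 in the examples above was created
-this way, as its metadata shows). A minimal sketch, reusing the
-snapshot ID shown above:
-
-.. code-block:: console
-
-   $ manila create NFS 1 --snapshot-id a96cf025-92d1-4012-abdd-bb0f29e5aa8f --name Share2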
-
-.. _create_a_share_network:
-
-Create a Share Network
-----------------------
-
-The Shared File Systems service can manage share servers on its own,
-which requires interaction with the Networking service. If the
-selected driver runs in a mode that requires such interaction, you
-need to specify a share network when a share is created. For
-information on share creation, see :ref:`create_share` earlier in this
-chapter. First, check the list of existing share networks:
-
-.. code-block:: console
-
-   $ manila share-network-list
-   +--------------------------------------+--------------+
-   | id                                   | name         |
-   +--------------------------------------+--------------+
-   +--------------------------------------+--------------+
-
-If the share network list is empty or does not contain the network you
-require, create one; for example, a share network backed by a private
-network and subnet:
-
-.. code-block:: console
-
-   $ manila share-network-create --neutron-net-id 5ed5a854-21dc-4ed3-870a-117b7064eb21 \
-     --neutron-subnet-id 74dcfb5a-b4d7-4855-86f5-a669729428dc --name my_share_net \
-     --description "My first share network"
-   +-------------------+--------------------------------------+
-   | Property          | Value                                |
-   +-------------------+--------------------------------------+
-   | name              | my_share_net                         |
-   | segmentation_id   | None                                 |
-   | created_at        | 2015-09-24T12:06:32.602174           |
-   | neutron_subnet_id | 74dcfb5a-b4d7-4855-86f5-a669729428dc |
-   | updated_at        | None                                 |
-   | network_type      | None                                 |
-   | neutron_net_id    | 5ed5a854-21dc-4ed3-870a-117b7064eb21 |
-   | ip_version        | None                                 |
-   | nova_net_id       | None                                 |
-   | cidr              | None                                 |
-   | project_id        | 20787a7ba11946adad976463b57d8a2f     |
-   | id                | 5c3cbabb-f4da-465f-bc7f-fadbe047b85a |
-   | description       | My first share network               |
-   +-------------------+--------------------------------------+
-
-The ``segmentation_id``, ``cidr``, ``ip_version``, and ``network_type``
-share network attributes are automatically set to the values determined
-by the network provider.
-
-Then confirm that the network was created by requesting the list of
-share networks once again:
-
-.. code-block:: console
-
-   $ manila share-network-list
-   +--------------------------------------+--------------+
-   | id                                   | name         |
-   +--------------------------------------+--------------+
-   | 5c3cbabb-f4da-465f-bc7f-fadbe047b85a | my_share_net |
-   +--------------------------------------+--------------+
-
-Finally, to create a share that uses this share network, return to the
-Create Share use case described earlier in this chapter.
-
-.. tip::
-
-   See the `Share Networks
-   `__
-   part of the “Shared File Systems” section of the OpenStack
-   Administrator Guide for more details.
-
-Manage a Share Network
-----------------------
-
-A pair of useful commands help you manipulate share networks. To
-start, check the network list:
-
-.. code-block:: console
-
-   $ manila share-network-list
-   +--------------------------------------+--------------+
-   | id                                   | name         |
-   +--------------------------------------+--------------+
-   | 5c3cbabb-f4da-465f-bc7f-fadbe047b85a | my_share_net |
-   +--------------------------------------+--------------+
-
-If you configured the back end with
-``driver_handles_share_servers = True`` (with share servers) and have
-already performed some operations in the Shared File Systems service,
-you will see a ``manila_service_network`` in the list of networks.
-This network was created by the share driver for internal use.
-
-.. code-block:: console
-
-   $ openstack network list
-   +--------------+------------------------+------------------------------------+
-   | ID           | Name                   | Subnets                            |
-   +--------------+------------------------+------------------------------------+
-   | 3b5a629a-e...| manila_service_network | 4f366100-50... 10.254.0.0/28       |
-   | bee7411d-d...| public                 | 884a6564-01... 2001:db8::/64       |
-   |              |                        | e6da81fa-55... 172.24.4.0/24       |
-   | 5ed5a854-2...| private                | 74dcfb5a-bd... 10.0.0.0/24         |
-   |              |                        | cc297be2-51... fd7d:177d:a48b::/64 |
-   +--------------+------------------------+------------------------------------+
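-
-To inspect the share network itself, rather than the neutron network
-behind it, you can use :command:`manila share-network-show`; a quick
-sketch using the network created above (output omitted here):
-
-.. code-block:: console
-
-   $ manila share-network-show my_share_net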
-
-You can also see detailed information about this network, including
-its ``network_type`` and ``segmentation_id`` fields:
-
-.. code-block:: console
-
-   $ openstack network show manila_service_network
-   +---------------------------+--------------------------------------+
-   | Field                     | Value                                |
-   +---------------------------+--------------------------------------+
-   | admin_state_up            | True                                 |
-   | availability_zone_hints   |                                      |
-   | availability_zones        | nova                                 |
-   | created_at                | 2016-03-20T00:00:00                  |
-   | description               |                                      |
-   | id                        | ef5282ab-dbf9-4d47-91d4-b0cc9b164567 |
-   | ipv4_address_scope        |                                      |
-   | ipv6_address_scope        |                                      |
-   | mtu                       | 1450                                 |
-   | name                      | manila_service_network               |
-   | port_security_enabled     | True                                 |
-   | provider:network_type     | vxlan                                |
-   | provider:physical_network |                                      |
-   | provider:segmentation_id  | 1047                                 |
-   | router:external           | False                                |
-   | shared                    | False                                |
-   | status                    | ACTIVE                               |
-   | subnets                   | aba49c7d-c7eb-44b9-9c8f-f6112b05a2e0 |
-   | tags                      |                                      |
-   | tenant_id                 | f121b3ee03804266af2959e56671b24a     |
-   | updated_at                | 2016-03-20T00:00:00                  |
-   +---------------------------+--------------------------------------+
-
-You can also add security services to a share network and remove them
-from it, as sketched below.
-
-.. tip::
-
-   For details, see the `Security Services
-   `__
-   subsection of the “Shared File Systems” section of the OpenStack
-   Administrator Guide.
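-
-For instance, attaching an existing security service to the share
-network might look like the following sketch (the security service
-name here is hypothetical):
-
-.. code-block:: console
-
-   $ manila share-network-security-service-add my_share_net my_ldap_service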
-
-Instances
-~~~~~~~~~
-
-Instances are the running virtual machines within an OpenStack cloud.
-This section deals with how to work with them and their underlying
-images, their network properties, and how they are represented in the
-database.
-
-Starting Instances
-------------------
-
-To launch an instance, you need to select an image, a flavor, and a
-name. The name needn't be unique, but your life will be simpler if it
-is, because many tools will use the name in place of the UUID as long
-as the name is unique. You can start an instance from the dashboard
-from the :guilabel:`Launch Instance` button on the
-:guilabel:`Instances` page or by selecting the :guilabel:`Launch`
-action next to an image or a snapshot on the :guilabel:`Images` page.
-
-On the command line, do this:
-
-.. code-block:: console
-
-   $ openstack server create --flavor FLAVOR --image IMAGE_NAME_OR_ID INSTANCE_NAME
-
-There are a number of optional items that can be specified. You should
-read the rest of this section before trying to start an instance, but
-this is the base command that later details are layered upon.
-
-To delete instances from the dashboard, select the
-:guilabel:`Delete Instance` action next to the instance on the
-:guilabel:`Instances` page.
-
-.. note::
-
-   In releases prior to Mitaka, select the equivalent
-   :guilabel:`Terminate instance` action.
-
-From the command line, do this:
-
-.. code-block:: console
-
-   $ openstack server delete INSTANCE_ID
-
-It is important to note that powering off an instance does not
-terminate it in the OpenStack sense.
-
-Instance Boot Failures
-----------------------
-
-If an instance fails to start and immediately moves to an error state,
-there are a few different ways to track down what has gone wrong. Some
-of these can be done with normal user access, while others require
-access to your log server or compute nodes.
-
-The simplest reasons for nodes to fail to launch are quota violations
-or the scheduler being unable to find a suitable compute node on which
-to run the instance. In these cases, the error is apparent when you
-run :command:`openstack server show` on the faulted instance:
-
-.. code-block:: console
-
-   $ openstack server show test-instance
-   +--------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
-   | Field                                | Value                                                                                                                                 |
-   +--------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
-   | OS-DCF:diskConfig                    | AUTO                                                                                                                                  |
-   | OS-EXT-AZ:availability_zone          | nova                                                                                                                                  |
-   | OS-EXT-SRV-ATTR:host                 | None                                                                                                                                  |
-   | OS-EXT-SRV-ATTR:hypervisor_hostname  | None                                                                                                                                  |
-   | OS-EXT-SRV-ATTR:instance_name        | instance-0000000a                                                                                                                     |
-   | OS-EXT-STS:power_state               | NOSTATE                                                                                                                               |
-   | OS-EXT-STS:task_state                | None                                                                                                                                  |
-   | OS-EXT-STS:vm_state                  | error                                                                                                                                 |
-   | OS-SRV-USG:launched_at               | None                                                                                                                                  |
-   | OS-SRV-USG:terminated_at             | None                                                                                                                                  |
-   | accessIPv4                           |                                                                                                                                       |
-   | accessIPv6                           |                                                                                                                                       |
-   | addresses                            |                                                                                                                                       |
-   | config_drive                         |                                                                                                                                       |
-   | created                              | 2016-11-23T07:51:53Z                                                                                                                  |
-   | fault                                | {u'message': u'Build of instance 6ec42311-a121-4887-aece-48fb93a4a098 aborted: Failed to allocate the network(s), not rescheduling.', |
-   |                                      | u'code': 500, u'details': u'  File "/usr/lib/python2.7/site-packages/nova/compute/manager.py", line 1779, in                          |
-   |                                      | _do_build_and_run_instance\n    filter_properties)\n  File "/usr/lib/python2.7/site-packages/nova/compute/manager.py", line 1960, in  |
-   |                                      | _build_and_run_instance\n    reason=msg)\n', u'created': u'2016-11-23T07:57:04Z'}                                                     |
-   | flavor                               | m1.tiny (1)                                                                                                                           |
-   | hostId                               |                                                                                                                                       |
-   | id                                   | 6ec42311-a121-4887-aece-48fb93a4a098                                                                                                  |
-   | image                                | cirros (9fef3b2d-c35d-4b61-bea8-09cc6dc41829)                                                                                         |
-   | key_name                             | None                                                                                                                                  |
-   | name                                 | test-instance                                                                                                                         |
-   | os-extended-volumes:volumes_attached | []                                                                                                                                    |
-   | project_id                           | 5669caad86a04256994cdf755df4d3c1                                                                                                      |
-   | properties                           |                                                                                                                                       |
-   | status                               | ERROR                                                                                                                                 |
-   | updated                              | 2016-11-23T07:57:04Z                                                                                                                  |
-   | user_id                              | c36cec73b0e44876a4478b1e6cd749bb                                                                                                      |
-   +--------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
-
-In this case, the ``fault`` message shows that the build was aborted
-because Compute failed to allocate the instance's network. A
-``NoValidHost`` fault here would instead indicate that the scheduler
-was unable to match the instance requirements.
-
-If :command:`openstack server show` does not sufficiently explain the
-failure, searching for the instance UUID in the ``nova-compute.log``
-on the compute node it was scheduled on, or in the
-``nova-scheduler.log`` on your scheduler hosts, is a good place to
-start looking for lower-level problems.
-
-Using :command:`openstack server show` as an admin user will show the
-compute node the instance was scheduled on in the
-``OS-EXT-SRV-ATTR:host`` field. If the instance failed during
-scheduling, this field is blank.
-
-Using Instance-Specific Data
-----------------------------
-
-There are two main types of instance-specific data: metadata and user
-data.
-
-Instance metadata
------------------
-
-For Compute, instance metadata is a collection of key-value pairs
-associated with an instance. Compute reads and writes these key-value
-pairs at any time during the instance lifetime, from inside and
-outside the instance, when the end user uses the Compute API to do so.
-However, you cannot query the instance-associated key-value pairs with
-the metadata service that is compatible with the Amazon EC2 metadata
-service.
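-
-For example, you can attach an arbitrary key-value pair to a running
-instance from the command line; a quick sketch using the instance from
-the previous example (the key and value are made up):
-
-.. code-block:: console
-
-   $ openstack server set --property purpose=testing test-instance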
- -For an example of instance metadata, users can generate and register SSH -keys using the :command:`openstack keypair create` command: - -.. code-block:: console - - $ openstack keypair create mykey > mykey.pem - -This creates a key named ``mykey``, which you can associate with -instances. The file ``mykey.pem`` is the private key, which should be -saved to a secure location because it allows root access to instances -the ``mykey`` key is associated with. - -Use this command to register an existing key with OpenStack: - -.. code-block:: console - - $ openstack keypair create --public-key mykey.pub mykey - -.. note:: - - You must have the matching private key to access instances - associated with this key. - -To associate a key with an instance on boot, add ``--key-name mykey`` to -your command line. For example: - -.. code-block:: console - - $ openstack server create --image ubuntu-cloudimage --flavor 2 \ - --key-name mykey myimage - -When booting a server, you can also add arbitrary metadata so that you -can more easily identify it among other running instances. Use the -``--property`` option with a key-value pair, where you can make up -the string for both the key and the value. For example, you could add a -description and also the creator of the server: - -.. code-block:: console - - $ openstack server create --image=test-image --flavor=1 \ - --property description='Small test image' smallimage - -When viewing the server information, you can see the metadata included -on the metadata line: - -.. code-block:: console - - $ openstack server show smallimage - - +--------------------------------------+----------------------------------------------------------+ - | Field | Value | - +--------------------------------------+----------------------------------------------------------+ - | OS-DCF:diskConfig | MANUAL | - | OS-EXT-AZ:availability_zone | nova | - | OS-EXT-SRV-ATTR:host | rdo-newton.novalocal | - | OS-EXT-SRV-ATTR:hypervisor_hostname | rdo-newton.novalocal | - | OS-EXT-SRV-ATTR:instance_name | instance-00000002 | - | OS-EXT-STS:power_state | Running | - | OS-EXT-STS:task_state | None | - | OS-EXT-STS:vm_state | active | - | OS-SRV-USG:launched_at | 2016-12-07T11:20:08.000000 | - | OS-SRV-USG:terminated_at | None | - | accessIPv4 | | - | accessIPv6 | | - | addresses | public=172.24.4.227 | - | config_drive | | - | created | 2016-12-07T11:17:44Z | - | flavor | m1.tiny (1) | - | hostId | aca973d5b7981faaf8c713a0130713bbc1e64151be65c8dfb53039f7 | - | id | 4f7c6b2c-f27e-4ccd-a606-6bfc9d7c0d91 | - | image | cirros (01bcb649-45d7-4e3d-8a58-1fcc87816907) | - | key_name | None | - | name | smallimage | - | os-extended-volumes:volumes_attached | [] | - | progress | 0 | - | project_id | 2daf82a578e9437cab396c888ff0ca57 | - | properties | description='Small test image' | - | security_groups | [{u'name': u'default'}] | - | status | ACTIVE | - | updated | 2016-12-07T11:20:08Z | - | user_id | 8cbea24666ae49bbb8c1641f9b12d2d2 | - +--------------------------------------+----------------------------------------------------------+ - -Instance user data ------------------- - -The ``user-data`` key is a special key in the metadata service that -holds a file that cloud-aware applications within the guest instance can -access. For example, -`cloudinit `__ is an open -source package from Ubuntu, but available in most distributions, that -handles early initialization of a cloud instance that makes use of this -user data. 
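-
-What goes into user data depends on what consumes it. For
-``cloud-init``, a minimal sketch of a ``#cloud-config`` file (the
-contents here are purely illustrative) might be:
-
-.. code-block:: yaml
-
-   #cloud-config
-   package_update: true
-   packages:
-     - htop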
-
-This user data can be put in a file on your local system and then
-passed in at instance creation with the flag
-``--user-data <user-data-file>``.
-
-For example:
-
-.. code-block:: console
-
-   $ openstack server create --image ubuntu-cloudimage --flavor 1 \
-     --user-data mydata.file mydatainstance
-
-To understand the difference between user data and metadata, note that
-user data is provided before an instance is started. This user data is
-accessible from within the instance when it is running. It can be used
-to store configuration, a script, or anything the tenant wants.
-
-File injection
---------------
-
-Arbitrary local files can also be placed into the instance file system
-at creation time by using the ``--file <dst-path=src-path>`` option.
-You may store up to five files.
-
-For example, let's say you have a special ``authorized_keys`` file
-named ``special_authorized_keysfile`` that for some reason you want to
-put on the instance instead of using the regular SSH key injection. In
-this case, you can use the following command:
-
-.. code-block:: console
-
-   $ openstack server create --image ubuntu-cloudimage --flavor 1 \
-     --file /root/.ssh/authorized_keys=special_authorized_keysfile \
-     authkeyinstance
-
-Associating Security Groups
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Security groups, as discussed earlier, are typically required to allow
-network traffic to an instance, unless the default security group for
-a project has been modified to be more permissive.
-
-Adding security groups is typically done on instance boot. When
-launching from the dashboard, you do this on the
-:guilabel:`Access & Security` tab of the :guilabel:`Launch Instance`
-dialog. When launching from the command line, append
-``--security-groups`` with a comma-separated list of security groups.
-
-It is also possible to add and remove security groups when an instance
-is running. Currently this is only available through the command-line
-tools. Here is an example:
-
-.. code-block:: console
-
-   $ openstack server add security group SERVER SECURITY_GROUP_NAME_OR_ID
-
-.. code-block:: console
-
-   $ openstack server remove security group SERVER SECURITY_GROUP_NAME_OR_ID
-
-Floating IPs
-~~~~~~~~~~~~
-
-Where floating IPs are configured in a deployment, each project will
-have a limited number of floating IPs controlled by a quota. However,
-these need to be allocated to the project from the central pool prior
-to their use, usually by the administrator of the project. To allocate
-a floating IP to a project, use the :guilabel:`Allocate IP To Project`
-button on the :guilabel:`Floating IPs` tab of the
-:guilabel:`Access & Security` page of the dashboard. The command line
-can also be used:
-
-.. code-block:: console
-
-   $ openstack floating ip create NETWORK_NAME_OR_ID
-
-Once allocated, a floating IP can be assigned to running instances
-from the dashboard either by selecting :guilabel:`Associate` from the
-actions drop-down next to the IP on the :guilabel:`Floating IPs` tab
-of the :guilabel:`Access & Security` page or by making this selection
-next to the instance you want to associate it with on the
-:guilabel:`Instances` page. The inverse action,
-:guilabel:`Dissociate Floating IP`, is available from the
-:guilabel:`Floating IPs` tab of the :guilabel:`Access & Security` page
-and from the :guilabel:`Instances` page.
-
-To associate or disassociate a floating IP with a server from the
-command line, use the following commands:
-
-.. code-block:: console
-
-   $ openstack server add floating ip SERVER IP_ADDRESS
-
-.. code-block:: console
-
-   $ openstack server remove floating ip SERVER IP_ADDRESS
-
-Attaching Block Storage
-~~~~~~~~~~~~~~~~~~~~~~~
-
-You can attach block storage to instances from the dashboard on the
-:guilabel:`Volumes` page. Click the :guilabel:`Manage Attachments`
-action next to the volume you want to attach.
-
-To perform this action from the command line, run the following
-command:
-
-.. code-block:: console
-
-   $ openstack server add volume SERVER VOLUME_NAME_OR_ID --device DEVICE
-
-You can also specify block device mapping at instance boot time
-through the nova command-line client with this option set:
-
-.. code-block:: console
-
-   --block-device-mapping
-
-The block device mapping format is
-``<dev-name>=<id>:<type>:<size(GB)>:<delete-on-terminate>``, where:
-
-dev-name
-   A device name where the volume is attached in the system at
-   ``/dev/dev_name``
-
-id
-   The ID of the volume to boot from, as shown in the output of
-   :command:`openstack volume list`
-
-type
-   Either ``snap``, which means that the volume was created from a
-   snapshot, or anything other than ``snap`` (a blank string is
-   valid). In the following example, the volume was not created from a
-   snapshot, so this field is left blank.
-
-size (GB)
-   The size of the volume in gigabytes. It is safe to leave this blank
-   and have the Compute Service infer the size.
-
-delete-on-terminate
-   A boolean to indicate whether the volume should be deleted when the
-   instance is terminated. True can be specified as ``True`` or ``1``.
-   False can be specified as ``False`` or ``0``.
-
-The following command will boot a new instance and attach a volume at
-the same time. The volume of ID 13 will be attached as ``/dev/vdc``.
-It is not a snapshot, does not specify a size, and will not be deleted
-when the instance is terminated:
-
-.. code-block:: console
-
-   $ openstack server create --image 4042220e-4f5e-4398-9054-39fbd75a5dd7 \
-     --flavor 2 --key-name mykey --block-device-mapping vdc=13:::0 \
-     boot-with-vol-test
-
-If you have previously prepared block storage with a bootable file
-system image, it is even possible to boot from persistent block
-storage. The following command boots an image from the specified
-volume. It is similar to the previous command, but the image is
-omitted and the volume is now attached as ``/dev/vda``:
-
-.. code-block:: console
-
-   $ openstack server create --flavor 2 --key-name mykey \
-     --block-device-mapping vda=13:::0 boot-from-vol-test
-
-Read more detailed instructions for launching an instance from a
-bootable volume in the `OpenStack End User
-Guide `__.
-
-To boot normally from an image and attach block storage, map to a
-device other than vda. You can find instructions for launching an
-instance and attaching a volume to the instance and for copying the
-image to the attached volume in the `OpenStack End User
-Guide `__.
-
-Taking Snapshots
-~~~~~~~~~~~~~~~~
-
-The OpenStack snapshot mechanism allows you to create new images from
-running instances. This is very convenient for upgrading base images
-or for taking a published image and customizing it for local use. To
-snapshot a running instance to an image using the CLI, do this:
-
-.. code-block:: console
-
-   $ openstack server image create --name IMAGE_NAME INSTANCE_NAME_OR_ID
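-
-The new image appears in the image list once the snapshot upload
-completes; a quick way to check on it (a sketch):
-
-.. code-block:: console
-
-   $ openstack image list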
-
-The dashboard interface for snapshots can be confusing because the
-snapshots and images are displayed on the same :guilabel:`Images`
-page. However, an instance snapshot *is* an image. The only difference
-between an image that you upload directly to the Image Service and an
-image that you create by snapshot is that an image created by snapshot
-has additional properties in the glance database. These properties are
-found in the ``image_properties`` table and include:
-
-.. list-table::
-   :header-rows: 1
-
-   * - Name
-     - Value
-   * - ``image_type``
-     - snapshot
-   * - ``instance_uuid``
-     - <uuid of instance that was snapshotted>
-   * - ``base_image_ref``
-     - <uuid of original image of instance that was snapshotted>
-   * - ``image_location``
-     - snapshot
-
-Live Snapshots
---------------
-
-Live snapshotting is a feature that allows users to snapshot running
-virtual machines without pausing them. These snapshots are simply
-disk-only snapshots. Snapshotting an instance can now be performed
-with no downtime (assuming QEMU 1.3+ and libvirt 1.0+ are used).
-
-.. note::
-
-   If you use libvirt version ``1.2.2``, you may experience
-   intermittent problems with live snapshot creation.
-
-   To effectively disable libvirt live snapshotting until the problem
-   is resolved, add the following setting to ``nova.conf``:
-
-   .. code-block:: ini
-
-      [workarounds]
-      disable_libvirt_livesnapshot = True
-
-**Ensuring Snapshots of Linux Guests Are Consistent**
-
-The following section is from Sébastien Han's `OpenStack: Perform
-Consistent Snapshots blog entry
-`__.
-
-A snapshot captures the state of the file system, but not the state of
-the memory. Therefore, to ensure your snapshot contains the data that
-you want, before your snapshot you need to ensure that:
-
-- Running programs have written their contents to disk
-
-- The file system does not have any "dirty" buffers: where programs
-  have issued the command to write to disk, but the operating system
-  has not yet done the write
-
-To ensure that important services have written their contents to disk
-(such as databases), we recommend that you read the documentation for
-those applications to determine what commands to issue to have them
-sync their contents to disk. If you are unsure how to do this, the
-safest approach is to simply stop these running services normally.
-
-To deal with the "dirty" buffer issue, we recommend using the
-:command:`sync` command before snapshotting:
-
-.. code-block:: console
-
-   # sync
-
-Running ``sync`` writes dirty buffers (buffered blocks that have been
-modified but not yet written to disk) out to disk.
-
-Just running ``sync`` is not enough to ensure that the file system is
-consistent. We recommend that you use the ``fsfreeze`` tool, which
-halts new access to the file system and creates a stable image on disk
-that is suitable for snapshotting. The ``fsfreeze`` tool supports
-several file systems, including ext3, ext4, and XFS. If your virtual
-machine instance is running on Ubuntu, install the util-linux package
-to get ``fsfreeze``:
-
-.. code-block:: console
-
-   # apt-get install util-linux
-
-.. note::
-
-   In the very common case where the underlying snapshot is done via
-   LVM, the file system freeze is automatically handled by LVM.
-
-If your operating system doesn't have a version of ``fsfreeze``
-available, you can use ``xfs_freeze`` instead, which is available on
-Ubuntu in the xfsprogs package. Despite the "xfs" in the name,
-xfs_freeze also works on ext3 and ext4 if you are using a Linux kernel
-version 2.6.29 or greater, since it works at the virtual file system
-(VFS) level starting at 2.6.29. The xfs_freeze version supports the
-same command-line arguments as ``fsfreeze``.
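-
-If you do need ``xfs_freeze``, it can be installed the same way (a
-sketch, again assuming Ubuntu):
-
-.. code-block:: console
-
-   # apt-get install xfsprogs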
- -Consider the example where you want to take a snapshot of a persistent -block storage volume, detected by the guest operating system as -``/dev/vdb`` and mounted on ``/mnt``. The fsfreeze command accepts two -arguments: - --f - Freeze the system - --u - Thaw (unfreeze) the system - -To freeze the volume in preparation for snapshotting, you would do the -following, as root, inside the instance: - -.. code-block:: console - - # fsfreeze -f /mnt - -You *must mount the file system* before you run the :command:`fsfreeze` -command. - -When the :command:`fsfreeze -f` command is issued, all ongoing transactions in -the file system are allowed to complete, new write system calls are -halted, and other calls that modify the file system are halted. Most -importantly, all dirty data, metadata, and log information are written -to disk. - -Once the volume has been frozen, do not attempt to read from or write to -the volume, as these operations hang. The operating system stops every -I/O operation and any I/O attempts are delayed until the file system has -been unfrozen. - -Once you have issued the :command:`fsfreeze` command, it is safe to perform -the snapshot. For example, if the volume of your instance was named -``mon-volume`` and you wanted to snapshot it to an image named -``mon-snapshot``, you could now run the following: - -.. code-block:: console - - $ openstack image create mon-snapshot --volume mon-volume - -When the snapshot is done, you can thaw the file system with the -following command, as root, inside of the instance: - -.. code-block:: console - - # fsfreeze -u /mnt - -If you want to back up the root file system, you can't simply run the -preceding command because it will freeze the prompt. Instead, run the -following one-liner, as root, inside the instance: - -.. code-block:: console - - # fsfreeze -f / && read x; fsfreeze -u / - -After this command it is common practice -to call :command:`openstack image create` from your workstation, and -once done press enter in your instance shell to unfreeze it. -Obviously you could automate this, but at least it will let you -properly synchronize. - - -**Ensuring Snapshots of Windows Guests Are Consistent** - -Obtaining consistent snapshots of Windows VMs is conceptually similar to -obtaining consistent snapshots of Linux VMs, although it requires -additional utilities to coordinate with a Windows-only subsystem -designed to facilitate consistent backups. - -Windows XP and later releases include a Volume Shadow Copy Service (VSS) -which provides a framework so that compliant applications can be -consistently backed up on a live filesystem. To use this framework, a -VSS requestor is run that signals to the VSS service that a consistent -backup is needed. The VSS service notifies compliant applications -(called VSS writers) to quiesce their data activity. The VSS service -then tells the copy provider to create a snapshot. Once the snapshot has -been made, the VSS service unfreezes VSS writers and normal I/O activity -resumes. - -QEMU provides a guest agent that can be run in guests running on KVM -hypervisors. This guest agent, on Windows VMs, coordinates with the -Windows VSS service to facilitate a workflow which ensures consistent -snapshots. This feature requires at least QEMU 1.7. The relevant guest -agent commands are: - -guest-file-flush - Write out "dirty" buffers to disk, similar to the Linux ``sync`` - operation. - -guest-fsfreeze - Suspend I/O to the disks, similar to the Linux ``fsfreeze -f`` - operation. 
-
-guest-fsfreeze-thaw
-   Resume I/O to the disks, similar to the Linux ``fsfreeze -u``
-   operation.
-
-To obtain snapshots of a Windows VM, these commands can be scripted in
-sequence: flush the filesystems, freeze the filesystems, snapshot the
-filesystems, then unfreeze the filesystems. As with scripting similar
-workflows against Linux VMs, care must be taken when writing such a
-script to ensure that error handling is thorough and that filesystems
-will not be left in a frozen state.
-
-Instances in the Database
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-While instance information is stored in a number of database tables,
-the table you most likely need to look at in relation to user
-instances is the instances table.
-
-The instances table carries most of the information related to both
-running and deleted instances. It has a bewildering array of fields;
-for an exhaustive list, look at the database. These are the most
-useful fields for operators looking to form queries (a query sketch
-follows the lists below):
-
-- The ``deleted`` field is set to ``1`` if the instance has been
-  deleted and ``NULL`` if it has not been deleted. This field is
-  important for excluding deleted instances from your queries.
-
-- The ``uuid`` field is the UUID of the instance and is used
-  throughout other tables in the database as a foreign key. This ID is
-  also reported in logs, the dashboard, and command-line tools to
-  uniquely identify an instance.
-
-- A collection of foreign keys are available to find relations to the
-  instance. The most useful of these, ``user_id`` and ``project_id``,
-  are the UUIDs of the user who launched the instance and the project
-  it was launched in.
-
-- The ``host`` field tells which compute node is hosting the instance.
-
-- The ``hostname`` field holds the name of the instance when it is
-  launched. The display name is initially the same as the hostname but
-  can be reset using the :command:`nova rename` command.
-
-A number of time-related fields are useful for tracking when state
-changes happened on an instance:
-
-- ``created_at``
-
-- ``updated_at``
-
-- ``deleted_at``
-
-- ``scheduled_at``
-
-- ``launched_at``
-
-- ``terminated_at``
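-
-As a sketch of the kind of query these fields support (assuming MySQL
-and the default ``nova`` database name; ``vm_state`` is a further
-column of the same table), you might list the non-deleted instances
-stuck in an error state:
-
-.. code-block:: console
-
-   mysql> SELECT uuid, host, hostname, created_at
-       -> FROM nova.instances
-       -> WHERE (deleted IS NULL OR deleted = 0) AND vm_state = 'error';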
-
-Good Luck!
-~~~~~~~~~~
-
-This section was intended as a brief introduction to some of the most
-useful of many OpenStack commands. For an exhaustive list, please
-refer to the `OpenStack Administrator Guide `__.
-We hope your users remain happy and recognize your hard work!
-(For more hard work, turn the page to the next chapter, where we
-discuss the system-facing operations: maintenance, failures, and
-debugging.)
diff --git a/doc/ops-guide/source/ops-users.rst b/doc/ops-guide/source/ops-users.rst
deleted file mode 100644
index 346ad27da9..0000000000
--- a/doc/ops-guide/source/ops-users.rst
+++ /dev/null
@@ -1,253 +0,0 @@
-===============
-User Management
-===============
-
-The OpenStack Dashboard provides a graphical interface to manage
-users. This section describes user management with the Dashboard.
-
-You can also `manage projects, users, and roles
-`_
-from the command-line clients.
-
-In addition, many sites write custom tools for local needs to enforce
-local policies and provide levels of self-service to users that are
-not currently available with packaged tools.
-
-Creating New Users
-~~~~~~~~~~~~~~~~~~
-
-To create a user, you need the following information:
-
-* Username
-* Description
-* Email address
-* Password
-* Primary project
-* Role
-* Enabled
-
-Username and email address are self-explanatory, though your site may
-have local conventions you should observe. The primary project is
-simply the first project the user is associated with and must exist
-prior to creating the user. Role is almost always going to be
-"member." Out of the box, OpenStack comes with two roles defined:
-
-member
-   A typical user
-
-admin
-   An administrative super user, which has full permissions across all
-   projects and should be used with great care
-
-It is possible to define other roles, but doing so is uncommon.
-
-Once you've gathered this information, creating the user in the
-dashboard is just another web form similar to those we've seen before.
-It can be found by clicking the :guilabel:`Users` link in the
-:guilabel:`Identity` navigation bar and then clicking the
-:guilabel:`Create User` button at the top right.
-
-Modifying users is also done from this :guilabel:`Users` page. If you
-have a large number of users, this page can get quite crowded. The
-:guilabel:`Filter` search box at the top of the page can be used to
-limit the user listing. A form very similar to the user creation
-dialog can be pulled up by selecting :guilabel:`Edit` from the actions
-drop-down menu at the end of the line for the user you are modifying.
-
-Associating Users with Projects
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Many sites run with users being associated with only one project. This
-is a more conservative and simpler choice both for administration and
-for users. Administratively, if a user reports a problem with an
-instance or quota, it is obvious which project this relates to. Users
-needn't worry about what project they are acting in if they are only
-in one project. However, note that, by default, any user can affect
-the resources of any other user within their project. It is also
-possible to associate users with multiple projects if that makes sense
-for your organization.
-
-Associating existing users with an additional project or removing them
-from an older project is done from the :guilabel:`Projects` page of
-the dashboard by selecting :guilabel:`Manage Members` from the
-:guilabel:`Actions` column, as shown in the screenshot below.
-
-From this view, you can do a number of useful things, as well as a few
-dangerous ones.
-
-The first column of this form, named :guilabel:`All Users`, includes a
-list of all the users in your cloud who are not already associated
-with this project. The second column shows all the users who are.
-These lists can be quite long, but they can be limited by typing a
-substring of the username you are looking for in the filter field at
-the top of the column.
-
-From here, click the :guilabel:`+` icon to add users to the project.
-Click the :guilabel:`-` to remove them.
-
-.. figure:: figures/edit_project_member.png
-   :alt: Edit Project Members tab
-
-The dangerous possibility comes with the ability to change member
-roles. This is the dropdown list below the username in the
-:guilabel:`Project Members` list. In virtually all cases, this value
-should be set to :guilabel:`Member`. This example purposefully shows
-an administrative user where this value is ``admin``.
-
-.. warning::
-
-   The admin role is global, not per project, so granting a user the
-   ``admin`` role in any project gives the user administrative rights
-   across the whole cloud.
-
-Typical use is to only create administrative users in a single
-project, by convention the admin project, which is created by default
-during cloud setup.
-If your administrative users also use the cloud to launch and manage
-instances, it is strongly recommended that you use separate user
-accounts for administrative access and normal operations, and that
-they be in distinct projects.
-
-Customizing Authorization
--------------------------
-
-The default :term:`authorization` settings allow only administrative
-users to create resources on behalf of a different project. OpenStack
-handles two kinds of authorization policies:
-
-Operation based
-   Policies specify access criteria for specific operations, possibly
-   with fine-grained control over specific attributes.
-
-Resource based
-   Whether access to a specific resource is granted or not is
-   determined according to the permissions configured for the resource
-   (currently available only for the network resource). The actual
-   authorization policies enforced in an OpenStack service vary from
-   deployment to deployment.
-
-The policy engine reads entries from the ``policy.json`` file. The
-actual location of this file might vary from distribution to
-distribution: for nova, it is typically in ``/etc/nova/policy.json``.
-You can update entries while the system is running, and you do not
-have to restart services. Currently, the only way to update such
-policies is to edit the policy file.
-
-Each entry in the policy file pairs a policy with a rule. For
-instance, in the ``compute:create: "rule:admin_or_owner"`` statement,
-the policy is ``compute:create`` and the rule is ``admin_or_owner``.
-
-Policies are triggered by an OpenStack policy engine whenever one of
-them matches an OpenStack API operation or a specific attribute being
-used in a given operation. For instance, the engine tests the
-``compute:create`` policy every time a user sends a
-``POST /v2/{tenant_id}/servers`` request to the OpenStack Compute API
-server. Policies can also be related to specific :term:`API extensions
-<API extension>`. For instance, if a user needs an extension like
-``compute_extension:rescue``, the attributes defined by the provider
-extensions trigger the rule test for that operation.
-
-An authorization policy can be composed of one or more rules. If more
-rules are specified, the policy evaluates successfully if any of the
-rules evaluates successfully; if an API operation matches multiple
-policies, then all of those policies must evaluate successfully. Also,
-authorization rules are recursive: once a rule is matched, it can
-resolve to another rule, until a terminal rule is reached. These are
-the kinds of rules defined (a combined illustration follows the
-list):
-
-Role-based rules
-   Evaluate successfully if the user submitting the request has the
-   specified role. For instance, ``"role:admin"`` is successful if the
-   user submitting the request is an administrator.
-
-Field-based rules
-   Evaluate successfully if a field of the resource specified in the
-   current request matches a specific value. For instance,
-   ``"field:networks:shared=True"`` is successful if the ``shared``
-   attribute of the network resource is set to ``true``.
-
-Generic rules
-   Compare an attribute in the resource with an attribute extracted
-   from the user's security credentials, and evaluate successfully if
-   the comparison is successful. For instance,
-   ``"tenant_id:%(tenant_id)s"`` is successful if the tenant identifier
-   in the resource is equal to the tenant identifier of the user
-   submitting the request.
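-
-These rule kinds can be combined with ``or`` and ``and``. As a purely
-hypothetical illustration (this entry is not part of the default
-file), a policy that passes for administrators or for shared networks
-might look like:
-
-.. code-block:: none
-
-   "example:attach_shared_network": "role:admin or field:networks:shared=True"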
-
-Here are snippets of the default nova ``policy.json`` file:
-
-.. code-block:: none
-
-   {
-           "context_is_admin": "role:admin",
-           "admin_or_owner": "is_admin:True or project_id:%(project_id)s", ~~~~(1)~~~~
-           "default": "rule:admin_or_owner", ~~~~(2)~~~~
-           "compute:create": "",
-           "compute:create:attach_network": "",
-           "compute:create:attach_volume": "",
-           "compute:get_all": "",
-           "admin_api": "is_admin:True",
-           "compute_extension:accounts": "rule:admin_api",
-           "compute_extension:admin_actions": "rule:admin_api",
-           "compute_extension:admin_actions:pause": "rule:admin_or_owner",
-           "compute_extension:admin_actions:unpause": "rule:admin_or_owner",
-           ...
-           "compute_extension:admin_actions:migrate": "rule:admin_api",
-           "compute_extension:aggregates": "rule:admin_api",
-           "compute_extension:certificates": "",
-           ...
-           "compute_extension:flavorextraspecs": "",
-           "compute_extension:flavormanage": "rule:admin_api", ~~~~(3)~~~~
-   }
-
-1. Shows a rule that evaluates successfully if the current user is an
-   administrator or the owner of the resource specified in the request
-   (tenant identifier is equal).
-
-2. Shows the default policy, which is always evaluated if an API
-   operation does not match any of the policies in ``policy.json``.
-
-3. Shows a policy restricting the ability to manipulate flavors to
-   administrators using the Admin API only.
-
-In some cases, some operations should be restricted to administrators
-only. Therefore, as a further example, let us consider how this sample
-policy file could be modified in a scenario where we enable users to
-create their own flavors:
-
-.. code-block:: none
-
-   "compute_extension:flavormanage": "",
-
-Users Who Disrupt Other Users
------------------------------
-
-Users on your cloud can disrupt other users, sometimes intentionally
-and maliciously and other times by accident. Understanding the
-situation allows you to make a better decision on how to handle the
-disruption.
-
-For example, a group of users have instances that are utilizing a
-large amount of compute resources for very compute-intensive tasks.
-This drives up the load on compute nodes and affects other users. In
-this situation, review your user use cases. You may find that high
-compute scenarios are common, and should then plan for proper
-segregation in your cloud, such as host aggregation or regions.
-
-Another example is a user consuming a very large amount of bandwidth.
-Again, the key is to understand what the user is doing. If she
-naturally needs a high amount of bandwidth, you might have to limit
-her transmission rate so as not to affect other users, or move her to
-an area with more bandwidth available. On the other hand, maybe her
-instance has been hacked and is part of a botnet launching DDoS
-attacks. Resolve this issue the same way you would if any other server
-on your network had been hacked. Contact the user and give her time to
-respond. If she doesn't respond, shut down the instance.
-
-A final example is a user who hammers cloud resources repeatedly.
-Contact the user and learn what he is trying to do. Maybe he doesn't
-understand that what he's doing is inappropriate, or maybe there is an
-issue with the resource he is trying to access that is causing his
-requests to queue or lag.
diff --git a/doc/ops-guide/source/preface.rst b/doc/ops-guide/source/preface.rst
deleted file mode 100644
index 8149700c5e..0000000000
--- a/doc/ops-guide/source/preface.rst
+++ /dev/null
@@ -1,410 +0,0 @@
-=======
-Preface
-=======
-
-OpenStack is an open source platform that lets you build an
-:term:`Infrastructure-as-a-Service (IaaS)` cloud that runs on
-commodity hardware.
-
-Introduction to OpenStack
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-OpenStack believes in open source, open design, and open development,
-all in an open community that encourages participation by anyone. The
-long-term vision for OpenStack is to produce a ubiquitous open source
-cloud computing platform that meets the needs of public and private
-cloud providers regardless of size. OpenStack services control large
-pools of compute, storage, and networking resources throughout a data
-center.
-
-The technology behind OpenStack consists of a series of interrelated
-projects delivering various components for a cloud infrastructure
-solution. Each service provides an open API so that all of these
-resources can be managed through a dashboard that gives administrators
-control while empowering users to provision resources through a web
-interface, a command-line client, or software development kits that
-support the API. Many OpenStack APIs are extensible, meaning you can
-keep compatibility with a core set of calls while providing access to
-more resources and innovating through API extensions. The OpenStack
-project is a global collaboration of developers and cloud computing
-technologists. The project produces an open standard cloud computing
-platform for both public and private clouds. By focusing on ease of
-implementation, massive scalability, a variety of rich features, and
-tremendous extensibility, the project aims to deliver a practical and
-reliable cloud solution for all types of organizations.
-
-Getting Started with OpenStack
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-As an open source project, one of the unique aspects of OpenStack is
-that it has many different levels at which you can begin to engage
-with it—you don't have to do everything yourself.
-
-Using OpenStack
----------------
-
-You could ask, "Do I even need to build a cloud?" If you want to start
-using a compute or storage service by just swiping your credit card,
-you can go to eNovance, HP, Rackspace, or other organizations to start
-using their public OpenStack clouds. Using their OpenStack cloud
-resources is similar to accessing the publicly available Amazon Web
-Services Elastic Compute Cloud (EC2) or Simple Storage Service (S3).
-
-Plug and Play OpenStack
------------------------
-
-However, the enticing part of OpenStack might be to build your own
-private cloud, and there are several ways to accomplish this goal.
-Perhaps the simplest of all is an appliance-style solution. You
-purchase an appliance, unpack it, plug in the power and the network,
-and watch it transform into an OpenStack cloud with minimal additional
-configuration.
-
-However, hardware choice is important for many applications, so if
-that applies to you, consider that there are several software
-distributions available that you can run on servers, storage, and
-network products of your choosing. Canonical (where OpenStack replaced
-Eucalyptus as the default cloud option in 2011), Red Hat, and SUSE
-offer enterprise OpenStack solutions and support.
You may also want to take a look at -some of the specialized distributions, such as those from Rackspace, -Piston, SwiftStack, or Cloudscaling. - -Alternatively, if you want someone to help guide you through the -decisions about the underlying hardware or your applications, perhaps -adding in a few features or integrating components along the way, -consider contacting one of the system integrators with OpenStack -experience, such as Mirantis or Metacloud. - -If your preference is to build your own OpenStack expertise internally, -a good way to kick-start that might be to attend or arrange a training -session. The OpenStack Foundation has a `Training -Marketplace `_ where you -can look for nearby events. Also, the OpenStack community is `working to -produce `_ open source -training materials. - -Roll Your Own OpenStack ------------------------ - -However, this guide has a different audience—those seeking flexibility -from the OpenStack framework by deploying do-it-yourself solutions. - -OpenStack is designed for horizontal scalability, so you can easily add -new compute, network, and storage resources to grow your cloud over -time. In addition to the pervasiveness of massive OpenStack public -clouds, many organizations, such as PayPal, Intel, and Comcast, build -large-scale private clouds. OpenStack offers much more than a typical -software package because it lets you integrate a number of different -technologies to construct a cloud. This approach provides great -flexibility, but the number of options might be daunting at first. - -Who This Book Is For -~~~~~~~~~~~~~~~~~~~~ - -This book is for those of you starting to run OpenStack clouds as well -as those of you who were handed an operational one and want to keep it -running well. Perhaps you're on a DevOps team, perhaps you are a system -administrator starting to dabble in the cloud, or maybe you want to get -on the OpenStack cloud team at your company. This book is for all of -you. - -This guide assumes that you are familiar with a Linux distribution that -supports OpenStack, SQL databases, and virtualization. You must be -comfortable administering and configuring multiple Linux machines for -networking. You must install and maintain an SQL database and -occasionally run queries against it. - -One of the most complex aspects of an OpenStack cloud is the networking -configuration. You should be familiar with concepts such as DHCP, Linux -bridges, VLANs, and iptables. You must also have access to a network -hardware expert who can configure the switches and routers required in -your OpenStack cloud. - -.. note:: - - Cloud computing is quite an advanced topic, and this book requires a - lot of background knowledge. However, if you are fairly new to cloud - computing, we recommend that you make use of the :doc:`common/glossary` - at the back of the book, as well as the online documentation for OpenStack - and additional resources mentioned in this book in :doc:`app-resources`. - -Further Reading ---------------- - -There are other books on the `OpenStack documentation -website `_ that can help you get the job -done. 
- -Installation Tutorials and Guides - Describes a manual installation process, as in, by hand, without - automation, for multiple distributions based on a packaging system: - - - `OpenStack Installation Tutorial for openSUSE and SUSE Linux Enterprise - `_ - - - `OpenStack Installation Tutorial for Red Hat Enterprise Linux and CentOS - `_ - - - `OpenStack Installation Tutorial for Ubuntu - `_ - -`OpenStack Configuration Reference `_ - Contains a reference listing of all configuration options for core - and integrated OpenStack services by release version - -`OpenStack Architecture Design Guide `_ - Contains guidelines for designing an OpenStack cloud - -`OpenStack Administrator Guide `_ - Contains how-to information for managing an OpenStack cloud as - needed for your use cases, such as storage, computing, or - software-defined-networking - -`OpenStack High Availability Guide `_ - Describes potential strategies for making your OpenStack services - and related controllers and data stores highly available - -`OpenStack Security Guide `_ - Provides best practices and conceptual information about securing an - OpenStack cloud - -`Virtual Machine Image Guide `_ - Shows you how to obtain, create, and modify virtual machine images - that are compatible with OpenStack - -`OpenStack End User Guide `_ - Shows OpenStack end users how to create and manage resources in an - OpenStack cloud with the OpenStack dashboard and OpenStack client - commands - -`OpenStack Networking Guide `_ - This guide targets OpenStack administrators seeking to deploy and - manage OpenStack Networking (neutron). - -`OpenStack API Guide `_ - A brief overview of how to send REST API requests to endpoints for - OpenStack services - -How This Book Is Organized -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This book contains several parts to show best practices and tips for -the repeated operations for running OpenStack clouds. - -:doc:`ops-lay-of-the-land` - This chapter is written to let you get your hands wrapped around - your OpenStack cloud through command-line tools and understanding - what is already set up in your cloud. - -:doc:`ops-projects-users` - This chapter walks through user-enabling processes that all admins - must face to manage users, give them quotas to parcel out resources, - and so on. - -:doc:`ops-user-facing-operations` - This chapter shows you how to use OpenStack cloud resources and how - to train your users. - -:doc:`ops-maintenance` - This chapter goes into the common failures that the authors have - seen while running clouds in production, including troubleshooting. - -:doc:`ops-network-troubleshooting` - Because network troubleshooting is especially difficult with virtual - resources, this chapter is chock-full of helpful tips and tricks for - tracing network traffic, finding the root cause of networking - failures, and debugging related services, such as DHCP and DNS. - -:doc:`ops-logging-monitoring` - This chapter shows you where OpenStack places logs and how to best - read and manage logs for monitoring purposes. - -:doc:`ops-backup-recovery` - This chapter describes what you need to back up within OpenStack as - well as best practices for recovering backups. - -:doc:`ops-customize` - For readers who need to get a specialized feature into OpenStack, - this chapter describes how to use DevStack to write custom - middleware or a custom scheduler to rebalance your resources. 
- -:doc:`ops-advanced-configuration` - Much of OpenStack is driver-oriented, so you can plug in different - solutions to the base set of services. This chapter describes some - advanced configuration topics. - -:doc:`ops-upgrades` - This chapter provides upgrade information based on the architectures - used in this book. - -**Back matter:** - -:doc:`app-usecases` - You can read a small selection of use cases from the OpenStack - community with some technical details and further resources. - -:doc:`app-crypt` - These are shared legendary tales of image disappearances, VM - massacres, and crazy troubleshooting techniques that result in - hard-learned lessons and wisdom. - -:doc:`app-roadmaps` - Read about how to track the OpenStack roadmap through the open and - transparent development processes. - -:doc:`app-resources` - So many OpenStack resources are available online because of the - fast-moving nature of the project, but there are also resources - listed here that the authors found helpful while learning - themselves. - -:doc:`common/glossary` - A list of terms used in this book is included, which is a subset of - the larger OpenStack glossary available online. - -Why and How We Wrote This Book -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -We wrote this book because we had deployed and maintained OpenStack -clouds for at least a year and wanted to share this knowledge with -others. After months of being the point people for an OpenStack cloud, -we also wanted to have a document to hand to our system administrators -so that they'd know how to operate the cloud on a daily basis, both -reactively and proactively. We wanted to provide more detailed -technical information about the decisions that deployers make along the -way. - -We wrote this book to help you: - -- Design and create an architecture for your first nontrivial OpenStack - cloud. After you read this guide, you'll know which questions to ask - and how to organize your compute, networking, and storage resources - and the associated software packages. - -- Perform the day-to-day tasks required to administer a cloud. - -We wrote this book in a book sprint, which is a facilitated method for -rapidly producing books. For more information, see the -`BookSprints site `_. Your authors cobbled -this book together in five days during February 2013, fueled by caffeine -and the best takeout food that Austin, Texas, could offer. - -On the first day, we filled white boards with colorful sticky notes to -start to shape this nebulous book about how to architect and operate -clouds: - -.. figure:: figures/osog_00in01.png - :figwidth: 100% - -We wrote furiously from our own experiences and bounced ideas off each -other. At regular intervals we reviewed the shape and organization -of the book and further molded it, leading to what you see today. - -The team includes: - -Tom Fifield - After learning about scalability in computing from particle physics - experiments, such as ATLAS at the Large Hadron Collider (LHC) at - CERN, Tom worked on OpenStack clouds in production to support the - Australian public research sector. Tom currently serves as an - OpenStack community manager and works on OpenStack documentation in - his spare time. - -Diane Fleming - Diane works tirelessly on the OpenStack API documentation. She - helped out wherever she could on this project. 
- -Anne Gentle - Anne is the documentation coordinator for OpenStack and also served - as an individual contributor to the Google Documentation Summit in - 2011, working with the OpenStreetMap team. She has worked on book - sprints in the past, with FLOSS Manuals’ Adam Hyde facilitating. - Anne lives in Austin, Texas. - -Lorin Hochstein - An academic turned software-developer-slash-operator, Lorin worked - as the lead architect for Cloud Services at Nimbis Services, where - he deployed OpenStack for technical computing applications. He has - been working with OpenStack since the Cactus release. Previously, he - worked on high-performance computing extensions for OpenStack at - University of Southern California's Information Sciences Institute - (USC-ISI). - -Adam Hyde - Adam facilitated this book sprint. He also founded the book sprint - methodology and is the most experienced book-sprint facilitator - around. See `BookSprints `_ for more - information. Adam founded FLOSS Manuals, a community of some 3,000 - individuals developing Free Manuals about Free Software. He is also the - founder and project manager for Booktype, an open source project for - writing, editing, and publishing books online and in print. - -Jonathan Proulx - Jon has been piloting an OpenStack cloud as a senior technical - architect at the MIT Computer Science and Artificial Intelligence - Lab so that his researchers can have as much computing power as they - need. He started contributing to and reviewing OpenStack - documentation to accelerate his own learning. - -Everett Toews - Everett is a developer advocate at Rackspace making OpenStack and - the Rackspace Cloud easy to use. Sometimes developer, sometimes - advocate, and sometimes operator, he's built web applications, - taught workshops, given presentations around the world, and deployed - OpenStack for production use by academia and business. - -Joe Topjian - Joe has designed and deployed several clouds at Cybera, a nonprofit - that builds e-infrastructure to support entrepreneurs and local - researchers in Alberta, Canada. He also actively maintains and - operates these clouds as a systems architect, and his experience - has given him a wealth of troubleshooting skills for cloud - environments. - -OpenStack community members - Many individual efforts keep a community book alive. Our community - members updated content for this book year-round. Also, a year after - the first sprint, Jon Proulx hosted a second two-day mini-sprint at - MIT with the goal of updating the book for the latest release. Since - the book's inception, more than 30 contributors have supported this - book. We have a toolchain for reviews, continuous builds, and - translations. Writers and developers continuously review patches, - enter doc bugs, edit content, and fix doc bugs. We want to recognize - their efforts! - - The following people have contributed to this book: Akihiro Motoki, - Alejandro Avella, Alexandra Settle, Andreas Jaeger, Andy McCallum, - Benjamin Stassart, Chandan Kumar, Chris Ricker, David Cramer, David - Wittman, Denny Zhang, Emilien Macchi, Gauvain Pocentek, Ignacio - Barrio, James E. Blair, Jay Clark, Jeff White, Jeremy Stanley, K - Jonathan Harker, KATO Tomoyuki, Lana Brindley, Laura Alves, Lee Li, - Lukasz Jernas, Mario B. Codeniera, Matthew Kassawara, Michael Still, - Monty Taylor, Nermina Miller, Nigel Williams, Phil Hopkins, Russell - Bryant, Sahid Orentino Ferdjaoui, Sandy Walsh, Sascha Peilicke, Sean - M. 
Collins, Sergey Lukjanov, Shilla Saebi, Stephen Gordon, Summer - Long, Uwe Stuehler, Vaibhav Bhatkar, Veronica Musso, Ying Chun - "Daisy" Guo, Zhengguang Ou, and ZhiQiang Fan. - -How to Contribute to This Book -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The genesis of this book was an in-person event, but now that the book -is in your hands, we want you to contribute to it. OpenStack -documentation follows the same iterative principles as code, with bugs -logged, investigated, and fixed. We also store the source content on -GitHub and invite collaborators through the OpenStack Gerrit -installation, which offers reviews. For the O'Reilly edition of this -book, we are using the company's Atlas system, which also stores source -content on GitHub and enables collaboration among contributors. - -Learn more about how to contribute to the OpenStack docs at `OpenStack -Documentation Contributor -Guide `_. - -If you find a bug and can't fix it or aren't sure it's really a doc bug, -log a bug at `OpenStack -Manuals `_. Tag the bug -under Extra options with the ``ops-guide`` tag to indicate that the bug -is in this guide. You can assign the bug to yourself if you know how to -fix it. Also, a member of the OpenStack doc-core team can triage the doc -bug. diff --git a/tools/build-all-rst.sh b/tools/build-all-rst.sh index 79cefc56b6..219f01dcba 100755 --- a/tools/build-all-rst.sh +++ b/tools/build-all-rst.sh @@ -29,12 +29,11 @@ done # PDF targets for Install guides are dealt in build-install-guides-rst.sh PDF_TARGETS=( 'arch-design'\ 'ha-guide' \ - 'image-guide'\ - 'ops-guide' ) + 'image-guide') # Note that these guides are only build for master branch -for guide in admin-guide arch-design contributor-guide \ - ha-guide image-guide ops-guide; do +for guide in arch-design contributor-guide \ + ha-guide image-guide; do if [[ ${PDF_TARGETS[*]} =~ $guide ]]; then tools/build-rst.sh doc/$guide --build build \ --target $guide $LINKCHECK $PDF_OPTION diff --git a/tools/publishdocs.sh b/tools/publishdocs.sh index f17c99fc27..b2cdee04c9 100755 --- a/tools/publishdocs.sh +++ b/tools/publishdocs.sh @@ -33,7 +33,6 @@ function copy_to_branch { rm -f publish-docs/$BRANCH/draft-index.html # We don't need these draft guides on the branch rm -rf publish-docs/$BRANCH/arch-design-to-archive - rm -rf publish-docs/$BRANCH/ops-guide for f in $(find publish-docs/$BRANCH -name "atom.xml"); do sed -i -e "s|/draft/|/$BRANCH/|g" $f diff --git a/www/.htaccess b/www/.htaccess index caa57dfa89..637b75b5ce 100644 --- a/www/.htaccess +++ b/www/.htaccess @@ -46,6 +46,9 @@ redirectmatch 301 "^/releases.*$" http://releases.openstack.org$1 # Redirect removed user guide redirectmatch 301 /user-guide/.*$ /user/ +# Redirect removed ops guide +redirectmatch 301 /ops-guide/.*$ /admin/ + # Redirect changed directory name in the Contributor Guide redirect 301 /contributor-guide/ui-text-guidelines.html /contributor-guide/ux-ui-guidelines/ui-text-guidelines.html redirect 301 /contributor-guide/ui-text-guidelines /contributor-guide/ux-ui-guidelines
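
Once this change is published, the new ops-guide redirect can be spot-checked from the command line. This is a minimal sketch, assuming the rule is live on docs.openstack.org; the sample URL and the exact response lines shown are illustrative, not captured output:

    # HEAD request against a representative old ops-guide URL; the rule
    # should answer with a 301 pointing at the admin landing page.
    $ curl -sI https://docs.openstack.org/ops-guide/index.html | grep -iE '^(HTTP|location)'
    HTTP/1.1 301 Moved Permanently
    Location: https://docs.openstack.org/admin/

Because the ``redirectmatch`` replacement contains no capture group, every path under /ops-guide/ lands on /admin/ rather than on a page-for-page equivalent.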