OpenStack Compute (Nova)
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A connection to a hypervisor through libvirt.

Supports KVM, LXC, QEMU, UML, and XEN.

**Related Flags**

:driver_type:  Libvirt domain type. Can be kvm, qemu, uml, xen
               (default: kvm).
:connection_uri:  Override for the default libvirt URI (depends on
                  driver_type).
:disk_prefix:  Override the default disk prefix for the devices
               attached to a server.
:rescue_image_id:  Rescue ami image (None = original image).
:rescue_kernel_id:  Rescue aki image (None = original image).
:rescue_ramdisk_id:  Rescue ari image (None = original image).
:injected_network_template:  Template file for injected network
:allow_same_net_traffic:  Whether to allow in project network traffic

"""

import errno
import eventlet
import functools
import glob
import os
import shutil
import socket
import sys
import tempfile
import threading
import time
import uuid

from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from eventlet import util as eventlet_util
from lxml import etree
from oslo.config import cfg

from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova import notifier
from nova.objects import instance as instance_obj
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import xmlutils
from nova.pci import pci_manager
from nova.pci import pci_utils
from nova.pci import pci_whitelist
from nova import unit
from nova import utils
from nova import version
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
from nova import volume
from nova.volume import encryptors

native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")

libvirt = None

LOG = logging.getLogger(__name__)

libvirt_opts = [
    cfg.StrOpt('rescue_image_id',
               help='Rescue ami image',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('rescue_kernel_id',
               help='Rescue aki image',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('rescue_ramdisk_id',
               help='Rescue ari image',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('virt_type',
               default='kvm',
               help='Libvirt domain type (valid options are: '
                    'kvm, lxc, qemu, uml, xen)',
               deprecated_group='DEFAULT',
               deprecated_name='libvirt_type'),
    cfg.StrOpt('connection_uri',
               default='',
               help='Override the default libvirt URI '
                    '(which is dependent on virt_type)',
               deprecated_group='DEFAULT',
               deprecated_name='libvirt_uri'),
    cfg.BoolOpt('inject_password',
                default=False,
                help='Inject the admin password at boot time, '
                     'without an agent.',
                deprecated_name='libvirt_inject_password',
                deprecated_group='DEFAULT'),
    cfg.BoolOpt('inject_key',
                default=True,
                help='Inject the ssh public key at boot time',
                deprecated_name='libvirt_inject_key',
                deprecated_group='DEFAULT'),
    cfg.IntOpt('inject_partition',
               default=1,
               help='The partition to inject to: '
                    '-2 => disable, -1 => inspect (libguestfs only), '
                    '0 => not partitioned, >0 => partition number',
               deprecated_name='libvirt_inject_partition',
               deprecated_group='DEFAULT'),
    cfg.BoolOpt('use_usb_tablet',
                default=True,
                help='Sync virtual and real mouse cursors in Windows VMs',
                deprecated_group='DEFAULT'),
    cfg.StrOpt('live_migration_uri',
               default="qemu+tcp://%s/system",
               help='Migration target URI '
                    '(any included "%s" is replaced with '
                    'the migration target hostname)',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('live_migration_flag',
               default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
               help='Migration flags to be set for live migration',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('block_migration_flag',
               default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_NON_SHARED_INC',
               help='Migration flags to be set for block migration',
               deprecated_group='DEFAULT'),
    cfg.IntOpt('live_migration_bandwidth',
               default=0,
               help='Maximum bandwidth to be used during migration, in Mbps',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('snapshot_image_format',
               help='Snapshot image format (valid options are: '
                    'raw, qcow2, vmdk, vdi). '
                    'Defaults to same as source image',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('vif_driver',
               default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver',
               help='The libvirt VIF driver to configure the VIFs.',
               deprecated_name='libvirt_vif_driver',
               deprecated_group='DEFAULT'),
    cfg.ListOpt('volume_drivers',
                default=[
                    'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
                    'iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver',
                    'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
                    'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
                    'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
                    'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
                    'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
                    'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
                    'glusterfs='
                        'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver',
                    'fibre_channel=nova.virt.libvirt.volume.'
                        'LibvirtFibreChannelVolumeDriver',
                    'scality='
                        'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver',
                ],
                help='Libvirt handlers for remote volumes.',
                deprecated_name='libvirt_volume_drivers',
                deprecated_group='DEFAULT'),
    cfg.StrOpt('disk_prefix',
               help='Override the default disk prefix for the devices attached'
                    ' to a server, which is dependent on virt_type. '
                    '(valid options are: sd, xvd, uvd, vd)',
               deprecated_name='libvirt_disk_prefix',
               deprecated_group='DEFAULT'),
    cfg.IntOpt('wait_soft_reboot_seconds',
               default=120,
               help='Number of seconds to wait for instance to shut down after'
                    ' soft reboot request is made. We fall back to hard reboot'
                    ' if instance does not shutdown within this window.',
               deprecated_name='libvirt_wait_soft_reboot_seconds',
               deprecated_group='DEFAULT'),
    cfg.BoolOpt('api_thread_pool',
                default=True,
                help='Use a separate OS thread pool to realize non-blocking'
                     ' libvirt calls',
                deprecated_name='libvirt_non_blocking',
                deprecated_group='DEFAULT'),
    cfg.StrOpt('cpu_mode',
               help='Set to "host-model" to clone the host CPU feature flags; '
                    'to "host-passthrough" to use the host CPU model exactly; '
                    'to "custom" to use a named CPU model; '
                    'to "none" to not set any CPU model. '
                    'If virt_type="kvm|qemu", it will default to '
                    '"host-model", otherwise it will default to "none"',
               deprecated_name='libvirt_cpu_mode',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('cpu_model',
               help='Set to a named libvirt CPU model (see names listed '
                    'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
                    'cpu_mode="custom" and virt_type="kvm|qemu"',
               deprecated_name='libvirt_cpu_model',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('snapshots_directory',
               default='$instances_path/snapshots',
               help='Location where libvirt driver will store snapshots '
                    'before uploading them to image service',
               deprecated_name='libvirt_snapshots_directory',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('xen_hvmloader_path',
               default='/usr/lib/xen/boot/hvmloader',
               help='Location where the Xen hvmloader is kept',
               deprecated_group='DEFAULT'),
    cfg.ListOpt('disk_cachemodes',
                default=[],
                help='Specific cachemodes to use for different disk types '
                     'e.g: file=directsync,block=none',
                deprecated_group='DEFAULT'),
    cfg.StrOpt('vcpu_pin_set',
               help='Which pcpus can be used by vcpus of instance '
                    'e.g: "4-12,^8,15"',
               deprecated_group='DEFAULT'),
]

CONF = cfg.CONF
CONF.register_opts(libvirt_opts, 'libvirt')
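# NOTE(editor): the options above are registered in the "libvirt" group, so
# they are set under a [libvirt] section in nova.conf. A minimal illustrative
# snippet (values are examples only, not recommendations):
#
#     [libvirt]
#     virt_type = kvm
#     cpu_mode = host-model
#     disk_cachemodes = file=directsync,block=none
#     live_migration_bandwidth = 0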
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')

DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
    libvirt_firewall.__name__,
    libvirt_firewall.IptablesFirewallDriver.__name__)

MAX_CONSOLE_BYTES = 100 * unit.Ki

# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '


def patch_tpool_proxy():
    """eventlet.tpool.Proxy doesn't work with old-style classes in __str__()
    or __repr__() calls. See bug #962840 for details.
    We perform a monkey patch to replace those two instance methods.
    """
    def str_method(self):
        return str(self._obj)

    def repr_method(self):
        return repr(self._obj)

    tpool.Proxy.__str__ = str_method
    tpool.Proxy.__repr__ = repr_method

patch_tpool_proxy()

VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7

LIBVIRT_POWER_STATE = {
    VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
    VIR_DOMAIN_RUNNING: power_state.RUNNING,
    # NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
    # It means that the VM is running and the vCPU is idle. So,
    # we map it to RUNNING
    VIR_DOMAIN_BLOCKED: power_state.RUNNING,
    VIR_DOMAIN_PAUSED: power_state.PAUSED,
    # NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
    # means the domain is being shut down. So technically the domain
    # is still running. SHUTOFF is the real powered off state.
    # But we will map both to SHUTDOWN anyway.
    # http://libvirt.org/html/libvirt-libvirt.html
    VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
    VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
    VIR_DOMAIN_CRASHED: power_state.CRASHED,
    VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}

MIN_LIBVIRT_VERSION = (0, 9, 6)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10)
MIN_LIBVIRT_CLOSE_CALLBACK_VERSION = (1, 0, 1)
MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
# block size tuning requirements
MIN_LIBVIRT_BLOCKIO_VERSION = (0, 10, 2)
# BlockJobInfo management requirement
MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1)


def libvirt_error_handler(context, err):
    # Just ignore instead of default outputting to stderr.
    pass


class LibvirtDriver(driver.ComputeDriver):

    capabilities = {
        "has_imagecache": True,
        "supports_recreate": True,
        }

    def __init__(self, virtapi, read_only=False):
        super(LibvirtDriver, self).__init__(virtapi)

        global libvirt
        if libvirt is None:
            libvirt = __import__('libvirt')

        self._host_state = None
        self._initiator = None
        self._fc_wwnns = None
        self._fc_wwpns = None
        self._wrapped_conn = None
        self._wrapped_conn_lock = threading.Lock()
        self._caps = None
        self._vcpu_total = 0
        self.read_only = read_only
        self.firewall_driver = firewall.load_driver(
            DEFAULT_FIREWALL_DRIVER,
            self.virtapi,
            get_connection=self._get_connection)

        vif_class = importutils.import_class(CONF.libvirt.vif_driver)
        self.vif_driver = vif_class(self._get_connection)

        self.volume_drivers = driver.driver_dict_from_config(
            CONF.libvirt.volume_drivers, self)

        self.dev_filter = pci_whitelist.get_pci_devices_filter()

        self._event_queue = None

        self._disk_cachemode = None
        self.image_cache_manager = imagecache.ImageCacheManager()
        self.image_backend = imagebackend.Backend(CONF.use_cow_images)

        self.disk_cachemodes = {}

        self.valid_cachemodes = ["default",
                                 "none",
                                 "writethrough",
                                 "writeback",
                                 "directsync",
                                 "unsafe",
                                 ]

        for mode_str in CONF.libvirt.disk_cachemodes:
            disk_type, sep, cache_mode = mode_str.partition('=')
            if cache_mode not in self.valid_cachemodes:
                LOG.warn(_('Invalid cachemode %(cache_mode)s specified '
                           'for disk type %(disk_type)s.'),
                         {'cache_mode': cache_mode, 'disk_type': disk_type})
                continue
            self.disk_cachemodes[disk_type] = cache_mode

        self._volume_api = volume.API()

    @property
    def disk_cachemode(self):
        if self._disk_cachemode is None:
            # We prefer 'none' for consistent performance, host crash
            # safety & migration correctness by avoiding host page cache.
            # Some filesystems (eg GlusterFS via FUSE) don't support
            # O_DIRECT though. For those we fallback to 'writethrough'
            # which gives host crash safety, and is safe for migration
            # provided the filesystem is cache coherent (cluster filesystems
            # typically are, but things like NFS are not).
            self._disk_cachemode = "none"
            if not self._supports_direct_io(CONF.instances_path):
                self._disk_cachemode = "writethrough"
        return self._disk_cachemode

    @property
    def host_state(self):
        if not self._host_state:
            self._host_state = HostState(self)
        return self._host_state

    def set_cache_mode(self, conf):
        """Set cache mode on LibvirtConfigGuestDisk object."""
        try:
            source_type = conf.source_type
            driver_cache = conf.driver_cache
        except AttributeError:
            return

        cache_mode = self.disk_cachemodes.get(source_type,
                                              driver_cache)
        conf.driver_cache = cache_mode

    @staticmethod
    def _has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None):
        try:
            if lv_ver is not None:
                libvirt_version = conn.getLibVersion()
                if libvirt_version < utils.convert_version_to_int(lv_ver):
                    return False

            if hv_ver is not None:
                hypervisor_version = conn.getVersion()
                if hypervisor_version < utils.convert_version_to_int(hv_ver):
                    return False

            if hv_type is not None:
                hypervisor_type = conn.getType()
                if hypervisor_type != hv_type:
                    return False

            return True
        except Exception:
            return False

    def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
        return self._has_min_version(self._conn, lv_ver, hv_ver, hv_type)
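    # NOTE(editor): version tuples such as (0, 9, 6) are flattened by
    # utils.convert_version_to_int() into major * 1000000 + minor * 1000 +
    # micro, matching the integer encoding libvirt returns from
    # getLibVersion()/getVersion(); e.g. (1, 0, 1) compares as 1000001.
    # Any exception while querying the connection is treated above as
    # "version requirement not satisfied".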
    def _native_thread(self):
        """Receives async events coming in from libvirtd.

        This is a native thread which runs the default
        libvirt event loop implementation. This processes
        any incoming async events from libvirtd and queues
        them for later dispatch. This thread is only
        permitted to use libvirt python APIs, and the
        driver._queue_event method. In particular any use
        of logging is forbidden, since it will confuse
        eventlet's greenthread integration.
        """

        while True:
            libvirt.virEventRunDefaultImpl()

    def _dispatch_thread(self):
        """Dispatches async events coming in from libvirtd.

        This is a green thread which waits for events to
        arrive from the libvirt event loop thread. This
        then dispatches the events to the compute manager.
        """

        while True:
            self._dispatch_events()

    @staticmethod
    def _event_lifecycle_callback(conn, dom, event, detail, opaque):
        """Receives lifecycle events from libvirt.

        NB: this method is executing in a native thread, not
        an eventlet coroutine. It can only invoke other libvirt
        APIs, or use self._queue_event(). Any use of logging APIs
        in particular is forbidden.
        """

        self = opaque

        uuid = dom.UUIDString()
        transition = None
        if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
            transition = virtevent.EVENT_LIFECYCLE_STOPPED
        elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
            transition = virtevent.EVENT_LIFECYCLE_STARTED
        elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
            transition = virtevent.EVENT_LIFECYCLE_PAUSED
        elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
            transition = virtevent.EVENT_LIFECYCLE_RESUMED

        if transition is not None:
            self._queue_event(virtevent.LifecycleEvent(uuid, transition))

    def _queue_event(self, event):
        """Puts an event on the queue for dispatch.

        This method is called by the native event thread to
        put events on the queue for later dispatch by the
        green thread.
        """
        if self._event_queue is None:
            LOG.debug(_("Event loop thread is not active, "
                        "discarding event %s") % event)
            return

        # Queue the event...
        self._event_queue.put(event)

        # ...then wakeup the green thread to dispatch it
        c = ' '.encode()
        self._event_notify_send.write(c)
        self._event_notify_send.flush()

    def _dispatch_events(self):
        """Wait for & dispatch events from native thread.

        Blocks until native thread indicates some events
        are ready. Then dispatches all queued events.
        """

        # Wait to be notified that there are some
        # events pending
        try:
            _c = self._event_notify_recv.read(1)
            assert _c
        except ValueError:
            return  # will be raised when pipe is closed

        # Process as many events as possible without
        # blocking
        while not self._event_queue.empty():
            try:
                event = self._event_queue.get(block=False)
                self.emit_event(event)
            except native_Queue.Empty:
                pass
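    # NOTE(editor): _queue_event() writes one byte to the self-pipe per
    # queued event, while _dispatch_events() consumes a single byte and
    # then drains the whole queue. A later wakeup may therefore find the
    # queue already empty; the loop guard and the Empty handler above make
    # that benign rather than an error.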
    def _init_events_pipe(self):
        """Create a self-pipe for the native thread to synchronize on.

        This code is taken from the eventlet tpool module, under terms
        of the Apache License v2.0.
        """

        self._event_queue = native_Queue.Queue()
        try:
            rpipe, wpipe = os.pipe()
            self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
            self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
        except (ImportError, NotImplementedError):
            # This is Windows compatibility -- use a socket instead
            # of a pipe because pipes don't really exist on Windows.
            sock = eventlet_util.__original_socket__(socket.AF_INET,
                                                     socket.SOCK_STREAM)
            sock.bind(('localhost', 0))
            sock.listen(50)
            csock = eventlet_util.__original_socket__(socket.AF_INET,
                                                      socket.SOCK_STREAM)
            csock.connect(('localhost', sock.getsockname()[1]))
            nsock, addr = sock.accept()
            self._event_notify_send = nsock.makefile('wb', 0)
            gsock = greenio.GreenSocket(csock)
            self._event_notify_recv = gsock.makefile('rb', 0)

    def _init_events(self):
        """Initializes the libvirt events subsystem.

        This requires running a native thread to provide the
        libvirt event loop integration. This forwards events
        to a green thread which does the actual dispatching.
        """

        self._init_events_pipe()

        LOG.debug(_("Starting native event thread"))
        event_thread = native_threading.Thread(target=self._native_thread)
        event_thread.setDaemon(True)
        event_thread.start()

        LOG.debug(_("Starting green dispatch thread"))
        eventlet.spawn(self._dispatch_thread)

    def init_host(self, host):
        libvirt.registerErrorHandler(libvirt_error_handler, None)
        libvirt.virEventRegisterDefaultImpl()

        if not self.has_min_version(MIN_LIBVIRT_VERSION):
            major = MIN_LIBVIRT_VERSION[0]
            minor = MIN_LIBVIRT_VERSION[1]
            micro = MIN_LIBVIRT_VERSION[2]
            LOG.error(_('Nova requires libvirt version '
                        '%(major)i.%(minor)i.%(micro)i or greater.'),
                      {'major': major, 'minor': minor, 'micro': micro})

        self._init_events()

    def _get_new_connection(self):
        # call with _wrapped_conn_lock held
        LOG.debug(_('Connecting to libvirt: %s'), self.uri())
        wrapped_conn = None

        try:
            wrapped_conn = self._connect(self.uri(), self.read_only)
        finally:
            # Enable or disable the compute service based on whether
            # the connection attempt succeeded.
            is_connected = bool(wrapped_conn)
            self.set_host_enabled(CONF.host, is_connected)

        self._wrapped_conn = wrapped_conn

        try:
            LOG.debug(_("Registering for lifecycle events %s") % str(self))
            wrapped_conn.domainEventRegisterAny(
                None,
                libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                self._event_lifecycle_callback,
                self)
        except Exception as e:
            LOG.warn(_("URI %(uri)s does not support events: %(error)s"),
                     {'uri': self.uri(), 'error': e})

        if self._has_min_version(wrapped_conn,
                                 MIN_LIBVIRT_CLOSE_CALLBACK_VERSION):
            try:
                LOG.debug(_("Registering for connection events: %s") %
                          str(self))
                wrapped_conn.registerCloseCallback(
                    self._close_callback, None)
            except libvirt.libvirtError as e:
                LOG.warn(_("URI %(uri)s does not support connection"
                           " events: %(error)s"),
                         {'uri': self.uri(), 'error': e})

        return wrapped_conn

    def _get_connection(self):
        # multiple concurrent connections are protected by _wrapped_conn_lock
        with self._wrapped_conn_lock:
            wrapped_conn = self._wrapped_conn
            if not wrapped_conn or not self._test_connection(wrapped_conn):
                wrapped_conn = self._get_new_connection()

        return wrapped_conn

    _conn = property(_get_connection)
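    # NOTE(editor): _conn is a property, so every access re-runs
    # _get_connection(); a dropped connection detected by
    # _test_connection() is transparently replaced with a fresh one
    # under _wrapped_conn_lock.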
    def _close_callback(self, conn, reason, opaque):
        with self._wrapped_conn_lock:
            if conn == self._wrapped_conn:
                _error = _("Connection to libvirt lost: %s") % reason
                LOG.warn(_error)
                self._wrapped_conn = None
                # Disable compute service to avoid
                # new instances being scheduled on this host.
                self.set_host_enabled(CONF.host, _error)

    @staticmethod
    def _test_connection(conn):
        try:
            conn.getLibVersion()
            return True
        except libvirt.libvirtError as e:
            if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
                                       libvirt.VIR_ERR_INTERNAL_ERROR) and
                e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
                                         libvirt.VIR_FROM_RPC)):
                LOG.debug(_('Connection to libvirt broke'))
                return False
            raise

    @staticmethod
    def uri():
        if CONF.libvirt.virt_type == 'uml':
            uri = CONF.libvirt.connection_uri or 'uml:///system'
        elif CONF.libvirt.virt_type == 'xen':
            uri = CONF.libvirt.connection_uri or 'xen:///'
        elif CONF.libvirt.virt_type == 'lxc':
            uri = CONF.libvirt.connection_uri or 'lxc:///'
        else:
            uri = CONF.libvirt.connection_uri or 'qemu:///system'
        return uri

    @staticmethod
    def _connect(uri, read_only):
        def _connect_auth_cb(creds, opaque):
            if len(creds) == 0:
                return 0
            LOG.warning(
                _("Can not handle authentication request for %d credentials")
                % len(creds))
            raise exception.NovaException(
                _("Can not handle authentication request for %d credentials")
                % len(creds))

        auth = [[libvirt.VIR_CRED_AUTHNAME,
                 libvirt.VIR_CRED_ECHOPROMPT,
                 libvirt.VIR_CRED_REALM,
                 libvirt.VIR_CRED_PASSPHRASE,
                 libvirt.VIR_CRED_NOECHOPROMPT,
                 libvirt.VIR_CRED_EXTERNAL],
                _connect_auth_cb,
                None]

        try:
            flags = 0
            if read_only:
                flags = libvirt.VIR_CONNECT_RO
            if not CONF.libvirt.api_thread_pool:
                return libvirt.openAuth(uri, auth, flags)
            else:
                # tpool.proxy_call creates a native thread. Due to limitations
                # with eventlet locking we cannot use the logging API inside
                # the called function.
                return tpool.proxy_call(
                    (libvirt.virDomain, libvirt.virConnect),
                    libvirt.openAuth, uri, auth, flags)
        except libvirt.libvirtError as ex:
            LOG.exception(_("Connection to libvirt failed: %s"), ex)
            payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
                           method='_connect',
                           reason=ex)
            notifier.get_notifier('compute').error(
                nova_context.get_admin_context(),
                'compute.libvirt.error', payload)
            raise exception.HypervisorUnavailable(host=CONF.host)
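    # NOTE(editor): when api_thread_pool is enabled, the first argument to
    # tpool.proxy_call() is eventlet's autowrap tuple -- virDomain and
    # virConnect objects returned by the proxied call are themselves wrapped
    # in tpool.Proxy, so their blocking libvirt calls also run in native
    # threads instead of stalling the eventlet hub.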
    def get_num_instances(self):
        """Efficient override of base instance_exists method."""
        return self._conn.numOfDomains()

    def instance_exists(self, instance_name):
        """Efficient override of base instance_exists method."""
        try:
            self._lookup_by_name(instance_name)
            return True
        except exception.NovaException:
            return False

    # TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
    def list_instance_ids(self):
        if self._conn.numOfDomains() == 0:
            return []
        return self._conn.listDomainsID()

    def list_instances(self):
        names = []
        for domain_id in self.list_instance_ids():
            try:
                # We skip domains with ID 0 (hypervisors).
                if domain_id != 0:
                    domain = self._lookup_by_id(domain_id)
                    names.append(domain.name())
            except exception.InstanceNotFound:
                # Ignore deleted instance while listing
                continue

        # extend instance list to contain also defined domains
        names.extend([vm for vm in self._conn.listDefinedDomains()
                      if vm not in names])

        return names

    def list_instance_uuids(self):
        uuids = set()
        for domain_id in self.list_instance_ids():
            try:
                # We skip domains with ID 0 (hypervisors).
                if domain_id != 0:
                    domain = self._lookup_by_id(domain_id)
                    uuids.add(domain.UUIDString())
            except exception.InstanceNotFound:
                # Ignore deleted instance while listing
                continue

        # extend instance list to contain also defined domains
        for domain_name in self._conn.listDefinedDomains():
            try:
                uuids.add(self._lookup_by_name(domain_name).UUIDString())
            except exception.InstanceNotFound:
                # Ignore deleted instance while listing
                continue

        return list(uuids)

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        for vif in network_info:
            self.vif_driver.plug(instance, vif)

    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        for vif in network_info:
            self.vif_driver.unplug(instance, vif)

    def _destroy(self, instance):
        try:
            virt_dom = self._lookup_by_name(instance['name'])
        except exception.InstanceNotFound:
            virt_dom = None

        # If the instance is already terminated, we're still happy
        # Otherwise, destroy it
        old_domid = -1
        if virt_dom is not None:
            try:
                old_domid = virt_dom.ID()
                virt_dom.destroy()
            except libvirt.libvirtError as e:
                is_okay = False
                errcode = e.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                    # If the instance is already shut off, we get this:
                    # Code=55 Error=Requested operation is not valid:
                    # domain is not running
                    (state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
                    state = LIBVIRT_POWER_STATE[state]
                    if state == power_state.SHUTDOWN:
                        is_okay = True
                elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
                    LOG.warn(_("Cannot destroy instance, operation timed out"),
                             instance=instance)
                    reason = _("operation timed out")
                    raise exception.InstancePowerOffFailure(reason=reason)

                if not is_okay:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_('Error from libvirt during destroy. '
                                    'Code=%(errcode)s Error=%(e)s'),
                                  {'errcode': errcode, 'e': e},
                                  instance=instance)

        def _wait_for_destroy(expected_domid):
            """Called at an interval until the VM is gone."""
            # NOTE(vish): If the instance disappears during the destroy
            #             we ignore it so the cleanup can still be
            #             attempted because we would prefer destroy to
            #             never fail.
            try:
                dom_info = self.get_info(instance)
                state = dom_info['state']
                new_domid = dom_info['id']
            except exception.InstanceNotFound:
                LOG.error(_("During wait destroy, instance disappeared."),
                          instance=instance)
                raise loopingcall.LoopingCallDone()

            if state == power_state.SHUTDOWN:
                LOG.info(_("Instance destroyed successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

            # NOTE(wangpan): If the instance was booted again after destroy,
            #                this may be an endless loop, so check the id of
            #                the domain here; if it changed and the instance
            #                is still running, we should destroy it again.
            # see https://bugs.launchpad.net/nova/+bug/1111213 for more details
            if new_domid != expected_domid:
                LOG.info(_("Instance may be started again."),
                         instance=instance)
                kwargs['is_running'] = True
                raise loopingcall.LoopingCallDone()

        kwargs = {'is_running': False}
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
                                                     old_domid)
        timer.start(interval=0.5).wait()
        if kwargs['is_running']:
            LOG.info(_("Going to destroy instance again."), instance=instance)
            self._destroy(instance)

    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True):
        self._destroy(instance)
        self._cleanup(context, instance, network_info, block_device_info,
                      destroy_disks)

    def _undefine_domain(self, instance):
        try:
            virt_dom = self._lookup_by_name(instance['name'])
        except exception.InstanceNotFound:
            virt_dom = None
        if virt_dom:
            try:
                try:
                    virt_dom.undefineFlags(
                        libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
                except libvirt.libvirtError:
                    LOG.debug(_("Error from libvirt during undefineFlags."
                                " Retrying with undefine"), instance=instance)
                    virt_dom.undefine()
                except AttributeError:
                    # NOTE(vish): Older versions of libvirt don't support
                    #             undefine flags, so attempt to do the
                    #             right thing.
                    try:
                        if virt_dom.hasManagedSaveImage(0):
                            virt_dom.managedSaveRemove(0)
                    except AttributeError:
                        pass
                    virt_dom.undefine()
            except libvirt.libvirtError as e:
                with excutils.save_and_reraise_exception():
                    errcode = e.get_error_code()
                    LOG.error(_('Error from libvirt during undefine. '
                                'Code=%(errcode)s Error=%(e)s') %
                              {'errcode': errcode, 'e': e}, instance=instance)

    def _cleanup(self, context, instance, network_info, block_device_info,
                 destroy_disks):
        self._undefine_domain(instance)
        self.unplug_vifs(instance, network_info)
        retry = True
        while retry:
            try:
                self.firewall_driver.unfilter_instance(
                    instance, network_info=network_info)
            except libvirt.libvirtError as e:
                try:
                    state = self.get_info(instance)['state']
                except exception.InstanceNotFound:
                    state = power_state.SHUTDOWN

                if state != power_state.SHUTDOWN:
                    LOG.warn(_("Instance may be still running, destroy "
                               "it again."), instance=instance)
                    self._destroy(instance)
                else:
                    retry = False
                    errcode = e.get_error_code()
                    LOG.error(_('Error from libvirt during unfilter. '
                                'Code=%(errcode)s Error=%(e)s') %
                              {'errcode': errcode, 'e': e},
                              instance=instance)
                    reason = "Error unfiltering instance."
                    raise exception.InstanceTerminationFailure(reason=reason)
            except Exception:
                retry = False
                raise
            else:
                retry = False

        # FIXME(wangpan): if the instance is booted again here, such as when
        #                 a soft reboot boots it, it will become
        #                 "running deleted"; should we check and destroy
        #                 it at the end of this method?

        # NOTE(vish): we disconnect from volumes regardless
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            disk_dev = vol['mount_device'].rpartition("/")[2]

            if ('data' in connection_info and
                    'volume_id' in connection_info['data']):
                volume_id = connection_info['data']['volume_id']
                encryption = encryptors.get_encryption_metadata(
                    context, self._volume_api, volume_id, connection_info)

                if encryption:
                    # The volume must be detached from the VM before
                    # disconnecting it from its encryptor. Otherwise, the
                    # encryptor may report that the volume is still in use.
                    encryptor = self._get_volume_encryptor(connection_info,
                                                           encryption)
                    encryptor.detach_volume(**encryption)

            try:
                self.volume_driver_method('disconnect_volume',
                                          connection_info,
                                          disk_dev)
            except Exception as exc:
                with excutils.save_and_reraise_exception() as ctxt:
                    if destroy_disks:
                        # Don't block on Volume errors if we're trying to
                        # delete the instance as we may be partially created
                        # or deleted
                        ctxt.reraise = False
                        LOG.warn(_("Ignoring Volume Error on vol %(vol_id)s "
                                   "during delete %(exc)s"),
                                 {'vol_id': vol.get('volume_id'), 'exc': exc},
                                 instance=instance)

        if destroy_disks:
            # NOTE(GuanQiang): teardown lxc container to avoid resource leak
            if CONF.libvirt.virt_type == 'lxc':
                inst_path = libvirt_utils.get_instance_path(instance)
                container_dir = os.path.join(inst_path, 'rootfs')
                container_root_device = instance.get('root_device_name')
                disk.teardown_container(container_dir, container_root_device)

            self._delete_instance_files(instance)

            self._cleanup_lvm(instance)
            # NOTE(haomai): destroy volumes if needed
            if CONF.libvirt.images_type == 'rbd':
                self._cleanup_rbd(instance)

    def _cleanup_rbd(self, instance):
        pool = CONF.libvirt.images_rbd_pool
        volumes = libvirt_utils.list_rbd_volumes(pool)
        pattern = instance['uuid']

        def belongs_to_instance(disk):
            return disk.startswith(pattern)

        volumes = filter(belongs_to_instance, volumes)

        if volumes:
            libvirt_utils.remove_rbd_volumes(pool, *volumes)

    def _cleanup_lvm(self, instance):
        """Delete all LVM disks for given instance object."""
        disks = self._lvm_disks(instance)
        if disks:
            libvirt_utils.remove_logical_volumes(*disks)

    def _lvm_disks(self, instance):
        """Returns all LVM disks for given instance object."""
        if CONF.libvirt.images_volume_group:
            vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
            if not os.path.exists(vg):
                return []
            pattern = '%s_' % instance['name']

            def belongs_to_instance(disk):
                return disk.startswith(pattern)

            def fullpath(name):
                return os.path.join(vg, name)

            logical_volumes = libvirt_utils.list_logical_volumes(vg)

            disk_names = filter(belongs_to_instance, logical_volumes)
            disks = map(fullpath, disk_names)
            return disks
        return []
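    # NOTE(editor): illustrative only -- with images_volume_group=nova-vg
    # and an instance named "instance-00000001", _lvm_disks() would match
    # logical volumes such as /dev/nova-vg/instance-00000001_disk, since it
    # filters on the "<name>_" prefix within the configured volume group.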
    def get_volume_connector(self, instance):
        if not self._initiator:
            self._initiator = libvirt_utils.get_iscsi_initiator()
            if not self._initiator:
                LOG.debug(_('Could not determine iscsi initiator name'),
                          instance=instance)

        if not self._fc_wwnns:
            self._fc_wwnns = libvirt_utils.get_fc_wwnns()
            if not self._fc_wwnns or len(self._fc_wwnns) == 0:
                LOG.debug(_('Could not determine fibre channel '
                            'world wide node names'),
                          instance=instance)

        if not self._fc_wwpns:
            self._fc_wwpns = libvirt_utils.get_fc_wwpns()
            if not self._fc_wwpns or len(self._fc_wwpns) == 0:
                LOG.debug(_('Could not determine fibre channel '
                            'world wide port names'),
                          instance=instance)

        connector = {'ip': CONF.my_ip,
                     'host': CONF.host}

        if self._initiator:
            connector['initiator'] = self._initiator

        if self._fc_wwnns and self._fc_wwpns:
            connector["wwnns"] = self._fc_wwnns
            connector["wwpns"] = self._fc_wwpns

        return connector

    def _cleanup_resize(self, instance, network_info):
        target = libvirt_utils.get_instance_path(instance) + "_resize"
        if os.path.exists(target):
            shutil.rmtree(target)

        if instance['host'] != CONF.host:
            self._undefine_domain(instance)
            self.unplug_vifs(instance, network_info)
            self.firewall_driver.unfilter_instance(instance, network_info)

    def volume_driver_method(self, method_name, connection_info,
                             *args, **kwargs):
        driver_type = connection_info.get('driver_volume_type')
        if driver_type not in self.volume_drivers:
            raise exception.VolumeDriverNotFound(driver_type=driver_type)
        driver = self.volume_drivers[driver_type]
        method = getattr(driver, method_name)
        return method(connection_info, *args, **kwargs)
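    # NOTE(editor): dispatch is keyed on connection_info['driver_volume_type']
    # against the volume_drivers option registered above. For example, the
    # cleanup path calls
    #     self.volume_driver_method('disconnect_volume',
    #                               connection_info, disk_dev)
    # which routes an iSCSI volume to LibvirtISCSIVolumeDriver.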
    def _get_volume_encryptor(self, connection_info, encryption):
        encryptor = encryptors.get_volume_encryptor(connection_info,
                                                    **encryption)
        return encryptor

    def attach_volume(self, context, connection_info, instance, mountpoint,
                      encryption=None):
        instance_name = instance['name']
        virt_dom = self._lookup_by_name(instance_name)
        disk_dev = mountpoint.rpartition("/")[2]
        disk_info = {
            'dev': disk_dev,
            'bus': blockinfo.get_disk_bus_for_disk_dev(
                CONF.libvirt.virt_type, disk_dev),
            'type': 'disk',
            }

        # Note(cfb): If the volume has a custom block size, check that
        #            we are using QEMU/KVM and libvirt >= 0.10.2. The
        #            presence of a block size is considered mandatory by
        #            cinder so we fail if we can't honor the request.
        data = {}
        if ('data' in connection_info):
            data = connection_info['data']
        if ('logical_block_size' in data or 'physical_block_size' in data):
            if ((CONF.libvirt.virt_type != "kvm" and
                 CONF.libvirt.virt_type != "qemu")):
                msg = _("Volume sets block size, but the current "
                        "libvirt hypervisor '%s' does not support custom "
                        "block size") % CONF.libvirt.virt_type
                raise exception.InvalidHypervisorType(msg)

            if not self.has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION):
                ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION])
                msg = _("Volume sets block size, but libvirt '%s' or later is "
                        "required.") % ver
                raise exception.Invalid(msg)

        conf = self.volume_driver_method('connect_volume',
                                         connection_info,
                                         disk_info)
        self.set_cache_mode(conf)

        try:
            # NOTE(vish): We can always affect config because our
            #             domains are persistent, but we should only
            #             affect live if the domain is running.
            flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
            state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
            if state == power_state.RUNNING:
                flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE

            # cache device_path in connection_info -- required by encryptors
            if 'data' in connection_info:
                connection_info['data']['device_path'] = conf.source_path

            if encryption:
                encryptor = self._get_volume_encryptor(connection_info,
                                                       encryption)
                encryptor.attach_volume(context, **encryption)

            virt_dom.attachDeviceFlags(conf.to_xml(), flags)
        except Exception as ex:
            if isinstance(ex, libvirt.libvirtError):
                errcode = ex.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
                    self.volume_driver_method('disconnect_volume',
                                              connection_info,
                                              disk_dev)
                    raise exception.DeviceIsBusy(device=disk_dev)

            with excutils.save_and_reraise_exception():
                self.volume_driver_method('disconnect_volume',
                                          connection_info,
                                          disk_dev)

    def _swap_volume(self, domain, disk_path, new_path):
        """Swap existing disk with a new block device."""
        # Save a copy of the domain's running XML file
        xml = domain.XMLDesc(0)

        # Abort is an idempotent operation, so make sure any block
        # jobs which may have failed are ended.
        try:
            domain.blockJobAbort(disk_path, 0)
        except Exception:
            pass

        try:
            # NOTE (rmk): blockRebase cannot be executed on persistent
            #             domains, so we need to temporarily undefine it.
            #             If any part of this block fails, the domain is
            #             re-defined regardless.
            if domain.isPersistent():
                domain.undefine()

            # Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to
            # allow writing to existing external volume file
            domain.blockRebase(disk_path, new_path, 0,
                               libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                               libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)

            while self._wait_for_block_job(domain, disk_path):
                time.sleep(0.5)

            domain.blockJobAbort(disk_path,
                                 libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
        finally:
            self._conn.defineXML(xml)

    def swap_volume(self, old_connection_info,
                    new_connection_info, instance, mountpoint):
        instance_name = instance['name']
        virt_dom = self._lookup_by_name(instance_name)
        disk_dev = mountpoint.rpartition("/")[2]
        xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
        if not xml:
            raise exception.DiskNotFound(location=disk_dev)
        disk_info = {
            'dev': disk_dev,
            'bus': blockinfo.get_disk_bus_for_disk_dev(
                CONF.libvirt.virt_type, disk_dev),
            'type': 'disk',
            }
        conf = self.volume_driver_method('connect_volume',
                                         new_connection_info,
                                         disk_info)
        if not conf.source_path:
            self.volume_driver_method('disconnect_volume',
                                      new_connection_info,
                                      disk_dev)
            raise NotImplementedError(_("Swap only supports host devices"))

        self._swap_volume(virt_dom, disk_dev, conf.source_path)
        self.volume_driver_method('disconnect_volume',
                                  old_connection_info,
                                  disk_dev)

    @staticmethod
    def _get_disk_xml(xml, device):
        """Returns the xml for the disk mounted at device."""
        try:
            doc = etree.fromstring(xml)
        except Exception:
            return None
        ret = doc.findall('./devices/disk')
        for node in ret:
            for child in node.getchildren():
                if child.tag == 'target':
                    if child.get('dev') == device:
                        return etree.tostring(node)
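    # NOTE(editor): _get_disk_xml() falls through and implicitly returns
    # None when no <disk> element targets the requested device; callers
    # such as swap_volume() and detach_volume() treat that as DiskNotFound.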
    def _get_existing_domain_xml(self, instance, network_info,
                                 block_device_info=None):
        try:
            virt_dom = self._lookup_by_name(instance['name'])
            xml = virt_dom.XMLDesc(0)
        except exception.InstanceNotFound:
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance,
                                                block_device_info)
            xml = self.to_xml(nova_context.get_admin_context(),
                              instance, network_info, disk_info,
                              block_device_info=block_device_info)
        return xml

    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        instance_name = instance['name']
        disk_dev = mountpoint.rpartition("/")[2]
        try:
            virt_dom = self._lookup_by_name(instance_name)
            xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
            if not xml:
                raise exception.DiskNotFound(location=disk_dev)
            else:
                # NOTE(vish): We can always affect config because our
                #             domains are persistent, but we should only
                #             affect live if the domain is running.
                flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
                state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
                if state == power_state.RUNNING:
                    flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
                virt_dom.detachDeviceFlags(xml, flags)

                if encryption:
                    # The volume must be detached from the VM before
                    # disconnecting it from its encryptor. Otherwise, the
                    # encryptor may report that the volume is still in use.
                    encryptor = self._get_volume_encryptor(connection_info,
                                                           encryption)
                    encryptor.detach_volume(**encryption)
        except libvirt.libvirtError as ex:
            # NOTE(vish): This is called to cleanup volumes after live
            #             migration, so we should still disconnect even if
            #             the instance doesn't exist here anymore.
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                LOG.warn(_("During detach_volume, instance disappeared."))
            else:
                raise

        self.volume_driver_method('disconnect_volume',
                                  connection_info,
                                  disk_dev)

    def attach_interface(self, instance, image_meta, vif):
        virt_dom = self._lookup_by_name(instance['name'])
        inst_type = self.virtapi.flavor_get(
            nova_context.get_admin_context(read_deleted='yes'),
            instance['instance_type_id'])
        self.vif_driver.plug(instance, vif)
        self.firewall_driver.setup_basic_filtering(instance, [vif])
        cfg = self.vif_driver.get_config(instance, vif, image_meta,
                                         inst_type)
        try:
            flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
            state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
            if state == power_state.RUNNING:
                flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
            virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
        except libvirt.libvirtError:
            LOG.error(_('attaching network adapter failed.'),
                      instance=instance)
            self.vif_driver.unplug(instance, vif)
            raise exception.InterfaceAttachFailed(instance)

    def detach_interface(self, instance, vif):
        virt_dom = self._lookup_by_name(instance['name'])
        inst_type = self.virtapi.flavor_get(
            nova_context.get_admin_context(read_deleted='yes'),
            instance['instance_type_id'])
        cfg = self.vif_driver.get_config(instance, vif, None, inst_type)
        try:
            self.vif_driver.unplug(instance, vif)
            flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
            state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
            if state == power_state.RUNNING:
                flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
            virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                LOG.warn(_("During detach_interface, "
                           "instance disappeared."),
                         instance=instance)
            else:
                LOG.error(_('detaching network adapter failed.'),
                          instance=instance)
                raise exception.InterfaceDetachFailed(instance)

    def _create_snapshot_metadata(self, base, instance, img_fmt, snp_name):
        metadata = {'is_public': False,
                    'status': 'active',
                    'name': snp_name,
                    'properties': {
                        'kernel_id': instance['kernel_id'],
                        'image_location': 'snapshot',
                        'image_state': 'available',
                        'owner_id': instance['project_id'],
                        'ramdisk_id': instance['ramdisk_id'],
                        }
                    }
        if instance['os_type']:
            metadata['properties']['os_type'] = instance['os_type']

        # NOTE(vish): glance forces ami disk format to be ami
        if base.get('disk_format') == 'ami':
            metadata['disk_format'] = 'ami'
        else:
            metadata['disk_format'] = img_fmt

        metadata['container_format'] = base.get('container_format', 'bare')

        return metadata

    def snapshot(self, context, instance, image_href, update_task_state):
        """Create snapshot from a running VM instance.

        This command only works with qemu 0.14+
        """
        try:
            virt_dom = self._lookup_by_name(instance['name'])
        except exception.InstanceNotFound:
            raise exception.InstanceNotRunning(instance_id=instance['uuid'])

        (image_service, image_id) = glance.get_remote_image_service(
            context, instance['image_ref'])

        base = compute_utils.get_image_metadata(
            context, image_service, image_id, instance)

        _image_service = glance.get_remote_image_service(context, image_href)
        snapshot_image_service, snapshot_image_id = _image_service
        snapshot = snapshot_image_service.show(context, snapshot_image_id)

        disk_path = libvirt_utils.find_disk(virt_dom)
        source_format = libvirt_utils.get_disk_type(disk_path)

        image_format = CONF.libvirt.snapshot_image_format or source_format

        # NOTE(bfilippov): save lvm and rbd as raw
        if image_format == 'lvm' or image_format == 'rbd':
            image_format = 'raw'

        metadata = self._create_snapshot_metadata(base,
                                                  instance,
                                                  image_format,
                                                  snapshot['name'])

        snapshot_name = uuid.uuid4().hex

        (state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
        state = LIBVIRT_POWER_STATE[state]

        # NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
        #            These restrictions can be relaxed as other configurations
        #            can be validated.
        if self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
                                MIN_QEMU_LIVESNAPSHOT_VERSION,
                                REQ_HYPERVISOR_LIVESNAPSHOT) \
                and not source_format == "lvm" and not source_format == 'rbd':
            live_snapshot = True
            # Abort is an idempotent operation, so make sure any block
            # jobs which may have failed are ended. This operation also
            # confirms the running instance, as opposed to the system as a
            # whole, has a new enough version of the hypervisor (bug 1193146).
            try:
                virt_dom.blockJobAbort(disk_path, 0)
            except libvirt.libvirtError as ex:
                error_code = ex.get_error_code()
                if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
                    live_snapshot = False
                else:
                    pass
        else:
            live_snapshot = False

        # NOTE(rmk): We cannot perform live snapshots when a managedSave
        #            file is present, so we will use the cold/legacy method
        #            for instances which are shutdown.
        if state == power_state.SHUTDOWN:
            live_snapshot = False

        # NOTE(dkang): managedSave does not work for LXC
        if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
            if state == power_state.RUNNING or state == power_state.PAUSED:
                self._detach_pci_devices(virt_dom,
                    pci_manager.get_instance_pci_devs(instance))
                virt_dom.managedSave(0)

        snapshot_backend = self.image_backend.snapshot(disk_path,
                image_type=source_format)

        if live_snapshot:
            LOG.info(_("Beginning live snapshot process"),
                     instance=instance)
        else:
            LOG.info(_("Beginning cold snapshot process"),
                     instance=instance)

        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
        snapshot_directory = CONF.libvirt.snapshots_directory
        fileutils.ensure_tree(snapshot_directory)
        with utils.tempdir(dir=snapshot_directory) as tmpdir:
            try:
                out_path = os.path.join(tmpdir, snapshot_name)
                if live_snapshot:
                    # NOTE(xqueralt): libvirt needs o+x in the temp directory
                    os.chmod(tmpdir, 0o701)
                    self._live_snapshot(virt_dom, disk_path, out_path,
                                        image_format)
                else:
                    snapshot_backend.snapshot_extract(out_path, image_format)
            finally:
                new_dom = None
                # NOTE(dkang): because previous managedSave is not called
                #              for LXC, _create_domain must not be called.
                if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
                    if state == power_state.RUNNING:
                        new_dom = self._create_domain(domain=virt_dom)
                    elif state == power_state.PAUSED:
                        new_dom = self._create_domain(domain=virt_dom,
                                launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
                    if new_dom is not None:
                        self._attach_pci_devices(new_dom,
                            pci_manager.get_instance_pci_devs(instance))
                LOG.info(_("Snapshot extracted, beginning image upload"),
                         instance=instance)

            # Upload that image to the image service
            update_task_state(task_state=task_states.IMAGE_UPLOADING,
                              expected_state=task_states.IMAGE_PENDING_UPLOAD)
            with libvirt_utils.file_open(out_path) as image_file:
                image_service.update(context,
                                     image_href,
                                     metadata,
                                     image_file)
                LOG.info(_("Snapshot image upload complete"),
                         instance=instance)

    @staticmethod
    def _wait_for_block_job(domain, disk_path, abort_on_error=False):
        status = domain.blockJobInfo(disk_path, 0)
        if status == -1 and abort_on_error:
            msg = _('libvirt error while requesting blockjob info.')
            raise exception.NovaException(msg)
        try:
            cur = status.get('cur', 0)
            end = status.get('end', 0)
        except Exception:
            return False

        if cur == end and cur != 0 and end != 0:
            return False
        else:
            return True
  1342. def _live_snapshot(self, domain, disk_path, out_path, image_format):
  1343. """Snapshot an instance without downtime."""
  1344. # Save a copy of the domain's running XML file
  1345. xml = domain.XMLDesc(0)
  1346. # Abort is an idempotent operation, so make sure any block
  1347. # jobs which may have failed are ended.
  1348. try:
  1349. domain.blockJobAbort(disk_path, 0)
  1350. except Exception:
  1351. pass
  1352. # NOTE (rmk): We are using shallow rebases as a workaround to a bug
  1353. # in QEMU 1.3. In order to do this, we need to create
  1354. # a destination image with the original backing file
  1355. # and matching size of the instance root disk.
  1356. src_disk_size = libvirt_utils.get_disk_size(disk_path)
  1357. src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
  1358. basename=False)
  1359. disk_delta = out_path + '.delta'
  1360. libvirt_utils.create_cow_image(src_back_path, disk_delta,
  1361. src_disk_size)
  1362. try:
  1363. # NOTE (rmk): blockRebase cannot be executed on persistent
  1364. # domains, so we need to temporarily undefine it.
  1365. # If any part of this block fails, the domain is
  1366. # re-defined regardless.
  1367. if domain.isPersistent():
  1368. domain.undefine()
  1369. # NOTE (rmk): Establish a temporary mirror of our root disk and
  1370. # issue an abort once we have a complete copy.
  1371. domain.blockRebase(disk_path, disk_delta, 0,
  1372. libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
  1373. libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
  1374. libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
  1375. while self._wait_for_block_job(domain, disk_path):
  1376. time.sleep(0.5)
  1377. domain.blockJobAbort(disk_path, 0)
  1378. libvirt_utils.chown(disk_delta, os.getuid())
  1379. finally:
  1380. self._conn.defineXML(xml)
  1381. # Convert the delta (CoW) image with a backing file to a flat
  1382. # image with no backing file.
  1383. libvirt_utils.extract_snapshot(disk_delta, 'qcow2',
  1384. out_path, image_format)
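    # The delta-and-flatten flow in _live_snapshot() is roughly what the
    # following qemu-img commands would do by hand (an illustrative sketch
    # only -- the real work goes through libvirt_utils and blockRebase;
    # paths and sizes are hypothetical):
    #
    #     qemu-img create -f qcow2 \
    #         -o backing_file=/base/backing.img disk.delta 21474836480
    #     ... blockRebase mirrors the root disk into disk.delta ...
    #     qemu-img convert -f qcow2 -O raw disk.delta snapshot.raw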
    def _volume_snapshot_update_status(self, context, snapshot_id, status):
        """Send a snapshot status update to Cinder.

        This method captures and logs exceptions that occur, since callers
        cannot do anything useful with these exceptions. Operations on the
        Cinder side waiting for this update will time out if a failure
        occurs sending it.

        :param context: security context
        :param snapshot_id: id of snapshot being updated
        :param status: new status value
        """
  1395. try:
  1396. self._volume_api.update_snapshot_status(context,
  1397. snapshot_id,
  1398. status)
  1399. except Exception:
  1400. msg = _('Failed to send updated snapshot status '
  1401. 'to volume service.')
  1402. LOG.exception(msg)
  1403. def _volume_snapshot_create(self, context, instance, domain,
  1404. volume_id, snapshot_id, new_file):
  1405. """Perform volume snapshot.
  1406. :param domain: VM that volume is attached to
  1407. :param volume_id: volume UUID to snapshot
  1408. :param snapshot_id: UUID of snapshot being created
  1409. :param new_file: relative path to new qcow2 file present on share
  1410. """
  1411. xml = domain.XMLDesc(0)
  1412. xml_doc = etree.fromstring(xml)
  1413. device_info = vconfig.LibvirtConfigGuest()
  1414. device_info.parse_dom(xml_doc)
  1415. disks_to_snap = [] # to be snapshotted by libvirt
  1416. disks_to_skip = [] # local disks not snapshotted
  1417. for disk in device_info.devices:
  1418. if (disk.root_name != 'disk'):
  1419. continue
  1420. if (disk.target_dev is None):
  1421. continue
  1422. if (disk.serial is None or disk.serial != volume_id):
  1423. disks_to_skip.append(disk.source_path)
  1424. continue
  1425. # disk is a Cinder volume with the correct volume_id
  1426. disk_info = {
  1427. 'dev': disk.target_dev,
  1428. 'serial': disk.serial,
  1429. 'current_file': disk.source_path
  1430. }
  1431. # Determine path for new_file based on current path
  1432. current_file = disk_info['current_file']
  1433. new_file_path = os.path.join(os.path.dirname(current_file),
  1434. new_file)
  1435. disks_to_snap.append((current_file, new_file_path))
  1436. if not disks_to_snap:
  1437. msg = _('Found no disk to snapshot.')
  1438. raise exception.NovaException(msg)
  1439. snapshot = vconfig.LibvirtConfigGuestSnapshot()
  1440. for current_name, new_filename in disks_to_snap:
  1441. snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
  1442. snap_disk.name = current_name
  1443. snap_disk.source_path = new_filename
  1444. snap_disk.source_type = 'file'
  1445. snap_disk.snapshot = 'external'
  1446. snap_disk.driver_name = 'qcow2'
  1447. snapshot.add_disk(snap_disk)
  1448. for dev in disks_to_skip:
  1449. snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
  1450. snap_disk.name = dev
  1451. snap_disk.snapshot = 'no'
  1452. snapshot.add_disk(snap_disk)
  1453. snapshot_xml = snapshot.to_xml()
  1454. LOG.debug(_("snap xml: %s") % snapshot_xml)
  1455. snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
  1456. libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
  1457. libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
  1458. QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
  1459. try:
  1460. domain.snapshotCreateXML(snapshot_xml,
  1461. snap_flags | QUIESCE)
  1462. return
  1463. except libvirt.libvirtError:
  1464. msg = _('Unable to create quiesced VM snapshot, '
  1465. 'attempting again with quiescing disabled.')
  1466. LOG.exception(msg)
  1467. try:
  1468. domain.snapshotCreateXML(snapshot_xml, snap_flags)
  1469. except libvirt.libvirtError:
  1470. msg = _('Unable to create VM snapshot, '
  1471. 'failing volume_snapshot operation.')
  1472. LOG.exception(msg)
  1473. raise
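    # For a guest with one Cinder volume (serial == volume_id) plus a
    # local disk, the snapshot XML built above comes out along these lines
    # (an illustrative sketch; the exact output is produced by
    # LibvirtConfigGuestSnapshot):
    #
    #     <domainsnapshot>
    #       <disks>
    #         <disk name='/mnt/share/volume-1234' snapshot='external'
    #               type='file'>
    #           <source file='/mnt/share/volume-1234.new'/>
    #         </disk>
    #         <disk name='/var/lib/nova/instances/uuid/disk' snapshot='no'/>
    #       </disks>
    #     </domainsnapshot>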
  1474. def volume_snapshot_create(self, context, instance, volume_id,
  1475. create_info):
  1476. """Create snapshots of a Cinder volume via libvirt.
  1477. :param instance: VM instance reference
  1478. :param volume_id: id of volume being snapshotted
  1479. :param create_info: dict of information used to create snapshots
  1480. - snapshot_id : ID of snapshot
  1481. - type : qcow2 / <other>
  1482. - new_file : qcow2 file created by Cinder which
  1483. becomes the VM's active image after
  1484. the snapshot is complete
  1485. """
  1486. LOG.debug(_("volume_snapshot_create: create_info: %(c_info)s"),
  1487. {'c_info': create_info}, instance=instance)
  1488. try:
  1489. virt_dom = self._lookup_by_name(instance['name'])
  1490. except exception.InstanceNotFound:
  1491. raise exception.InstanceNotRunning(instance_id=instance['uuid'])
  1492. if create_info['type'] != 'qcow2':
  1493. raise exception.NovaException(_('Unknown type: %s') %
  1494. create_info['type'])
  1495. snapshot_id = create_info.get('snapshot_id', None)
  1496. if snapshot_id is None:
  1497. raise exception.NovaException(_('snapshot_id required '
  1498. 'in create_info'))
  1499. try:
  1500. self._volume_snapshot_create(context, instance, virt_dom,
  1501. volume_id, snapshot_id,
  1502. create_info['new_file'])
  1503. except Exception:
  1504. with excutils.save_and_reraise_exception():
  1505. msg = _('Error occurred during volume_snapshot_create, '
  1506. 'sending error status to Cinder.')
  1507. LOG.exception(msg)
  1508. self._volume_snapshot_update_status(
  1509. context, snapshot_id, 'error')
  1510. self._volume_snapshot_update_status(
  1511. context, snapshot_id, 'creating')
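    # An example create_info payload for the method above (values are
    # purely illustrative):
    #
    #     create_info = {
    #         'type': 'qcow2',
    #         'snapshot_id': '8b52e273-7f06-4b96-9041-6a1eed0fd5d2',
    #         'new_file': 'volume-1234.8b52e273',
    #     }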
  1512. def _volume_snapshot_delete(self, context, instance, volume_id,
  1513. snapshot_id, delete_info=None):
  1514. """
  1515. Note:
  1516. if file being merged into == active image:
  1517. do a blockRebase (pull) operation
  1518. else:
  1519. do a blockCommit operation
  1520. Files must be adjacent in snap chain.
  1521. :param instance: instance reference
  1522. :param volume_id: volume UUID
  1523. :param snapshot_id: snapshot UUID (unused currently)
  1524. :param delete_info: {
  1525. 'type': 'qcow2',
  1526. 'file_to_merge': 'a.img',
  1527. 'merge_target_file': 'b.img' or None (if merging file_to_merge into
  1528. active image)
  1529. }
  1530. Libvirt blockjob handling required for this method is broken
  1531. in versions of libvirt that do not contain:
  1532. http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)
  1533. (Patch is pending in 1.0.5-maint branch as well, but we cannot detect
  1534. libvirt 1.0.5.5 vs. 1.0.5.6 here.)
  1535. """
  1536. if not self.has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION):
  1537. ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION])
  1538. msg = _("Libvirt '%s' or later is required for online deletion "
  1539. "of volume snapshots.") % ver
  1540. raise exception.Invalid(msg)
  1541. LOG.debug(_('volume_snapshot_delete: delete_info: %s') % delete_info)
  1542. if delete_info['type'] != 'qcow2':
  1543. msg = _('Unknown delete_info type %s') % delete_info['type']
  1544. raise exception.NovaException(msg)
  1545. try:
  1546. virt_dom = self._lookup_by_name(instance['name'])
  1547. except exception.InstanceNotFound:
  1548. raise exception.InstanceNotRunning(instance_id=instance['uuid'])
  1549. ##### Find dev name
  1550. my_dev = None
  1551. active_disk = None
  1552. xml = virt_dom.XMLDesc(0)
  1553. xml_doc = etree.fromstring(xml)
  1554. device_info = vconfig.LibvirtConfigGuest()
  1555. device_info.parse_dom(xml_doc)
  1556. for disk in device_info.devices:
  1557. if (disk.root_name != 'disk'):
  1558. continue
  1559. if (disk.target_dev is None or disk.serial is None):
  1560. continue
  1561. if disk.serial == volume_id:
  1562. my_dev = disk.target_dev
  1563. active_disk = disk.source_path
  1564. if my_dev is None or active_disk is None:
  1565. msg = _('Unable to locate disk matching id: %s') % volume_id
  1566. raise exception.NovaException(msg)
  1567. LOG.debug(_("found dev, it's %(dev)s, with active disk: %(disk)s"),
  1568. {'dev': my_dev, 'disk': active_disk})
  1569. if delete_info['merge_target_file'] is None:
  1570. # pull via blockRebase()
  1571. # Merge the most recent snapshot into the active image
  1572. rebase_disk = my_dev
  1573. rebase_base = delete_info['file_to_merge']
  1574. rebase_bw = 0
  1575. rebase_flags = 0
  1576. LOG.debug(_('disk: %(disk)s, base: %(base)s, '
  1577. 'bw: %(bw)s, flags: %(flags)s') %
  1578. {'disk': rebase_disk,
  1579. 'base': rebase_base,
  1580. 'bw': rebase_bw,
  1581. 'flags': rebase_flags})
  1582. result = virt_dom.blockRebase(rebase_disk, rebase_base,
  1583. rebase_bw, rebase_flags)
  1584. if result == 0:
  1585. LOG.debug(_('blockRebase started successfully'))
  1586. while self._wait_for_block_job(virt_dom, rebase_disk,
  1587. abort_on_error=True):
  1588. LOG.debug(_('waiting for blockRebase job completion'))
  1589. time.sleep(0.5)
  1590. else:
  1591. # commit with blockCommit()
  1592. commit_disk = my_dev
  1593. commit_base = delete_info['merge_target_file']
  1594. commit_top = delete_info['file_to_merge']
  1595. bandwidth = 0
  1596. flags = 0
  1597. result = virt_dom.blockCommit(commit_disk, commit_base, commit_top,
  1598. bandwidth, flags)
  1599. if result == 0:
  1600. LOG.debug(_('blockCommit started successfully'))
  1601. while self._wait_for_block_job(virt_dom, commit_disk,
  1602. abort_on_error=True):
  1603. LOG.debug(_('waiting for blockCommit job completion'))
  1604. time.sleep(0.5)
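    # Two example delete_info payloads for the branches above (values are
    # illustrative):
    #
    #     # merge the newest file into the active image -> blockRebase (pull)
    #     {'type': 'qcow2', 'file_to_merge': 'a.img',
    #      'merge_target_file': None}
    #
    #     # merge a file into an older file in the chain -> blockCommit
    #     {'type': 'qcow2', 'file_to_merge': 'b.img',
    #      'merge_target_file': 'a.img'}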
  1605. def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
  1606. delete_info=None):
  1607. try:
  1608. self._volume_snapshot_delete(context, instance, volume_id,
  1609. snapshot_id, delete_info=delete_info)
  1610. except Exception:
  1611. with excutils.save_and_reraise_exception():
  1612. msg = _('Error occurred during volume_snapshot_delete, '
  1613. 'sending error status to Cinder.')
  1614. LOG.exception(msg)
  1615. self._volume_snapshot_update_status(
  1616. context, snapshot_id, 'error_deleting')
  1617. self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
  1618. def reboot(self, context, instance, network_info, reboot_type='SOFT',
  1619. block_device_info=None, bad_volumes_callback=None):
  1620. """Reboot a virtual machine, given an instance reference."""
  1621. if reboot_type == 'SOFT':
  1622. # NOTE(vish): This will attempt to do a graceful shutdown/restart.
  1623. try:
  1624. soft_reboot_success = self._soft_reboot(instance)
  1625. except libvirt.libvirtError as e:
  1626. LOG.debug(_("Instance soft reboot failed: %s"), e)
  1627. soft_reboot_success = False
  1628. if soft_reboot_success:
  1629. LOG.info(_("Instance soft rebooted successfully."),
  1630. instance=instance)
  1631. return
  1632. else:
  1633. LOG.warn(_("Failed to soft reboot instance. "
  1634. "Trying hard reboot."),
  1635. instance=instance)
  1636. return self._hard_reboot(context, instance, network_info,
  1637. block_device_info)
    def _soft_reboot(self, instance):
        """Attempt to shutdown and restart the instance gracefully.

        We use shutdown and create here so we can return if the guest
        responded and actually rebooted. Note that this method only
        succeeds if the guest responds to ACPI. Therefore we return
        success or failure so we can fall back to a hard reboot if
        necessary.

        :returns: True if the reboot succeeded
        """
  1647. dom = self._lookup_by_name(instance["name"])
  1648. (state, _max_mem, _mem, _cpus, _t) = dom.info()
  1649. state = LIBVIRT_POWER_STATE[state]
  1650. old_domid = dom.ID()
  1651. # NOTE(vish): This check allows us to reboot an instance that
  1652. # is already shutdown.
  1653. if state == power_state.RUNNING:
  1654. dom.shutdown()
  1655. # NOTE(vish): This actually could take slightly longer than the
  1656. # FLAG defines depending on how long the get_info
  1657. # call takes to return.
  1658. self._prepare_pci_devices_for_use(
  1659. pci_manager.get_instance_pci_devs(instance))
  1660. for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
  1661. dom = self._lookup_by_name(instance["name"])
  1662. (state, _max_mem, _mem, _cpus, _t) = dom.info()
  1663. state = LIBVIRT_POWER_STATE[state]
  1664. new_domid = dom.ID()
  1665. # NOTE(ivoks): By checking domain IDs, we make sure we are
  1666. # not recreating domain that's already running.
  1667. if old_domid != new_domid:
  1668. if state in [power_state.SHUTDOWN,
  1669. power_state.CRASHED]:
  1670. LOG.info(_("Instance shutdown successfully."),
  1671. instance=instance)
  1672. self._create_domain(domain=dom)
  1673. timer = loopingcall.FixedIntervalLoopingCall(
  1674. self._wait_for_running, instance)
  1675. timer.start(interval=0.5).wait()
  1676. return True
  1677. else:
  1678. LOG.info(_("Instance may have been rebooted during soft "
  1679. "reboot, so return now."), instance=instance)
  1680. return True
  1681. greenthread.sleep(1)
  1682. return False
    def _hard_reboot(self, context, instance, network_info,
                     block_device_info=None):
        """Reboot a virtual machine, given an instance reference.

        This method actually destroys and re-creates the domain to
        ensure the reboot happens, as the guest OS cannot ignore this
        action.
        """
  1693. self._destroy(instance)
  1694. disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
  1695. instance,
  1696. block_device_info)
  1697. # NOTE(vish): This could generate the wrong device_format if we are
  1698. # using the raw backend and the images don't exist yet.
  1699. # The create_images_and_backing below doesn't properly
  1700. # regenerate raw backend images, however, so when it
  1701. # does we need to (re)generate the xml after the images
  1702. # are in place.
  1703. xml = self.to_xml(context, instance, network_info, disk_info,
  1704. block_device_info=block_device_info,
  1705. write_to_disk=True)
  1706. # NOTE (rmk): Re-populate any missing backing files.
  1707. disk_info_json = self.get_instance_disk_info(instance['name'], xml,
  1708. block_device_info)
  1709. instance_dir = libvirt_utils.get_instance_path(instance)
  1710. self._create_images_and_backing(context, instance, instance_dir,
  1711. disk_info_json)
  1712. # Initialize all the necessary networking, block devices and
  1713. # start the instance.
  1714. self._create_domain_and_network(context, xml, instance, network_info,
  1715. block_device_info, reboot=True)
  1716. self._prepare_pci_devices_for_use(
  1717. pci_manager.get_instance_pci_devs(instance))
  1718. def _wait_for_reboot():
  1719. """Called at an interval until the VM is running again."""
  1720. state = self.get_info(instance)['state']
  1721. if state == power_state.RUNNING:
  1722. LOG.info(_("Instance rebooted successfully."),
  1723. instance=instance)
  1724. raise loopingcall.LoopingCallDone()
  1725. timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
  1726. timer.start(interval=0.5).wait()
  1727. def pause(self, instance):
  1728. """Pause VM instance."""
  1729. dom = self._lookup_by_name(instance['name'])
  1730. dom.suspend()
  1731. def unpause(self, instance):
  1732. """Unpause paused VM instance."""
  1733. dom = self._lookup_by_name(instance['name'])
  1734. dom.resume()
  1735. def power_off(self, instance):
  1736. """Power off the specified instance."""
  1737. self._destroy(instance)
  1738. def power_on(self, context, instance, network_info,
  1739. block_device_info=None):
  1740. """Power on the specified instance."""
  1741. # We use _hard_reboot here to ensure that all backing files,
  1742. # network, and block device connections, etc. are established
  1743. # and available before we attempt to start the instance.
  1744. self._hard_reboot(context, instance, network_info, block_device_info)
  1745. def suspend(self, instance):
  1746. """Suspend the specified instance."""
  1747. dom = self._lookup_by_name(instance['name'])
  1748. self._detach_pci_devices(dom,
  1749. pci_manager.get_instance_pci_devs(instance))
  1750. dom.managedSave(0)
  1751. def resume(self, context, instance, network_info, block_device_info=None):
  1752. """resume the specified instance."""
  1753. xml = self._get_existing_domain_xml(instance, network_info,
  1754. block_device_info)
  1755. dom = self._create_domain_and_network(context, xml, instance,
  1756. network_info, block_device_info=block_device_info)
  1757. self._attach_pci_devices(dom,
  1758. pci_manager.get_instance_pci_devs(instance))
  1759. def resume_state_on_host_boot(self, context, instance, network_info,
  1760. block_device_info=None):
  1761. """resume guest state when a host is booted."""
  1762. # Check if the instance is running already and avoid doing
  1763. # anything if it is.
  1764. if self.instance_exists(instance['name']):
  1765. domain = self._lookup_by_name(instance['name'])
  1766. state = LIBVIRT_POWER_STATE[domain.info()[0]]
  1767. ignored_states = (power_state.RUNNING,
  1768. power_state.SUSPENDED,
  1769. power_state.NOSTATE,
  1770. power_state.PAUSED)
  1771. if state in ignored_states:
  1772. return
  1773. # Instance is not up and could be in an unknown state.
  1774. # Be as absolute as possible about getting it back into
  1775. # a known and running state.
  1776. self._hard_reboot(context, instance, network_info, block_device_info)
  1777. def rescue(self, context, instance, network_info, image_meta,
  1778. rescue_password):
  1779. """Loads a VM using rescue images.
  1780. A rescue is normally performed when something goes wrong with the
  1781. primary images and data needs to be corrected/recovered. Rescuing
  1782. should not edit or over-ride the original image, only allow for
  1783. data recovery.
  1784. """
  1785. instance_dir = libvirt_utils.get_instance_path(instance)
  1786. unrescue_xml = self._get_existing_domain_xml(instance, network_info)
  1787. unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
  1788. libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
  1789. rescue_images = {
  1790. 'image_id': CONF.libvirt.rescue_image_id or instance['image_ref'],
  1791. 'kernel_id': (CONF.libvirt.rescue_kernel_id or
  1792. instance['kernel_id']),
  1793. 'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
  1794. instance['ramdisk_id']),
  1795. }
  1796. disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
  1797. instance,
  1798. None,
  1799. image_meta,
  1800. rescue=True)
  1801. self._create_image(context, instance,
  1802. disk_info['mapping'],
  1803. '.rescue', rescue_images,
  1804. network_info=network_info,
  1805. admin_pass=rescue_password)
  1806. xml = self.to_xml(context, instance, network_info, disk_info,
  1807. image_meta, rescue=rescue_images,
  1808. write_to_disk=True)
  1809. self._destroy(instance)
  1810. self._create_domain(xml)
  1811. def unrescue(self, instance, network_info):
  1812. """Reboot the VM which is being rescued back into primary images.
  1813. """
  1814. instance_dir = libvirt_utils.get_instance_path(instance)
  1815. unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
  1816. xml = libvirt_utils.load_file(unrescue_xml_path)
  1817. virt_dom = self._lookup_by_name(instance['name'])
  1818. self._destroy(instance)
  1819. self._create_domain(xml, virt_dom)
  1820. libvirt_utils.file_delete(unrescue_xml_path)
  1821. rescue_files = os.path.join(instance_dir, "*.rescue")
  1822. for rescue_file in glob.iglob(rescue_files):
  1823. libvirt_utils.file_delete(rescue_file)
  1824. def poll_rebooting_instances(self, timeout, instances):
  1825. pass
  1826. def _enable_hairpin(self, xml):
  1827. interfaces = self.get_interfaces(xml)
  1828. for interface in interfaces:
  1829. utils.execute('tee',
  1830. '/sys/class/net/%s/brport/hairpin_mode' % interface,
  1831. process_input='1',
  1832. run_as_root=True,
  1833. check_exit_code=[0, 1])
  1834. # NOTE(ilyaalekseyev): Implementation like in multinics
  1835. # for xenapi(tr3buchet)
  1836. def spawn(self, context, instance, image_meta, injected_files,
  1837. admin_password, network_info=None, block_device_info=None):
  1838. disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
  1839. instance,
  1840. block_device_info,
  1841. image_meta)
  1842. self._create_image(context, instance,
  1843. disk_info['mapping'],
  1844. network_info=network_info,
  1845. block_device_info=block_device_info,
  1846. files=injected_files,
  1847. admin_pass=admin_password)
  1848. xml = self.to_xml(context, instance, network_info,
  1849. disk_info, image_meta,
  1850. block_device_info=block_device_info,
  1851. write_to_disk=True)
  1852. self._create_domain_and_network(context, xml, instance, network_info,
  1853. block_device_info)
  1854. LOG.debug(_("Instance is running"), instance=instance)
  1855. def _wait_for_boot():
  1856. """Called at an interval until the VM is running."""
  1857. state = self.get_info(instance)['state']
  1858. if state == power_state.RUNNING:
  1859. LOG.info(_("Instance spawned successfully."),
  1860. instance=instance)
  1861. raise loopingcall.LoopingCallDone()
  1862. timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
  1863. timer.start(interval=0.5).wait()
  1864. def _flush_libvirt_console(self, pty):
  1865. out, err = utils.execute('dd',
  1866. 'if=%s' % pty,
  1867. 'iflag=nonblock',
  1868. run_as_root=True,
  1869. check_exit_code=False)
  1870. return out
    def _append_to_file(self, data, fpath):
        LOG.info(_('data: %(data)r, fpath: %(fpath)r'),
                 {'data': data, 'fpath': fpath})
        # Use a context manager so the file handle is not leaked.
        with open(fpath, 'a+') as fp:
            fp.write(data)
        return fpath
  1877. def get_console_output(self, instance):
  1878. virt_dom = self._lookup_by_name(instance['name'])
  1879. xml = virt_dom.XMLDesc(0)
  1880. tree = etree.fromstring(xml)
  1881. console_types = {}
  1882. # NOTE(comstud): We want to try 'file' types first, then try 'pty'
  1883. # types. We can't use Python 2.7 syntax of:
  1884. # tree.find("./devices/console[@type='file']/source")
  1885. # because we need to support 2.6.
  1886. console_nodes = tree.findall('./devices/console')
  1887. for console_node in console_nodes:
  1888. console_type = console_node.get('type')
  1889. console_types.setdefault(console_type, [])
  1890. console_types[console_type].append(console_node)
  1891. # If the guest has a console logging to a file prefer to use that
  1892. if console_types.get('file'):
  1893. for file_console in console_types.get('file'):
  1894. source_node = file_console.find('./source')
  1895. if source_node is None:
  1896. continue
  1897. path = source_node.get("path")
  1898. if not path:
  1899. continue
  1900. libvirt_utils.chown(path, os.getuid())
  1901. with libvirt_utils.file_open(path, 'rb') as fp:
  1902. log_data, remaining = utils.last_bytes(fp,
  1903. MAX_CONSOLE_BYTES)
  1904. if remaining > 0:
  1905. LOG.info(_('Truncated console log returned, %d bytes '
  1906. 'ignored'), remaining, instance=instance)
  1907. return log_data
  1908. # Try 'pty' types
  1909. if console_types.get('pty'):
  1910. for pty_console in console_types.get('pty'):
  1911. source_node = pty_console.find('./source')
  1912. if source_node is None:
  1913. continue
  1914. pty = source_node.get("path")
  1915. if not pty:
  1916. continue
  1917. break
  1918. else:
  1919. msg = _("Guest does not have a console available")
  1920. raise exception.NovaException(msg)
  1921. self._chown_console_log_for_instance(instance)
  1922. data = self._flush_libvirt_console(pty)
  1923. console_log = self._get_console_log_path(instance)
  1924. fpath = self._append_to_file(data, console_log)
  1925. with libvirt_utils.file_open(fpath, 'rb') as fp:
  1926. log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
  1927. if remaining > 0:
  1928. LOG.info(_('Truncated console log returned, %d bytes ignored'),
  1929. remaining, instance=instance)
  1930. return log_data
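    # The console elements inspected above look like this in the domain
    # XML (an illustrative sketch):
    #
    #     <devices>
    #       <console type='file'>
    #         <source path='/var/lib/nova/instances/uuid/console.log'/>
    #       </console>
    #       <console type='pty'>
    #         <source path='/dev/pts/3'/>
    #       </console>
    #     </devices>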
  1931. @staticmethod
  1932. def get_host_ip_addr():
  1933. return CONF.my_ip
  1934. def get_vnc_console(self, instance):
  1935. def get_vnc_port_for_instance(instance_name):
  1936. virt_dom = self._lookup_by_name(instance_name)
  1937. xml = virt_dom.XMLDesc(0)
  1938. dom = xmlutils.safe_minidom_parse_string(xml)
  1939. for graphic in dom.getElementsByTagName('graphics'):
  1940. if graphic.getAttribute('type') == 'vnc':
  1941. return graphic.getAttribute('port')
  1942. # NOTE(rmk): We had VNC consoles enabled but the instance in
  1943. # question is not actually listening for connections.
  1944. raise exception.ConsoleTypeUnavailable(console_type='vnc')
  1945. port = get_vnc_port_for_instance(instance['name'])
  1946. host = CONF.vncserver_proxyclient_address
  1947. return {'host': host, 'port': port, 'internal_access_path': None}
  1948. def get_spice_console(self, context, instance):
  1949. def get_spice_ports_for_instance(instance_name):
  1950. virt_dom = self._lookup_by_name(instance_name)
  1951. xml = virt_dom.XMLDesc(0)
  1952. # TODO(sleepsonthefloor): use etree instead of minidom
  1953. dom = xmlutils.safe_minidom_parse_string(xml)
  1954. for graphic in dom.getElementsByTagName('graphics'):
  1955. if graphic.getAttribute('type') == 'spice':
  1956. return (graphic.getAttribute('port'),
  1957. graphic.getAttribute('tlsPort'))
  1958. # NOTE(rmk): We had Spice consoles enabled but the instance in
  1959. # question is not actually listening for connections.
  1960. raise exception.ConsoleTypeUnavailable(console_type='spice')
  1961. ports = get_spice_ports_for_instance(instance['name'])
  1962. host = CONF.spice.server_proxyclient_address
  1963. return {'host': host, 'port': ports[0],
  1964. 'tlsPort': ports[1], 'internal_access_path': None}
  1965. @staticmethod
  1966. def _supports_direct_io(dirpath):
  1967. if not hasattr(os, 'O_DIRECT'):
  1968. LOG.debug(_("This python runtime does not support direct I/O"))
  1969. return False
  1970. testfile = os.path.join(dirpath, ".directio.test")
  1971. hasDirectIO = True
  1972. try:
  1973. f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
  1974. os.close(f)
  1975. LOG.debug(_("Path '%(path)s' supports direct I/O") %
  1976. {'path': dirpath})
  1977. except OSError as e:
  1978. if e.errno == errno.EINVAL:
  1979. LOG.debug(_("Path '%(path)s' does not support direct I/O: "
  1980. "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
  1981. hasDirectIO = False
  1982. else:
  1983. with excutils.save_and_reraise_exception():
  1984. LOG.error(_("Error on '%(path)s' while checking "
  1985. "direct I/O: '%(ex)s'") %
  1986. {'path': dirpath, 'ex': str(e)})
  1987. except Exception as e:
  1988. with excutils.save_and_reraise_exception():
  1989. LOG.error(_("Error on '%(path)s' while checking direct I/O: "
  1990. "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
  1991. finally:
  1992. try:
  1993. os.unlink(testfile)
  1994. except Exception:
  1995. pass
  1996. return hasDirectIO
  1997. @staticmethod
  1998. def _create_local(target, local_size, unit='G',
  1999. fs_format=None, label=None):
  2000. """Create a blank image of specified size."""
  2001. libvirt_utils.create_image('raw', target,
  2002. '%d%c' % (local_size, unit))
  2003. def _create_ephemeral(self, target, ephemeral_size,
  2004. fs_label, os_type, is_block_dev=False,
  2005. max_size=None):
  2006. if not is_block_dev:
  2007. self._create_local(target, ephemeral_size)
  2008. # Run as root only for block devices.
  2009. disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev)
  2010. @staticmethod
  2011. def _create_swap(target, swap_mb, max_size=None):
  2012. """Create a swap file of specified size."""
  2013. libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
  2014. utils.mkfs('swap', target)
  2015. @staticmethod
  2016. def _get_console_log_path(instance):
  2017. return os.path.join(libvirt_utils.get_instance_path(instance),
  2018. 'console.log')
  2019. @staticmethod
  2020. def _get_disk_config_path(instance):
  2021. return os.path.join(libvirt_utils.get_instance_path(instance),
  2022. 'disk.config')
  2023. def _chown_console_log_for_instance(self, instance):
  2024. console_log = self._get_console_log_path(instance)
  2025. if os.path.exists(console_log):
  2026. libvirt_utils.chown(console_log, os.getuid())
  2027. def _chown_disk_config_for_instance(self, instance):
  2028. disk_config = self._get_disk_config_path(instance)
  2029. if os.path.exists(disk_config):
  2030. libvirt_utils.chown(disk_config, os.getuid())
  2031. def _create_image(self, context, instance,
  2032. disk_mapping, suffix='',
  2033. disk_images=None, network_info=None,
  2034. block_device_info=None, files=None,
  2035. admin_pass=None, inject_files=True):
  2036. if not suffix:
  2037. suffix = ''
  2038. booted_from_volume = (
  2039. (not bool(instance.get('image_ref')))
  2040. or 'disk' not in disk_mapping
  2041. )
  2042. # syntactic nicety
  2043. def basepath(fname='', suffix=suffix):
  2044. return os.path.join(libvirt_utils.get_instance_path(instance),
  2045. fname + suffix)
  2046. def image(fname, image_type=CONF.libvirt.images_type):
  2047. return self.image_backend.image(instance,
  2048. fname + suffix, image_type)
  2049. def raw(fname):
  2050. return image(fname, image_type='raw')
  2051. # ensure directories exist and are writable
  2052. fileutils.ensure_tree(basepath(suffix=''))
  2053. LOG.info(_('Creating image'), instance=instance)
  2054. # NOTE(dprince): for rescue console.log may already exist... chown it.
  2055. self._chown_console_log_for_instance(instance)
  2056. # NOTE(yaguang): For evacuate disk.config already exist in shared
  2057. # storage, chown it.
  2058. self._chown_disk_config_for_instance(instance)
        # NOTE(vish): No need to add the suffix to console.log
  2060. libvirt_utils.write_to_file(
  2061. self._get_console_log_path(instance), '', 7)
  2062. if not disk_images:
  2063. disk_images = {'image_id': instance['image_ref'],
  2064. 'kernel_id': instance['kernel_id'],
  2065. 'ramdisk_id': instance['ramdisk_id']}
  2066. if disk_images['kernel_id']:
  2067. fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
  2068. raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
  2069. context=context,
  2070. filename=fname,
  2071. image_id=disk_images['kernel_id'],
  2072. user_id=instance['user_id'],
  2073. project_id=instance['project_id'])
  2074. if disk_images['ramdisk_id']:
  2075. fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
  2076. raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
  2077. context=context,
  2078. filename=fname,
  2079. image_id=disk_images['ramdisk_id'],
  2080. user_id=instance['user_id'],
  2081. project_id=instance['project_id'])
  2082. inst_type = flavors.extract_flavor(instance)
  2083. # NOTE(ndipanov): Even if disk_mapping was passed in, which
  2084. # currently happens only on rescue - we still don't want to
  2085. # create a base image.
  2086. if not booted_from_volume:
  2087. root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
  2088. size = instance['root_gb'] * unit.Gi
  2089. if size == 0 or suffix == '.rescue':
  2090. size = None
  2091. image('disk').cache(fetch_func=libvirt_utils.fetch_image,
  2092. context=context,
  2093. filename=root_fname,
  2094. size=size,
  2095. image_id=disk_images['image_id'],
  2096. user_id=instance['user_id'],
  2097. project_id=instance['project_id'])
  2098. # Lookup the filesystem type if required
  2099. os_type_with_default = instance['os_type']
  2100. if not os_type_with_default:
  2101. os_type_with_default = 'default'
  2102. ephemeral_gb = instance['ephemeral_gb']
  2103. if 'disk.local' in disk_mapping:
  2104. disk_image = image('disk.local')
  2105. fn = functools.partial(self._create_ephemeral,
  2106. fs_label='ephemeral0',
  2107. os_type=instance["os_type"],
  2108. is_block_dev=disk_image.is_block_dev)
  2109. fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
  2110. size = ephemeral_gb * unit.Gi
  2111. disk_image.cache(fetch_func=fn,
  2112. filename=fname,
  2113. size=size,
  2114. ephemeral_size=ephemeral_gb)
  2115. for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
  2116. block_device_info)):
  2117. disk_image = image(blockinfo.get_eph_disk(idx))
  2118. fn = functools.partial(self._create_ephemeral,
  2119. fs_label='ephemeral%d' % idx,
  2120. os_type=instance["os_type"],
  2121. is_block_dev=disk_image.is_block_dev)
  2122. size = eph['size'] * unit.Gi
  2123. fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
  2124. disk_image.cache(
  2125. fetch_func=fn,
  2126. filename=fname,
  2127. size=size,
  2128. ephemeral_size=eph['size'])
  2129. if 'disk.swap' in disk_mapping:
  2130. mapping = disk_mapping['disk.swap']
  2131. swap_mb = 0
  2132. swap = driver.block_device_info_get_swap(block_device_info)
  2133. if driver.swap_is_usable(swap):
  2134. swap_mb = swap['swap_size']
  2135. elif (inst_type['swap'] > 0 and
  2136. not block_device.volume_in_mapping(
  2137. mapping['dev'], block_device_info)):
  2138. swap_mb = inst_type['swap']
  2139. if swap_mb > 0:
  2140. size = swap_mb * unit.Mi
  2141. image('disk.swap').cache(fetch_func=self._create_swap,
  2142. filename="swap_%s" % swap_mb,
  2143. size=size,
  2144. swap_mb=swap_mb)
  2145. # Config drive
  2146. if configdrive.required_by(instance):
  2147. LOG.info(_('Using config drive'), instance=instance)
  2148. extra_md = {}
  2149. if admin_pass:
  2150. extra_md['admin_pass'] = admin_pass
  2151. inst_md = instance_metadata.InstanceMetadata(instance,
  2152. content=files, extra_md=extra_md, network_info=network_info)
  2153. with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
  2154. configdrive_path = basepath(fname='disk.config')
  2155. LOG.info(_('Creating config drive at %(path)s'),
  2156. {'path': configdrive_path}, instance=instance)
  2157. try:
  2158. cdb.make_drive(configdrive_path)
  2159. except processutils.ProcessExecutionError as e:
  2160. with excutils.save_and_reraise_exception():
  2161. LOG.error(_('Creating config drive failed '
  2162. 'with error: %s'),
  2163. e, instance=instance)
  2164. # File injection only if needed
  2165. elif inject_files and CONF.libvirt.inject_partition != -2:
  2166. if booted_from_volume:
  2167. LOG.warn(_('File injection into a boot from volume '
  2168. 'instance is not supported'), instance=instance)
  2169. target_partition = None
  2170. if not instance['kernel_id']:
  2171. target_partition = CONF.libvirt.inject_partition
  2172. if target_partition == 0:
  2173. target_partition = None
  2174. if CONF.libvirt.virt_type == 'lxc':
  2175. target_partition = None
  2176. if CONF.libvirt.inject_key and instance['key_data']:
  2177. key = str(instance['key_data'])
  2178. else:
  2179. key = None
  2180. net = netutils.get_injected_network_template(network_info)
  2181. metadata = instance.get('metadata')
  2182. if not CONF.libvirt.inject_password:
  2183. admin_pass = None
  2184. if any((key, net, metadata, admin_pass, files)):
  2185. # If we're not using config_drive, inject into root fs
  2186. injection_path = image('disk').path
  2187. img_id = instance['image_ref']
  2188. for inj, val in [('key', key),
  2189. ('net', net),
  2190. ('metadata', metadata),
  2191. ('admin_pass', admin_pass),
  2192. ('files', files)]:
  2193. if val:
  2194. LOG.info(_('Injecting %(inj)s into image '
  2195. '%(img_id)s'),
  2196. {'inj': inj, 'img_id': img_id},
  2197. instance=instance)
  2198. try:
  2199. disk.inject_data(injection_path,
  2200. key, net, metadata, admin_pass, files,
  2201. partition=target_partition,
  2202. use_cow=CONF.use_cow_images,
  2203. mandatory=('files',))
  2204. except Exception as e:
  2205. with excutils.save_and_reraise_exception():
  2206. LOG.error(_('Error injecting data into image '
  2207. '%(img_id)s (%(e)s)'),
  2208. {'img_id': img_id, 'e': e},
  2209. instance=instance)
  2210. if CONF.libvirt.virt_type == 'uml':
  2211. libvirt_utils.chown(image('disk').path, 'root')
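    # After _create_image() runs, the instance directory typically holds
    # entries like the following (illustrative; the actual contents depend
    # on the disk mapping and config drive settings):
    #
    #     .../instances/<uuid>/console.log
    #     .../instances/<uuid>/disk
    #     .../instances/<uuid>/disk.local
    #     .../instances/<uuid>/disk.swap
    #     .../instances/<uuid>/disk.config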
  2212. def _prepare_pci_devices_for_use(self, pci_devices):
        # kvm and qemu support managed mode
  2214. # In managed mode, the configured device will be automatically
  2215. # detached from the host OS drivers when the guest is started,
  2216. # and then re-attached when the guest shuts down.
  2217. if CONF.libvirt.virt_type != 'xen':
  2218. # we do manual detach only for xen
  2219. return
  2220. try:
  2221. for dev in pci_devices:
  2222. libvirt_dev_addr = dev['hypervisor_name']
  2223. libvirt_dev = \
  2224. self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
  2225. # Note(yjiang5) Spelling for 'dettach' is correct, see
  2226. # http://libvirt.org/html/libvirt-libvirt.html.
  2227. libvirt_dev.dettach()
            # Note(yjiang5): A reset of one PCI device may impact other
            # devices on the same bus, thus we need two separate loops:
            # one to detach the devices and one to reset them.
  2231. for dev in pci_devices:
  2232. libvirt_dev_addr = dev['hypervisor_name']
  2233. libvirt_dev = \
  2234. self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
  2235. libvirt_dev.reset()
  2236. except libvirt.libvirtError as exc:
  2237. raise exception.PciDevicePrepareFailed(id=dev['id'],
  2238. instance_uuid=
  2239. dev['instance_uuid'],
  2240. reason=str(exc))
  2241. def _detach_pci_devices(self, dom, pci_devs):
        # For libvirt versions < 1.1.1 device detach is racy, so forbid
        # detaching unless we have at least that version.
  2244. if not self.has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
  2245. if pci_devs:
  2246. reason = (_("Detaching PCI devices with libvirt < %(ver)s"
  2247. " is not permitted") %
  2248. {'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
  2249. raise exception.PciDeviceDetachFailed(reason=reason,
  2250. dev=pci_devs)
  2251. try:
  2252. for dev in pci_devs:
  2253. dom.detachDeviceFlags(self.get_guest_pci_device(dev).to_xml(),
  2254. libvirt.VIR_DOMAIN_AFFECT_LIVE)
                # After detachDeviceFlags returns, check the domain to
                # ensure the detach has actually finished.
  2257. xml = dom.XMLDesc(0)
  2258. xml_doc = etree.fromstring(xml)
  2259. guest_config = vconfig.LibvirtConfigGuest()
  2260. guest_config.parse_dom(xml_doc)
  2261. for hdev in [d for d in guest_config.devices
  2262. if d.type == 'pci']:
  2263. hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
  2264. dbsf = pci_utils.parse_address(dev['address'])
  2265. if [int(x, 16) for x in hdbsf] ==\
  2266. [int(x, 16) for x in dbsf]:
  2267. raise exception.PciDeviceDetachFailed(reason=
  2268. "timeout",
  2269. dev=dev)
  2270. except libvirt.libvirtError as ex:
  2271. error_code = ex.get_error_code()
  2272. if error_code == libvirt.VIR_ERR_NO_DOMAIN:
  2273. LOG.warn(_("Instance disappeared while detaching "
  2274. "a PCI device from it."))
  2275. else:
  2276. raise
  2277. def _attach_pci_devices(self, dom, pci_devs):
  2278. try:
  2279. for dev in pci_devs:
  2280. dom.attachDevice(self.get_guest_pci_device(dev).to_xml())
  2281. except libvirt.libvirtError:
  2282. LOG.error(_('Attaching PCI devices %(dev)s to %(dom)s failed.')
  2283. % {'dev': pci_devs, 'dom': dom.ID()})
  2284. raise
  2285. def set_host_enabled(self, host, enabled):
  2286. """Sets the specified host's ability to accept new instances.
  2287. This doesn't override non-automatic disablement with an automatic
  2288. setting; thereby permitting operators to keep otherwise
  2289. healthy hosts out of rotation.
  2290. """
  2291. status_name = {True: 'disabled',
  2292. False: 'enabled'}
  2293. if isinstance(enabled, bool):
  2294. disable_service = not enabled
  2295. disable_reason = ''
  2296. else:
  2297. disable_service = bool(enabled)
  2298. disable_reason = enabled
  2299. ctx = nova_context.get_admin_context()
  2300. try:
  2301. service = service_obj.Service.get_by_compute_host(ctx, CONF.host)
  2302. if service.disabled != disable_service:
  2303. # Note(jang): this is a quick fix to stop operator-
  2304. # disabled compute hosts from re-enabling themselves
  2305. # automatically. We prefix any automatic reason code
  2306. # with a fixed string. We only re-enable a host
  2307. # automatically if we find that string in place.
  2308. # This should probably be replaced with a separate flag.
  2309. if not service.disabled or (
  2310. service.disabled_reason and
  2311. service.disabled_reason.startswith(DISABLE_PREFIX)):
  2312. service.disabled = disable_service
  2313. service.disabled_reason = (
  2314. DISABLE_PREFIX + disable_reason
  2315. if disable_service else '')
  2316. service.save()
  2317. LOG.debug(_('Updating compute service status to %s'),
  2318. status_name[disable_service])
  2319. else:
  2320. LOG.debug(_('Not overriding manual compute service '
  2321. 'status with: %s'),
  2322. status_name[disable_service])
  2323. return status_name[disable_service]
        except exception.ComputeHostNotFound:
            LOG.warn(_('Cannot update service status on host: %s, '
                       'since it is not registered.') % CONF.host)
        except Exception:
            LOG.warn(_('Cannot update service status on host: %s, '
                       'due to an unexpected exception.') % CONF.host,
                     exc_info=True)
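    # Example: with a module-level prefix such as DISABLE_PREFIX = 'AUTO: ',
    # an automatically disabled service carries a reason like
    # 'AUTO: Failed to connect to libvirt' (illustrative), while an
    # operator-set reason lacks the prefix and is therefore never
    # re-enabled by this method.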
  2331. def get_host_capabilities(self):
  2332. """Returns an instance of config.LibvirtConfigCaps representing
  2333. the capabilities of the host.
  2334. """
  2335. if not self._caps:
  2336. xmlstr = self._conn.getCapabilities()
  2337. self._caps = vconfig.LibvirtConfigCaps()
  2338. self._caps.parse_str(xmlstr)
  2339. if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'):
  2340. try:
  2341. features = self._conn.baselineCPU(
  2342. [self._caps.host.cpu.to_xml()],
  2343. libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
  2344. if features:
  2345. self._caps.host.cpu = vconfig.LibvirtConfigCPU()
  2346. self._caps.host.cpu.parse_str(features)
                except libvirt.libvirtError as ex:
                    # Note(yjiang5): ignore if libvirt has no support.
                    # VIR_ERR_NO_SUPPORT is an error code, not an exception
                    # class, so it cannot be caught directly.
                    if ex.get_error_code() != libvirt.VIR_ERR_NO_SUPPORT:
                        raise
  2350. return self._caps
  2351. def get_host_uuid(self):
  2352. """Returns a UUID representing the host."""
  2353. caps = self.get_host_capabilities()
  2354. return caps.host.uuid
  2355. def get_host_cpu_for_guest(self):
  2356. """Returns an instance of config.LibvirtConfigGuestCPU
  2357. representing the host's CPU model & topology with
  2358. policy for configuring a guest to match
  2359. """
  2360. caps = self.get_host_capabilities()
  2361. hostcpu = caps.host.cpu
  2362. guestcpu = vconfig.LibvirtConfigGuestCPU()
  2363. guestcpu.model = hostcpu.model
  2364. guestcpu.vendor = hostcpu.vendor
  2365. guestcpu.arch = hostcpu.arch
  2366. guestcpu.match = "exact"
  2367. for hostfeat in hostcpu.features:
  2368. guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
  2369. guestfeat.policy = "require"
  2370. guestcpu.features.append(guestfeat)
  2371. return guestcpu
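    # The guest CPU built above serializes to something like this
    # (an illustrative sketch; model, vendor and features depend on the
    # host):
    #
    #     <cpu match='exact'>
    #       <model>SandyBridge</model>
    #       <vendor>Intel</vendor>
    #       <feature policy='require' name='avx'/>
    #     </cpu>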
  2372. def get_guest_cpu_config(self):
  2373. mode = CONF.libvirt.cpu_mode
  2374. model = CONF.libvirt.cpu_model
  2375. if mode is None:
  2376. if ((CONF.libvirt.virt_type == "kvm" or
  2377. CONF.libvirt.virt_type == "qemu")):
  2378. mode = "host-model"
  2379. else:
  2380. mode = "none"
  2381. if mode == "none":
  2382. return None
  2383. if ((CONF.libvirt.virt_type != "kvm" and
  2384. CONF.libvirt.virt_type != "qemu")):
  2385. msg = _("Config requested an explicit CPU model, but "
  2386. "the current libvirt hypervisor '%s' does not "
  2387. "support selecting CPU models") % CONF.libvirt.virt_type
  2388. raise exception.Invalid(msg)
  2389. if mode == "custom" and model is None:
  2390. msg = _("Config requested a custom CPU model, but no "
  2391. "model name was provided")
  2392. raise exception.Invalid(msg)
  2393. elif mode != "custom" and model is not None:
  2394. msg = _("A CPU model name should not be set when a "
  2395. "host CPU model is requested")
  2396. raise exception.Invalid(msg)
  2397. LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen")
  2398. % {'mode': mode, 'model': (model or "")})
  2399. # TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
  2400. # updated to be at least this new, we can kill off the elif
  2401. # blocks here
  2402. if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
  2403. cpu = vconfig.LibvirtConfigGuestCPU()
  2404. cpu.mode = mode
  2405. cpu.model = model
  2406. elif mode == "custom":
  2407. cpu = vconfig.LibvirtConfigGuestCPU()
  2408. cpu.model = model
  2409. elif mode == "host-model":
  2410. cpu = self.get_host_cpu_for_guest()
  2411. elif mode == "host-passthrough":
  2412. msg = _("Passthrough of the host CPU was requested but "
  2413. "this libvirt version does not support this feature")
  2414. raise exception.NovaException(msg)
  2415. return cpu
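    # Example nova.conf settings exercising the branches above
    # (illustrative):
    #
    #     [libvirt]
    #     cpu_mode = custom
    #     cpu_model = SandyBridge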
  2416. def get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
  2417. image_type=None):
  2418. image = self.image_backend.image(instance,
  2419. name,
  2420. image_type)
  2421. disk_info = disk_mapping[name]
  2422. return image.libvirt_info(disk_info['bus'],
  2423. disk_info['dev'],
  2424. disk_info['type'],
  2425. self.disk_cachemode,
  2426. inst_type['extra_specs'],
  2427. self.get_hypervisor_version())
  2428. def get_guest_storage_config(self, instance, image_meta,
  2429. disk_info,
  2430. rescue, block_device_info,
  2431. inst_type):
  2432. devices = []
  2433. disk_mapping = disk_info['mapping']
  2434. block_device_mapping = driver.block_device_info_get_mapping(
  2435. block_device_info)
  2436. if CONF.libvirt.virt_type == "lxc":
  2437. fs = vconfig.LibvirtConfigGuestFilesys()
  2438. fs.source_type = "mount"
  2439. fs.source_dir = os.path.join(
  2440. libvirt_utils.get_instance_path(instance), 'rootfs')
  2441. devices.append(fs)
  2442. else:
  2443. if rescue:
  2444. diskrescue = self.get_guest_disk_config(instance,
  2445. 'disk.rescue',
  2446. disk_mapping,
  2447. inst_type)
  2448. devices.append(diskrescue)
  2449. diskos = self.get_guest_disk_config(instance,
  2450. 'disk',
  2451. disk_mapping,
  2452. inst_type)
  2453. devices.append(diskos)
  2454. else:
  2455. if 'disk' in disk_mapping:
  2456. diskos = self.get_guest_disk_config(instance,
  2457. 'disk',
  2458. disk_mapping,
  2459. inst_type)
  2460. devices.append(diskos)
  2461. if 'disk.local' in disk_mapping:
  2462. disklocal = self.get_guest_disk_config(instance,
  2463. 'disk.local',
  2464. disk_mapping,
  2465. inst_type)
  2466. devices.append(disklocal)
  2467. self.virtapi.instance_update(
  2468. nova_context.get_admin_context(), instance['uuid'],
  2469. {'default_ephemeral_device':
  2470. block_device.prepend_dev(disklocal.target_dev)})
  2471. for idx, eph in enumerate(
  2472. driver.block_device_info_get_ephemerals(
  2473. block_device_info)):
  2474. diskeph = self.get_guest_disk_config(
  2475. instance,
  2476. blockinfo.get_eph_disk(idx),
  2477. disk_mapping, inst_type)
  2478. devices.append(diskeph)
  2479. if 'disk.swap' in disk_mapping:
  2480. diskswap = self.get_guest_disk_config(instance,
  2481. 'disk.swap',
  2482. disk_mapping,
  2483. inst_type)
  2484. devices.append(diskswap)
  2485. self.virtapi.instance_update(
  2486. nova_context.get_admin_context(), instance['uuid'],
  2487. {'default_swap_device': block_device.prepend_dev(
  2488. diskswap.target_dev)})
  2489. for vol in block_device_mapping:
  2490. connection_info = vol['connection_info']
  2491. vol_dev = block_device.prepend_dev(vol['mount_device'])
  2492. info = disk_mapping[vol_dev]
  2493. cfg = self.volume_driver_method('connect_volume',
  2494. connection_info,
  2495. info)
  2496. devices.append(cfg)
  2497. if 'disk.config' in disk_mapping:
  2498. diskconfig = self.get_guest_disk_config(instance,
  2499. 'disk.config',
  2500. disk_mapping,
  2501. inst_type,
  2502. 'raw')
  2503. devices.append(diskconfig)
  2504. for d in devices:
  2505. self.set_cache_mode(d)
  2506. return devices
  2507. def get_guest_config_sysinfo(self, instance):
  2508. sysinfo = vconfig.LibvirtConfigGuestSysinfo()
  2509. sysinfo.system_manufacturer = version.vendor_string()
  2510. sysinfo.system_product = version.product_string()
  2511. sysinfo.system_version = version.version_string_with_package()
  2512. sysinfo.system_serial = self.get_host_uuid()
  2513. sysinfo.system_uuid = instance['uuid']
  2514. return sysinfo
  2515. def get_guest_pci_device(self, pci_device):
  2516. dbsf = pci_utils.parse_address(pci_device['address'])
  2517. dev = vconfig.LibvirtConfigGuestHostdevPCI()
  2518. dev.domain, dev.bus, dev.slot, dev.function = dbsf
        # only kvm and qemu support managed mode
  2520. if CONF.libvirt.virt_type in ('xen',):
  2521. dev.managed = 'no'
  2522. if CONF.libvirt.virt_type in ('kvm', 'qemu'):
  2523. dev.managed = 'yes'
  2524. return dev
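    # For a PCI device at '0000:06:00.1' on kvm, the hostdev config above
    # renders roughly as (illustrative sketch):
    #
    #     <hostdev mode='subsystem' type='pci' managed='yes'>
    #       <source>
    #         <address domain='0x0000' bus='0x06' slot='0x00'
    #                  function='0x1'/>
    #       </source>
    #     </hostdev>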
  2525. def get_guest_config(self, instance, network_info, image_meta,
  2526. disk_info, rescue=None, block_device_info=None):
  2527. """Get config data for parameters.
  2528. :param rescue: optional dictionary that should contain the key
  2529. 'ramdisk_id' if a ramdisk is needed for the rescue image and
  2530. 'kernel_id' if a kernel is needed for the rescue image.
  2531. """
  2532. inst_type = self.virtapi.flavor_get(
  2533. nova_context.get_admin_context(read_deleted='yes'),
  2534. instance['instance_type_id'])
  2535. inst_path = libvirt_utils.get_instance_path(instance)
  2536. disk_mapping = disk_info['mapping']
  2537. CONSOLE = "console=tty0 console=ttyS0"
  2538. guest = vconfig.LibvirtConfigGuest()
  2539. guest.virt_type = CONF.libvirt.virt_type
  2540. guest.name = instance['name']
  2541. guest.uuid = instance['uuid']
  2542. guest.memory = inst_type['memory_mb'] * 1024
  2543. guest.vcpus = inst_type['vcpus']
  2544. guest.cpuset = CONF.libvirt.vcpu_pin_set
  2545. quota_items = ['cpu_shares', 'cpu_period', 'cpu_quota']
  2546. for key, value in inst_type['extra_specs'].iteritems():
  2547. scope = key.split(':')
  2548. if len(scope) > 1 and scope[0] == 'quota':
  2549. if scope[1] in quota_items:
  2550. setattr(guest, scope[1], value)
  2551. guest.cpu = self.get_guest_cpu_config()
  2552. if 'root' in disk_mapping:
  2553. root_device_name = block_device.prepend_dev(
  2554. disk_mapping['root']['dev'])
  2555. else:
  2556. root_device_name = None
  2557. if root_device_name:
  2558. # NOTE(yamahata):
  2559. # for nova.api.ec2.cloud.CloudController.get_metadata()
  2560. self.virtapi.instance_update(
  2561. nova_context.get_admin_context(), instance['uuid'],
  2562. {'root_device_name': root_device_name})
  2563. guest.os_type = vm_mode.get_from_instance(instance)
  2564. if guest.os_type is None:
  2565. if CONF.libvirt.virt_type == "lxc":
  2566. guest.os_type = vm_mode.EXE
            elif CONF.libvirt.virt_type == "uml":
                guest.os_type = vm_mode.UML
            elif CONF.libvirt.virt_type == "xen":
                guest.os_type = vm_mode.XEN
            else:
                guest.os_type = vm_mode.HVM

        if CONF.libvirt.virt_type == "xen" and guest.os_type == vm_mode.HVM:
            guest.os_loader = CONF.libvirt.xen_hvmloader_path

        if CONF.libvirt.virt_type in ("kvm", "qemu"):
            caps = self.get_host_capabilities()
            if caps.host.cpu.arch in ("i686", "x86_64"):
                guest.sysinfo = self.get_guest_config_sysinfo(instance)
                guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()

        if CONF.libvirt.virt_type == "lxc":
            guest.os_type = vm_mode.EXE
            guest.os_init_path = "/sbin/init"
            guest.os_cmdline = CONSOLE
        elif CONF.libvirt.virt_type == "uml":
            guest.os_type = vm_mode.UML
            guest.os_kernel = "/usr/bin/linux"
            guest.os_root = root_device_name
        else:
            if ((CONF.libvirt.virt_type == "xen" and
                 guest.os_type == vm_mode.XEN)):
                guest.os_root = root_device_name
            else:
                guest.os_type = vm_mode.HVM

            if rescue:
                if rescue.get('kernel_id'):
                    guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
                    if CONF.libvirt.virt_type == "xen":
                        guest.os_cmdline = "ro"
                    else:
                        guest.os_cmdline = ("root=%s %s" % (root_device_name,
                                                            CONSOLE))

                if rescue.get('ramdisk_id'):
                    guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
            elif instance['kernel_id']:
                guest.os_kernel = os.path.join(inst_path, "kernel")
                if CONF.libvirt.virt_type == "xen":
                    guest.os_cmdline = "ro"
                else:
                    guest.os_cmdline = ("root=%s %s" % (root_device_name,
                                                        CONSOLE))
                if instance['ramdisk_id']:
                    guest.os_initrd = os.path.join(inst_path, "ramdisk")
            else:
                guest.os_boot_dev = blockinfo.get_boot_order(disk_info)

        if ((CONF.libvirt.virt_type != "lxc" and
             CONF.libvirt.virt_type != "uml")):
            guest.acpi = True
            guest.apic = True

        # NOTE(mikal): Microsoft Windows expects the clock to be in
        # "localtime". If the clock is set to UTC, then you can use a
        # registry key to let windows know, but Microsoft says this is
        # buggy in http://support.microsoft.com/kb/2687252
        clk = vconfig.LibvirtConfigGuestClock()
        if instance['os_type'] == 'windows':
            LOG.info(_('Configuring timezone for windows instance to '
                       'localtime'), instance=instance)
            clk.offset = 'localtime'
        else:
            clk.offset = 'utc'
        guest.set_clock(clk)

        if CONF.libvirt.virt_type == "kvm":
            # TODO(berrange) One day this should be per-guest
            # OS type configurable
            tmpit = vconfig.LibvirtConfigGuestTimer()
            tmpit.name = "pit"
            tmpit.tickpolicy = "delay"

            tmrtc = vconfig.LibvirtConfigGuestTimer()
            tmrtc.name = "rtc"
            tmrtc.tickpolicy = "catchup"

            clk.add_timer(tmpit)
            clk.add_timer(tmrtc)

        for cfg in self.get_guest_storage_config(instance,
                                                 image_meta,
                                                 disk_info,
                                                 rescue,
                                                 block_device_info,
                                                 inst_type):
            guest.add_device(cfg)

        for vif in network_info:
            cfg = self.vif_driver.get_config(instance,
                                             vif,
                                             image_meta,
                                             inst_type)
            guest.add_device(cfg)

        if ((CONF.libvirt.virt_type == "qemu" or
             CONF.libvirt.virt_type == "kvm")):
            # The QEMU 'pty' driver throws away any data if no
            # client app is connected. Thus we can't get away
            # with a single type=pty console. Instead we have
            # to configure two separate consoles.
            consolelog = vconfig.LibvirtConfigGuestSerial()
            consolelog.type = "file"
            consolelog.source_path = self._get_console_log_path(instance)
            guest.add_device(consolelog)

            consolepty = vconfig.LibvirtConfigGuestSerial()
            consolepty.type = "pty"
            guest.add_device(consolepty)
        else:
            consolepty = vconfig.LibvirtConfigGuestConsole()
            consolepty.type = "pty"
            guest.add_device(consolepty)

        # We want a tablet if VNC is enabled,
        # or SPICE is enabled and the SPICE agent is disabled.
        # NB: this implies that if both SPICE + VNC are enabled
        # at the same time, we'll get the tablet whether the
        # SPICE agent is used or not.
        need_usb_tablet = False
        if CONF.vnc_enabled:
            need_usb_tablet = CONF.libvirt.use_usb_tablet
        elif CONF.spice.enabled and not CONF.spice.agent_enabled:
            need_usb_tablet = CONF.libvirt.use_usb_tablet

        if need_usb_tablet and guest.os_type == vm_mode.HVM:
            tablet = vconfig.LibvirtConfigGuestInput()
            tablet.type = "tablet"
            tablet.bus = "usb"
            guest.add_device(tablet)

        if CONF.spice.enabled and CONF.spice.agent_enabled and \
                CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
            channel = vconfig.LibvirtConfigGuestChannel()
            channel.target_name = "com.redhat.spice.0"
            guest.add_device(channel)

        # NB: some versions of libvirt support both SPICE and VNC
        # at the same time. We're not trying to second guess which
        # those versions are. We'll just let libvirt report the
        # errors appropriately if the user enables both.
        if ((CONF.vnc_enabled and
             CONF.libvirt.virt_type not in ('lxc', 'uml'))):
            graphics = vconfig.LibvirtConfigGuestGraphics()
            graphics.type = "vnc"
            graphics.keymap = CONF.vnc_keymap
            graphics.listen = CONF.vncserver_listen
            guest.add_device(graphics)

        if CONF.spice.enabled and \
                CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
            graphics = vconfig.LibvirtConfigGuestGraphics()
            graphics.type = "spice"
            graphics.keymap = CONF.spice.keymap
            graphics.listen = CONF.spice.server_listen
            guest.add_device(graphics)

        # The QEMU guest agent is only supported by the 'qemu' and 'kvm'
        # hypervisors.
        if CONF.libvirt.virt_type in ('qemu', 'kvm'):
            qga_enabled = False
            # Enable qga only if the 'hw_qemu_guest_agent' property is set
            if (image_meta is not None and image_meta.get('properties') and
                    image_meta['properties'].get('hw_qemu_guest_agent')
                    is not None):
                hw_qga = image_meta['properties']['hw_qemu_guest_agent']
                if hw_qga.lower() == 'yes':
                    LOG.debug(_("Qemu guest agent is enabled through image "
                                "metadata"), instance=instance)
                    qga_enabled = True

            if qga_enabled:
                qga = vconfig.LibvirtConfigGuestChannel()
                qga.type = "unix"
                qga.target_name = "org.qemu.guest_agent.0"
                qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
                                   ("org.qemu.guest_agent.0",
                                    instance['name']))
                guest.add_device(qga)

        if CONF.libvirt.virt_type in ('xen', 'qemu', 'kvm'):
            for pci_dev in pci_manager.get_instance_pci_devs(instance):
                guest.add_device(self.get_guest_pci_device(pci_dev))
        else:
            if len(pci_manager.get_instance_pci_devs(instance)) > 0:
                raise exception.PciDeviceUnsupportedHypervisor(
                    type=CONF.libvirt.virt_type)

        return guest
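
    # For illustration only: the guest-agent channel above is driven by
    # image metadata, so an operator would typically tag the image in
    # glance, e.g. (hypothetical image id):
    #
    #   glance image-update --property hw_qemu_guest_agent=yes <image-id>
    #
    # which makes image_meta['properties']['hw_qemu_guest_agent'] == 'yes'
    # and causes the org.qemu.guest_agent.0 channel to be added.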

    def to_xml(self, context, instance, network_info, disk_info,
               image_meta=None, rescue=None,
               block_device_info=None, write_to_disk=False):
        # We should get image metadata every time for generating xml
        if image_meta is None:
            (image_service, image_id) = glance.get_remote_image_service(
                context, instance['image_ref'])
            image_meta = compute_utils.get_image_metadata(
                context, image_service, image_id, instance)
        LOG.debug(_('Start to_xml '
                    'network_info=%(network_info)s '
                    'disk_info=%(disk_info)s '
                    'image_meta=%(image_meta)s rescue=%(rescue)s '
                    'block_device_info=%(block_device_info)s'),
                  {'network_info': network_info, 'disk_info': disk_info,
                   'image_meta': image_meta, 'rescue': rescue,
                   'block_device_info': block_device_info},
                  instance=instance)
        conf = self.get_guest_config(instance, network_info, image_meta,
                                     disk_info, rescue, block_device_info)
        xml = conf.to_xml()

        if write_to_disk:
            instance_dir = libvirt_utils.get_instance_path(instance)
            xml_path = os.path.join(instance_dir, 'libvirt.xml')
            libvirt_utils.write_to_file(xml_path, xml)

        LOG.debug(_('End to_xml xml=%(xml)s'),
                  {'xml': xml}, instance=instance)
        return xml

    def _lookup_by_id(self, instance_id):
        """Retrieve libvirt domain object given an instance id.

        All libvirt error handling should be handled in this method and
        relevant nova exceptions should be raised in response.
        """
        try:
            return self._conn.lookupByID(instance_id)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                raise exception.InstanceNotFound(instance_id=instance_id)

            msg = (_("Error from libvirt while looking up %(instance_id)s: "
                     "[Error Code %(error_code)s] %(ex)s")
                   % {'instance_id': instance_id,
                      'error_code': error_code,
                      'ex': ex})
            raise exception.NovaException(msg)

    def _lookup_by_name(self, instance_name):
        """Retrieve libvirt domain object given an instance name.

        All libvirt error handling should be handled in this method and
        relevant nova exceptions should be raised in response.
        """
        try:
            return self._conn.lookupByName(instance_name)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                raise exception.InstanceNotFound(instance_id=instance_name)

            msg = (_('Error from libvirt while looking up %(instance_name)s: '
                     '[Error Code %(error_code)s] %(ex)s') %
                   {'instance_name': instance_name,
                    'error_code': error_code,
                    'ex': ex})
            raise exception.NovaException(msg)

    def get_info(self, instance):
        """Retrieve information from libvirt for a specific instance name.

        If a libvirt error is encountered during lookup, we might raise a
        NotFound exception or Error exception depending on how severe the
        libvirt error is.
        """
        virt_dom = self._lookup_by_name(instance['name'])
        (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
        return {'state': LIBVIRT_POWER_STATE[state],
                'max_mem': max_mem,
                'mem': mem,
                'num_cpu': num_cpu,
                'cpu_time': cpu_time,
                'id': virt_dom.ID()}
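
    # For illustration only: a typical return value for a running one-vCPU
    # guest might look like the following (all numbers are example values;
    # memory figures are in KiB and cpu_time is in nanoseconds, as reported
    # by libvirt's virDomainGetInfo):
    #
    #   {'state': power_state.RUNNING, 'max_mem': 2097152, 'mem': 2097152,
    #    'num_cpu': 1, 'cpu_time': 4185710000, 'id': 5}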

    def _create_domain(self, xml=None, domain=None,
                       instance=None, launch_flags=0, power_on=True):
        """Create a domain.

        Either domain or xml must be passed in. If both are passed, then
        the domain definition is overwritten from the xml.
        """
        inst_path = None
        if instance:
            inst_path = libvirt_utils.get_instance_path(instance)

        if CONF.libvirt.virt_type == 'lxc':
            if not inst_path:
                inst_path = None
            container_dir = os.path.join(inst_path, 'rootfs')
            fileutils.ensure_tree(container_dir)
            image = self.image_backend.image(instance, 'disk')
            container_root_device = disk.setup_container(
                image.path,
                container_dir=container_dir,
                use_cow=CONF.use_cow_images)

            # NOTE(GuanQiang): save the container root device name here,
            #                  used for detaching the linked image device
            #                  when deleting the lxc instance.
            if container_root_device:
                self.virtapi.instance_update(
                    nova_context.get_admin_context(), instance['uuid'],
                    {'root_device_name': container_root_device})

        if xml:
            try:
                domain = self._conn.defineXML(xml)
            except Exception as e:
                LOG.error(_("An error occurred while trying to define a "
                            "domain with xml: %s") % xml)
                raise e

        if power_on:
            try:
                domain.createWithFlags(launch_flags)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_("An error occurred while trying to launch a "
                                "defined domain with xml: %s") %
                              domain.XMLDesc(0))

        try:
            self._enable_hairpin(domain.XMLDesc(0))
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_("An error occurred while enabling hairpin mode on "
                            "domain with xml: %s") % domain.XMLDesc(0))

        # NOTE(uni): Now the container is running with its own private mount
        # namespace and so there is no need to keep the container rootfs
        # mounted in the host namespace
        if CONF.libvirt.virt_type == 'lxc':
            state = self.get_info(instance)['state']
            container_dir = os.path.join(inst_path, 'rootfs')
            if state == power_state.RUNNING:
                disk.clean_lxc_namespace(container_dir=container_dir)
            else:
                disk.teardown_container(container_dir=container_dir)

        return domain

    def _create_domain_and_network(self, context, xml, instance, network_info,
                                   block_device_info=None, power_on=True,
                                   reboot=False):
        """Do required network setup and create domain."""
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)

        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            disk_info = blockinfo.get_info_from_bdm(
                CONF.libvirt.virt_type, vol)
            conf = self.volume_driver_method('connect_volume',
                                             connection_info,
                                             disk_info)

            # cache device_path in connection_info -- required by encryptors
            if (not reboot and 'data' in connection_info and
                    'volume_id' in connection_info['data']):
                connection_info['data']['device_path'] = conf.source_path
                self.virtapi.block_device_mapping_update(
                    context, vol.id,
                    {'connection_info': jsonutils.dumps(connection_info)})

                volume_id = connection_info['data']['volume_id']
                encryption = encryptors.get_encryption_metadata(
                    context, self._volume_api, volume_id, connection_info)

                if encryption:
                    encryptor = self._get_volume_encryptor(connection_info,
                                                           encryption)
                    encryptor.attach_volume(context, **encryption)

        self.plug_vifs(instance, network_info)
        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance, network_info)
        domain = self._create_domain(xml, instance=instance, power_on=power_on)

        self.firewall_driver.apply_instance_filter(instance, network_info)
        return domain

    def get_all_block_devices(self):
        """
        Return all block devices in use on this node.
        """
        devices = []
        for dom_id in self.list_instance_ids():
            try:
                domain = self._lookup_by_id(dom_id)
                doc = etree.fromstring(domain.XMLDesc(0))
            except exception.InstanceNotFound:
                LOG.info(_("libvirt can't find a domain with id: %s") % dom_id)
                continue
            except Exception:
                continue
            ret = doc.findall('./devices/disk')
            for node in ret:
                if node.get('type') != 'block':
                    continue
                for child in node.getchildren():
                    if child.tag == 'source':
                        devices.append(child.get('dev'))
        return devices

    def get_disks(self, instance_name):
        """
        Note that this function takes an instance name.

        Returns a list of all block devices for this domain.
        """
        domain = self._lookup_by_name(instance_name)
        xml = domain.XMLDesc(0)

        try:
            doc = etree.fromstring(xml)
        except Exception:
            return []

        return filter(bool,
                      [target.get("dev")
                       for target in doc.findall('devices/disk/target')])

    def get_interfaces(self, xml):
        """
        Note that this function takes a domain xml.

        Returns a list of all network interfaces for this instance.
        """
        doc = None

        try:
            doc = etree.fromstring(xml)
        except Exception:
            return []

        interfaces = []

        ret = doc.findall('./devices/interface')

        for node in ret:
            devdst = None

            for child in list(node):
                if child.tag == 'target':
                    devdst = child.attrib['dev']

            if devdst is None:
                continue

            interfaces.append(devdst)

        return interfaces
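
    # For illustration only: a domain XML fragment such as
    #
    #   <interface type='bridge'>
    #     <target dev='vnet0'/>
    #   </interface>
    #
    # would make get_interfaces() return ['vnet0'].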

    def _get_cpuset_ids(self):
        """
        Parse the vcpu_pin_set config.

        Returns a sorted list of pcpu ids that can be used by instances.
        """
        cpuset_ids = set()
        cpuset_reject_ids = set()
        for rule in CONF.libvirt.vcpu_pin_set.split(','):
            rule = rule.strip()
            # Handle empty entries produced by consecutive commas
            if len(rule) < 1:
                continue
            # Note the count limit in the .split() call
            range_parts = rule.split('-', 1)
            if len(range_parts) > 1:
                # So, this was a range; start by converting the parts to ints
                try:
                    start, end = [int(p.strip()) for p in range_parts]
                except ValueError:
                    raise exception.Invalid(_("Invalid range expression %r")
                                            % rule)
                # Make sure it's a valid range
                if start > end:
                    raise exception.Invalid(_("Invalid range expression %r")
                                            % rule)
                # Add available pcpu ids to set
                cpuset_ids |= set(range(start, end + 1))
            elif rule[0] == '^':
                # Not a range, the rule is an exclusion rule; convert to int
                try:
                    cpuset_reject_ids.add(int(rule[1:].strip()))
                except ValueError:
                    raise exception.Invalid(_("Invalid exclusion "
                                              "expression %r") % rule)
            else:
                # OK, a single PCPU to include; convert to int
                try:
                    cpuset_ids.add(int(rule))
                except ValueError:
                    raise exception.Invalid(_("Invalid inclusion "
                                              "expression %r") % rule)

        # Use sets to handle the exclusion rules for us
        cpuset_ids -= cpuset_reject_ids

        if not cpuset_ids:
            raise exception.Invalid(_("No CPUs available after parsing %r") %
                                    CONF.libvirt.vcpu_pin_set)
        # This will convert the set to a sorted list for us
        return sorted(cpuset_ids)
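
    # A worked example of the grammar above, assuming the (hypothetical)
    # configuration vcpu_pin_set = "0-3,^2,7":
    #
    #   "0-3" -> cpuset_ids |= {0, 1, 2, 3}   (inclusion range)
    #   "^2"  -> cpuset_reject_ids |= {2}     (exclusion rule)
    #   "7"   -> cpuset_ids |= {7}            (single inclusion)
    #
    # After subtracting the rejects, the method returns [0, 1, 3, 7].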

    def get_vcpu_total(self):
        """Get the number of vcpus available on the physical host.

        :returns: the number of cpu cores that instances can use.
        """
        if self._vcpu_total != 0:
            return self._vcpu_total

        try:
            total_pcpus = self._conn.getInfo()[2]
        except libvirt.libvirtError:
            LOG.warn(_("Cannot get the number of cpus, because this "
                       "function is not implemented for this platform. "))
            return 0

        if CONF.libvirt.vcpu_pin_set is None:
            self._vcpu_total = total_pcpus
            return self._vcpu_total

        available_ids = self._get_cpuset_ids()
        if available_ids[-1] >= total_pcpus:
            raise exception.Invalid(_("Invalid vcpu_pin_set config, "
                                      "out of hypervisor cpu range."))
        self._vcpu_total = len(available_ids)
        return self._vcpu_total

    def get_memory_mb_total(self):
        """Get the total memory size (MB) of the physical host.

        :returns: the total amount of memory (MB).
        """
        return self._conn.getInfo()[1]

    @staticmethod
    def get_local_gb_info():
        """Get local storage info of the compute node in GB.

        :returns: A dict containing:
             :total: How big the overall usable filesystem is (in gigabytes)
             :free: How much space is free (in gigabytes)
             :used: How much space is used (in gigabytes)
        """
        if CONF.libvirt.images_type == 'lvm':
            info = libvirt_utils.get_volume_group_info(
                CONF.libvirt.images_volume_group)
        else:
            info = libvirt_utils.get_fs_info(CONF.instances_path)

        for (k, v) in info.iteritems():
            info[k] = v / unit.Gi

        return info
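
    # For illustration only: on a host with a 250 GB filesystem backing
    # CONF.instances_path, of which 50 GB is in use, this would return
    # something like {'total': 250, 'free': 200, 'used': 50} (values are
    # examples; the division by unit.Gi converts bytes to whole gigabytes).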

    def get_vcpu_used(self):
        """Get the number of vcpus in use on the physical host.

        :returns: the total number of vcpus currently in use.
        """

        total = 0
        if CONF.libvirt.virt_type == 'lxc':
            return total + 1

        dom_ids = self.list_instance_ids()
        for dom_id in dom_ids:
            try:
                dom = self._lookup_by_id(dom_id)
                vcpus = dom.vcpus()
                if vcpus is None:
                    LOG.debug(_("couldn't obtain the vcpu count from domain "
                                "id: %s") % dom_id)
                else:
                    total += len(vcpus[1])
            except exception.InstanceNotFound:
                LOG.info(_("libvirt can't find a domain with id: %s") % dom_id)
                continue
            # NOTE(gtt116): give other tasks a chance to run.
            greenthread.sleep(0)
        return total

    def get_memory_mb_used(self):
        """Get the used memory size (MB) of the physical host.

        :returns: the total usage of memory (MB).
        """
        if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
            return 0

        m = open('/proc/meminfo').read().split()
        idx1 = m.index('MemFree:')
        idx2 = m.index('Buffers:')
        idx3 = m.index('Cached:')
        if CONF.libvirt.virt_type == 'xen':
            used = 0
            for domain_id in self.list_instance_ids():
                try:
                    dom_mem = int(self._lookup_by_id(domain_id).info()[2])
                except exception.InstanceNotFound:
                    LOG.info(_("libvirt can't find a domain with id: %s")
                             % domain_id)
                    continue
                # skip dom0
                if domain_id != 0:
                    used += dom_mem
                else:
                    # the memory reported by dom0 is greater than what it
                    # is actually using, so subtract the free, buffered
                    # and cached amounts
                    used += (dom_mem -
                             (int(m[idx1 + 1]) +
                              int(m[idx2 + 1]) +
                              int(m[idx3 + 1])))
            # Convert it to MB
            return used / 1024
        else:
            avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
            # Convert it to MB
            return self.get_memory_mb_total() - avail / 1024
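
    # A worked example of the non-Xen branch above (values hypothetical):
    # if /proc/meminfo reports MemFree: 1048576 kB, Buffers: 262144 kB and
    # Cached: 786432 kB, then avail = 2097152 kB, i.e. 2048 MB after the
    # division by 1024; with get_memory_mb_total() == 8192 the method
    # returns 8192 - 2048 = 6144 MB used.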

    def get_hypervisor_type(self):
        """Get hypervisor type.

        :returns: hypervisor type (ex. qemu)
        """
        return self._conn.getType()

    def get_hypervisor_version(self):
        """Get hypervisor version.

        :returns: hypervisor version (ex. 12003)
        """
        # NOTE(justinsb): getVersion moved between libvirt versions
        # Trying to be compatible with older versions is a lost cause
        # But ... we can at least give the user a nice message
        method = getattr(self._conn, 'getVersion', None)
        if method is None:
            raise exception.NovaException(_("libvirt version is too old"
                                            " (does not support getVersion)"))
        # NOTE(justinsb): If we wanted to get the version, we could:
        # method = getattr(libvirt, 'getVersion', None)
        # NOTE(justinsb): This would then rely on a proper version check
        return method()
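
    # For illustration only: libvirt encodes versions as
    # major * 1,000,000 + minor * 1,000 + release, so the example value
    # 12003 in the docstring above decodes to version 0.12.3.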

    def get_hypervisor_hostname(self):
        """Returns the hostname of the hypervisor."""
        hostname = self._conn.getHostname()
        if not hasattr(self, '_hypervisor_hostname'):
            self._hypervisor_hostname = hostname
        elif hostname != self._hypervisor_hostname:
            LOG.error(_('Hostname has changed from %(old)s '
                        'to %(new)s. A restart is required to take effect.'
                        ) % {'old': self._hypervisor_hostname,
                             'new': hostname})
        return self._hypervisor_hostname

    def get_instance_capabilities(self):
        """Get hypervisor instance capabilities

        Returns a list of tuples that describe instances the
        hypervisor is capable of hosting.  Each tuple consists
        of the triplet (arch, hypervisor_type, vm_mode).

        :returns: List of tuples describing instance capabilities
        """
        caps = self.get_host_capabilities()
        instance_caps = list()
        for g in caps.guests:
            for dt in g.domtype:
                instance_cap = (g.arch, dt, g.ostype)
                instance_caps.append(instance_cap)

        return instance_caps

    def get_cpu_info(self):
        """Get cpuinfo information.

        Obtains cpu features from virConnect.getCapabilities
        and returns them as a json string.

        :return: see above description
        """
        caps = self.get_host_capabilities()
        cpu_info = dict()

        cpu_info['arch'] = caps.host.cpu.arch
        cpu_info['model'] = caps.host.cpu.model
        cpu_info['vendor'] = caps.host.cpu.vendor

        topology = dict()
        topology['sockets'] = caps.host.cpu.sockets
        topology['cores'] = caps.host.cpu.cores
        topology['threads'] = caps.host.cpu.threads
        cpu_info['topology'] = topology

        features = list()
        for f in caps.host.cpu.features:
            features.append(f.name)
        cpu_info['features'] = features

        # TODO(berrange): why do we bother converting the
        # libvirt capabilities XML into a special JSON format?
        # The data format is different across all the drivers
        # so we could just return the raw capabilities XML
        # which 'compare_cpu' could use directly
        #
        # That said, arch_filter.py now seems to rely on
        # the libvirt driver's format which suggests this
        # data format needs to be standardized across drivers
        return jsonutils.dumps(cpu_info)
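
    # For illustration only, the JSON produced above looks roughly like
    # (field values are examples, not taken from a real host):
    #
    #   {"arch": "x86_64", "model": "SandyBridge", "vendor": "Intel",
    #    "topology": {"sockets": 1, "cores": 4, "threads": 2},
    #    "features": ["pge", "avx", "vmx"]}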

    def _get_pcidev_info(self, devname):
        """Returns a dict describing a PCI device."""

        def _get_device_type(cfgdev):
            """Get a PCI device's device type.

            An assignable PCI device can be a normal PCI device,
            a SR-IOV Physical Function (PF), or a SR-IOV Virtual
            Function (VF). Only normal PCI devices or SR-IOV VFs
            are assignable, while SR-IOV PFs are always owned by
            the hypervisor.

            Note that a PCI device with SR-IOV capability that is
            not enabled is reported as a normal PCI device.
            """
            for fun_cap in cfgdev.pci_capability.fun_capability:
                if len(fun_cap.device_addrs) != 0:
                    if fun_cap.type == 'virt_functions':
                        return {'dev_type': 'type-PF'}
                    if fun_cap.type == 'phys_function':
                        return {'dev_type': 'type-VF',
                                'phys_function': fun_cap.device_addrs}
            return {'dev_type': 'type-PCI'}

        virtdev = self._conn.nodeDeviceLookupByName(devname)
        xmlstr = virtdev.XMLDesc(0)
        cfgdev = vconfig.LibvirtConfigNodeDevice()
        cfgdev.parse_str(xmlstr)

        address = "%04x:%02x:%02x.%1x" % (
            cfgdev.pci_capability.domain,
            cfgdev.pci_capability.bus,
            cfgdev.pci_capability.slot,
            cfgdev.pci_capability.function)
        device = {
            "dev_id": cfgdev.name,
            "address": address,
            "product_id": cfgdev.pci_capability.product_id[2:6],
            "vendor_id": cfgdev.pci_capability.vendor_id[2:6],
            }

        # required by the database model
        device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
        device.update(_get_device_type(cfgdev))
        return device
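
    # For illustration only: a (hypothetical) SR-IOV virtual function at
    # PCI address 0000:04:10.1 with vendor id 0x8086 and product id 0x1520
    # would come back as something like:
    #
    #   {'dev_id': 'pci_0000_04_10_1', 'address': '0000:04:10.1',
    #    'vendor_id': '8086', 'product_id': '1520',
    #    'label': 'label_8086_1520', 'dev_type': 'type-VF',
    #    'phys_function': [...]}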

    def _pci_device_assignable(self, device):
        if device['dev_type'] == 'type-PF':
            return False
        return self.dev_filter.device_assignable(device)

    def get_pci_passthrough_devices(self):
        """Get host PCI device information.

        Obtains PCI device information from libvirt and returns it
        as a json string.

        Each device information is a dictionary, with mandatory keys
        of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',
        'label' and other optional device specific information.

        Refer to objects/pci_device.py for more information about these
        keys.

        :returns: a list of the assignable pci devices information
        """
        pci_info = []

        dev_names = self._conn.listDevices('pci', 0) or []

        for name in dev_names:
            pci_dev = self._get_pcidev_info(name)
            if self._pci_device_assignable(pci_dev):
                pci_info.append(pci_dev)

        return jsonutils.dumps(pci_info)

    def get_all_volume_usage(self, context, compute_host_bdms):
        """Return usage info for volumes attached to vms on
           a given host.
        """
        vol_usage = []

        for instance_bdms in compute_host_bdms:
            instance = instance_bdms['instance']

            for bdm in instance_bdms['instance_bdms']:
                vol_stats = []
                mountpoint = bdm['device_name']
                if mountpoint.startswith('/dev/'):
                    mountpoint = mountpoint[5:]
                volume_id = bdm['volume_id']

                LOG.debug(_("Trying to get stats for the volume %s"),
                          volume_id)
                vol_stats = self.block_stats(instance['name'], mountpoint)

                if vol_stats:
                    stats = dict(volume=volume_id,
                                 instance=instance,
                                 rd_req=vol_stats[0],
                                 rd_bytes=vol_stats[1],
                                 wr_req=vol_stats[2],
                                 wr_bytes=vol_stats[3],
                                 flush_operations=vol_stats[4])
                    LOG.debug(
                        _("Got volume usage stats for the volume=%(volume)s,"
                          " rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, "
                          "wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d"),
                        stats, instance=instance)
                    vol_usage.append(stats)

        return vol_usage

    def block_stats(self, instance_name, disk):
        """
        Note that this function takes an instance name.
        """
        try:
            domain = self._lookup_by_name(instance_name)
            return domain.blockStats(disk)
        except libvirt.libvirtError as e:
            errcode = e.get_error_code()
            LOG.info(_('Getting block stats failed, device might have '
                       'been detached. Instance=%(instance_name)s '
                       'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
                     {'instance_name': instance_name, 'disk': disk,
                      'errcode': errcode, 'e': e})
        except exception.InstanceNotFound:
            LOG.info(_('Could not find domain in libvirt for instance %s. '
                       'Cannot get block stats for device'), instance_name)

    def interface_stats(self, instance_name, interface):
        """
        Note that this function takes an instance name.
        """
        domain = self._lookup_by_name(instance_name)
        return domain.interfaceStats(interface)

    def get_console_pool_info(self, console_type):
        # TODO(mdragon): console proxy should be implemented for libvirt,
        #                in case someone wants to use it with kvm or
        #                such. For now return fake data.
        return {'address': '127.0.0.1',
                'username': 'fakeuser',
                'password': 'fakepassword'}

    def refresh_security_group_rules(self, security_group_id):
        self.firewall_driver.refresh_security_group_rules(security_group_id)

    def refresh_security_group_members(self, security_group_id):
        self.firewall_driver.refresh_security_group_members(security_group_id)

    def refresh_instance_security_rules(self, instance):
        self.firewall_driver.refresh_instance_security_rules(instance)

    def refresh_provider_fw_rules(self):
        self.firewall_driver.refresh_provider_fw_rules()

    def get_available_resource(self, nodename):
        """Retrieve resource information.

        This method is called when nova-compute launches, and
        as part of a periodic task that records the results in the DB.

        :param nodename: will be put in PCI device
        :returns: dictionary containing resource info
        """
        # Temporary: convert supported_instances into a string, while keeping
        # the RPC version as JSON. Can be changed when RPC broadcast is removed
        stats = self.host_state.get_host_stats(refresh=True)
        stats['supported_instances'] = jsonutils.dumps(
            stats['supported_instances'])
        return stats

    def check_instance_shared_storage_local(self, context, instance):
        dirpath = libvirt_utils.get_instance_path(instance)

        if not os.path.exists(dirpath):
            return None

        fd, tmp_file = tempfile.mkstemp(dir=dirpath)
        LOG.debug(_("Creating tmpfile %s to verify with other "
                    "compute node that the instance is on "
                    "the same shared storage."),
                  tmp_file, instance=instance)
        os.close(fd)
        return {"filename": tmp_file}

    def check_instance_shared_storage_remote(self, context, data):
        return os.path.exists(data['filename'])

    def check_instance_shared_storage_cleanup(self, context, data):
        fileutils.delete_if_exists(data["filename"])

    def check_can_live_migrate_destination(self, context, instance,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        """Check if it is possible to execute live migration.

        This runs checks on the destination host, and then calls
        back to the source host to check the results.

        :param context: security context
        :param instance: nova.db.sqlalchemy.models.Instance
        :param block_migration: if true, prepare for block migration
        :param disk_over_commit: if true, allow disk over commit
        :returns: a dict containing:
             :filename: name of the tmpfile under CONF.instances_path
             :block_migration: whether this is block migration
             :disk_over_commit: disk-over-commit factor on dest host
             :disk_available_mb: available disk space on dest host
        """
        disk_available_mb = None
        if block_migration:
            disk_available_gb = dst_compute_info['disk_available_least']
            disk_available_mb = \
                (disk_available_gb * 1024) - CONF.reserved_host_disk_mb

        # Compare CPU
        source_cpu_info = src_compute_info['cpu_info']
        self._compare_cpu(source_cpu_info)

        # Create file on storage, to be checked on source host
        filename = self._create_shared_storage_test_file()

        return {"filename": filename,
                "block_migration": block_migration,
                "disk_over_commit": disk_over_commit,
                "disk_available_mb": disk_available_mb}

    def check_can_live_migrate_destination_cleanup(self, context,
                                                   dest_check_data):
        """Do required cleanup on dest host after check_can_live_migrate calls

        :param context: security context
        """
        filename = dest_check_data["filename"]
        self._cleanup_shared_storage_test_file(filename)

    def check_can_live_migrate_source(self, context, instance,
                                      dest_check_data):
        """Check if it is possible to execute live migration.

        This checks if the live migration can succeed, based on the
        results from check_can_live_migrate_destination.

        :param context: security context
        :param instance: nova.db.sqlalchemy.models.Instance
        :param dest_check_data: result of check_can_live_migrate_destination
        :returns: a dict containing migration info
        """
        # Check shared storage connectivity.
        # For block migration, instances_path should not be on shared
        # storage.
        source = CONF.host
        filename = dest_check_data["filename"]
        block_migration = dest_check_data["block_migration"]
        is_volume_backed = dest_check_data.get('is_volume_backed', False)
        has_local_disks = bool(
            jsonutils.loads(self.get_instance_disk_info(instance['name'])))

        shared = self._check_shared_storage_test_file(filename)

        if block_migration:
            if shared:
                reason = _("Block migration can not be used "
                           "with shared storage.")
                raise exception.InvalidLocalStorage(reason=reason, path=source)
            self._assert_dest_node_has_enough_disk(context, instance,
                    dest_check_data['disk_available_mb'],
                    dest_check_data['disk_over_commit'])
        elif not shared and (not is_volume_backed or has_local_disks):
            reason = _("Live migration can not be used "
                       "without shared storage.")
            raise exception.InvalidSharedStorage(reason=reason, path=source)
        dest_check_data.update({"is_shared_storage": shared})

        # NOTE(mikal): include the instance directory name here because it
        # doesn't yet exist on the destination but we want to force that
        # same name to be used
        instance_path = libvirt_utils.get_instance_path(instance,
                                                        relative=True)
        dest_check_data['instance_relative_path'] = instance_path

        return dest_check_data

    def _assert_dest_node_has_enough_disk(self, context, instance,
                                          available_mb, disk_over_commit):
        """Checks if destination has enough disk for block migration."""
        # Libvirt supports qcow2 disk format, which is usually compressed
        # on compute nodes.
        # A real (compressed) disk image may be enlarged up to its
        # "virtual disk size", which is specified as the maximum disk size.
        # (See qemu-img -f path-to-disk)
        # The scheduler recognizes that the destination host still has
        # enough disk space
        # if real disk size < available disk size
        # when disk_over_commit is True,
        # otherwise virtual disk size < available disk size.

        available = 0
        if available_mb:
            available = available_mb * unit.Mi

        ret = self.get_instance_disk_info(instance['name'])
        disk_infos = jsonutils.loads(ret)

        necessary = 0
        if disk_over_commit:
            for info in disk_infos:
                necessary += int(info['disk_size'])
        else:
            for info in disk_infos:
                necessary += int(info['virt_disk_size'])

        # Check that available disk > necessary disk
        if (available - necessary) < 0:
            reason = (_('Unable to migrate %(instance_uuid)s: '
                        'Disk of instance is too large (available'
                        ' on destination host: %(available)s '
                        '< need: %(necessary)s)') %
                      {'instance_uuid': instance['uuid'],
                       'available': available,
                       'necessary': necessary})
            raise exception.MigrationPreCheckError(reason=reason)
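
    # A worked example of the check above (hypothetical numbers): with
    # available_mb = 10240, available = 10240 * unit.Mi bytes (10 GiB).
    # For a single qcow2 disk whose 'disk_size' is 2 GiB and whose
    # 'virt_disk_size' is 20 GiB, the migration passes the check when
    # disk_over_commit is True (2 GiB < 10 GiB) but fails when it is
    # False (20 GiB > 10 GiB).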

    def _compare_cpu(self, cpu_info):
        """Checks that the host cpu is compatible with a cpu given by xml.

        "xml" must be a part of libvirt.openAuth(...).getCapabilities().
        The return value follows virCPUCompareResult: if the return value
        is greater than 0, live migration is allowed.
        'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'

        :param cpu_info: json string that shows cpu features
                         (see get_cpu_info())
        :returns:
            None. If the given cpu info is not compatible with this
            server, an exception is raised.
        """
        # NOTE(berendt): virConnectCompareCPU not working for Xen
        if CONF.libvirt.virt_type == 'xen':
            return 1

        info = jsonutils.loads(cpu_info)
        LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
        cpu = vconfig.LibvirtConfigCPU()
        cpu.arch = info['arch']
        cpu.model = info['model']
        cpu.vendor = info['vendor']
        cpu.sockets = info['topology']['sockets']
        cpu.cores = info['topology']['cores']
        cpu.threads = info['topology']['threads']
        for f in info['features']:
            cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))

        u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
        m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
        # unknown character exists in xml, then libvirt complains
        try:
            ret = self._conn.compareCPU(cpu.to_xml(), 0)
        except libvirt.libvirtError as e:
            with excutils.save_and_reraise_exception():
                ret = unicode(e)
                LOG.error(m, {'ret': ret, 'u': u})

        if ret <= 0:
            LOG.error(m, {'ret': ret, 'u': u})
            raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})

    def _create_shared_storage_test_file(self):
        """Makes tmpfile under CONF.instances_path."""
        dirpath = CONF.instances_path
        fd, tmp_file = tempfile.mkstemp(dir=dirpath)
        LOG.debug(_("Creating tmpfile %s to notify other "
                    "compute nodes that they should mount "
                    "the same storage.") % tmp_file)
        os.close(fd)
        return os.path.basename(tmp_file)

    def _check_shared_storage_test_file(self, filename):
        """Confirms existence of the tmpfile under CONF.instances_path.

        Returns False if it cannot confirm that the tmpfile exists.
        """
        tmp_file = os.path.join(CONF.instances_path, filename)
        if not os.path.exists(tmp_file):
            return False
        else:
            return True

    def _cleanup_shared_storage_test_file(self, filename):
        """Removes the tmpfile under CONF.instances_path."""
        tmp_file = os.path.join(CONF.instances_path, filename)
        os.remove(tmp_file)

    def ensure_filtering_rules_for_instance(self, instance, network_info,
                                            time_module=None):
        """Ensure that an instance's filtering rules are enabled.

        When migrating an instance, we need the filtering rules to
        be configured on the destination host before starting the
        migration.

        Also, when restarting the compute service, we need to ensure
        that filtering rules exist for all running instances.
        """
        if not time_module:
            time_module = greenthread

        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance,
                                                     network_info)

        # nwfilters may be defined in a separate thread in the case
        # of libvirt non-blocking mode, so we wait for completion
        timeout_count = range(CONF.live_migration_retry_count)
        while timeout_count:
            if self.firewall_driver.instance_filter_exists(instance,
                                                           network_info):
                break
            timeout_count.pop()
            if len(timeout_count) == 0:
                msg = _('The firewall filter for %s does not exist')
                raise exception.NovaException(msg % instance["name"])
            time_module.sleep(1)

    def filter_defer_apply_on(self):
        self.firewall_driver.filter_defer_apply_on()

    def filter_defer_apply_off(self):
        self.firewall_driver.filter_defer_apply_off()

    def live_migration(self, context, instance, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        """Spawn a live_migration operation for distributing high load.

        :params context: security context
        :params instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.
        :params block_migration: if true, do block migration.
        :params migrate_data: implementation specific params

        """
        greenthread.spawn(self._live_migration, context, instance, dest,
                          post_method, recover_method, block_migration,
                          migrate_data)

    def _live_migration(self, context, instance, dest, post_method,
                        recover_method, block_migration=False,
                        migrate_data=None):
        """Do live migration.

        :params context: security context
        :params instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.
        :params migrate_data: implementation specific params
        """
        # Do live migration.
        try:
            if block_migration:
                flaglist = CONF.libvirt.block_migration_flag.split(',')
            else:
                flaglist = CONF.libvirt.live_migration_flag.split(',')
            flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
            logical_sum = reduce(lambda x, y: x | y, flagvals)

            dom = self._lookup_by_name(instance["name"])
            dom.migrateToURI(CONF.libvirt.live_migration_uri % dest,
                             logical_sum,
                             None,
                             CONF.libvirt.live_migration_bandwidth)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Live Migration failure: %s"), e,
                          instance=instance)
                recover_method(context, instance, dest, block_migration)

        # Waiting for completion of live_migration.
        timer = loopingcall.FixedIntervalLoopingCall(f=None)

        def wait_for_live_migration():
            """waiting for live migration completion."""
            try:
                self.get_info(instance)['state']
            except exception.InstanceNotFound:
                timer.stop()
                post_method(context, instance, dest, block_migration,
                            migrate_data)

        timer.f = wait_for_live_migration
        timer.start(interval=0.5).wait()
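
    # For illustration only: the flag reduction above simply ORs the named
    # libvirt constants together. Assuming a (hypothetical) setting of
    # live_migration_flag = "VIR_MIGRATE_UNDEFINE_SOURCE,
    # VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE", and libvirt's values
    # VIR_MIGRATE_LIVE = 1, VIR_MIGRATE_PEER2PEER = 2 and
    # VIR_MIGRATE_UNDEFINE_SOURCE = 16, logical_sum works out to
    # 1 | 2 | 16 = 19, which is what gets passed to migrateToURI().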

    def _fetch_instance_kernel_ramdisk(self, context, instance):
        """Download kernel and ramdisk for instance in instance directory."""
        instance_dir = libvirt_utils.get_instance_path(instance)
        if instance['kernel_id']:
            libvirt_utils.fetch_image(context,
                                      os.path.join(instance_dir, 'kernel'),
                                      instance['kernel_id'],
                                      instance['user_id'],
                                      instance['project_id'])
            if instance['ramdisk_id']:
                libvirt_utils.fetch_image(context,
                                          os.path.join(instance_dir,
                                                       'ramdisk'),
                                          instance['ramdisk_id'],
                                          instance['user_id'],
                                          instance['project_id'])

    def pre_live_migration(self, context, instance, block_device_info,
                           network_info, disk_info, migrate_data=None):
        """Prepare for live migration."""
        # Steps for volume backed instance live migration w/o shared storage.
        is_shared_storage = True
        is_volume_backed = False
        is_block_migration = True
        instance_relative_path = None
        if migrate_data:
            is_shared_storage = migrate_data.get('is_shared_storage', True)
            is_volume_backed = migrate_data.get('is_volume_backed', False)
            is_block_migration = migrate_data.get('block_migration', True)
            instance_relative_path = migrate_data.get('instance_relative_path')

        if not is_shared_storage:
            # NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
            # because we are ensuring that the same instance directory name
            # is used as was at the source
            if instance_relative_path:
                instance_dir = os.path.join(CONF.instances_path,
                                            instance_relative_path)
            else:
                instance_dir = libvirt_utils.get_instance_path(instance)

            if os.path.exists(instance_dir):
                raise exception.DestinationDiskExists(path=instance_dir)
            os.mkdir(instance_dir)

            # Ensure images and backing files are present.
            self._create_images_and_backing(context, instance, instance_dir,
                                            disk_info)

        if is_volume_backed and not (is_block_migration or is_shared_storage):
            # Touch the console.log file, required by libvirt.
            console_file = self._get_console_log_path(instance)
            libvirt_utils.file_open(console_file, 'a').close()

            # If the image has a kernel and ramdisk, download them
            # in the normal way.
            self._fetch_instance_kernel_ramdisk(context, instance)

        # Establishing connection to volume server.
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            disk_info = blockinfo.get_info_from_bdm(
                CONF.libvirt.virt_type, vol)
            self.volume_driver_method('connect_volume',
                                      connection_info,
                                      disk_info)

        # We call plug_vifs before the compute manager calls
        # ensure_filtering_rules_for_instance, to ensure bridge is set up.
        # The retry is necessary because requests arrive continuously, and
        # when concurrent requests hit iptables it complains.
        max_retry = CONF.live_migration_retry_count
        for cnt in range(max_retry):
            try:
                self.plug_vifs(instance, network_info)
                break
            except processutils.ProcessExecutionError:
                if cnt == max_retry - 1:
                    raise
                else:
                    LOG.warn(_('plug_vifs() failed %(cnt)d. Retry up to '
                               '%(max_retry)d.'),
                             {'cnt': cnt,
                              'max_retry': max_retry},
                             instance=instance)
                    greenthread.sleep(1)

    def _create_images_and_backing(self, context, instance, instance_dir,
                                   disk_info_json):
        """
        :params context: security context
        :params instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params instance_dir:
            instance path to use, calculated externally to handle block
            migrating an instance with an old style instance path
        :params disk_info_json:
            json strings specified in get_instance_disk_info
        """
        if not disk_info_json:
            disk_info = []
        else:
            disk_info = jsonutils.loads(disk_info_json)

        for info in disk_info:
            base = os.path.basename(info['path'])
            # Get image type and create empty disk image, and
            # create backing file in case of qcow2.
            instance_disk = os.path.join(instance_dir, base)
            if not info['backing_file'] and not os.path.exists(instance_disk):
                libvirt_utils.create_image(info['type'], instance_disk,
                                           info['disk_size'])
            elif info['backing_file']:
                # Creating the backing file follows the same path as
                # spawning an instance.
                cache_name = os.path.basename(info['backing_file'])

                image = self.image_backend.image(instance,
                                                 instance_disk,
                                                 CONF.libvirt.images_type)
                image.cache(fetch_func=libvirt_utils.fetch_image,
                            context=context,
                            filename=cache_name,
                            image_id=instance['image_ref'],
                            user_id=instance['user_id'],
                            project_id=instance['project_id'],
                            size=info['virt_disk_size'])

        # If the image has a kernel and ramdisk, download them
        # in the normal way.
        self._fetch_instance_kernel_ramdisk(context, instance)

    def post_live_migration(self, context, instance, block_device_info,
                            migrate_data=None):
        # Disconnect from volume server
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            disk_dev = vol['mount_device'].rpartition("/")[2]
            self.volume_driver_method('disconnect_volume',
                                      connection_info,
                                      disk_dev)

    def post_live_migration_at_destination(self, context,
                                           instance,
                                           network_info,
                                           block_migration,
                                           block_device_info=None):
        """Post operation of live migration at destination host.

        :param context: security context
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param network_info: instance network information
        :param block_migration: if true, post operation of block_migration.
        """
        # Define migrated instance, otherwise, suspend/destroy does not work.
        dom_list = self._conn.listDefinedDomains()
        if instance["name"] not in dom_list:
            # In case of block migration, destination does not have
            # libvirt.xml
            disk_info = blockinfo.get_disk_info(
                CONF.libvirt.virt_type, instance)
            # NOTE: block_device_info must be passed by keyword; passing it
            # positionally would bind it to to_xml()'s image_meta parameter.
            self.to_xml(context, instance, network_info, disk_info,
                        block_device_info=block_device_info,
                        write_to_disk=True)
            # libvirt.xml should be made by to_xml(), but libvirt
            # does not accept the to_xml() result, since the uuid is
            # not included in it.
            dom = self._lookup_by_name(instance["name"])
            self._conn.defineXML(dom.XMLDesc(0))

    def get_instance_disk_info(self, instance_name, xml=None,
                               block_device_info=None):
        """Retrieve information about the disks of an instance, used in
        preparation for block migration.

        :params instance_name: name of the instance whose disks are examined
        :return:
            json strings with below format::

                "[{'path':'disk', 'type':'raw',
                  'virt_disk_size':'10737418240',
                  'backing_file':'backing_file',
                  'disk_size':'83886080'},...]"

        """
        # NOTE (rmk): Passing the domain XML into this function is optional.
        #             When it is not passed, we attempt to extract it from
        #             the pre-existing definition.
        if xml is None:
            try:
                virt_dom = self._lookup_by_name(instance_name)
                xml = virt_dom.XMLDesc(0)
            except libvirt.libvirtError as ex:
                error_code = ex.get_error_code()
                msg = (_('Error from libvirt while getting description of '
                         '%(instance_name)s: [Error Code %(error_code)s] '
                         '%(ex)s') %
                       {'instance_name': instance_name,
                        'error_code': error_code,
                        'ex': ex})
                LOG.warn(msg)
                raise exception.InstanceNotFound(instance_id=instance_name)

        # NOTE (rmk): When block_device_info is provided, we will use it to
        #             filter out devices which are actually volumes.
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)

        volume_devices = set()
        for vol in block_device_mapping:
            disk_dev = vol['mount_device'].rpartition("/")[2]
            volume_devices.add(disk_dev)

        disk_info = []
        doc = etree.fromstring(xml)
        disk_nodes = doc.findall('.//devices/disk')
        path_nodes = doc.findall('.//devices/disk/source')
        driver_nodes = doc.findall('.//devices/disk/driver')
        target_nodes = doc.findall('.//devices/disk/target')

        for cnt, path_node in enumerate(path_nodes):
            disk_type = disk_nodes[cnt].get('type')
            path = path_node.get('file')
            target = target_nodes[cnt].attrib['dev']

            if not path:
                LOG.debug(_('skipping disk for %s as it does not have a path'),
                          instance_name)
                continue

            if disk_type != 'file':
                LOG.debug(_('skipping %s since it looks like a volume'), path)
                continue

            if target in volume_devices:
                LOG.debug(_('skipping disk %(path)s (%(target)s) as it is a '
                            'volume'), {'path': path, 'target': target})
                continue

            # get the real disk size or
            # raise a localized error if image is unavailable
            dk_size = int(os.path.getsize(path))

            disk_type = driver_nodes[cnt].get('type')
            if disk_type == "qcow2":
                backing_file = libvirt_utils.get_disk_backing_file(path)
                virt_size = disk.get_disk_size(path)
                over_commit_size = int(virt_size) - dk_size
            else:
                backing_file = ""
                virt_size = 0
                over_commit_size = 0

            disk_info.append({'type': disk_type,
                              'path': path,
                              'virt_disk_size': virt_size,
                              'backing_file': backing_file,
                              'disk_size': dk_size,
                              'over_committed_disk_size': over_commit_size})
        return jsonutils.dumps(disk_info)

    def get_disk_over_committed_size_total(self):
        """Return total over committed disk size for all instances."""
        # Disk size that all instances use: virtual_size - disk_size
        instances_name = self.list_instances()
        disk_over_committed_size = 0
        for i_name in instances_name:
            try:
                disk_infos = jsonutils.loads(
                    self.get_instance_disk_info(i_name))
                for info in disk_infos:
                    disk_over_committed_size += int(
                        info['over_committed_disk_size'])
            except OSError as e:
                if e.errno == errno.ENOENT:
                    LOG.error(_('Getting disk size of %(i_name)s: %(e)s'),
                              {'i_name': i_name, 'e': e})
                else:
                    raise
            except exception.InstanceNotFound:
                # Instance was deleted during the check so ignore it
                pass
            # NOTE(gtt116): give other tasks a chance to run.
            greenthread.sleep(0)
        return disk_over_committed_size
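
    # A worked example (hypothetical numbers): a qcow2 disk with a virtual
    # size of 10 GiB that currently occupies 2 GiB on the filesystem
    # contributes 10 - 2 = 8 GiB of over-committed size; this method sums
    # that value across every instance on the host.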

    def unfilter_instance(self, instance, network_info):
        """See comments of same method in firewall_driver."""
        self.firewall_driver.unfilter_instance(instance,
                                               network_info=network_info)

    def get_host_stats(self, refresh=False):
        """Return the current state of the host.

        If 'refresh' is True, update the stats first.
        """
        return self.host_state.get_host_stats(refresh=refresh)

    def get_host_cpu_stats(self):
        """Return the current CPU state of the host."""
        # Extract node's CPU statistics.
        stats = self._conn.getCPUStats(libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
        # getInfo() returns various information about the host node;
        # element 3 is the expected CPU frequency.
        stats["frequency"] = self._conn.getInfo()[3]
        return stats

    def get_host_uptime(self, host):
        """Returns the result of calling "uptime"."""
        # NOTE(dprince): host seems to be ignored for this call and in
        #                other compute drivers as well. Perhaps we should
        #                remove it?
        out, err = utils.execute('env', 'LANG=C', 'uptime')
        return out

    def manage_image_cache(self, context, all_instances):
        """Manage the local cache of images."""
        self.image_cache_manager.verify_base_images(context, all_instances)

    def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
                                  shared_storage=False):
        """Used only for cleanup in case migrate_disk_and_power_off fails."""
        try:
            if os.path.exists(inst_base_resize):
                utils.execute('rm', '-rf', inst_base)
                utils.execute('mv', inst_base_resize, inst_base)
                if not shared_storage:
                    utils.execute('ssh', dest, 'rm', '-rf', inst_base)
        except Exception:
            pass

    def _is_storage_shared_with(self, dest, inst_base):
        # NOTE (rmk): There are two methods of determining whether we are
        #             on the same filesystem: the source and dest IP are the
        #             same, or we create a file on the dest system via SSH
        #             and check whether the source system can also see it.
        shared_storage = (dest == self.get_host_ip_addr())
        if not shared_storage:
            tmp_file = uuid.uuid4().hex + '.tmp'
            tmp_path = os.path.join(inst_base, tmp_file)

            try:
                utils.execute('ssh', dest, 'touch', tmp_path)
                if os.path.exists(tmp_path):
                    shared_storage = True
                    os.unlink(tmp_path)
                else:
                    utils.execute('ssh', dest, 'rm', tmp_path)
            except Exception:
                pass
        return shared_storage
  3925. def migrate_disk_and_power_off(self, context, instance, dest,
  3926. flavor, network_info,
  3927. block_device_info=None):
  3928. LOG.debug(_("Starting migrate_disk_and_power_off"),
  3929. instance=instance)
  3930. disk_info_text = self.get_instance_disk_info(instance['name'],
  3931. block_device_info=block_device_info)
  3932. disk_info = jsonutils.loads(disk_info_text)
        # Copy disks to the destination. Rename the instance dir to
        # <name>_resize first, so the same scheme also works when the
        # instance dir lives on shared storage (e.g. NFS).
        inst_base = libvirt_utils.get_instance_path(instance)
        inst_base_resize = inst_base + "_resize"
        shared_storage = self._is_storage_shared_with(dest, inst_base)

        # Try to create the directory on the remote compute node. If this
        # fails we pass the exception up the stack so failures are caught
        # here, early in the process.
        if not shared_storage:
            utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
        self.power_off(instance)

        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            disk_dev = vol['mount_device'].rpartition("/")[2]
            self.volume_driver_method('disconnect_volume',
                                      connection_info,
                                      disk_dev)
        try:
            utils.execute('mv', inst_base, inst_base_resize)
            # If we are migrating the instance with shared storage then
            # recreate the directory locally. For a remote node the
            # directory has already been created above.
            if shared_storage:
                dest = None
                utils.execute('mkdir', '-p', inst_base)
            for info in disk_info:
                # Assume inst_base == dirname(info['path']).
                img_path = info['path']
                fname = os.path.basename(img_path)
                from_path = os.path.join(inst_base_resize, fname)
                if info['type'] == 'qcow2' and info['backing_file']:
                    tmp_path = from_path + "_rbase"
                    # merge backing file
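                    # (A qcow2 -> qcow2 convert collapses the backing file
                    # into the output, so the copy is self-contained.)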
                    utils.execute('qemu-img', 'convert', '-f', 'qcow2',
                                  '-O', 'qcow2', from_path, tmp_path)

                    if shared_storage:
                        utils.execute('mv', tmp_path, img_path)
                    else:
                        libvirt_utils.copy_image(tmp_path, img_path, host=dest)
                        utils.execute('rm', '-f', tmp_path)

                else:  # raw or qcow2 with no backing file
                    libvirt_utils.copy_image(from_path, img_path, host=dest)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._cleanup_remote_migration(dest, inst_base,
                                               inst_base_resize,
                                               shared_storage)

        return disk_info_text

    def _wait_for_running(self, instance):
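        """Looping-call body: finish once the domain reports RUNNING."""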
        state = self.get_info(instance)['state']

        if state == power_state.RUNNING:
            LOG.info(_("Instance running successfully."), instance=instance)
            raise loopingcall.LoopingCallDone()

    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
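        """Complete a resize on the destination host: resize the copied
        disks, regenerate the guest XML and (optionally) boot the domain.
        """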
  3992. LOG.debug(_("Starting finish_migration"), instance=instance)
  3993. # resize disks. only "disk" and "disk.local" are necessary.
  3994. disk_info = jsonutils.loads(disk_info)
  3995. for info in disk_info:
  3996. fname = os.path.basename(info['path'])
  3997. if fname == 'disk':
  3998. size = instance['root_gb']
  3999. elif fname == 'disk.local':
  4000. size = instance['ephemeral_gb']
  4001. else:
  4002. size = 0
  4003. size *= unit.Gi
            # If we have a non-partitioned image that we can extend,
            # ensure it is in 'raw' format so we can extend the file system.
            fmt = info['type']
            if (size and fmt == 'qcow2' and
                    disk.can_resize_image(info['path'], size) and
                    disk.is_image_partitionless(info['path'], use_cow=True)):
                path_raw = info['path'] + '_raw'
                utils.execute('qemu-img', 'convert', '-f', 'qcow2',
                              '-O', 'raw', info['path'], path_raw)
                utils.execute('mv', path_raw, info['path'])
                fmt = 'raw'

            if size:
                use_cow = fmt == 'qcow2'
                disk.extend(info['path'], size, use_cow=use_cow)
            if fmt == 'raw' and CONF.use_cow_images:
                # Convert back to qcow2 (no backing_file though) so that
                # snapshots remain available.
                path_qcow = info['path'] + '_qcow'
                utils.execute('qemu-img', 'convert', '-f', 'raw',
                              '-O', 'qcow2', info['path'], path_qcow)
                utils.execute('mv', path_qcow, info['path'])

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            block_device_info,
                                            image_meta)
        # _create_image is assumed to do nothing if a target file exists.
        self._create_image(context, instance,
                           disk_mapping=disk_info['mapping'],
                           network_info=network_info,
                           block_device_info=None, inject_files=False)

        xml = self.to_xml(context, instance, network_info, disk_info,
                          block_device_info=block_device_info,
                          write_to_disk=True)
        self._create_domain_and_network(context, xml, instance, network_info,
                                        block_device_info, power_on)
        if power_on:
            timer = loopingcall.FixedIntervalLoopingCall(
                self._wait_for_running,
                instance)
            timer.start(interval=0.5).wait()

    def _cleanup_failed_migration(self, inst_base):
        """Make sure that a failed migrate doesn't prevent us from rolling
        back in a revert.
        """
        try:
            shutil.rmtree(inst_base)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
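        """Undo a resize: recreate the guest from its pre-resize directory."""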
  4055. LOG.debug(_("Starting finish_revert_migration"),
  4056. instance=instance)
  4057. inst_base = libvirt_utils.get_instance_path(instance)
  4058. inst_base_resize = inst_base + "_resize"
  4059. # NOTE(danms): if we're recovering from a failed migration,
  4060. # make sure we don't have a left-over same-host base directory
  4061. # that would conflict. Also, don't fail on the rename if the
  4062. # failure happened early.
  4063. if os.path.exists(inst_base_resize):
  4064. self._cleanup_failed_migration(inst_base)
  4065. utils.execute('mv', inst_base_resize, inst_base)
  4066. disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
  4067. instance,
  4068. block_device_info)
  4069. xml = self.to_xml(context, instance, network_info, disk_info,
  4070. block_device_info=block_device_info)
  4071. self._create_domain_and_network(context, xml, instance, network_info,
  4072. block_device_info, power_on)
  4073. if power_on:
  4074. timer = loopingcall.FixedIntervalLoopingCall(
  4075. self._wait_for_running,
  4076. instance)
  4077. timer.start(interval=0.5).wait()
    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM."""
        self._cleanup_resize(instance, network_info)

    def get_diagnostics(self, instance):
        def get_io_devices(xml_doc):
            """Get the list of I/O devices from the XML document."""
  4084. result = {"volumes": [], "ifaces": []}
  4085. try:
  4086. doc = etree.fromstring(xml_doc)
  4087. except Exception:
  4088. return result
  4089. blocks = [('./devices/disk', 'volumes'),
  4090. ('./devices/interface', 'ifaces')]
  4091. for block, key in blocks:
  4092. section = doc.findall(block)
  4093. for node in section:
  4094. for child in node.getchildren():
  4095. if child.tag == 'target' and child.get('dev'):
  4096. result[key].append(child.get('dev'))
  4097. return result

        domain = self._lookup_by_name(instance['name'])
        output = {}
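        # Diagnostics are returned as one flat dict keyed by device name,
        # e.g. 'cpu0_time', 'vda_read_req', 'vnet0_rx' (illustrative keys;
        # the actual names depend on the guest's devices).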
        # Get CPU time; this might raise an exception if the method is
        # not supported by the underlying hypervisor being used by
        # libvirt.
        try:
            cputime = domain.vcpus()[0]
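            # vcpus() returns (per-vcpu info, cpu map); each info tuple is
            # (number, state, cpuTime in ns, cpu), hence index [2] below.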
            for i in range(len(cputime)):
                output["cpu" + str(i) + "_time"] = cputime[i][2]
        except libvirt.libvirtError:
            pass
        # Get I/O status.
        xml = domain.XMLDesc(0)
        dom_io = get_io_devices(xml)
        for disk in dom_io["volumes"]:
            try:
                # blockStats might raise an exception if the method
                # is not supported by the underlying hypervisor being
                # used by libvirt.
                stats = domain.blockStats(disk)
                output[disk + "_read_req"] = stats[0]
                output[disk + "_read"] = stats[1]
                output[disk + "_write_req"] = stats[2]
                output[disk + "_write"] = stats[3]
                output[disk + "_errors"] = stats[4]
            except libvirt.libvirtError:
                pass
  4125. for interface in dom_io["ifaces"]:
  4126. try:
  4127. # interfaceStats might launch an exception if the method
  4128. # is not supported by the underlying hypervisor being
  4129. # used by libvirt
  4130. stats = domain.interfaceStats(interface)
  4131. output[interface + "_rx"] = stats[0]
  4132. output[interface + "_rx_packets"] = stats[1]
  4133. output[interface + "_rx_errors"] = stats[2]
  4134. output[interface + "_rx_drop"] = stats[3]
  4135. output[interface + "_tx"] = stats[4]
  4136. output[interface + "_tx_packets"] = stats[5]
  4137. output[interface + "_tx_errors"] = stats[6]
  4138. output[interface + "_tx_drop"] = stats[7]
  4139. except libvirt.libvirtError:
  4140. pass
  4141. output["memory"] = domain.maxMemory()
  4142. # memoryStats might launch an exception if the method
  4143. # is not supported by the underlying hypervisor being
  4144. # used by libvirt
  4145. try:
  4146. mem = domain.memoryStats()
  4147. for key in mem.keys():
  4148. output["memory-" + key] = mem[key]
  4149. except (libvirt.libvirtError, AttributeError):
  4150. pass
  4151. return output
    def instance_on_disk(self, instance):
        # Ensure directories exist and are writable.
        instance_path = libvirt_utils.get_instance_path(instance)
        LOG.debug(_('Checking instance files accessibility %s'), instance_path)
        return os.access(instance_path, os.W_OK)

    def inject_network_info(self, instance, nw_info):
        self.firewall_driver.setup_basic_filtering(instance, nw_info)

    def _delete_instance_files(self, instance):
        # NOTE(mikal): a shim to handle this file not using instance objects
        # everywhere. Remove this when that conversion happens.
        context = nova_context.get_admin_context()
        inst_obj = instance_obj.Instance.get_by_uuid(context, instance['uuid'])

        # NOTE(mikal): this code should be pushed up a layer when this shim is
        # removed.
        attempts = int(inst_obj.system_metadata.get('clean_attempts', '0'))
        success = self.delete_instance_files(inst_obj)
        inst_obj.system_metadata['clean_attempts'] = str(attempts + 1)
        if success:
            inst_obj.cleaned = True
        inst_obj.save(context)

    def delete_instance_files(self, instance):
        target = libvirt_utils.get_instance_path(instance)
        if os.path.exists(target):
            LOG.info(_('Deleting instance files %s'), target,
                     instance=instance)
            try:
                shutil.rmtree(target)
            except OSError as e:
                LOG.error(_('Failed to cleanup directory %(target)s: '
                            '%(e)s'), {'target': target, 'e': e},
                          instance=instance)

        # It is possible that the delete failed; if so, don't mark the
        # instance as cleaned.
        if os.path.exists(target):
            LOG.info(_('Deletion of %s failed'), target, instance=instance)
            return False

        LOG.info(_('Deletion of %s complete'), target, instance=instance)
        return True

    @property
    def need_legacy_block_device_info(self):
        return False

    def default_root_device_name(self, instance, image_meta, root_bdm):
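        """Pick the default root device name, e.g. /dev/vda on virtio."""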
        disk_bus = blockinfo.get_disk_bus_for_device_type(
            CONF.libvirt.virt_type, image_meta, "disk")
        cdrom_bus = blockinfo.get_disk_bus_for_device_type(
            CONF.libvirt.virt_type, image_meta, "cdrom")
        root_info = blockinfo.get_root_info(
            CONF.libvirt.virt_type, image_meta, root_bdm, disk_bus,
            cdrom_bus)
        return block_device.prepend_dev(root_info['dev'])

    def default_device_names_for_instance(self, instance, root_device_name,
                                          *block_device_lists):
        ephemerals, swap, block_device_mapping = block_device_lists[:3]

        def _update_func(bdm):
            bdm_id = bdm.get('id')
            self.virtapi.block_device_mapping_update(
                nova_context.get_admin_context(),
                bdm_id, bdm)

        blockinfo.default_device_names(CONF.libvirt.virt_type,
                                       instance, root_device_name,
                                       _update_func,
                                       ephemerals, swap,
                                       block_device_mapping)


class HostState(object):
    """Manages information about the compute node through libvirt."""

    def __init__(self, driver):
        super(HostState, self).__init__()
        self._stats = {}
        self.driver = driver
        self.update_status()

    def get_host_stats(self, refresh=False):
        """Return the current state of the host.

        If 'refresh' is True, update the stats first.
        """
        if refresh or not self._stats:
            self.update_status()
        return self._stats

    def update_status(self):
        """Retrieve status info from libvirt."""
        def _get_disk_available_least():
            """Return the 'disk available least' size in GB.

            This is the disk space available when block_migration is
            invoked with disk_over_commit=False: the free disk space
            minus the amount by which existing instances' virtual disks
            may still grow beyond their current on-disk size.
            """
            disk_free_gb = disk_info_dict['free']
            disk_over_committed = (self.driver.
                    get_disk_over_committed_size_total())
            # Disk available least size.
            available_least = disk_free_gb * unit.Gi - disk_over_committed
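            # e.g. 100 GiB free with 30 GiB of outstanding over-commit
            # leaves 70 GiB reported (illustrative numbers).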
            return (available_least / unit.Gi)

        LOG.debug(_("Updating host stats"))
        disk_info_dict = self.driver.get_local_gb_info()
        data = {}

        #NOTE(dprince): calling capabilities before getVersion works around
        # an initialization issue with some versions of Libvirt (1.0.5.5).
        # See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
        # See: https://bugs.launchpad.net/nova/+bug/1215593
        data["supported_instances"] = \
            self.driver.get_instance_capabilities()

        data["vcpus"] = self.driver.get_vcpu_total()
        data["memory_mb"] = self.driver.get_memory_mb_total()
        data["local_gb"] = disk_info_dict['total']
        data["vcpus_used"] = self.driver.get_vcpu_used()
        data["memory_mb_used"] = self.driver.get_memory_mb_used()
        data["local_gb_used"] = disk_info_dict['used']
        data["hypervisor_type"] = self.driver.get_hypervisor_type()
        data["hypervisor_version"] = self.driver.get_hypervisor_version()
        data["hypervisor_hostname"] = self.driver.get_hypervisor_hostname()
        data["cpu_info"] = self.driver.get_cpu_info()
        data['disk_available_least'] = _get_disk_available_least()

        data['pci_passthrough_devices'] = \
            self.driver.get_pci_passthrough_devices()

        self._stats = data

        return data