OpenStack Compute (Nova)

vm_utils.py (97 KB)

# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""

import contextlib
import math
import os
import time
from xml.dom import minidom
from xml.parsers import expat

from eventlet import greenthread
from os_xenapi.client import disk_management
from os_xenapi.client import host_network
from os_xenapi.client import vm_management
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_utils import versionutils
import six
from six.moves import range
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest

from nova.api.metadata import base as instance_metadata
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova.objects import diagnostics
from nova.objects import fields as obj_fields
import nova.privsep.fs
import nova.privsep.xenapi
from nova import utils
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import localfs as vfsimpl
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import netutils
from nova.virt.xenapi import agent
from nova.virt.xenapi.image import utils as image_utils
from nova.virt.xenapi import volume_utils

LOG = logging.getLogger(__name__)

CONF = nova.conf.CONF

XENAPI_POWER_STATE = {
    'Halted': power_state.SHUTDOWN,
    'Running': power_state.RUNNING,
    'Paused': power_state.PAUSED,
    'Suspended': power_state.SUSPENDED,
    'Crashed': power_state.CRASHED}


SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
MAX_VDI_CHAIN_SIZE = 16
PROGRESS_INTERVAL_SECONDS = 300
DD_BLOCKSIZE = 65536

# Fudge factor to allow for the VHD chain to be slightly larger than
# the partitioned space. Otherwise, legitimate images near their
# maximum allowed size can fail on build with FlavorDiskSmallerThanImage.
VHD_SIZE_CHECK_FUDGE_FACTOR_GB = 10

class ImageType(object):
    """Enumeration class for distinguishing different image types

    | 0 - kernel image (goes on dom0's filesystem)
    | 1 - ramdisk image (goes on dom0's filesystem)
    | 2 - disk image (local SR, partitioned by objectstore plugin)
    | 3 - raw disk image (local SR, NOT partitioned by plugin)
    | 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
    |     linux, HVM assumed for Windows)
    | 5 - ISO disk image (local SR, NOT partitioned by plugin)
    | 6 - config drive
    """

    KERNEL = 0
    RAMDISK = 1
    DISK = 2
    DISK_RAW = 3
    DISK_VHD = 4
    DISK_ISO = 5
    DISK_CONFIGDRIVE = 6

    _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO,
            DISK_CONFIGDRIVE)

    KERNEL_STR = "kernel"
    RAMDISK_STR = "ramdisk"
    DISK_STR = "root"
    DISK_RAW_STR = "os_raw"
    DISK_VHD_STR = "vhd"
    DISK_ISO_STR = "iso"
    DISK_CONFIGDRIVE_STR = "configdrive"

    _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
             DISK_ISO_STR, DISK_CONFIGDRIVE_STR)

    @classmethod
    def to_string(cls, image_type):
        return dict(zip(cls._ids, ImageType._strs)).get(image_type)

    @classmethod
    def get_role(cls, image_type_id):
        """Get the role played by the image, based on its type."""
        return {
            cls.KERNEL: 'kernel',
            cls.RAMDISK: 'ramdisk',
            cls.DISK: 'root',
            cls.DISK_RAW: 'root',
            cls.DISK_VHD: 'root',
            cls.DISK_ISO: 'iso',
            cls.DISK_CONFIGDRIVE: 'configdrive'
        }.get(image_type_id)

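# NOTE (editorial illustration, not part of the upstream module): a minimal
# sketch of how the ImageType helpers map type ids to strings and roles,
# using only the constants defined above:
#
#     >>> ImageType.to_string(ImageType.DISK_VHD)
#     'vhd'
#     >>> ImageType.get_role(ImageType.DISK_RAW)
#     'root'
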
def get_vm_device_id(session, image_meta):
    # NOTE: device_id should be 2 for windows VMs which run new xentools
    # (>=6.1). Refer to http://support.citrix.com/article/CTX135099 for more
    # information.
    device_id = image_meta.properties.get('hw_device_id')

    # The device_id is required to be set for hypervisor version 6.1 and above
    if device_id:
        hypervisor_version = session.product_version
        if _hypervisor_supports_device_id(hypervisor_version):
            return device_id
        else:
            msg = _("Device id %(id)s specified is not supported by "
                    "hypervisor version %(version)s") % {
                'id': device_id, 'version': hypervisor_version}
            raise exception.NovaException(msg)


def _hypervisor_supports_device_id(version):
    version_as_string = '.'.join(str(v) for v in version)
    return versionutils.is_compatible('6.1', version_as_string)

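# NOTE (editorial illustration, not part of the upstream module): a rough
# sketch of the check above, assuming product_version is a tuple such as
# (6, 2, 0):
#
#     >>> _hypervisor_supports_device_id((6, 2, 0))
#     True     # '6.2.0' satisfies the minimum '6.1', so hw_device_id is used
#     >>> _hypervisor_supports_device_id((6, 0, 2))
#     False    # older than 6.1, so a specified hw_device_id is rejected
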
def create_vm(session, instance, name_label, kernel, ramdisk,
              use_pv_kernel=False, device_id=None):
    """Create a VM record. Returns new VM reference.

    The use_pv_kernel flag indicates whether the guest is HVM or PV.
    There are 3 scenarios:

        1. Using paravirtualization, kernel passed in
        2. Using paravirtualization, kernel within the image
        3. Using hardware virtualization
    """
    flavor = instance.get_flavor()
    mem = str(int(flavor.memory_mb) * units.Mi)
    vcpus = str(flavor.vcpus)

    vcpu_weight = flavor.vcpu_weight
    vcpu_params = {}
    if vcpu_weight is not None:
        # NOTE(johngarbutt) bug in XenServer 6.1 and 6.2 means
        # we need to specify both weight and cap for either to apply
        vcpu_params = {"weight": str(vcpu_weight), "cap": "0"}

    cpu_mask_list = hardware.get_vcpu_pin_set()
    if cpu_mask_list:
        cpu_mask = hardware.format_cpu_spec(cpu_mask_list,
                                            allow_ranges=False)
        vcpu_params["mask"] = cpu_mask

    viridian = 'true' if instance['os_type'] == 'windows' else 'false'

    rec = {
        'actions_after_crash': 'destroy',
        'actions_after_reboot': 'restart',
        'actions_after_shutdown': 'destroy',
        'affinity': '',
        'blocked_operations': {},
        'ha_always_run': False,
        'ha_restart_priority': '',
        'HVM_boot_params': {},
        'HVM_boot_policy': '',
        'is_a_template': False,
        'memory_dynamic_min': mem,
        'memory_dynamic_max': mem,
        'memory_static_min': '0',
        'memory_static_max': mem,
        'memory_target': mem,
        'name_description': '',
        'name_label': name_label,
        'other_config': {'nova_uuid': str(instance['uuid'])},
        'PCI_bus': '',
        'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
                     'viridian': viridian, 'timeoffset': '0'},
        'PV_args': '',
        'PV_bootloader': '',
        'PV_bootloader_args': '',
        'PV_kernel': '',
        'PV_legacy_args': '',
        'PV_ramdisk': '',
        'recommendations': '',
        'tags': [],
        'user_version': '0',
        'VCPUs_at_startup': vcpus,
        'VCPUs_max': vcpus,
        'VCPUs_params': vcpu_params,
        'xenstore_data': {'vm-data/allowvssprovider': 'false'}}

    # Complete VM configuration record according to the image type
    # non-raw/raw with PV kernel/raw in HVM mode
    if use_pv_kernel:
        rec['platform']['nx'] = 'false'
        if instance['kernel_id']:
            # 1. Kernel explicitly passed in, use that
            rec['PV_args'] = 'root=/dev/xvda1'
            rec['PV_kernel'] = kernel
            rec['PV_ramdisk'] = ramdisk
        else:
            # 2. Use kernel within the image
            rec['PV_bootloader'] = 'pygrub'
    else:
        # 3. Using hardware virtualization
        rec['platform']['nx'] = 'true'
        rec['HVM_boot_params'] = {'order': 'dc'}
        rec['HVM_boot_policy'] = 'BIOS order'

    if device_id:
        rec['platform']['device_id'] = str(device_id).zfill(4)

    vm_ref = session.VM.create(rec)
    LOG.debug('Created VM', instance=instance)
    return vm_ref

def destroy_vm(session, instance, vm_ref):
    """Destroys a VM record."""
    try:
        session.VM.destroy(vm_ref)
    except session.XenAPI.Failure:
        LOG.exception(_('Destroy VM failed'))
        return

    LOG.debug("VM destroyed", instance=instance)


def clean_shutdown_vm(session, instance, vm_ref):
    if is_vm_shutdown(session, vm_ref):
        LOG.warning("VM already halted, skipping shutdown...",
                    instance=instance)
        return True

    LOG.debug("Shutting down VM (cleanly)", instance=instance)
    try:
        session.call_xenapi('VM.clean_shutdown', vm_ref)
    except session.XenAPI.Failure:
        LOG.exception(_('Shutting down VM (cleanly) failed.'))
        return False
    return True


def hard_shutdown_vm(session, instance, vm_ref):
    if is_vm_shutdown(session, vm_ref):
        LOG.warning("VM already halted, skipping shutdown...",
                    instance=instance)
        return True

    LOG.debug("Shutting down VM (hard)", instance=instance)
    try:
        session.call_xenapi('VM.hard_shutdown', vm_ref)
    except session.XenAPI.Failure:
        LOG.exception(_('Shutting down VM (hard) failed'))
        return False
    return True


def is_vm_shutdown(session, vm_ref):
    state = get_power_state(session, vm_ref)
    if state == power_state.SHUTDOWN:
        return True
    return False


def is_enough_free_mem(session, instance):
    flavor = instance.get_flavor()
    mem = int(flavor.memory_mb) * units.Mi
    host_free_mem = int(session.call_xenapi("host.compute_free_memory",
                                            session.host_ref))
    return host_free_mem >= mem

def _should_retry_unplug_vbd(err):
    """Retry if the unplug failed with one of a few specific errors.

    The retryable errors include:
    1. DEVICE_DETACH_REJECTED
       For reasons which we don't understand, we're seeing the device
       still in use, even when all processes using the device should
       be dead.
    2. INTERNAL_ERROR
       Since XenServer 6.2, we also need to retry if we get INTERNAL_ERROR,
       as that error goes away when you retry.
    3. VM_MISSING_PV_DRIVERS
       NOTE(jianghuaw): It requires some time for the PV (paravirtualization)
       driver to be connected at VM booting, so retry if unplug failed
       with VM_MISSING_PV_DRIVERS.
    """
    can_retry_errs = (
        'DEVICE_DETACH_REJECTED',
        'INTERNAL_ERROR',
        'VM_MISSING_PV_DRIVERS',
    )

    return err in can_retry_errs


def unplug_vbd(session, vbd_ref, this_vm_ref):
    # make sure we attempt the unplug at least once
    max_attempts = max(0, CONF.xenserver.num_vbd_unplug_retries) + 1
    for num_attempt in range(1, max_attempts + 1):
        try:
            if num_attempt > 1:
                greenthread.sleep(1)

            session.VBD.unplug(vbd_ref, this_vm_ref)
            return
        except session.XenAPI.Failure as exc:
            err = len(exc.details) > 0 and exc.details[0]
            if err == 'DEVICE_ALREADY_DETACHED':
                LOG.info('VBD %s already detached', vbd_ref)
                return
            elif _should_retry_unplug_vbd(err):
                LOG.info('VBD %(vbd_ref)s unplug failed with "%(err)s", '
                         'attempt %(num_attempt)d/%(max_attempts)d',
                         {'vbd_ref': vbd_ref, 'num_attempt': num_attempt,
                          'max_attempts': max_attempts, 'err': err})
            else:
                LOG.exception(_('Unable to unplug VBD'))
                raise exception.StorageError(
                    reason=_('Unable to unplug VBD %s') % vbd_ref)

    raise exception.StorageError(
        reason=_('Reached maximum number of retries '
                 'trying to unplug VBD %s') % vbd_ref)

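# NOTE (editorial illustration, not part of the upstream module): with an
# assumed CONF.xenserver.num_vbd_unplug_retries of 3, unplug_vbd() makes up
# to 4 attempts: the first immediately, then up to three more with a
# one-second sleep before each. It raises StorageError only when the error
# is not in the retryable list above or when all attempts are exhausted.
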
def destroy_vbd(session, vbd_ref):
    """Destroy VBD from host database."""
    try:
        session.call_xenapi('VBD.destroy', vbd_ref)
    except session.XenAPI.Failure:
        LOG.exception(_('Unable to destroy VBD'))
        raise exception.StorageError(
            reason=_('Unable to destroy VBD %s') % vbd_ref)


def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk',
               read_only=False, bootable=False, osvol=False,
               empty=False, unpluggable=True):
    """Create a VBD record and return its reference."""
    vbd_rec = {}
    vbd_rec['VM'] = vm_ref
    if vdi_ref is None:
        vdi_ref = 'OpaqueRef:NULL'
    vbd_rec['VDI'] = vdi_ref
    vbd_rec['userdevice'] = str(userdevice)
    vbd_rec['bootable'] = bootable
    vbd_rec['mode'] = 'RO' if read_only else 'RW'
    vbd_rec['type'] = vbd_type
    vbd_rec['unpluggable'] = unpluggable
    vbd_rec['empty'] = empty
    vbd_rec['other_config'] = {}
    vbd_rec['qos_algorithm_type'] = ''
    vbd_rec['qos_algorithm_params'] = {}
    vbd_rec['qos_supported_algorithms'] = []
    LOG.debug('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,'
              ' VDI %(vdi_ref)s ... ',
              {'vbd_type': vbd_type, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref})
    vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
    LOG.debug('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
              ' VDI %(vdi_ref)s.',
              {'vbd_ref': vbd_ref, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref})
    if osvol:
        # set osvol=True in other-config to indicate this is an
        # attached nova (or cinder) volume
        session.call_xenapi('VBD.add_to_other_config',
                            vbd_ref, 'osvol', 'True')
    return vbd_ref

def attach_cd(session, vm_ref, vdi_ref, userdevice):
    """Create an empty VBD, then insert the CD."""
    vbd_ref = create_vbd(session, vm_ref, None, userdevice,
                         vbd_type='cd', read_only=True,
                         bootable=True, empty=True,
                         unpluggable=False)
    session.call_xenapi('VBD.insert', vbd_ref, vdi_ref)
    return vbd_ref


def destroy_vdi(session, vdi_ref):
    try:
        session.call_xenapi('VDI.destroy', vdi_ref)
    except session.XenAPI.Failure:
        LOG.debug("Unable to destroy VDI %s", vdi_ref, exc_info=True)
        msg = _("Unable to destroy VDI %s") % vdi_ref
        LOG.error(msg)
        raise exception.StorageError(reason=msg)


def safe_destroy_vdis(session, vdi_refs):
    """Tries to destroy the requested VDIs, but ignores any errors."""
    for vdi_ref in vdi_refs:
        try:
            destroy_vdi(session, vdi_ref)
        except exception.StorageError:
            LOG.debug("Ignoring error while destroying VDI: %s", vdi_ref)


def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
               read_only=False):
    """Create a VDI record and return its reference."""
    vdi_ref = session.call_xenapi(
        "VDI.create",
        {'name_label': name_label,
         'name_description': disk_type,
         'SR': sr_ref,
         'virtual_size': str(virtual_size),
         'type': 'User',
         'sharable': False,
         'read_only': read_only,
         'xenstore_data': {},
         'other_config': _get_vdi_other_config(disk_type, instance=instance),
         'sm_config': {},
         'tags': []})
    LOG.debug('Created VDI %(vdi_ref)s (%(name_label)s,'
              ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.',
              {'vdi_ref': vdi_ref, 'name_label': name_label,
               'virtual_size': virtual_size, 'read_only': read_only,
               'sr_ref': sr_ref})
    return vdi_ref

@contextlib.contextmanager
def _dummy_vm(session, instance, vdi_ref):
    """This creates a temporary VM so that we can snapshot a VDI.

    VDIs can't be snapshotted directly since the API expects a `vm_ref`. To
    work around this, we need to create a temporary VM and then map the VDI to
    the VM using a temporary VBD.
    """
    name_label = "dummy"
    vm_ref = create_vm(session, instance, name_label, None, None)
    try:
        vbd_ref = create_vbd(session, vm_ref, vdi_ref, 'autodetect',
                             read_only=True)
        try:
            yield vm_ref
        finally:
            try:
                destroy_vbd(session, vbd_ref)
            except exception.StorageError:
                # destroy_vbd() will log error
                pass
    finally:
        destroy_vm(session, instance, vm_ref)


def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
    """Copy a VDI and return the new VDI's reference.

    This function differs from the XenAPI `VDI.copy` call in that the copy is
    atomic and isolated, meaning we don't see half-downloaded images. It
    accomplishes this by copying the VDIs into a temporary directory and then
    atomically renaming them into the SR when the copy is completed.

    The correct long term solution is to fix `VDI.copy` so that it is atomic
    and isolated.
    """
    with _dummy_vm(session, instance, vdi_to_copy_ref) as vm_ref:
        label = "snapshot"
        with snapshot_attached_here(
                session, instance, vm_ref, label) as vdi_uuids:
            sr_path = get_sr_path(session, sr_ref=sr_ref)
            uuid_stack = _make_uuid_stack()
            imported_vhds = disk_management.safe_copy_vdis(
                session, sr_path, vdi_uuids, uuid_stack)

    root_uuid = imported_vhds['root']['uuid']

    # rescan to discover new VHDs
    scan_default_sr(session)
    vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid)
    return vdi_ref

def _clone_vdi(session, vdi_to_clone_ref):
    """Clone a VDI and return the new VDI's reference."""
    vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref)
    LOG.debug('Cloned VDI %(vdi_ref)s from VDI '
              '%(vdi_to_clone_ref)s',
              {'vdi_ref': vdi_ref, 'vdi_to_clone_ref': vdi_to_clone_ref})
    return vdi_ref


def _get_vdi_other_config(disk_type, instance=None):
    """Return metadata to store in VDI's other_config attribute.

    `nova_instance_uuid` is used to associate a VDI with a particular instance
    so that, if it becomes orphaned from an unclean shutdown of a
    compute-worker, we can safely detach it.
    """
    other_config = {'nova_disk_type': disk_type}

    # create_vdi may be called simply while creating a volume
    # hence information about instance may or may not be present
    if instance:
        other_config['nova_instance_uuid'] = instance['uuid']

    return other_config


def _set_vdi_info(session, vdi_ref, vdi_type, name_label, description,
                  instance):
    existing_other_config = session.call_xenapi('VDI.get_other_config',
                                                vdi_ref)

    session.call_xenapi('VDI.set_name_label', vdi_ref, name_label)
    session.call_xenapi('VDI.set_name_description', vdi_ref, description)

    other_config = _get_vdi_other_config(vdi_type, instance=instance)
    for key, value in other_config.items():
        if key not in existing_other_config:
            session.call_xenapi(
                "VDI.add_to_other_config", vdi_ref, key, value)

def _vm_get_vbd_refs(session, vm_ref):
    return session.call_xenapi("VM.get_VBDs", vm_ref)


def _vbd_get_rec(session, vbd_ref):
    return session.call_xenapi("VBD.get_record", vbd_ref)


def _vdi_get_rec(session, vdi_ref):
    return session.call_xenapi("VDI.get_record", vdi_ref)


def _vdi_get_uuid(session, vdi_ref):
    return session.call_xenapi("VDI.get_uuid", vdi_ref)


def _vdi_snapshot(session, vdi_ref):
    return session.call_xenapi("VDI.snapshot", vdi_ref, {})


def get_vdi_for_vm_safely(session, vm_ref, userdevice='0'):
    """Retrieves the primary VDI for a VM."""
    vbd_refs = _vm_get_vbd_refs(session, vm_ref)
    for vbd_ref in vbd_refs:
        vbd_rec = _vbd_get_rec(session, vbd_ref)
        # Convention dictates the primary VDI will be userdevice 0
        if vbd_rec['userdevice'] == userdevice:
            vdi_ref = vbd_rec['VDI']
            vdi_rec = _vdi_get_rec(session, vdi_ref)
            return vdi_ref, vdi_rec
    raise exception.NovaException(_("No primary VDI found for %s") % vm_ref)


def get_all_vdi_uuids_for_vm(session, vm_ref, min_userdevice=0):
    vbd_refs = _vm_get_vbd_refs(session, vm_ref)
    for vbd_ref in vbd_refs:
        vbd_rec = _vbd_get_rec(session, vbd_ref)
        if int(vbd_rec['userdevice']) >= min_userdevice:
            vdi_ref = vbd_rec['VDI']
            yield _vdi_get_uuid(session, vdi_ref)

def _try_strip_base_mirror_from_vdi(session, vdi_ref):
    try:
        session.call_xenapi("VDI.remove_from_sm_config", vdi_ref,
                            "base_mirror")
    except session.XenAPI.Failure:
        LOG.debug("Error while removing sm_config", exc_info=True)


def strip_base_mirror_from_vdis(session, vm_ref):
    # NOTE(johngarbutt) part of workaround for XenServer bug CA-98606
    vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
    for vbd_ref in vbd_refs:
        vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
        _try_strip_base_mirror_from_vdi(session, vdi_ref)


def _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref):
    possible_snapshot_parents = vdi_uuid_chain[1:]

    if len(possible_snapshot_parents) == 0:
        LOG.debug("No VHD chain.", instance=instance)
        return

    snapshot_uuids = _child_vhds(session, sr_ref, possible_snapshot_parents,
                                 old_snapshots_only=True)
    number_of_snapshots = len(snapshot_uuids)

    if number_of_snapshots <= 0:
        LOG.debug("No snapshots to remove.", instance=instance)
        return

    vdi_refs = [session.VDI.get_by_uuid(vdi_uuid)
                for vdi_uuid in snapshot_uuids]
    safe_destroy_vdis(session, vdi_refs)

    # ensure garbage collector has been run
    _scan_sr(session, sr_ref)

    LOG.info("Deleted %s snapshots.", number_of_snapshots, instance=instance)


def remove_old_snapshots(session, instance, vm_ref):
    """See if there is a snapshot present that should be removed."""
    LOG.debug("Starting remove_old_snapshots for VM", instance=instance)
    vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
    chain = _walk_vdi_chain(session, vm_vdi_rec['uuid'])
    vdi_uuid_chain = [vdi_rec['uuid'] for vdi_rec in chain]
    sr_ref = vm_vdi_rec["SR"]
    _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref)

@contextlib.contextmanager
def snapshot_attached_here(session, instance, vm_ref, label, userdevice='0',
                           post_snapshot_callback=None):
    # impl method allows easier patching for tests
    return _snapshot_attached_here_impl(session, instance, vm_ref, label,
                                        userdevice, post_snapshot_callback)


def _snapshot_attached_here_impl(session, instance, vm_ref, label, userdevice,
                                 post_snapshot_callback):
    """Snapshot the root disk only. Return a list of uuids for the vhds
    in the chain.
    """
    LOG.debug("Starting snapshot for VM", instance=instance)

    # Memorize the VDI chain so we can poll for coalesce
    vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref,
                                                   userdevice)
    chain = _walk_vdi_chain(session, vm_vdi_rec['uuid'])
    vdi_uuid_chain = [vdi_rec['uuid'] for vdi_rec in chain]
    sr_ref = vm_vdi_rec["SR"]

    # clean up after any interrupted snapshot attempts
    _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref)

    snapshot_ref = _vdi_snapshot(session, vm_vdi_ref)
    if post_snapshot_callback is not None:
        post_snapshot_callback(task_state=task_states.IMAGE_PENDING_UPLOAD)
    try:
        # When the VDI snapshot is taken a new parent is introduced.
        # If we have taken a snapshot before, the new parent can be coalesced.
        # We need to wait for this to happen before trying to copy the chain.
        _wait_for_vhd_coalesce(session, instance, sr_ref, vm_vdi_ref,
                               vdi_uuid_chain)

        snapshot_uuid = _vdi_get_uuid(session, snapshot_ref)
        chain = _walk_vdi_chain(session, snapshot_uuid)
        vdi_uuids = [vdi_rec['uuid'] for vdi_rec in chain]
        yield vdi_uuids
    finally:
        safe_destroy_vdis(session, [snapshot_ref])
        # TODO(johngarbut): we need to check the snapshot has been coalesced
        # now that its associated VDI has been deleted.

def get_sr_path(session, sr_ref=None):
    """Return the path to our storage repository.

    This is used when we're dealing with VHDs directly, either by taking
    snapshots or by restoring an image in the DISK_VHD format.
    """
    if sr_ref is None:
        sr_ref = safe_find_sr(session)
    pbd_rec = session.call_xenapi("PBD.get_all_records_where",
                                  'field "host"="%s" and '
                                  'field "SR"="%s"' %
                                  (session.host_ref, sr_ref))

    # NOTE(bobball): There can only be one PBD for a host/SR pair, but path is
    # not always present - older versions of XS do not set it.
    pbd_ref = list(pbd_rec.keys())[0]
    device_config = pbd_rec[pbd_ref]['device_config']
    if 'path' in device_config:
        return device_config['path']

    sr_rec = session.call_xenapi("SR.get_record", sr_ref)
    sr_uuid = sr_rec["uuid"]
    if sr_rec["type"] not in ["ext", "nfs"]:
        raise exception.NovaException(
            _("Only file-based SRs (ext/NFS) are supported by this feature."
              " SR %(uuid)s is of type %(type)s") %
            {"uuid": sr_uuid, "type": sr_rec["type"]})

    return os.path.join(CONF.xenserver.sr_base_path, sr_uuid)

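# NOTE (editorial illustration, not part of the upstream module): when the
# PBD carries no explicit 'path' in its device_config, the fallback path is
# built from CONF.xenserver.sr_base_path (assumed default '/var/run/sr-mount'),
# e.g. '/var/run/sr-mount/<sr_uuid>' for a local ext SR.
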
def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False,
                          keep_days=0):
    """Destroy used or unused cached images.

    A cached image that is being used by at least one VM is said to be 'used'.

    In the case of an 'unused' image, the cached image will be the only
    descendant of the base-copy. So when we delete the cached-image, the
    refcount will drop to zero and XenServer will automatically destroy the
    base-copy for us.

    The default behavior of this function is to destroy only 'unused' cached
    images. To destroy all cached images, use the `all_cached=True` kwarg.

    `keep_days` selects images based on when they were cached: only images
    that were cached at least `keep_days` ago are deleted.
    """
    cached_images = _find_cached_images(session, sr_ref)
    destroyed = set()

    def destroy_cached_vdi(vdi_uuid, vdi_ref):
        LOG.debug("Destroying cached VDI '%s'", vdi_uuid)
        if not dry_run:
            destroy_vdi(session, vdi_ref)
        destroyed.add(vdi_uuid)

    for vdi_dict in cached_images.values():
        vdi_ref = vdi_dict['vdi_ref']
        vdi_uuid = session.call_xenapi('VDI.get_uuid', vdi_ref)

        if all_cached:
            destroy_cached_vdi(vdi_uuid, vdi_ref)
            continue

        # Unused-Only: Search for siblings

        # Chain length greater than two implies a VM must be holding a ref to
        # the base-copy (otherwise it would have coalesced), so consider this
        # cached image used.
        chain = list(_walk_vdi_chain(session, vdi_uuid))
        if len(chain) > 2:
            continue
        elif len(chain) == 2:
            # Siblings imply cached image is used
            root_vdi_rec = chain[-1]
            children = _child_vhds(session, sr_ref, [root_vdi_rec['uuid']])
            if len(children) > 1:
                continue

        cached_time = vdi_dict.get('cached_time')
        if cached_time is not None:
            if (int(time.time()) - int(cached_time)) / (3600 * 24) \
                    >= keep_days:
                destroy_cached_vdi(vdi_uuid, vdi_ref)
        else:
            LOG.debug("VDI %s can't be destroyed because its cached time is "
                      "not specified", vdi_uuid)

    return destroyed

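# NOTE (editorial illustration, not part of the upstream module): a worked
# example of the keep_days arithmetic above. With keep_days=7, an unused
# image whose 'cached-time' is 10 days old gives
# (now - cached_time) / (3600 * 24) ~= 10, and 10 >= 7, so the VDI is
# destroyed (or only recorded in `destroyed` when dry_run=True); an image
# cached 3 days ago is kept.
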
def _find_cached_images(session, sr_ref):
    """Return a dict {image_id: {'vdi_ref': vdi_ref, 'cached_time':
    cached_time}} representing all cached images.
    """
    cached_images = {}
    for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
        try:
            image_id = vdi_rec['other_config']['image-id']
        except KeyError:
            continue

        cached_time = vdi_rec['other_config'].get('cached-time')
        cached_images[image_id] = {'vdi_ref': vdi_ref,
                                   'cached_time': cached_time}

    return cached_images


def _find_cached_image(session, image_id, sr_ref):
    """Returns the vdi-ref of the cached image."""
    name_label = _get_image_vdi_label(image_id)
    # For non-pooled hosts, the name_label alone is enough to find a cached
    # image. When in a xapi pool, each host may have a cached image with the
    # same name, and the xapi query would return all of them. Add the SR to
    # the filter to ensure only one image is returned.
    expr = ('field "name__label"="%(name_label)s" and field "SR" = "%(SR)s"'
            % {'name_label': name_label, 'SR': sr_ref})
    recs = session.call_xenapi("VDI.get_all_records_where", expr)
    number_found = len(recs)
    if number_found > 0:
        if number_found > 1:
            LOG.warning("Multiple base images for image: %s", image_id)
        return list(recs.keys())[0]

def _get_resize_func_name(session):
    brand = session.product_brand
    version = session.product_version

    # To maintain backwards compatibility. All recent versions
    # should use VDI.resize
    if version and brand:
        xcp = brand == 'XCP'
        r1_2_or_above = (version[0] == 1 and version[1] > 1) or version[0] > 1

        xenserver = brand == 'XenServer'
        r6_or_above = version[0] > 5

        if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above):
            return 'VDI.resize_online'
    return 'VDI.resize'

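# NOTE (editorial illustration, not part of the upstream module): examples of
# the branch above, assuming product_version tuples. XenServer (6, 2, 0)
# resolves to 'VDI.resize', while XenServer (5, 6, 100) or XCP (1, 1, 0)
# fall back to the legacy 'VDI.resize_online' call.
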
def _vdi_get_virtual_size(session, vdi_ref):
    size = session.call_xenapi('VDI.get_virtual_size', vdi_ref)
    return int(size)


def _vdi_resize(session, vdi_ref, new_size):
    resize_func_name = _get_resize_func_name(session)
    session.call_xenapi(resize_func_name, vdi_ref, str(new_size))


def update_vdi_virtual_size(session, instance, vdi_ref, new_gb):
    virtual_size = _vdi_get_virtual_size(session, vdi_ref)
    new_disk_size = new_gb * units.Gi

    msg = ("Resizing up VDI %(vdi_ref)s from %(virtual_size)d "
           "to %(new_disk_size)d")
    LOG.debug(msg, {'vdi_ref': vdi_ref, 'virtual_size': virtual_size,
                    'new_disk_size': new_disk_size},
              instance=instance)

    if virtual_size < new_disk_size:
        # For resize up. Simple VDI resize will do the trick
        _vdi_resize(session, vdi_ref, new_disk_size)
    elif virtual_size == new_disk_size:
        LOG.debug("No need to change vdi virtual size.",
                  instance=instance)
    else:
        # NOTE(johngarbutt): we should never get here
        # but if we don't raise an exception, a user might be able to use
        # more storage than allowed by their chosen instance flavor
        msg = _("VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger "
                "than flavor size of %(new_disk_size)d bytes.")
        msg = msg % {'vdi_ref': vdi_ref, 'virtual_size': virtual_size,
                     'new_disk_size': new_disk_size}
        LOG.debug(msg, instance=instance)
        raise exception.ResizeError(reason=msg)

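# NOTE (editorial illustration, not part of the upstream module): a worked
# example of the size check above. For new_gb=20 the target is
# 20 * units.Gi = 21,474,836,480 bytes; a 10 GiB VDI is resized up to that
# value, a 20 GiB VDI is left alone, and a 25 GiB VDI raises ResizeError so
# an instance can never keep more root disk than its flavor allows.
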
def resize_disk(session, instance, vdi_ref, flavor):
    size_gb = flavor.root_gb
    if size_gb == 0:
        reason = _("Can't resize a disk to 0 GB.")
        raise exception.ResizeError(reason=reason)

    sr_ref = safe_find_sr(session)
    clone_ref = _clone_vdi(session, vdi_ref)

    try:
        # Resize partition and filesystem down
        _auto_configure_disk(session, clone_ref, size_gb)

        # Create new VDI
        vdi_size = size_gb * units.Gi
        # NOTE(johannes): No resizing allowed for rescue instances, so
        # using instance['name'] is safe here
        new_ref = create_vdi(session, sr_ref, instance, instance['name'],
                             'root', vdi_size)

        new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)

        # Manually copy contents over
        virtual_size = size_gb * units.Gi
        _copy_partition(session, clone_ref, new_ref, 1, virtual_size)

        return new_ref, new_uuid
    finally:
        destroy_vdi(session, clone_ref)

def _auto_configure_disk(session, vdi_ref, new_gb):
    """Partition and resize FS to match the size specified by
    flavors.root_gb.

    This is a fail-safe to prevent accidentally destroying data on a disk
    erroneously marked as auto_disk_config=True.

    The criteria for allowing resize are:
    1. 'auto_disk_config' must be true for the instance (and image).
       (If we've made it here, then auto_disk_config=True.)
    2. The disk must have only one partition.
    3. The file-system on the one partition must be ext3 or ext4.
    4. We are not running in independent_compute mode (checked by
       vdi_attached)
    """
    if new_gb == 0:
        LOG.debug("Skipping auto_config_disk as destination size is 0GB")
        return

    with vdi_attached(session, vdi_ref, read_only=False) as dev:
        partitions = _get_partitions(dev)

        if len(partitions) != 1:
            reason = _('Disk must have only one partition.')
            raise exception.CannotResizeDisk(reason=reason)

        num, start, old_sectors, fstype, name, flags = partitions[0]
        if fstype not in ('ext3', 'ext4'):
            reason = _('Disk contains a filesystem '
                       'we are unable to resize: %s')
            raise exception.CannotResizeDisk(reason=(reason % fstype))

        if num != 1:
            reason = _('The only partition should be partition 1.')
            raise exception.CannotResizeDisk(reason=reason)

        new_sectors = new_gb * units.Gi / SECTOR_SIZE
        _resize_part_and_fs(dev, start, old_sectors, new_sectors, flags)


def try_auto_configure_disk(session, vdi_ref, new_gb):
    if CONF.xenserver.independent_compute:
        raise exception.NotSupportedWithOption(
            operation='auto_configure_disk',
            option='CONF.xenserver.independent_compute')
    try:
        _auto_configure_disk(session, vdi_ref, new_gb)
    except exception.CannotResizeDisk as e:
        LOG.warning('Attempted auto_configure_disk failed because: %s', e)

def _make_partition(session, dev, partition_start, partition_end):
    dev_path = utils.make_dev_path(dev)

    # NOTE(bobball) If this runs in Dom0, parted will error trying
    # to re-read the partition table and return a generic error
    nova.privsep.fs.create_partition_table(
        dev_path, 'msdos', check_exit_code=not session.is_local_connection)
    nova.privsep.fs.create_partition(
        dev_path, 'primary', partition_start, partition_end,
        check_exit_code=not session.is_local_connection)

    partition_path = utils.make_dev_path(dev, partition=1)

    if session.is_local_connection:
        # Need to refresh the partitions
        nova.privsep.fs.create_device_maps(dev_path)

        # Sometimes the partition gets created under /dev/mapper, depending
        # on the setup in dom0.
        mapper_path = '/dev/mapper/%s' % os.path.basename(partition_path)
        if os.path.exists(mapper_path):
            return mapper_path

    return partition_path

def _generate_disk(session, instance, vm_ref, userdevice, name_label,
                   disk_type, size_mb, fs_type, fs_label=None):
    """Steps to programmatically generate a disk:

    1. Create VDI of desired size
    2. Attach VDI to Dom0
    3. Create partition
       3.a. If the partition type is supported by dom0 (currently ext3,
            swap) then create it while the VDI is attached to dom0.
       3.b. If the partition type is not supported by dom0, attach the
            VDI to the domU and create there.
            This split between DomU/Dom0 ensures that we can create most
            VM types in the "isolated compute" case.
    4. Create VBD between instance VM and VDI
    """
    # 1. Create VDI
    sr_ref = safe_find_sr(session)
    ONE_MEG = units.Mi
    virtual_size = size_mb * ONE_MEG
    vdi_ref = create_vdi(session, sr_ref, instance, name_label, disk_type,
                         virtual_size)

    try:
        # 2. Attach VDI to Dom0 (VBD hotplug)
        mkfs_in_dom0 = fs_type in ('ext3', 'swap')
        with vdi_attached(session, vdi_ref, read_only=False,
                          dom0=True) as dev:
            # 3. Create partition
            partition_start = "2048"
            partition_end = "-"

            disk_management.make_partition(session, dev, partition_start,
                                           partition_end)

            if mkfs_in_dom0:
                disk_management.mkfs(session, dev, '1', fs_type, fs_label)

        # 3.a. dom0 does not support nfs/ext4, so may have to mkfs in domU
        if fs_type is not None and not mkfs_in_dom0:
            with vdi_attached(session, vdi_ref, read_only=False) as dev:
                partition_path = utils.make_dev_path(dev, partition=1)
                nova.privsep.fs.mkfs(fs_type, partition_path, fs_label)

        # 4. Create VBD between instance VM and VDI
        if vm_ref:
            create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False)
    except Exception:
        with excutils.save_and_reraise_exception():
            msg = "Error while generating disk number: %s" % userdevice
            LOG.debug(msg, instance=instance, exc_info=True)
            safe_destroy_vdis(session, [vdi_ref])

    return vdi_ref

def generate_swap(session, instance, vm_ref, userdevice, name_label, swap_mb):
    # NOTE(jk0): We use a FAT32 filesystem for the Windows swap
    # partition because that is what parted supports.
    is_windows = instance['os_type'] == "windows"
    fs_type = "vfat" if is_windows else "swap"

    if CONF.xenserver.independent_compute and fs_type != "swap":
        raise exception.NotSupportedWithOption(
            operation='swap drives for Windows',
            option='CONF.xenserver.independent_compute')

    _generate_disk(session, instance, vm_ref, userdevice, name_label,
                   'swap', swap_mb, fs_type)


def get_ephemeral_disk_sizes(total_size_gb):
    if not total_size_gb:
        return

    max_size_gb = 2000
    if total_size_gb % 1024 == 0:
        max_size_gb = 1024

    left_to_allocate = total_size_gb
    while left_to_allocate > 0:
        size_gb = min(max_size_gb, left_to_allocate)
        yield size_gb
        left_to_allocate -= size_gb

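# NOTE (editorial illustration, not part of the upstream module): how the
# generator above splits a large ephemeral request into chunks that fit a
# VHD (generate_ephemeral notes a VHD tops out at 2043GB):
#
#     >>> list(get_ephemeral_disk_sizes(3000))   # not a multiple of 1024
#     [2000, 1000]
#     >>> list(get_ephemeral_disk_sizes(2048))   # multiple of 1024
#     [1024, 1024]
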
def generate_single_ephemeral(session, instance, vm_ref, userdevice,
                              size_gb, instance_name_label=None):
    if instance_name_label is None:
        instance_name_label = instance["name"]

    name_label = "%s ephemeral" % instance_name_label
    fs_label = "ephemeral"
    # TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it
    # here
    label_number = int(userdevice) - 4
    if label_number > 0:
        name_label = "%s (%d)" % (name_label, label_number)
        fs_label = "ephemeral%d" % label_number

    return _generate_disk(session, instance, vm_ref, str(userdevice),
                          name_label, 'ephemeral', size_gb * 1024,
                          CONF.default_ephemeral_format, fs_label)

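# NOTE (editorial illustration, not part of the upstream module): the first
# ephemeral disk (userdevice 4) keeps the plain labels "<name> ephemeral" and
# "ephemeral"; userdevice 5 becomes "<name> ephemeral (1)" / "ephemeral1",
# userdevice 6 becomes "(2)" / "ephemeral2", and so on.
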
def generate_ephemeral(session, instance, vm_ref, first_userdevice,
                       instance_name_label, total_size_gb):
    # NOTE(johngarbutt): max possible size of a VHD disk is 2043GB
    sizes = get_ephemeral_disk_sizes(total_size_gb)
    first_userdevice = int(first_userdevice)

    vdi_refs = []
    try:
        for userdevice, size_gb in enumerate(sizes, start=first_userdevice):
            ref = generate_single_ephemeral(session, instance, vm_ref,
                                            userdevice, size_gb,
                                            instance_name_label)
            vdi_refs.append(ref)
    except Exception as exc:
        with excutils.save_and_reraise_exception():
            LOG.debug("Error when generating ephemeral disk. "
                      "Device: %(userdevice)s Size GB: %(size_gb)s "
                      "Error: %(exc)s", {
                          'userdevice': userdevice,
                          'size_gb': size_gb,
                          'exc': exc})
            safe_destroy_vdis(session, vdi_refs)


def generate_iso_blank_root_disk(session, instance, vm_ref, userdevice,
                                 name_label, size_gb):
    _generate_disk(session, instance, vm_ref, userdevice, name_label,
                   'user', size_gb * 1024, CONF.default_ephemeral_format)

def generate_configdrive(session, context, instance, vm_ref, userdevice,
                         network_info, admin_password=None, files=None):
    sr_ref = safe_find_sr(session)
    vdi_ref = create_vdi(session, sr_ref, instance, 'config-2',
                         'configdrive', configdrive.CONFIGDRIVESIZE_BYTES)
    try:
        extra_md = {}
        if admin_password:
            extra_md['admin_pass'] = admin_password
        inst_md = instance_metadata.InstanceMetadata(
            instance, content=files, extra_md=extra_md,
            network_info=network_info, request_context=context)
        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            with utils.tempdir() as tmp_path:
                tmp_file = os.path.join(tmp_path, 'configdrive')
                cdb.make_drive(tmp_file)
                # XAPI can only import a VHD file, so convert to vhd format
                vhd_file = '%s.vhd' % tmp_file
                with compute_utils.disk_ops_semaphore:
                    processutils.execute('qemu-img', 'convert', '-Ovpc',
                                         tmp_file, vhd_file)
                vhd_file_size = os.path.getsize(vhd_file)
                with open(vhd_file) as file_obj:
                    volume_utils.stream_to_vdi(
                        session, instance, 'vhd', file_obj,
                        vhd_file_size, vdi_ref)
        create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False,
                   read_only=True)
    except Exception:
        with excutils.save_and_reraise_exception():
            msg = "Error while generating config drive"
            LOG.debug(msg, instance=instance, exc_info=True)
            safe_destroy_vdis(session, [vdi_ref])

def _create_kernel_image(context, session, instance, name_label, image_id,
                         image_type):
    """Creates kernel/ramdisk file from the image stored in the cache.

    If the image is not present in the cache, fetch it from glance.

    Returns: A list of dictionaries that describe VDIs
    """
    if CONF.xenserver.independent_compute:
        raise exception.NotSupportedWithOption(
            operation='Non-VHD images',
            option='CONF.xenserver.independent_compute')
    filename = ""
    if CONF.xenserver.cache_images != 'none':
        new_image_uuid = uuidutils.generate_uuid()
        filename = disk_management.create_kernel_ramdisk(
            session, image_id, new_image_uuid)

    if filename == "":
        return _fetch_disk_image(context, session, instance, name_label,
                                 image_id, image_type)
    else:
        vdi_type = ImageType.to_string(image_type)
        return {vdi_type: dict(uuid=None, file=filename)}


def create_kernel_and_ramdisk(context, session, instance, name_label):
    kernel_file = None
    ramdisk_file = None
    if instance['kernel_id']:
        vdis = _create_kernel_image(context, session,
                                    instance, name_label,
                                    instance['kernel_id'],
                                    ImageType.KERNEL)
        kernel_file = vdis['kernel'].get('file')
    if instance['ramdisk_id']:
        vdis = _create_kernel_image(context, session,
                                    instance, name_label,
                                    instance['ramdisk_id'],
                                    ImageType.RAMDISK)
        ramdisk_file = vdis['ramdisk'].get('file')
    return kernel_file, ramdisk_file


def destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
    if kernel or ramdisk:
        LOG.debug("Removing kernel/ramdisk files from dom0",
                  instance=instance)
        disk_management.remove_kernel_ramdisk(
            session, kernel_file=kernel, ramdisk_file=ramdisk)


def _get_image_vdi_label(image_id):
    return 'Glance Image %s' % image_id

def _create_cached_image(context, session, instance, name_label,
                         image_id, image_type, image_handler):
    sr_ref = safe_find_sr(session)
    sr_type = session.call_xenapi('SR.get_type', sr_ref)

    if CONF.use_cow_images and sr_type != "ext":
        LOG.warning("Fast cloning is only supported on default local SR "
                    "of type ext. SR on this system was found to be of "
                    "type %s. Ignoring the cow flag.", sr_type)

    @utils.synchronized('xenapi-image-cache' + image_id)
    def _create_cached_image_impl(context, session, instance, name_label,
                                  image_id, image_type, sr_ref):
        cache_vdi_ref = _find_cached_image(session, image_id, sr_ref)
        downloaded = False
        if cache_vdi_ref is None:
            downloaded = True
            vdis = _fetch_image(context, session, instance, name_label,
                                image_id, image_type, image_handler)

            cache_vdi_ref = session.call_xenapi(
                'VDI.get_by_uuid', vdis['root']['uuid'])
            session.call_xenapi('VDI.set_name_label', cache_vdi_ref,
                                _get_image_vdi_label(image_id))
            session.call_xenapi('VDI.set_name_description', cache_vdi_ref,
                                'root')
            session.call_xenapi('VDI.add_to_other_config',
                                cache_vdi_ref, 'image-id', str(image_id))
            session.call_xenapi('VDI.add_to_other_config',
                                cache_vdi_ref,
                                'cached-time',
                                str(int(time.time())))

        if CONF.use_cow_images:
            new_vdi_ref = _clone_vdi(session, cache_vdi_ref)
        elif sr_type == 'ext':
            new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance,
                                         cache_vdi_ref)
        else:
            new_vdi_ref = session.call_xenapi("VDI.copy", cache_vdi_ref,
                                              sr_ref)

        session.call_xenapi('VDI.set_name_label', new_vdi_ref, '')
        session.call_xenapi('VDI.set_name_description', new_vdi_ref, '')
        session.call_xenapi('VDI.remove_from_other_config',
                            new_vdi_ref, 'image-id')

        vdi_uuid = session.call_xenapi('VDI.get_uuid', new_vdi_ref)
        return downloaded, vdi_uuid

    downloaded, vdi_uuid = _create_cached_image_impl(context, session,
                                                     instance, name_label,
                                                     image_id, image_type,
                                                     sr_ref)

    vdis = {}
    vdi_type = ImageType.get_role(image_type)
    vdis[vdi_type] = dict(uuid=vdi_uuid, file=None)
    return downloaded, vdis



def create_image(context, session, instance, name_label, image_id,
                 image_type, image_handler):
    """Creates VDI from the image stored in the local cache. If the image
    is not present in the cache, it streams it from glance.

    Returns: A list of dictionaries that describe VDIs
    """
    cache_images = CONF.xenserver.cache_images.lower()

    # Determine if the image is cacheable
    if image_type == ImageType.DISK_ISO:
        cache = False
    elif cache_images == 'all':
        cache = True
    elif cache_images == 'some':
        sys_meta = utils.instance_sys_meta(instance)
        try:
            cache = strutils.bool_from_string(sys_meta['image_cache_in_nova'])
        except KeyError:
            cache = False
    elif cache_images == 'none':
        cache = False
    else:
        LOG.warning("Unrecognized cache_images value '%s', defaulting to "
                    "True", CONF.xenserver.cache_images)
        cache = True

    # Fetch (and cache) the image
    start_time = timeutils.utcnow()
    if cache:
        downloaded, vdis = _create_cached_image(context, session, instance,
                                                name_label, image_id,
                                                image_type, image_handler)
    else:
        vdis = _fetch_image(context, session, instance, name_label,
                            image_id, image_type, image_handler)
        downloaded = True
    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    LOG.info("Image creation data, cacheable: %(cache)s, "
             "downloaded: %(downloaded)s duration: %(duration).2f secs "
             "for image %(image_id)s",
             {'image_id': image_id, 'cache': cache, 'downloaded': downloaded,
              'duration': duration})

    for vdi_type, vdi in vdis.items():
        vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi['uuid'])
        _set_vdi_info(session, vdi_ref, vdi_type, name_label, vdi_type,
                      instance)

    return vdis
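

# NOTE(editor): illustrative sketch only; nothing in Nova calls it. It shows
# the shape of the dict create_image() returns for a VHD root disk. The
# context/session/instance/image_handler arguments are assumptions supplied
# by the caller (e.g. the spawn path).
def _example_create_root_disk(context, session, instance, image_handler):
    vdis = create_image(context, session, instance, instance['name'],
                        instance['image_ref'], ImageType.DISK_VHD,
                        image_handler)
    # e.g. {'root': {'uuid': '<vdi uuid>', 'file': None}}
    return vdis['root']['uuid']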


def _fetch_image(context, session, instance, name_label, image_id, image_type,
                 image_handler):
    """Fetch image from glance based on image type.

    Returns: A single filename if image_type is KERNEL or RAMDISK
             A list of dictionaries that describe VDIs, otherwise
    """
    if image_type == ImageType.DISK_VHD:
        vdis = _fetch_vhd_image(context, session, instance, image_id,
                                image_handler)
    else:
        if CONF.xenserver.independent_compute:
            raise exception.NotSupportedWithOption(
                operation='Non-VHD images',
                option='CONF.xenserver.independent_compute')
        vdis = _fetch_disk_image(context, session, instance, name_label,
                                 image_id, image_type)

    for vdi_type, vdi in vdis.items():
        vdi_uuid = vdi['uuid']
        LOG.debug("Fetched VDIs of type '%(vdi_type)s' with UUID"
                  " '%(vdi_uuid)s'",
                  {'vdi_type': vdi_type, 'vdi_uuid': vdi_uuid},
                  instance=instance)

    return vdis


def _make_uuid_stack():
    # NOTE(sirp): The XenAPI plugins run under Python 2.4
    # which does not have the `uuid` module. To work around this,
    # we generate the uuids here (under Python 2.6+) and
    # pass them as arguments
    return [uuidutils.generate_uuid() for i in range(MAX_VDI_CHAIN_SIZE)]


def get_compression_level():
    level = CONF.xenserver.image_compression_level
    if level is not None and (level < 1 or level > 9):
        LOG.warning("Invalid value '%d' for image_compression_level", level)
        return None
    return level


def _fetch_vhd_image(context, session, instance, image_id, image_handler):
    """Tell glance to download an image and put the VHDs into the SR

    Returns: A list of dictionaries that describe VDIs
    """
    LOG.debug("Asking xapi to fetch vhd image %s", image_id,
              instance=instance)

    vdis = image_handler.download_image(
        context, session, instance, image_id)

    # Ensure we can see the import VHDs as VDIs
    scan_default_sr(session)

    vdi_uuid = vdis['root']['uuid']
    try:
        _check_vdi_size(context, session, instance, vdi_uuid)
    except Exception:
        with excutils.save_and_reraise_exception():
            msg = "Error while checking vdi size"
            LOG.debug(msg, instance=instance, exc_info=True)
            for vdi in vdis.values():
                vdi_uuid = vdi['uuid']
                vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
                safe_destroy_vdis(session, [vdi_ref])

    return vdis


def _get_vdi_chain_size(session, vdi_uuid):
    """Compute the total size of a VDI chain, starting with the specified
    VDI UUID.

    This will walk the VDI chain to the root, add the size of each VDI into
    the total.
    """
    size_bytes = 0
    for vdi_rec in _walk_vdi_chain(session, vdi_uuid):
        cur_vdi_uuid = vdi_rec['uuid']
        vdi_size_bytes = int(vdi_rec['physical_utilisation'])
        LOG.debug('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
                  '%(vdi_size_bytes)d',
                  {'cur_vdi_uuid': cur_vdi_uuid,
                   'vdi_size_bytes': vdi_size_bytes})
        size_bytes += vdi_size_bytes
    return size_bytes


def _check_vdi_size(context, session, instance, vdi_uuid):
    flavor = instance.get_flavor()
    allowed_size = (flavor.root_gb +
                    VHD_SIZE_CHECK_FUDGE_FACTOR_GB) * units.Gi
    if not flavor.root_gb:
        # root_gb=0 indicates that we're disabling size checks
        return

    size = _get_vdi_chain_size(session, vdi_uuid)
    if size > allowed_size:
        LOG.error("Image size %(size)d exceeded flavor "
                  "allowed size %(allowed_size)d",
                  {'size': size, 'allowed_size': allowed_size},
                  instance=instance)

        # NOTE: size is already expressed in bytes, so it must not be
        # multiplied by units.Gi again here.
        raise exception.FlavorDiskSmallerThanImage(
            flavor_size=(flavor.root_gb * units.Gi),
            image_size=size)
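

# NOTE(editor): worked example of the check above, with illustrative numbers:
# for flavor.root_gb = 20 and a 10 GB fudge factor, allowed_size is
# (20 + 10) * units.Gi = 32212254720 bytes, so a VDI chain whose summed
# physical_utilisation came to, say, 40 * units.Gi would raise
# FlavorDiskSmallerThanImage. The actual fudge factor is whatever
# VHD_SIZE_CHECK_FUDGE_FACTOR_GB is set to earlier in this module.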


def _fetch_disk_image(context, session, instance, name_label, image_id,
                      image_type):
    """Fetch the image from Glance

    NOTE:
    Unlike _fetch_vhd_image, this method does not use the Glance
    plugin; instead, it streams the disks through domU to the VDI
    directly.

    Returns: A single filename if image_type is KERNEL_RAMDISK
             A list of dictionaries that describe VDIs, otherwise
    """
    # FIXME(sirp): Since the Glance plugin seems to be required for the
    # VHD disk, it may be worth using the plugin for both VHD and RAW and
    # DISK restores
    image_type_str = ImageType.to_string(image_type)
    LOG.debug("Fetching image %(image_id)s, type %(image_type_str)s",
              {'image_id': image_id, 'image_type_str': image_type_str},
              instance=instance)

    if image_type == ImageType.DISK_ISO:
        sr_ref = _safe_find_iso_sr(session)
    else:
        sr_ref = safe_find_sr(session)

    glance_image = image_utils.GlanceImage(context, image_id)
    if glance_image.is_raw_tgz():
        image = image_utils.RawTGZImage(glance_image)
    else:
        image = image_utils.RawImage(glance_image)

    virtual_size = image.get_size()
    vdi_size = virtual_size
    LOG.debug("Size for image %(image_id)s: %(virtual_size)d",
              {'image_id': image_id, 'virtual_size': virtual_size},
              instance=instance)
    if image_type == ImageType.DISK:
        # Make room for MBR.
        vdi_size += MBR_SIZE_BYTES
    elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and
          vdi_size > CONF.xenserver.max_kernel_ramdisk_size):
        max_size = CONF.xenserver.max_kernel_ramdisk_size
        raise exception.NovaException(
            _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
              "max %(max_size)d bytes") %
            {'vdi_size': vdi_size, 'max_size': max_size})

    vdi_ref = create_vdi(session, sr_ref, instance, name_label,
                         image_type_str, vdi_size)
    # From this point we have a VDI on Xen host;
    # If anything goes wrong, we need to remember its uuid.
    try:
        filename = None
        vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)

        with vdi_attached(session, vdi_ref, read_only=False) as dev:
            _stream_disk(
                session, image.stream_to, image_type, virtual_size, dev)

        if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
            # We need to invoke a plugin for copying the
            # content of the VDI into the proper path.
            LOG.debug("Copying VDI %s to /boot/guest on dom0",
                      vdi_ref, instance=instance)

            cache_image = None
            if CONF.xenserver.cache_images != 'none':
                cache_image = image_id
            filename = disk_management.copy_vdi(session, vdi_ref, vdi_size,
                                                image_id=cache_image)

            # Remove the VDI as it is not needed anymore.
            destroy_vdi(session, vdi_ref)
            LOG.debug("Kernel/Ramdisk VDI %s destroyed", vdi_ref,
                      instance=instance)
            vdi_role = ImageType.get_role(image_type)
            return {vdi_role: dict(uuid=None, file=filename)}
        else:
            vdi_role = ImageType.get_role(image_type)
            return {vdi_role: dict(uuid=vdi_uuid, file=None)}
    except (session.XenAPI.Failure, IOError, OSError) as e:
        # We look for XenAPI and OS failures.
        LOG.exception(_("Failed to fetch glance image"), instance=instance)
        e.args = e.args + ([dict(type=ImageType.to_string(image_type),
                                 uuid=vdi_uuid,
                                 file=filename)],)
        raise


def determine_disk_image_type(image_meta):
    """Disk Image Types are used to determine where the kernel will reside
    within an image. To figure out which type we're dealing with, we use
    the following rules:

    1. If we're using Glance, we can use the image_type field to
       determine the image_type

    2. If we're not using Glance, then we need to deduce this based on
       whether a kernel_id is specified.
    """
    if not image_meta.obj_attr_is_set("disk_format"):
        return None

    disk_format_map = {
        'ami': ImageType.DISK,
        'aki': ImageType.KERNEL,
        'ari': ImageType.RAMDISK,
        'raw': ImageType.DISK_RAW,
        'vhd': ImageType.DISK_VHD,
        'iso': ImageType.DISK_ISO,
    }

    try:
        image_type = disk_format_map[image_meta.disk_format]
    except KeyError:
        raise exception.InvalidDiskFormat(disk_format=image_meta.disk_format)

    LOG.debug("Detected %(type)s format for image %(image)s",
              {'type': ImageType.to_string(image_type),
               'image': image_meta})

    return image_type
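

# NOTE(editor): for example, a Glance image uploaded with disk_format 'vhd'
# maps to ImageType.DISK_VHD above, 'iso' maps to ImageType.DISK_ISO, and a
# format missing from the map (such as 'qcow2') raises InvalidDiskFormat.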


def determine_vm_mode(instance, disk_image_type):
    current_mode = obj_fields.VMMode.get_from_instance(instance)
    if (current_mode == obj_fields.VMMode.XEN or
            current_mode == obj_fields.VMMode.HVM):
        return current_mode

    os_type = instance['os_type']
    if os_type == "linux":
        return obj_fields.VMMode.XEN
    if os_type == "windows":
        return obj_fields.VMMode.HVM

    # disk_image_type specific default for backwards compatibility
    if disk_image_type == ImageType.DISK_VHD or \
            disk_image_type == ImageType.DISK:
        return obj_fields.VMMode.XEN

    # most images run OK as HVM
    return obj_fields.VMMode.HVM
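

# NOTE(editor): illustrative outcomes of the function above: when neither the
# instance nor the image forces a vm_mode, a 'windows' os_type yields
# VMMode.HVM, a 'linux' os_type yields VMMode.XEN, and an instance with no
# os_type booted from a VHD or AMI-style image falls back to VMMode.XEN for
# backwards compatibility.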


def set_vm_name_label(session, vm_ref, name_label):
    session.call_xenapi("VM.set_name_label", vm_ref, name_label)


def list_vms(session):
    vms = session.call_xenapi("VM.get_all_records_where",
                              'field "is_control_domain"="false" and '
                              'field "is_a_template"="false" and '
                              'field "resident_on"="%s"' % session.host_ref)
    for vm_ref in vms.keys():
        yield vm_ref, vms[vm_ref]


def lookup_vm_vdis(session, vm_ref):
    """Look for the VDIs that are attached to the VM."""
    # Firstly we get the VBDs, then the VDIs.
    # TODO(Armando): do we leave the read-only devices?
    vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
    vdi_refs = []
    if vbd_refs:
        for vbd_ref in vbd_refs:
            try:
                vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
                # Test valid VDI
                vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
                LOG.debug('VDI %s is still available', vdi_uuid)
                vbd_other_config = session.call_xenapi("VBD.get_other_config",
                                                       vbd_ref)
                if not vbd_other_config.get('osvol'):
                    # This is not an attached volume
                    vdi_refs.append(vdi_ref)
            except session.XenAPI.Failure:
                LOG.exception(_('Look for the VDIs failed'))
    return vdi_refs


def lookup(session, name_label, check_rescue=False):
    """Look the instance up and return it if available.

    :param:check_rescue: if True will return the 'name'-rescue vm if it
                         exists, instead of just 'name'
    """
    if check_rescue:
        result = lookup(session, name_label + '-rescue', False)
        if result:
            return result
    vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
    n = len(vm_refs)
    if n == 0:
        return None
    elif n > 1:
        raise exception.InstanceExists(name=name_label)
    else:
        return vm_refs[0]
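

# NOTE(editor): illustrative sketch only; nothing in Nova calls it. It
# contrasts lookup(), which returns None for a missing VM, with
# vm_ref_or_raise() further down in this module, which raises
# InstanceNotFound instead. The session and name label here are assumptions.
def _example_lookup(session):
    vm_ref = lookup(session, 'instance-00000001', check_rescue=True)
    if vm_ref is None:
        LOG.debug("No VM (or '-rescue' VM) found with that name label")
    return vm_ref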


def preconfigure_instance(session, instance, vdi_ref, network_info):
    """Makes alterations to the image before launching as part of spawn.
    """
    key = str(instance['key_data'])
    net = netutils.get_injected_network_template(network_info)
    metadata = instance['metadata']

    # As mounting the image VDI is expensive, we only want to do it once,
    # if at all, so determine whether it's required first, and then do
    # everything
    mount_required = key or net or metadata
    if not mount_required:
        return

    with vdi_attached(session, vdi_ref, read_only=False) as dev:
        _mounted_processing(dev, key, net, metadata)


def lookup_kernel_ramdisk(session, vm):
    vm_rec = session.call_xenapi("VM.get_record", vm)
    if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
        return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk'])
    else:
        return (None, None)


def is_snapshot(session, vm):
    vm_rec = session.call_xenapi("VM.get_record", vm)
    if 'is_a_template' in vm_rec and 'is_a_snapshot' in vm_rec:
        return vm_rec['is_a_template'] and vm_rec['is_a_snapshot']
    else:
        return False


def get_power_state(session, vm_ref):
    xapi_state = session.call_xenapi("VM.get_power_state", vm_ref)
    return XENAPI_POWER_STATE[xapi_state]


def _vm_query_data_source(session, *args):
    """We're getting diagnostics stats from the RRDs which are updated every
    5 seconds. It means that diagnostics information may be incomplete during
    first 5 seconds of VM life. In such cases method ``query_data_source()``
    may raise a ``XenAPI.Failure`` exception or may return a `NaN` value.
    """
    try:
        value = session.VM.query_data_source(*args)
    except session.XenAPI.Failure:
        return None

    if math.isnan(value):
        return None
    return value


def compile_info(session, vm_ref):
    """Fill record with VM status information."""
    return hardware.InstanceInfo(state=get_power_state(session, vm_ref))


def compile_instance_diagnostics(session, instance, vm_ref):
    xen_power_state = session.VM.get_power_state(vm_ref)
    vm_power_state = power_state.STATE_MAP[XENAPI_POWER_STATE[xen_power_state]]
    config_drive = configdrive.required_by(instance)

    diags = diagnostics.Diagnostics(state=vm_power_state,
                                    driver='xenapi',
                                    config_drive=config_drive)
    _add_cpu_usage(session, vm_ref, diags)
    _add_nic_usage(session, vm_ref, diags)
    _add_disk_usage(session, vm_ref, diags)
    _add_memory_usage(session, vm_ref, diags)

    return diags


def _add_cpu_usage(session, vm_ref, diag_obj):
    cpu_num = int(session.VM.get_VCPUs_max(vm_ref))
    for cpu_num in range(0, cpu_num):
        utilisation = _vm_query_data_source(session, vm_ref,
                                            "cpu%d" % cpu_num)
        if utilisation is not None:
            utilisation *= 100
        diag_obj.add_cpu(id=cpu_num, utilisation=utilisation)


def _add_nic_usage(session, vm_ref, diag_obj):
    vif_refs = session.VM.get_VIFs(vm_ref)
    for vif_ref in vif_refs:
        vif_rec = session.VIF.get_record(vif_ref)
        rx_rate = _vm_query_data_source(session, vm_ref,
                                        "vif_%s_rx" % vif_rec['device'])
        tx_rate = _vm_query_data_source(session, vm_ref,
                                        "vif_%s_tx" % vif_rec['device'])
        diag_obj.add_nic(mac_address=vif_rec['MAC'],
                         rx_rate=rx_rate,
                         tx_rate=tx_rate)


def _add_disk_usage(session, vm_ref, diag_obj):
    vbd_refs = session.VM.get_VBDs(vm_ref)
    for vbd_ref in vbd_refs:
        vbd_rec = session.VBD.get_record(vbd_ref)
        read_bytes = _vm_query_data_source(session, vm_ref,
                                           "vbd_%s_read" % vbd_rec['device'])
        write_bytes = _vm_query_data_source(
            session, vm_ref, "vbd_%s_write" % vbd_rec['device'])
        diag_obj.add_disk(read_bytes=read_bytes, write_bytes=write_bytes)


def _add_memory_usage(session, vm_ref, diag_obj):
    total_mem = _vm_query_data_source(session, vm_ref, "memory")
    free_mem = _vm_query_data_source(session, vm_ref, "memory_internal_free")
    used_mem = None
    if total_mem is not None:
        # total_mem provided from XenServer is in Bytes. Converting it to MB.
        total_mem /= units.Mi
        if free_mem is not None:
            # free_mem provided from XenServer is in KB. Converting it to MB.
            used_mem = total_mem - free_mem / units.Ki
    diag_obj.memory_details = diagnostics.MemoryDiagnostics(
        maximum=total_mem, used=used_mem)
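

# NOTE(editor): worked example of the conversions above, with illustrative
# numbers: total_mem = 2147483648 bytes becomes 2048 MB after dividing by
# units.Mi, free_mem = 524288 KB becomes 512 MB after dividing by units.Ki,
# so used_mem is reported as 1536 MB.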


def compile_diagnostics(vm_rec):
    """Compile VM diagnostics data."""
    try:
        keys = []
        diags = {}
        vm_uuid = vm_rec["uuid"]
        xml = _get_rrd(_get_rrd_server(), vm_uuid)
        if xml:
            rrd = minidom.parseString(xml)
            for i, node in enumerate(rrd.firstChild.childNodes):
                # Provide the last update of the information
                if node.localName == 'lastupdate':
                    diags['last_update'] = node.firstChild.data

                # Create a list of the diagnostic keys (in their order)
                if node.localName == 'ds':
                    ref = node.childNodes
                    # Name and Value
                    if len(ref) > 6:
                        keys.append(ref[0].firstChild.data)

                # Read the last row of the first RRA to get the latest info
                if node.localName == 'rra':
                    rows = node.childNodes[4].childNodes
                    last_row = rows[rows.length - 1].childNodes
                    for j, value in enumerate(last_row):
                        diags[keys[j]] = value.firstChild.data
                    break

        return diags
    except expat.ExpatError as e:
        LOG.exception(_('Unable to parse rrd of %s'), e)
        return {"Unable to retrieve diagnostics": e}


def fetch_bandwidth(session):
    bw = host_network.fetch_all_bandwidth(session)
    return bw


def _scan_sr(session, sr_ref=None, max_attempts=4):
    if sr_ref:
        # NOTE(johngarbutt) xenapi will collapse any duplicate requests
        # for SR.scan if there is already a scan in progress.
        # However, we don't want that, because the scan may have started
        # before we modified the underlying VHDs on disk through a plugin.
        # Using our own mutex will reduce cases where our periodic SR scan
        # in host.update_status starts racing the sr.scan after a plugin call.
        @utils.synchronized('sr-scan-' + sr_ref)
        def do_scan(sr_ref):
            LOG.debug("Scanning SR %s", sr_ref)

            attempt = 1
            while True:
                try:
                    return session.call_xenapi('SR.scan', sr_ref)
                except session.XenAPI.Failure as exc:
                    with excutils.save_and_reraise_exception() as ctxt:
                        if exc.details[0] == 'SR_BACKEND_FAILURE_40':
                            if attempt < max_attempts:
                                ctxt.reraise = False
                                LOG.warning("Retry SR scan due to error: %s",
                                            exc)
                                greenthread.sleep(2 ** attempt)
                                attempt += 1

        do_scan(sr_ref)
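

# NOTE(editor): worked example of the retry behaviour above: with the default
# max_attempts=4, a scan that keeps failing with SR_BACKEND_FAILURE_40 is
# retried after sleeps of 2, 4 and 8 seconds, and the fourth failure is
# re-raised to the caller.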


def scan_default_sr(session):
    """Looks for the system default SR and triggers a re-scan."""
    sr_ref = safe_find_sr(session)
    _scan_sr(session, sr_ref)
    return sr_ref


def safe_find_sr(session):
    """Same as _find_sr except raises a NotFound exception if SR cannot be
    determined
    """
    sr_ref = _find_sr(session)
    if sr_ref is None:
        raise exception.StorageRepositoryNotFound()
    return sr_ref


def _find_sr(session):
    """Return the storage repository to hold VM images."""
    host = session.host_ref
    try:
        tokens = CONF.xenserver.sr_matching_filter.split(':')
        filter_criteria = tokens[0]
        filter_pattern = tokens[1]
    except IndexError:
        # oops, flag is invalid
        LOG.warning("Flag sr_matching_filter '%s' does not respect "
                    "formatting convention",
                    CONF.xenserver.sr_matching_filter)
        return None

    if filter_criteria == 'other-config':
        key, value = filter_pattern.split('=', 1)
        for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
            if not (key in sr_rec['other_config'] and
                    sr_rec['other_config'][key] == value):
                continue
            for pbd_ref in sr_rec['PBDs']:
                pbd_rec = session.get_rec('PBD', pbd_ref)
                if pbd_rec and pbd_rec['host'] == host:
                    return sr_ref
    elif filter_criteria == 'default-sr' and filter_pattern == 'true':
        pool_ref = session.call_xenapi('pool.get_all')[0]
        sr_ref = session.call_xenapi('pool.get_default_SR', pool_ref)
        if sr_ref:
            return sr_ref
    # No SR found!
    LOG.error("XenAPI is unable to find a Storage Repository to "
              "install guest instances on. Please check your "
              "configuration (e.g. set a default SR for the pool) "
              "and/or configure the flag 'sr_matching_filter'.")
    return None
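

# NOTE(editor): illustrative sr_matching_filter values for the parsing above:
# 'default-sr:true' selects the pool's default SR, while a value such as
# 'other-config:i18n-key=local-storage' selects the SR whose other-config
# contains that key/value pair and which has a PBD plugged on this host.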


def _safe_find_iso_sr(session):
    """Same as _find_iso_sr except raises a NotFound exception if SR
    cannot be determined
    """
    sr_ref = _find_iso_sr(session)
    if sr_ref is None:
        raise exception.NotFound(_('Cannot find SR of content-type ISO'))
    return sr_ref


def _find_iso_sr(session):
    """Return the storage repository to hold ISO images."""
    host = session.host_ref
    for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
        LOG.debug("ISO: looking at SR %s", sr_rec)
        if not sr_rec['content_type'] == 'iso':
            LOG.debug("ISO: not iso content")
            continue
        if 'i18n-key' not in sr_rec['other_config']:
            LOG.debug("ISO: iso content_type, no 'i18n-key' key")
            continue
        if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso':
            LOG.debug("ISO: iso content_type, i18n-key value not "
                      "'local-storage-iso'")
            continue

        LOG.debug("ISO: SR MATCHing our criteria")
        for pbd_ref in sr_rec['PBDs']:
            LOG.debug("ISO: ISO, looking to see if it is host local")
            pbd_rec = session.get_rec('PBD', pbd_ref)
            if not pbd_rec:
                LOG.debug("ISO: PBD %s disappeared", pbd_ref)
                continue
            pbd_rec_host = pbd_rec['host']
            LOG.debug("ISO: PBD matching, want %(pbd_rec)s, have %(host)s",
                      {'pbd_rec': pbd_rec, 'host': host})
            if pbd_rec_host == host:
                LOG.debug("ISO: SR with local PBD")
                return sr_ref
    return None


def _get_rrd_server():
    """Return server's scheme and address to use for retrieving RRD XMLs."""
    xs_url = urlparse.urlparse(CONF.xenserver.connection_url)
    return [xs_url.scheme, xs_url.netloc]


def _get_rrd(server, vm_uuid):
    """Return the VM RRD XML as a string."""
    try:
        xml = urlrequest.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % (
            server[0],
            CONF.xenserver.connection_username,
            CONF.xenserver.connection_password,
            server[1],
            vm_uuid))
        return xml.read()
    except IOError:
        LOG.exception(_('Unable to obtain RRD XML for VM %(vm_uuid)s with '
                        'server details: %(server)s.'),
                      {'vm_uuid': vm_uuid, 'server': server})
        return None


def _get_all_vdis_in_sr(session, sr_ref):
    for vdi_ref in session.call_xenapi('SR.get_VDIs', sr_ref):
        vdi_rec = session.get_rec('VDI', vdi_ref)
        # Check to make sure the record still exists. It may have
        # been deleted between the get_all call and get_rec call
        if vdi_rec:
            yield vdi_ref, vdi_rec


def get_instance_vdis_for_sr(session, vm_ref, sr_ref):
    """Return opaqueRef for all the vdis which live on sr."""
    for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref):
        try:
            vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
            if sr_ref == session.call_xenapi('VDI.get_SR', vdi_ref):
                yield vdi_ref
        except session.XenAPI.Failure:
            continue


def _get_vhd_parent_uuid(session, vdi_ref, vdi_rec=None):
    if vdi_rec is None:
        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)

    if 'vhd-parent' not in vdi_rec['sm_config']:
        return None

    parent_uuid = vdi_rec['sm_config']['vhd-parent']
    vdi_uuid = vdi_rec['uuid']
    LOG.debug('VHD %(vdi_uuid)s has parent %(parent_uuid)s',
              {'vdi_uuid': vdi_uuid, 'parent_uuid': parent_uuid})
    return parent_uuid


def _walk_vdi_chain(session, vdi_uuid):
    """Yield vdi_recs for each element in a VDI chain."""
    scan_default_sr(session)
    while True:
        vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
        yield vdi_rec

        parent_uuid = _get_vhd_parent_uuid(session, vdi_ref, vdi_rec)
        if not parent_uuid:
            break

        vdi_uuid = parent_uuid


def _is_vdi_a_snapshot(vdi_rec):
    """Ensure VDI is a snapshot, and not cached image."""
    is_a_snapshot = vdi_rec['is_a_snapshot']
    image_id = vdi_rec['other_config'].get('image-id')
    return is_a_snapshot and not image_id


def _child_vhds(session, sr_ref, vdi_uuid_list, old_snapshots_only=False):
    """Return the immediate children of a given VHD.

    This is not recursive, only the immediate children are returned.
    """
    children = set()
    for ref, rec in _get_all_vdis_in_sr(session, sr_ref):
        rec_uuid = rec['uuid']

        if rec_uuid in vdi_uuid_list:
            continue

        parent_uuid = _get_vhd_parent_uuid(session, ref, rec)
        if parent_uuid not in vdi_uuid_list:
            continue

        if old_snapshots_only and not _is_vdi_a_snapshot(rec):
            continue

        children.add(rec_uuid)

    return list(children)


def _count_children(session, parent_vdi_uuid, sr_ref):
    # Search for any other vdi which has the same parent as us to work out
    # whether we have siblings and therefore if coalesce is possible
    children = 0
    for _ref, rec in _get_all_vdis_in_sr(session, sr_ref):
        if (rec['sm_config'].get('vhd-parent') == parent_vdi_uuid):
            children = children + 1
    return children


def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
                           vdi_uuid_list):
    """Spin until the parent VHD is coalesced into one of the VDIs in the list

    vdi_uuid_list is a list of acceptable final parent VDIs for vdi_ref; once
    the parent of vdi_ref is in vdi_uuid_chain we consider the coalesce over.

    The use case is there are any number of VDIs between those in
    vdi_uuid_list and vdi_ref that we expect to be coalesced, but any of those
    in vdi_uuid_list may also be coalesced (except the base UUID - which is
    guaranteed to remain)
    """
    # If the base disk was a leaf node, there will be no coalescing
    # after a VDI snapshot.
    if len(vdi_uuid_list) == 1:
        LOG.debug("Old chain is single VHD, coalesce not possible.",
                  instance=instance)
        return

    # If the parent of the original disk has other children,
    # there will be no coalesce because of the VDI snapshot.
    # For example, the first snapshot for an instance that has been
    # spawned from a cached image, will not coalesce, because of this rule.
    parent_vdi_uuid = vdi_uuid_list[1]
    if _count_children(session, parent_vdi_uuid, sr_ref) > 1:
        LOG.debug("Parent has other children, coalesce is unlikely.",
                  instance=instance)
        return

    # When the VDI snapshot is taken, a new parent is created.
    # Assuming it is not one of the above cases, that new parent
    # can be coalesced, so we need to wait for that to happen.
    max_attempts = CONF.xenserver.vhd_coalesce_max_attempts
    # Remove the leaf node from list, to get possible good parents
    # when the coalesce has completed.
    # It's possible that other coalesce operations happen, so we need
    # to consider the full chain, rather than just the most recent parent.
    good_parent_uuids = vdi_uuid_list[1:]
    for i in range(max_attempts):
        # NOTE(sirp): This rescan is necessary to ensure the VM's `sm_config`
        # matches the underlying VHDs.
        # This can also kick XenServer into performing a pending coalesce.
        _scan_sr(session, sr_ref)
        parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
        if parent_uuid and (parent_uuid not in good_parent_uuids):
            LOG.debug("Parent %(parent_uuid)s not yet in parent list"
                      " %(good_parent_uuids)s, waiting for coalesce...",
                      {'parent_uuid': parent_uuid,
                       'good_parent_uuids': good_parent_uuids},
                      instance=instance)
        else:
            LOG.debug("Coalesce detected, because parent is: %s", parent_uuid,
                      instance=instance)
            return

        greenthread.sleep(CONF.xenserver.vhd_coalesce_poll_interval)

    msg = (_("VHD coalesce attempts exceeded (%d)"
             ", giving up...") % max_attempts)
    raise exception.NovaException(msg)
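

# NOTE(editor): illustrative chain for the wait above: after a snapshot the
# VHDs may look like  base <- new_parent <- leaf(vdi_ref), with
# vdi_uuid_list = [leaf_uuid, base_uuid]. The loop rescans the SR until the
# parent of the leaf is again one of the "good" UUIDs (here: base_uuid),
# i.e. until new_parent has been coalesced away by XenServer.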


def _wait_for_device(session, dev, dom0, max_seconds):
    """Wait for device node to appear."""
    dev_path = utils.make_dev_path(dev)
    found_path = None
    if dom0:
        found_path = disk_management.wait_for_dev(session, dev_path,
                                                  max_seconds)
    else:
        for i in range(0, max_seconds):
            if os.path.exists(dev_path):
                found_path = dev_path
                break
            time.sleep(1)

    if found_path is None:
        raise exception.StorageError(
            reason=_('Timeout waiting for device %s to be created') % dev)


def cleanup_attached_vdis(session):
    """Unplug any instance VDIs left after an unclean restart."""
    this_vm_ref = _get_this_vm_ref(session)

    vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref)
    for vbd_ref in vbd_refs:
        try:
            vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
            vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
        except session.XenAPI.Failure as e:
            if e.details[0] != 'HANDLE_INVALID':
                raise
            continue

        if 'nova_instance_uuid' in vdi_rec['other_config']:
            # Belongs to an instance and probably left over after an
            # unclean restart
            LOG.info('Disconnecting stale VDI %s from compute domU',
                     vdi_rec['uuid'])
            unplug_vbd(session, vbd_ref, this_vm_ref)
            destroy_vbd(session, vbd_ref)


@contextlib.contextmanager
def vdi_attached(session, vdi_ref, read_only=False, dom0=False):
    if dom0:
        this_vm_ref = _get_dom0_ref(session)
    else:
        # Make sure we are running as a domU.
        ensure_correct_host(session)
        this_vm_ref = _get_this_vm_ref(session)

    vbd_ref = create_vbd(session, this_vm_ref, vdi_ref, 'autodetect',
                         read_only=read_only, bootable=False)
    try:
        LOG.debug('Plugging VBD %s ... ', vbd_ref)
        session.VBD.plug(vbd_ref, this_vm_ref)
        try:
            LOG.debug('Plugging VBD %s done.', vbd_ref)
            dev = session.call_xenapi("VBD.get_device", vbd_ref)
            LOG.debug('VBD %(vbd_ref)s plugged as %(dev)s',
                      {'vbd_ref': vbd_ref, 'dev': dev})
            _wait_for_device(session, dev, dom0,
                             CONF.xenserver.block_device_creation_timeout)
            yield dev
        finally:
            # As we can not have filesystems mounted here (we cannot
            # destroy the VBD with filesystems mounted), it is not
            # useful to call sync.
            LOG.debug('Destroying VBD for VDI %s ... ', vdi_ref)
            unplug_vbd(session, vbd_ref, this_vm_ref)
    finally:
        try:
            destroy_vbd(session, vbd_ref)
        except exception.StorageError:
            # destroy_vbd() will log error
            pass
        LOG.debug('Destroying VBD for VDI %s done.', vdi_ref)


def _get_sys_hypervisor_uuid():
    with open('/sys/hypervisor/uuid') as f:
        return f.readline().strip()


def _get_dom0_ref(session):
    vms = session.call_xenapi("VM.get_all_records_where",
                              'field "domid"="0" and '
                              'field "resident_on"="%s"' %
                              session.host_ref)
    return list(vms.keys())[0]


def get_this_vm_uuid(session):
    if CONF.xenserver.independent_compute:
        LOG.error("This host has been configured with the independent "
                  "compute flag. An operation has been attempted which is "
                  "incompatible with this flag, but should have been "
                  "caught earlier. Please raise a bug against the "
                  "OpenStack Nova project")
        raise exception.NotSupportedWithOption(
            operation='uncaught operation',
            option='CONF.xenserver.independent_compute')
    if session and session.is_local_connection:
        # UUID is the control domain running on this host
        vms = session.call_xenapi("VM.get_all_records_where",
                                  'field "domid"="0" and '
                                  'field "resident_on"="%s"' %
                                  session.host_ref)
        return vms[list(vms.keys())[0]]['uuid']
    try:
        return _get_sys_hypervisor_uuid()
    except IOError:
        # Some guest kernels (without 5c13f8067745efc15f6ad0158b58d57c44104c25)
        # cannot read from uuid after a reboot. Fall back to trying xenstore.
        # See https://bugs.launchpad.net/ubuntu/+source/xen-api/+bug/1081182
        domid, _ = nova.privsep.xenapi.xenstore_read('domid')
        vm_key, _ = nova.privsep.xenapi.xenstore_read(
            '/local/domain/%s/vm' % domid.strip())
        return vm_key.strip()[4:]


def _get_this_vm_ref(session):
    return session.call_xenapi("VM.get_by_uuid", get_this_vm_uuid(session))


def _get_partitions(dev):
    return nova.privsep.fs.list_partitions(utils.make_dev_path(dev))


def _stream_disk(session, image_service_func, image_type, virtual_size, dev):
    offset = 0
    if image_type == ImageType.DISK:
        offset = MBR_SIZE_BYTES
        _write_partition(session, virtual_size, dev)

    dev_path = utils.make_dev_path(dev)

    with utils.temporary_chown(dev_path):
        with open(dev_path, 'wb') as f:
            f.seek(offset)
            image_service_func(f)


def _write_partition(session, virtual_size, dev):
    dev_path = utils.make_dev_path(dev)
    primary_first = MBR_SIZE_SECTORS
    primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1

    LOG.debug('Writing partition table %(primary_first)d %(primary_last)d'
              ' to %(dev_path)s...',
              {'primary_first': primary_first, 'primary_last': primary_last,
               'dev_path': dev_path})

    _make_partition(session, dev, "%ds" % primary_first, "%ds" % primary_last)
    LOG.debug('Writing partition table %s done.', dev_path)


def _resize_part_and_fs(dev, start, old_sectors, new_sectors, flags):
    """Resize partition and filesystem.

    This assumes we are dealing with a single primary partition and using
    ext3 or ext4.
    """
    size = new_sectors - start
    end = new_sectors - 1

    dev_path = utils.make_dev_path(dev)
    partition_path = utils.make_dev_path(dev, partition=1)

    # Replay journal if FS wasn't cleanly unmounted
    nova.privsep.fs.e2fsck(partition_path)

    # Remove ext3 journal (making it ext2)
    nova.privsep.fs.ext_journal_disable(partition_path)

    if new_sectors < old_sectors:
        # Resizing down, resize filesystem before partition resize
        try:
            nova.privsep.fs.resize2fs(partition_path, [0], size='%ds' % size)
        except processutils.ProcessExecutionError as exc:
            LOG.error(six.text_type(exc))
            reason = _("Shrinking the filesystem down with resize2fs "
                       "has failed, please check if you have "
                       "enough free space on your disk.")
            raise exception.ResizeError(reason=reason)

    nova.privsep.fs.resize_partition(dev_path, start, end,
                                     'boot' in flags.lower())

    if new_sectors > old_sectors:
        # Resizing up, resize filesystem after partition resize
        nova.privsep.fs.resize2fs(partition_path, [0])

    # Add back journal
    nova.privsep.fs.ext_journal_enable(partition_path)


def _log_progress_if_required(left, last_log_time, virtual_size):
    if timeutils.is_older_than(last_log_time, PROGRESS_INTERVAL_SECONDS):
        last_log_time = timeutils.utcnow()
        complete_pct = float(virtual_size - left) / virtual_size * 100
        LOG.debug("Sparse copy in progress, "
                  "%(complete_pct).2f%% complete. "
                  "%(left)s bytes left to copy",
                  {"complete_pct": complete_pct, "left": left})
    return last_log_time


def _sparse_copy(src_path, dst_path, virtual_size, block_size=4096):
    """Copy data, skipping long runs of zeros to create a sparse file."""
    start_time = last_log_time = timeutils.utcnow()
    EMPTY_BLOCK = '\0' * block_size
    bytes_read = 0
    skipped_bytes = 0
    left = virtual_size

    LOG.debug("Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
              "virtual_size=%(virtual_size)d block_size=%(block_size)d",
              {'src_path': src_path, 'dst_path': dst_path,
               'virtual_size': virtual_size, 'block_size': block_size})

    # NOTE(sirp): we need read/write access to the devices; since we don't
    # have the luxury of shelling out to a sudo'd command, we temporarily
    # take ownership of the devices.
    with utils.temporary_chown(src_path):
        with utils.temporary_chown(dst_path):
            with open(src_path, "r") as src:
                with open(dst_path, "w") as dst:
                    data = src.read(min(block_size, left))

                    while data:
                        if data == EMPTY_BLOCK:
                            dst.seek(block_size, os.SEEK_CUR)
                            left -= block_size
                            bytes_read += block_size
                            skipped_bytes += block_size
                        else:
                            dst.write(data)
                            data_len = len(data)
                            left -= data_len
                            bytes_read += data_len

                        if left <= 0:
                            break

                        data = src.read(min(block_size, left))
                        greenthread.sleep(0)
                        last_log_time = _log_progress_if_required(
                            left, last_log_time, virtual_size)

    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
    compression_pct = float(skipped_bytes) / bytes_read * 100

    LOG.debug("Finished sparse_copy in %(duration).2f secs, "
              "%(compression_pct).2f%% reduction in size",
              {'duration': duration, 'compression_pct': compression_pct})


def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
    # Part of disk taken up by MBR
    virtual_size -= MBR_SIZE_BYTES

    with vdi_attached(session, src_ref, read_only=True) as src:
        src_path = utils.make_dev_path(src, partition=partition)

        with vdi_attached(session, dst_ref, read_only=False) as dst:
            dst_path = utils.make_dev_path(dst, partition=partition)
            _write_partition(session, virtual_size, dst)

            if CONF.xenserver.sparse_copy:
                _sparse_copy(src_path, dst_path, virtual_size)
            else:
                num_blocks = virtual_size / SECTOR_SIZE
                nova.privsep.xenapi.block_copy(
                    src_path, dst_path, DD_BLOCKSIZE, num_blocks)


def _mount_filesystem(dev_path, mount_point):
    """mounts the device specified by dev_path in mount_point."""
    try:
        _out, err = nova.privsep.fs.mount('ext2,ext3,ext4,reiserfs',
                                          dev_path, mount_point, None)
    except processutils.ProcessExecutionError as e:
        err = six.text_type(e)
    return err


def _mounted_processing(device, key, net, metadata):
    """Callback which runs with the image VDI attached."""
    # NB: Partition 1 hardcoded
    dev_path = utils.make_dev_path(device, partition=1)
    with utils.tempdir() as tmpdir:
        # Mount only Linux filesystems, to avoid disturbing NTFS images
        err = _mount_filesystem(dev_path, tmpdir)
        if not err:
            try:
                # This try block ensures that the umount occurs
                if not agent.find_guest_agent(tmpdir):
                    # TODO(berrange) passing in a None filename is
                    # rather dubious. We shouldn't be re-implementing
                    # the mount/unmount logic here either, when the
                    # VFSLocalFS impl has direct support for mount
                    # and unmount handling if it were passed a
                    # non-None filename
                    vfs = vfsimpl.VFSLocalFS(
                        imgmodel.LocalFileImage(None, imgmodel.FORMAT_RAW),
                        imgdir=tmpdir)
                    LOG.info('Manipulating interface files directly')
                    # for xenapi, we don't 'inject' admin_password here,
                    # it's handled at instance startup time, nor do we
                    # support injecting arbitrary files here.
                    disk.inject_data_into_fs(vfs,
                                             key, net, metadata, None, None)
            finally:
                nova.privsep.fs.umount(dev_path)
        else:
            LOG.info('Failed to mount filesystem (expected for '
                     'non-linux instances): %s', err)


def ensure_correct_host(session):
    """Ensure we're connected to the host we're running on. This is the
    required configuration for anything that uses vdi_attached without
    the dom0 flag.
    """
    if session.host_checked:
        return

    this_vm_uuid = get_this_vm_uuid(session)

    try:
        session.call_xenapi('VM.get_by_uuid', this_vm_uuid)
        session.host_checked = True
    except session.XenAPI.Failure as exc:
        if exc.details[0] != 'UUID_INVALID':
            raise
        raise Exception(_('This domU must be running on the host '
                          'specified by connection_url'))


def import_all_migrated_disks(session, instance, import_root=True):
    root_vdi = None
    if import_root:
        root_vdi = _import_migrated_root_disk(session, instance)
    eph_vdis = _import_migrate_ephemeral_disks(session, instance)
    return {'root': root_vdi, 'ephemerals': eph_vdis}


def _import_migrated_root_disk(session, instance):
    chain_label = instance['uuid']
    vdi_label = instance['name']
    return _import_migrated_vhds(session, instance, chain_label, "root",
                                 vdi_label)


def _import_migrate_ephemeral_disks(session, instance):
    ephemeral_vdis = {}
    instance_uuid = instance['uuid']
    ephemeral_gb = instance.old_flavor.ephemeral_gb
    disk_sizes = get_ephemeral_disk_sizes(ephemeral_gb)
    for chain_number, _size in enumerate(disk_sizes, start=1):
        chain_label = instance_uuid + "_ephemeral_%d" % chain_number
        vdi_label = "%(name)s ephemeral (%(number)d)" % dict(
            name=instance['name'], number=chain_number)
        ephemeral_vdi = _import_migrated_vhds(session, instance,
                                              chain_label, "ephemeral",
                                              vdi_label)
        userdevice = 3 + chain_number
        ephemeral_vdis[str(userdevice)] = ephemeral_vdi
    return ephemeral_vdis


def _import_migrated_vhds(session, instance, chain_label, disk_type,
                          vdi_label):
    """Move and possibly link VHDs via the XAPI plugin."""
    imported_vhds = vm_management.receive_vhd(session, chain_label,
                                              get_sr_path(session),
                                              _make_uuid_stack())

    # Now we rescan the SR so we find the VHDs
    scan_default_sr(session)

    vdi_uuid = imported_vhds['root']['uuid']
    vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid)

    # Set name-label so we can find if we need to clean up a failed migration
    _set_vdi_info(session, vdi_ref, disk_type, vdi_label,
                  disk_type, instance)

    return {'uuid': vdi_uuid, 'ref': vdi_ref}


def migrate_vhd(session, instance, vdi_uuid, dest, sr_path, seq_num,
                ephemeral_number=0):
    LOG.debug("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d",
              {'vdi_uuid': vdi_uuid, 'seq_num': seq_num},
              instance=instance)
    chain_label = instance['uuid']
    if ephemeral_number:
        chain_label = instance['uuid'] + "_ephemeral_%d" % ephemeral_number
    try:
        vm_management.transfer_vhd(session, chain_label, dest, vdi_uuid,
                                   sr_path, seq_num)
    except session.XenAPI.Failure:
        msg = "Failed to transfer vhd to new host"
        LOG.debug(msg, instance=instance, exc_info=True)
        raise exception.MigrationError(reason=msg)


def vm_ref_or_raise(session, instance_name):
    vm_ref = lookup(session, instance_name)
    if vm_ref is None:
        raise exception.InstanceNotFound(instance_id=instance_name)
    return vm_ref


def handle_ipxe_iso(session, instance, cd_vdi, network_info):
    """iPXE ISOs are a mechanism to allow the customer to roll their own
    image.

    To use this feature, a service provider needs to configure the
    appropriate Nova flags, roll an iPXE ISO, then distribute that image
    to customers via Glance.

    NOTE: `mkisofs` is not present by default in the Dom0, so the service
    provider can either add that package manually to Dom0 or include the
    `mkisofs` binary in the image itself.
    """
    boot_menu_url = CONF.xenserver.ipxe_boot_menu_url
    if not boot_menu_url:
        LOG.warning('ipxe_boot_menu_url not set, user will have to'
                    ' enter URL manually...', instance=instance)
        return

    network_name = CONF.xenserver.ipxe_network_name
    if not network_name:
        LOG.warning('ipxe_network_name not set, user will have to'
                    ' enter IP manually...', instance=instance)
        return

    network = None
    for vif in network_info:
        if vif['network']['label'] == network_name:
            network = vif['network']
            break

    if not network:
        LOG.warning("Unable to find network matching '%(network_name)s', "
                    "user will have to enter IP manually...",
                    {'network_name': network_name}, instance=instance)
        return

    sr_path = get_sr_path(session)

    # Unpack IPv4 network info
    subnet = [sn for sn in network['subnets']
              if sn['version'] == 4][0]
    ip = subnet['ips'][0]

    ip_address = ip['address']
    netmask = network_model.get_netmask(ip, subnet)
    gateway = subnet['gateway']['address']
    dns = subnet['dns'][0]['address']

    try:
        disk_management.inject_ipxe_config(session, sr_path, cd_vdi['uuid'],
                                           boot_menu_url, ip_address, netmask,
                                           gateway, dns,
                                           CONF.xenserver.ipxe_mkisofs_cmd)
    except session.XenAPI.Failure as exc:
        _type, _method, error = exc.details[:3]
        if error == 'CommandNotFound':
            LOG.warning("ISO creation tool '%s' does not exist.",
                        CONF.xenserver.ipxe_mkisofs_cmd, instance=instance)
        else:
            raise
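

# NOTE(editor): illustrative nova.conf settings consumed by handle_ipxe_iso()
# above; the values shown are examples only, not defaults:
#   [xenserver]
#   ipxe_boot_menu_url = http://boot.example.com/menu.ipxe
#   ipxe_network_name = public
#   ipxe_mkisofs_cmd = mkisofs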


def set_other_config_pci(session, vm_ref, params):
    """Set the pci key of other-config parameter to params."""
    other_config = session.call_xenapi("VM.get_other_config", vm_ref)
    other_config['pci'] = params
    session.call_xenapi("VM.set_other_config", vm_ref, other_config)


def host_in_this_pool(session, host_ref):
    rec_dict = session.host.get_all_records()
    return host_ref in rec_dict.keys()