OpenStack Compute (Nova)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

fake.py 35KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984
  1. # Copyright 2010 United States Government as represented by the
  2. # Administrator of the National Aeronautics and Space Administration.
  3. # All Rights Reserved.
  4. # Copyright (c) 2010 Citrix Systems, Inc.
  5. #
  6. # Licensed under the Apache License, Version 2.0 (the "License"); you may
  7. # not use this file except in compliance with the License. You may obtain
  8. # a copy of the License at
  9. #
  10. # http://www.apache.org/licenses/LICENSE-2.0
  11. #
  12. # Unless required by applicable law or agreed to in writing, software
  13. # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  14. # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  15. # License for the specific language governing permissions and limitations
  16. # under the License.
  17. """
  18. A fake (in-memory) hypervisor+api.
  19. Allows nova testing w/o a hypervisor. This module also documents the
  20. semantics of real hypervisor connections.
  21. """
  22. import collections
  23. import contextlib
  24. import time
  25. import fixtures
  26. import os_resource_classes as orc
  27. from oslo_log import log as logging
  28. from oslo_serialization import jsonutils
  29. from oslo_utils import versionutils
  30. from nova.compute import power_state
  31. from nova.compute import task_states
  32. from nova.compute import vm_states
  33. import nova.conf
  34. from nova.console import type as ctype
  35. from nova import exception
  36. from nova.objects import diagnostics as diagnostics_obj
  37. from nova.objects import fields as obj_fields
  38. from nova.objects import migrate_data
  39. from nova.virt import driver
  40. from nova.virt import hardware
  41. from nova.virt import virtapi
# Global nova configuration object and module-level logger.
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
  44. class FakeInstance(object):
  45. def __init__(self, name, state, uuid):
  46. self.name = name
  47. self.state = state
  48. self.uuid = uuid
  49. def __getitem__(self, key):
  50. return getattr(self, key)
  51. class Resources(object):
  52. vcpus = 0
  53. memory_mb = 0
  54. local_gb = 0
  55. vcpus_used = 0
  56. memory_mb_used = 0
  57. local_gb_used = 0
  58. def __init__(self, vcpus=8, memory_mb=8000, local_gb=500):
  59. self.vcpus = vcpus
  60. self.memory_mb = memory_mb
  61. self.local_gb = local_gb
  62. def claim(self, vcpus=0, mem=0, disk=0):
  63. self.vcpus_used += vcpus
  64. self.memory_mb_used += mem
  65. self.local_gb_used += disk
  66. def release(self, vcpus=0, mem=0, disk=0):
  67. self.vcpus_used -= vcpus
  68. self.memory_mb_used -= mem
  69. self.local_gb_used -= disk
  70. def dump(self):
  71. return {
  72. 'vcpus': self.vcpus,
  73. 'memory_mb': self.memory_mb,
  74. 'local_gb': self.local_gb,
  75. 'vcpus_used': self.vcpus_used,
  76. 'memory_mb_used': self.memory_mb_used,
  77. 'local_gb_used': self.local_gb_used
  78. }
class FakeDriver(driver.ComputeDriver):
    """Fake hypervisor driver.

    Keeps an in-memory dict of FakeInstance objects plus a Resources
    tracker instead of talking to a real hypervisor, so compute-manager
    code paths can be exercised without one.
    """

    # These must match the traits in
    # nova.tests.functional.integrated_helpers.ProviderUsageBaseTestCase
    capabilities = {
        "has_imagecache": True,
        "supports_evacuate": True,
        "supports_migrate_to_same_host": True,
        "supports_attach_interface": True,
        "supports_tagged_attach_interface": True,
        "supports_tagged_attach_volume": True,
        "supports_extend_volume": True,
        "supports_multiattach": True,
        "supports_trusted_certs": True,
        # Supported image types
        "supports_image_type_raw": True,
        "supports_image_type_vhd": False,
    }

    # Since we don't have a real hypervisor, pretend we have lots of
    # disk and ram so this driver can be used to test large instances.
    vcpus = 1000
    memory_mb = 800000
    local_gb = 600000

    def __init__(self, virtapi, read_only=False):
        super(FakeDriver, self).__init__(virtapi)
        # uuid -> FakeInstance for every guest "running" on this driver.
        self.instances = {}
        self.resources = Resources(
            vcpus=self.vcpus,
            memory_mb=self.memory_mb,
            local_gb=self.local_gb)
        # Template merged with live resource data in get_available_resource().
        self.host_status_base = {
            'hypervisor_type': 'fake',
            'hypervisor_version': versionutils.convert_version_to_int('1.0'),
            'hypervisor_hostname': CONF.host,
            'cpu_info': {},
            'disk_available_least': 0,
            'supported_instances': [(
                obj_fields.Architecture.X86_64,
                obj_fields.HVType.FAKE,
                obj_fields.VMMode.HVM)],
            'numa_topology': None,
        }
        # instance name -> {mountpoint: connection_info} for attached volumes.
        self._mounts = {}
        # vif id -> vif for attached network interfaces.
        self._interfaces = {}
        self.active_migrations = {}
        self._host = None
        self._nodes = None

    def init_host(self, host):
        self._host = host
        # NOTE(gibi): this is unnecessary complex and fragile but this is
        # how many current functional sample tests expect the node name.
        self._nodes = (['fake-mini'] if self._host == 'compute'
                       else [self._host])

    def _set_nodes(self, nodes):
        # NOTE(gibi): this is not part of the driver interface but used
        # by our tests to customize the discovered nodes by the fake
        # driver.
        self._nodes = nodes

    def list_instances(self):
        """Return the names of all tracked instances."""
        return [self.instances[uuid].name for uuid in self.instances.keys()]

    def list_instance_uuids(self):
        """Return the uuids of all tracked instances."""
        return list(self.instances.keys())

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        pass

    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        pass

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, allocations, network_info=None,
              block_device_info=None):
        """Create a RUNNING FakeInstance and claim its flavor's resources."""
        if network_info:
            for vif in network_info:
                # simulate a real driver triggering the async network
                # allocation as it might cause an error
                vif.fixed_ips()

                # store the vif as attached so we can allow detaching it
                # later with a detach_interface() call.
                self._interfaces[vif['id']] = vif

        uuid = instance.uuid
        state = power_state.RUNNING
        flavor = instance.flavor
        self.resources.claim(
            vcpus=flavor.vcpus,
            mem=flavor.memory_mb,
            disk=flavor.root_gb)
        fake_instance = FakeInstance(instance.name, state, uuid)
        self.instances[uuid] = fake_instance

    def snapshot(self, context, instance, image_id, update_task_state):
        """Pretend to snapshot; only advances the task state."""
        if instance.uuid not in self.instances:
            raise exception.InstanceNotRunning(instance_id=instance.uuid)
        update_task_state(task_state=task_states.IMAGE_UPLOADING)

    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        pass

    def get_host_ip_addr(self):
        # Fixed fake address; not read from any real interface.
        return '192.168.0.1'

    def set_admin_password(self, instance, new_pass):
        pass

    def inject_file(self, instance, b64_path, b64_contents):
        pass

    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        pass

    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        pass

    def unrescue(self, instance, network_info):
        # Raises KeyError if the instance is not tracked.
        self.instances[instance.uuid].state = power_state.RUNNING

    def poll_rebooting_instances(self, timeout, instances):
        pass

    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None,
                                   timeout=0, retry_interval=0):
        pass

    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        # Re-register the instance as running on this (source) host.
        self.instances[instance.uuid] = FakeInstance(
            instance.name, power_state.RUNNING, instance.uuid)

    def post_live_migration_at_destination(self, context, instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        pass

    def power_off(self, instance, timeout=0, retry_interval=0):
        if instance.uuid in self.instances:
            self.instances[instance.uuid].state = power_state.SHUTDOWN
        else:
            raise exception.InstanceNotFound(instance_id=instance.uuid)

    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        if instance.uuid in self.instances:
            self.instances[instance.uuid].state = power_state.RUNNING
        else:
            raise exception.InstanceNotFound(instance_id=instance.uuid)

    def trigger_crash_dump(self, instance):
        pass

    def soft_delete(self, instance):
        pass

    def restore(self, instance):
        pass

    def pause(self, instance):
        pass

    def unpause(self, instance):
        pass

    def suspend(self, context, instance):
        pass

    def resume(self, context, instance, network_info, block_device_info=None):
        pass

    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True):
        """Drop the instance from tracking and release its resources."""
        key = instance.uuid
        if key in self.instances:
            flavor = instance.flavor
            self.resources.release(
                vcpus=flavor.vcpus,
                mem=flavor.memory_mb,
                disk=flavor.root_gb)
            del self.instances[key]
        else:
            # Destroying an untracked instance is not an error here, just
            # logged -- mirrors real drivers being idempotent on delete.
            LOG.warning("Key '%(key)s' not in instances '%(inst)s'",
                        {'key': key,
                         'inst': self.instances}, instance=instance)

    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        pass

    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach the disk to the instance at mountpoint using info."""
        instance_name = instance.name
        if instance_name not in self._mounts:
            self._mounts[instance_name] = {}
        self._mounts[instance_name][mountpoint] = connection_info

    def detach_volume(self, context, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach the disk attached to the instance."""
        try:
            del self._mounts[instance.name][mountpoint]
        except KeyError:
            # Detaching an unattached volume is a no-op.
            pass

    def swap_volume(self, context, old_connection_info, new_connection_info,
                    instance, mountpoint, resize_to):
        """Replace the disk attached to the instance."""
        instance_name = instance.name
        if instance_name not in self._mounts:
            self._mounts[instance_name] = {}
        self._mounts[instance_name][mountpoint] = new_connection_info

    def extend_volume(self, connection_info, instance, requested_size):
        """Extend the disk attached to the instance."""
        pass

    def attach_interface(self, context, instance, image_meta, vif):
        # A vif id may only be attached once.
        if vif['id'] in self._interfaces:
            raise exception.InterfaceAttachFailed(
                instance_uuid=instance.uuid)
        self._interfaces[vif['id']] = vif

    def detach_interface(self, context, instance, vif):
        try:
            del self._interfaces[vif['id']]
        except KeyError:
            raise exception.InterfaceDetachFailed(
                instance_uuid=instance.uuid)

    def get_info(self, instance, use_cache=True):
        """Return an InstanceInfo with the tracked power state."""
        if instance.uuid not in self.instances:
            raise exception.InstanceNotFound(instance_id=instance.uuid)
        i = self.instances[instance.uuid]
        return hardware.InstanceInfo(state=i.state)

    def get_diagnostics(self, instance):
        # Static canned diagnostics; values are arbitrary but stable for
        # api-sample tests.
        return {'cpu0_time': 17300000000,
                'memory': 524288,
                'vda_errors': -1,
                'vda_read': 262144,
                'vda_read_req': 112,
                'vda_write': 5778432,
                'vda_write_req': 488,
                'vnet1_rx': 2070139,
                'vnet1_rx_drop': 0,
                'vnet1_rx_errors': 0,
                'vnet1_rx_packets': 26701,
                'vnet1_tx': 140208,
                'vnet1_tx_drop': 0,
                'vnet1_tx_errors': 0,
                'vnet1_tx_packets': 662,
                }

    def get_instance_diagnostics(self, instance):
        # Same canned values as get_diagnostics() but in the versioned
        # Diagnostics object form.
        diags = diagnostics_obj.Diagnostics(
            state='running', driver='libvirt', hypervisor='kvm',
            hypervisor_os='ubuntu', uptime=46664, config_drive=True)
        diags.add_cpu(id=0, time=17300000000, utilisation=15)
        diags.add_nic(mac_address='01:23:45:67:89:ab',
                      rx_octets=2070139,
                      rx_errors=100,
                      rx_drop=200,
                      rx_packets=26701,
                      rx_rate=300,
                      tx_octets=140208,
                      tx_errors=400,
                      tx_drop=500,
                      tx_packets=662,
                      tx_rate=600)
        diags.add_disk(read_bytes=262144,
                       read_requests=112,
                       write_bytes=5778432,
                       write_requests=488,
                       errors_count=1)
        diags.memory_details = diagnostics_obj.MemoryDiagnostics(
            maximum=524288, used=0)
        return diags

    def get_all_bw_counters(self, instances):
        """Return bandwidth usage counters for each interface on each
           running VM.
        """
        bw = []
        for instance in instances:
            bw.append({'uuid': instance.uuid,
                       'mac_address': 'fa:16:3e:4c:2c:30',
                       'bw_in': 0,
                       'bw_out': 0})
        return bw

    def get_all_volume_usage(self, context, compute_host_bdms):
        """Return usage info for volumes attached to vms on
           a given host.
        """
        volusage = []
        if compute_host_bdms:
            # Only the first bdm of the first instance is reported.
            volusage = [{'volume': compute_host_bdms[0][
                'instance_bdms'][0]['volume_id'],
                         'instance': compute_host_bdms[0]['instance'],
                         'rd_bytes': 0,
                         'rd_req': 0,
                         'wr_bytes': 0,
                         'wr_req': 0}]
        return volusage

    def get_host_cpu_stats(self):
        stats = {'kernel': 5664160000000,
                 'idle': 1592705190000000,
                 'user': 26728850000000,
                 'iowait': 6121490000000}
        stats['frequency'] = 800
        return stats

    def block_stats(self, instance, disk_id):
        return [0, 0, 0, 0, None]

    def get_console_output(self, context, instance):
        return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'

    def get_vnc_console(self, context, instance):
        return ctype.ConsoleVNC(internal_access_path='FAKE',
                                host='fakevncconsole.com',
                                port=6969)

    def get_spice_console(self, context, instance):
        return ctype.ConsoleSpice(internal_access_path='FAKE',
                                  host='fakespiceconsole.com',
                                  port=6969,
                                  tlsPort=6970)

    def get_rdp_console(self, context, instance):
        return ctype.ConsoleRDP(internal_access_path='FAKE',
                                host='fakerdpconsole.com',
                                port=6969)

    def get_serial_console(self, context, instance):
        return ctype.ConsoleSerial(internal_access_path='FAKE',
                                   host='fakerdpconsole.com',
                                   port=6969)

    def get_mks_console(self, context, instance):
        return ctype.ConsoleMKS(internal_access_path='FAKE',
                                host='fakemksconsole.com',
                                port=6969)

    def get_console_pool_info(self, console_type):
        return {'address': '127.0.0.1',
                'username': 'fakeuser',
                'password': 'fakepassword'}

    def refresh_security_group_rules(self, security_group_id):
        return True

    def refresh_instance_security_rules(self, instance):
        return True

    def get_available_resource(self, nodename):
        """Updates compute manager resource info on ComputeNode table.

           Since we don't have a real hypervisor, pretend we have lots of
           disk and ram.
        """
        cpu_info = collections.OrderedDict([
            ('arch', 'x86_64'),
            ('model', 'Nehalem'),
            ('vendor', 'Intel'),
            ('features', ['pge', 'clflush']),
            ('topology', {
                'cores': 1,
                'threads': 1,
                'sockets': 4,
            }),
        ])
        # Unknown nodes report no resources.
        if nodename not in self._nodes:
            return {}

        host_status = self.host_status_base.copy()
        host_status.update(self.resources.dump())
        host_status['hypervisor_hostname'] = nodename
        host_status['host_hostname'] = nodename
        host_status['host_name_label'] = nodename
        host_status['cpu_info'] = jsonutils.dumps(cpu_info)
        return host_status

    def update_provider_tree(self, provider_tree, nodename, allocations=None):
        # NOTE(yikun): If the inv record does not exists, the allocation_ratio
        # will use the CONF.xxx_allocation_ratio value if xxx_allocation_ratio
        # is set, and fallback to use the initial_xxx_allocation_ratio
        # otherwise.
        inv = provider_tree.data(nodename).inventory
        ratios = self._get_allocation_ratios(inv)
        inventory = {
            'VCPU': {
                'total': self.vcpus,
                'min_unit': 1,
                'max_unit': self.vcpus,
                'step_size': 1,
                'allocation_ratio': ratios[orc.VCPU],
                'reserved': CONF.reserved_host_cpus,
            },
            'MEMORY_MB': {
                'total': self.memory_mb,
                'min_unit': 1,
                'max_unit': self.memory_mb,
                'step_size': 1,
                'allocation_ratio': ratios[orc.MEMORY_MB],
                'reserved': CONF.reserved_host_memory_mb,
            },
            'DISK_GB': {
                'total': self.local_gb,
                'min_unit': 1,
                'max_unit': self.local_gb,
                'step_size': 1,
                'allocation_ratio': ratios[orc.DISK_GB],
                'reserved': self._get_reserved_host_disk_gb_from_config(),
            },
        }
        provider_tree.update_inventory(nodename, inventory)

    def ensure_filtering_rules_for_instance(self, instance, network_info):
        return

    def get_instance_disk_info(self, instance, block_device_info=None):
        return

    def live_migration(self, context, instance, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        # Immediately "complete" the migration.
        post_method(context, instance, dest, block_migration,
                    migrate_data)
        return

    def live_migration_force_complete(self, instance):
        return

    def live_migration_abort(self, instance):
        return

    def cleanup_live_migration_destination_check(self, context,
                                                 dest_check_data):
        return

    def check_can_live_migrate_destination(self, context, instance,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        # Builds a libvirt-flavored migrate-data object so the common live
        # migration code paths can be exercised against the fake driver.
        data = migrate_data.LibvirtLiveMigrateData()
        data.filename = 'fake'
        data.image_type = CONF.libvirt.images_type
        data.graphics_listen_addr_vnc = CONF.vnc.server_listen
        data.graphics_listen_addr_spice = CONF.spice.server_listen
        data.serial_listen_addr = None
        # Notes(eliqiao): block_migration and disk_over_commit are not
        # nullable, so just don't set them if they are None
        if block_migration is not None:
            data.block_migration = block_migration
        if disk_over_commit is not None:
            data.disk_over_commit = disk_over_commit
        data.disk_available_mb = 100000
        data.is_shared_block_storage = True
        data.is_shared_instance_path = True
        return data

    def check_can_live_migrate_source(self, context, instance,
                                      dest_check_data,
                                      block_device_info=None):
        return dest_check_data

    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        injected_files = admin_password = allocations = None
        # Finish migration is just like spawning the guest on a destination
        # host during resize/cold migrate, so re-use the spawn() fake to
        # claim resources and track the instance on this "hypervisor".
        self.spawn(context, instance, image_meta, injected_files,
                   admin_password, allocations,
                   block_device_info=block_device_info)

    def confirm_migration(self, context, migration, instance, network_info):
        # Confirm migration cleans up the guest from the source host so just
        # destroy the guest to remove it from the list of tracked instances
        # unless it is a same-host resize.
        if migration.source_compute != migration.dest_compute:
            self.destroy(context, instance, network_info)

    def pre_live_migration(self, context, instance, block_device_info,
                           network_info, disk_info, migrate_data):
        return migrate_data

    def rollback_live_migration_at_destination(self, context, instance,
                                               network_info,
                                               block_device_info,
                                               destroy_disks=True,
                                               migrate_data=None):
        return

    def unfilter_instance(self, instance, network_info):
        return

    def _test_remove_vm(self, instance_uuid):
        """Removes the named VM, as if it crashed. For testing."""
        self.instances.pop(instance_uuid)

    def host_power_action(self, action):
        """Reboots, shuts down or powers up the host."""
        return action

    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
           guest VMs evacuation.
        """
        if not mode:
            return 'off_maintenance'
        return 'on_maintenance'

    def set_host_enabled(self, enabled):
        """Sets the specified host's ability to accept new instances."""
        if enabled:
            return 'enabled'
        return 'disabled'

    def get_volume_connector(self, instance):
        return {'ip': CONF.my_block_storage_ip,
                'initiator': 'fake',
                'host': 'fakehost'}

    def get_available_nodes(self, refresh=False):
        # Set by init_host() / _set_nodes(); None until then.
        return self._nodes

    def instance_on_disk(self, instance):
        return False

    def quiesce(self, context, instance, image_meta):
        pass

    def unquiesce(self, context, instance, image_meta):
        pass
class FakeVirtAPI(virtapi.VirtAPI):
    """VirtAPI implementation that never waits and performs no RPC."""

    @contextlib.contextmanager
    def wait_for_instance_event(self, instance, event_names, deadline=300,
                                error_callback=None):
        # NOTE(danms): Don't actually wait for any events, just
        # fall through
        yield

    def update_compute_provider_status(self, context, rp_uuid, enabled):
        # No-op: the fake virt API does not report to placement.
        pass
class SmallFakeDriver(FakeDriver):
    """FakeDriver with the small resource sizes the api samples expect."""

    # The api samples expect specific cpu memory and disk sizes. In order to
    # allow the FakeVirt driver to be used outside of the unit tests, provide
    # a separate class that has the values expected by the api samples. So
    # instead of requiring new samples every time those
    # values are adjusted allow them to be overwritten here.
    vcpus = 2
    memory_mb = 8192
    local_gb = 1028
class MediumFakeDriver(FakeDriver):
    # Fake driver that has enough resources to host more than one instance
    # but not that much that cannot be exhausted easily
    vcpus = 10
    memory_mb = 8192
    local_gb = 1028
  572. class MediumFakeDriverWithNestedCustomResources(MediumFakeDriver):
  573. # A MediumFakeDriver variant that also reports CUSTOM_MAGIC resources on
  574. # a nested resource provider
  575. vcpus = 10
  576. memory_mb = 8192
  577. local_gb = 1028
  578. child_resources = {
  579. 'CUSTOM_MAGIC': {
  580. 'total': 10,
  581. 'reserved': 0,
  582. 'min_unit': 1,
  583. 'max_unit': 10,
  584. 'step_size': 1,
  585. 'allocation_ratio': 1,
  586. }
  587. }
  588. def update_provider_tree(self, provider_tree, nodename, allocations=None):
  589. super(
  590. MediumFakeDriverWithNestedCustomResources,
  591. self).update_provider_tree(
  592. provider_tree, nodename,
  593. allocations=allocations)
  594. if not provider_tree.exists(nodename + '-child'):
  595. provider_tree.new_child(name=nodename + '-child',
  596. parent=nodename)
  597. provider_tree.update_inventory(nodename + '-child',
  598. self.child_resources)
class FakeFinishMigrationFailDriver(FakeDriver):
    """FakeDriver variant that will raise an exception from finish_migration"""

    def finish_migration(self, *args, **kwargs):
        # Always fail, so tests can exercise resize/migrate error handling.
        raise exception.VirtualInterfaceCreateException()
  603. class FakeRescheduleDriver(FakeDriver):
  604. """FakeDriver derivative that triggers a reschedule on the first spawn
  605. attempt. This is expected to only be used in tests that have more than
  606. one compute service.
  607. """
  608. # dict, keyed by instance uuid, mapped to a boolean telling us if the
  609. # instance has been rescheduled or not
  610. rescheduled = {}
  611. def spawn(self, context, instance, image_meta, injected_files,
  612. admin_password, allocations, network_info=None,
  613. block_device_info=None):
  614. if not self.rescheduled.get(instance.uuid, False):
  615. # We only reschedule on the first time something hits spawn().
  616. self.rescheduled[instance.uuid] = True
  617. raise exception.ComputeResourcesUnavailable(
  618. reason='FakeRescheduleDriver')
  619. super(FakeRescheduleDriver, self).spawn(
  620. context, instance, image_meta, injected_files,
  621. admin_password, allocations, network_info, block_device_info)
class FakeRescheduleDriverWithNestedCustomResources(
        FakeRescheduleDriver, MediumFakeDriverWithNestedCustomResources):
    # Combines reschedule-once spawn behavior with nested CUSTOM_MAGIC
    # resource reporting; adds no behavior of its own.
    pass
class FakeBuildAbortDriver(FakeDriver):
    """FakeDriver derivative that always fails on spawn() with a
    BuildAbortException so no reschedule is attempted.
    """

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, allocations, network_info=None,
              block_device_info=None):
        # BuildAbortException is terminal: the conductor will not retry
        # the build on another host.
        raise exception.BuildAbortException(
            instance_uuid=instance.uuid, reason='FakeBuildAbortDriver')
class FakeBuildAbortDriverWithNestedCustomResources(
        FakeBuildAbortDriver, MediumFakeDriverWithNestedCustomResources):
    # Combines the always-abort spawn behavior with nested CUSTOM_MAGIC
    # resource reporting; adds no behavior of its own.
    pass
  637. class FakeUnshelveSpawnFailDriver(FakeDriver):
  638. """FakeDriver derivative that always fails on spawn() with a
  639. VirtualInterfaceCreateException when unshelving an offloaded instance.
  640. """
  641. def spawn(self, context, instance, image_meta, injected_files,
  642. admin_password, allocations, network_info=None,
  643. block_device_info=None):
  644. if instance.vm_state == vm_states.SHELVED_OFFLOADED:
  645. raise exception.VirtualInterfaceCreateException(
  646. 'FakeUnshelveSpawnFailDriver')
  647. # Otherwise spawn normally during the initial build.
  648. super(FakeUnshelveSpawnFailDriver, self).spawn(
  649. context, instance, image_meta, injected_files,
  650. admin_password, allocations, network_info, block_device_info)
class FakeUnshelveSpawnFailDriverWithNestedCustomResources(
        FakeUnshelveSpawnFailDriver, MediumFakeDriverWithNestedCustomResources):
    # Combines the unshelve-failure spawn behavior with nested CUSTOM_MAGIC
    # resource reporting; adds no behavior of its own.
    pass
  654. class FakeLiveMigrateDriver(FakeDriver):
  655. """FakeDriver derivative to handle force_complete and abort calls.
  656. This module serves those tests that need to abort or force-complete
  657. the live migration, thus the live migration will never be finished
  658. without the force_complete_migration or delete_migration API calls.
  659. """
  660. def __init__(self, virtapi, read_only=False):
  661. super(FakeLiveMigrateDriver, self).__init__(virtapi, read_only)
  662. self._migrating = True
  663. self._abort_migration = True
  664. def live_migration(self, context, instance, dest,
  665. post_method, recover_method, block_migration=False,
  666. migrate_data=None):
  667. self._abort_migration = False
  668. self._migrating = True
  669. count = 0
  670. while self._migrating and count < 50:
  671. time.sleep(0.1)
  672. count = count + 1
  673. if self._abort_migration:
  674. recover_method(context, instance, dest, migrate_data,
  675. migration_status='cancelled')
  676. else:
  677. post_method(context, instance, dest, block_migration,
  678. migrate_data)
  679. def live_migration_force_complete(self, instance):
  680. self._migrating = False
  681. if instance.uuid in self.instances:
  682. del self.instances[instance.uuid]
  683. def live_migration_abort(self, instance):
  684. self._abort_migration = True
  685. self._migrating = False
  686. def post_live_migration(self, context, instance, block_device_info,
  687. migrate_data=None):
  688. if instance.uuid in self.instances:
  689. del self.instances[instance.uuid]
class FakeLiveMigrateDriverWithNestedCustomResources(
        FakeLiveMigrateDriver, MediumFakeDriverWithNestedCustomResources):
    # Combines controllable live migration with nested CUSTOM_MAGIC
    # resource reporting; adds no behavior of its own.
    pass
class FakeDriverWithPciResources(SmallFakeDriver):
    """SmallFakeDriver that reports three PFs, each with one VF, as PCI
    passthrough devices.
    """

    PCI_ADDR_PF1 = '0000:01:00.0'
    PCI_ADDR_PF1_VF1 = '0000:01:00.1'
    PCI_ADDR_PF2 = '0000:02:00.0'
    PCI_ADDR_PF2_VF1 = '0000:02:00.1'
    PCI_ADDR_PF3 = '0000:03:00.0'
    PCI_ADDR_PF3_VF1 = '0000:03:00.1'

    # NOTE(gibi): Always use this fixture along with the
    # FakeDriverWithPciResources to make the necessary configuration for the
    # driver.
    class FakeDriverWithPciResourcesConfigFixture(fixtures.Fixture):
        def setUp(self):
            super(FakeDriverWithPciResources.
                  FakeDriverWithPciResourcesConfigFixture, self).setUp()
            # Set passthrough_whitelist before the compute node starts to match
            # with the PCI devices reported by this fake driver.

            # NOTE(gibi): 0000:01:00 is tagged to physnet1 and therefore not a
            # match based on physnet to our sriov port
            # 'port_with_sriov_resource_request' as the network of that port
            # points to physnet2 with the attribute
            # 'provider:physical_network'. Nova pci handling already enforces
            # this rule.
            #
            # 0000:02:00 and 0000:03:00 are both tagged to physnet2 and
            # therefore a good match for our sriov port based on physnet.
            # Having two PFs on the same physnet will allow us to test the
            # placement allocation - physical allocation matching based on the
            # bandwidth allocation in the future.
            CONF.set_override('passthrough_whitelist', override=[
                jsonutils.dumps(
                    {
                        "address": {
                            "domain": "0000",
                            "bus": "01",
                            "slot": "00",
                            "function": ".*"},
                        "physical_network": "physnet1",
                    }
                ),
                jsonutils.dumps(
                    {
                        "address": {
                            "domain": "0000",
                            "bus": "02",
                            "slot": "00",
                            "function": ".*"},
                        "physical_network": "physnet2",
                    }
                ),
                jsonutils.dumps(
                    {
                        "address": {
                            "domain": "0000",
                            "bus": "03",
                            "slot": "00",
                            "function": ".*"},
                        "physical_network": "physnet2",
                    }
                ),
            ],
            group='pci')

    def get_available_resource(self, nodename):
        host_status = super(
            FakeDriverWithPciResources, self).get_available_resource(nodename)
        # 01:00.0 - PF - ens1
        #  |---- 01:00.1 - VF
        #
        # 02:00.0 - PF - ens2
        #  |---- 02:00.1 - VF
        #
        # 03:00.0 - PF - ens3
        #  |---- 03:00.1 - VF
        host_status['pci_passthrough_devices'] = jsonutils.dumps([
            {
                'address': self.PCI_ADDR_PF1,
                'product_id': 'fake-product_id',
                'vendor_id': 'fake-vendor_id',
                'status': 'available',
                'dev_type': 'type-PF',
                'parent_addr': None,
                'numa_node': 0,
                'label': 'fake-label',
            },
            {
                'address': self.PCI_ADDR_PF1_VF1,
                'product_id': 'fake-product_id',
                'vendor_id': 'fake-vendor_id',
                'status': 'available',
                'dev_type': 'type-VF',
                'parent_addr': self.PCI_ADDR_PF1,
                'numa_node': 0,
                'label': 'fake-label',
                "parent_ifname": "ens1",
            },
            {
                'address': self.PCI_ADDR_PF2,
                'product_id': 'fake-product_id',
                'vendor_id': 'fake-vendor_id',
                'status': 'available',
                'dev_type': 'type-PF',
                'parent_addr': None,
                'numa_node': 0,
                'label': 'fake-label',
            },
            {
                'address': self.PCI_ADDR_PF2_VF1,
                'product_id': 'fake-product_id',
                'vendor_id': 'fake-vendor_id',
                'status': 'available',
                'dev_type': 'type-VF',
                'parent_addr': self.PCI_ADDR_PF2,
                'numa_node': 0,
                'label': 'fake-label',
                "parent_ifname": "ens2",
            },
            {
                'address': self.PCI_ADDR_PF3,
                'product_id': 'fake-product_id',
                'vendor_id': 'fake-vendor_id',
                'status': 'available',
                'dev_type': 'type-PF',
                'parent_addr': None,
                'numa_node': 0,
                'label': 'fake-label',
            },
            {
                'address': self.PCI_ADDR_PF3_VF1,
                'product_id': 'fake-product_id',
                'vendor_id': 'fake-vendor_id',
                'status': 'available',
                'dev_type': 'type-VF',
                'parent_addr': self.PCI_ADDR_PF3,
                'numa_node': 0,
                'label': 'fake-label',
                "parent_ifname": "ens3",
            },
        ])
        return host_status