nova/nova/virt/fake.py

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake (in-memory) hypervisor+api.
Allows nova testing w/o a hypervisor. This module also documents the
semantics of real hypervisor connections.
"""
import collections
import contextlib

from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils

from nova.compute import arch
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.console import type as ctype
from nova import db
from nova import exception
from nova.i18n import _LW
from nova import utils
from nova.virt import diagnostics
from nova.virt import driver
from nova.virt import hardware
from nova.virt import virtapi

CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')

LOG = logging.getLogger(__name__)

_FAKE_NODES = None
def set_nodes(nodes):
"""Sets FakeDriver's node.list.
It has effect on the following methods:
get_available_nodes()
get_available_resource
To restore the change, call restore_nodes()
"""
global _FAKE_NODES
_FAKE_NODES = nodes
def restore_nodes():
"""Resets FakeDriver's node list modified by set_nodes().
Usually called from tearDown().
"""
global _FAKE_NODES
_FAKE_NODES = [CONF.host]
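# Illustrative sketch: a test that needs the driver to report specific nodes
# would pair the two helpers above (the class and node names here are
# placeholders), e.g.:
#
#     def setUp(self):
#         super(MyFakeDriverTest, self).setUp()
#         fake.set_nodes(['node-a', 'node-b'])
#         self.addCleanup(fake.restore_nodes)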
class FakeInstance(object):
def __init__(self, name, state, uuid):
self.name = name
self.state = state
self.uuid = uuid
def __getitem__(self, key):
return getattr(self, key)
class Resources(object):
vcpus = 0
memory_mb = 0
local_gb = 0
vcpus_used = 0
memory_mb_used = 0
local_gb_used = 0
def __init__(self, vcpus=8, memory_mb=8000, local_gb=500):
self.vcpus = vcpus
self.memory_mb = memory_mb
self.local_gb = local_gb
def claim(self, vcpus=0, mem=0, disk=0):
self.vcpus_used += vcpus
self.memory_mb_used += mem
self.local_gb_used += disk
def release(self, vcpus=0, mem=0, disk=0):
self.vcpus_used -= vcpus
self.memory_mb_used -= mem
self.local_gb_used -= disk
def dump(self):
return {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': self.vcpus_used,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used
}
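# Illustrative sketch of the accounting above: claim() and release() are
# symmetric, and dump() reports both capacity and usage, e.g.:
#
#     r = Resources(vcpus=4, memory_mb=2048, local_gb=100)
#     r.claim(vcpus=2, mem=1024, disk=10)
#     r.dump()['vcpus_used']   # -> 2
#     r.release(vcpus=2, mem=1024, disk=10)
#     r.dump()['vcpus_used']   # -> 0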
class FakeDriver(driver.ComputeDriver):
    """Fake hypervisor driver."""

    capabilities = {
        "has_imagecache": True,
        "supports_recreate": True,
        "supports_migrate_to_same_host": True
        }

    # Since we don't have a real hypervisor, pretend we have lots of
    # disk and ram so this driver can be used to test large instances.
    vcpus = 1000
    memory_mb = 800000
    local_gb = 600000
def __init__(self, virtapi, read_only=False):
super(FakeDriver, self).__init__(virtapi)
self.instances = {}
self.resources = Resources(
vcpus=self.vcpus,
memory_mb=self.memory_mb,
local_gb=self.local_gb)
self.host_status_base = {
'hypervisor_type': 'fake',
'hypervisor_version': utils.convert_version_to_int('1.0'),
'hypervisor_hostname': CONF.host,
'cpu_info': {},
'disk_available_least': 0,
'supported_instances': jsonutils.dumps([(arch.X86_64,
hv_type.FAKE,
vm_mode.HVM)]),
'numa_topology': None,
}
self._mounts = {}
self._interfaces = {}
if not _FAKE_NODES:
set_nodes([CONF.host])
def init_host(self, host):
return
def list_instances(self):
return [self.instances[uuid].name for uuid in self.instances.keys()]
def list_instance_uuids(self):
return self.instances.keys()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
pass
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
pass
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
uuid = instance.uuid
state = power_state.RUNNING
flavor = instance.flavor
self.resources.claim(
vcpus=flavor.vcpus,
mem=flavor.memory_mb,
disk=flavor.root_gb)
fake_instance = FakeInstance(instance.name, state, uuid)
self.instances[uuid] = fake_instance
def snapshot(self, context, instance, image_id, update_task_state):
if instance.uuid not in self.instances:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
update_task_state(task_state=task_states.IMAGE_UPLOADING)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
pass
def get_host_ip_addr(self):
return '192.168.0.1'
def set_admin_password(self, instance, new_pass):
pass
def inject_file(self, instance, b64_path, b64_contents):
pass
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
pass
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
pass
def unrescue(self, instance, network_info):
pass
def poll_rebooting_instances(self, timeout, instances):
pass
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
pass
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
pass
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
pass
def power_off(self, instance, timeout=0, retry_interval=0):
pass
def power_on(self, context, instance, network_info,
block_device_info=None):
pass
def soft_delete(self, instance):
pass
def restore(self, instance):
pass
def pause(self, instance):
pass
def unpause(self, instance):
pass
def suspend(self, context, instance):
pass
def resume(self, context, instance, network_info, block_device_info=None):
pass
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
key = instance.uuid
if key in self.instances:
flavor = instance.flavor
self.resources.release(
vcpus=flavor.vcpus,
mem=flavor.memory_mb,
disk=flavor.root_gb)
del self.instances[key]
else:
LOG.warning(_LW("Key '%(key)s' not in instances '%(inst)s'"),
{'key': key,
'inst': self.instances}, instance=instance)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
pass
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach the disk to the instance at mountpoint using info."""
instance_name = instance.name
if instance_name not in self._mounts:
self._mounts[instance_name] = {}
self._mounts[instance_name][mountpoint] = connection_info
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach the disk attached to the instance."""
try:
del self._mounts[instance.name][mountpoint]
except KeyError:
pass
def swap_volume(self, old_connection_info, new_connection_info,
instance, mountpoint, resize_to):
"""Replace the disk attached to the instance."""
instance_name = instance.name
if instance_name not in self._mounts:
self._mounts[instance_name] = {}
self._mounts[instance_name][mountpoint] = new_connection_info
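    # Note: attach_volume(), detach_volume() and swap_volume() together
    # maintain self._mounts as a two-level mapping of instance name to
    # mountpoint to connection_info, e.g.:
    #
    #     {'instance-00000001': {'/dev/vdb': connection_info}}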
def attach_interface(self, instance, image_meta, vif):
if vif['id'] in self._interfaces:
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
self._interfaces[vif['id']] = vif
def detach_interface(self, instance, vif):
try:
del self._interfaces[vif['id']]
except KeyError:
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
def get_info(self, instance):
if instance.uuid not in self.instances:
raise exception.InstanceNotFound(instance_id=instance.uuid)
i = self.instances[instance.uuid]
return hardware.InstanceInfo(state=i.state,
max_mem_kb=0,
mem_kb=0,
num_cpu=2,
cpu_time_ns=0)
def get_diagnostics(self, instance):
return {'cpu0_time': 17300000000,
'memory': 524288,
'vda_errors': -1,
'vda_read': 262144,
'vda_read_req': 112,
'vda_write': 5778432,
'vda_write_req': 488,
'vnet1_rx': 2070139,
'vnet1_rx_drop': 0,
'vnet1_rx_errors': 0,
'vnet1_rx_packets': 26701,
'vnet1_tx': 140208,
'vnet1_tx_drop': 0,
'vnet1_tx_errors': 0,
'vnet1_tx_packets': 662,
}
def get_instance_diagnostics(self, instance):
diags = diagnostics.Diagnostics(state='running', driver='fake',
hypervisor_os='fake-os', uptime=46664, config_drive=True)
diags.add_cpu(time=17300000000)
        diags.add_nic(mac_address='01:23:45:67:89:ab',
                      rx_packets=26701,
                      rx_octets=2070139,
                      tx_octets=140208,
                      tx_packets=662)
diags.add_disk(id='fake-disk-id',
read_bytes=262144,
read_requests=112,
write_bytes=5778432,
write_requests=488)
diags.memory_details.maximum = 524288
return diags
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM.
"""
bw = []
return bw
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
volusage = []
return volusage
def get_host_cpu_stats(self):
stats = {'kernel': 5664160000000,
'idle': 1592705190000000,
'user': 26728850000000,
'iowait': 6121490000000}
stats['frequency'] = 800
return stats
def block_stats(self, instance, disk_id):
return [0, 0, 0, 0, None]
def get_console_output(self, context, instance):
return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'
def get_vnc_console(self, context, instance):
return ctype.ConsoleVNC(internal_access_path='FAKE',
host='fakevncconsole.com',
port=6969)
def get_spice_console(self, context, instance):
return ctype.ConsoleSpice(internal_access_path='FAKE',
host='fakespiceconsole.com',
port=6969,
tlsPort=6970)
def get_rdp_console(self, context, instance):
return ctype.ConsoleRDP(internal_access_path='FAKE',
host='fakerdpconsole.com',
port=6969)
    def get_serial_console(self, context, instance):
        return ctype.ConsoleSerial(internal_access_path='FAKE',
                                   host='fakeserialconsole.com',
                                   port=6969)
def get_console_pool_info(self, console_type):
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
return True
def refresh_security_group_members(self, security_group_id):
return True
def refresh_instance_security_rules(self, instance):
return True
def refresh_provider_fw_rules(self):
pass
def get_available_resource(self, nodename):
"""Updates compute manager resource info on ComputeNode table.
Since we don't have a real hypervisor, pretend we have lots of
disk and ram.
"""
cpu_info = collections.OrderedDict([
('arch', 'x86_64'),
('model', 'Nehalem'),
('vendor', 'Intel'),
('features', ['pge', 'clflush']),
('topology', {
'cores': 1,
'threads': 1,
'sockets': 4,
}),
])
if nodename not in _FAKE_NODES:
return {}
host_status = self.host_status_base.copy()
host_status.update(self.resources.dump())
host_status['hypervisor_hostname'] = nodename
host_status['host_hostname'] = nodename
host_status['host_name_label'] = nodename
host_status['cpu_info'] = jsonutils.dumps(cpu_info)
return host_status
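    # Illustrative sketch: the resource tracker consumes this dict roughly
    # like the following (a simplification, not the actual tracker code):
    #
    #     resources = driver.get_available_resource(nodename)
    #     free_ram = resources['memory_mb'] - resources['memory_mb_used']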
def ensure_filtering_rules_for_instance(self, instance, network_info):
return
def get_instance_disk_info(self, instance, block_device_info=None):
return
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
post_method(context, instance, dest, block_migration,
migrate_data)
return
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
return
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
return {}
def check_can_live_migrate_source(self, context, instance,
dest_check_data, block_device_info=None):
return
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
return
def confirm_migration(self, migration, instance, network_info):
return
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
return
def unfilter_instance(self, instance, network_info):
return
def _test_remove_vm(self, instance_uuid):
"""Removes the named VM, as if it crashed. For testing."""
self.instances.pop(instance_uuid)
def host_power_action(self, action):
"""Reboots, shuts down or powers up the host."""
return action
    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window.

        On start, it triggers the evacuation of guest VMs.
        """
if not mode:
return 'off_maintenance'
return 'on_maintenance'
def set_host_enabled(self, enabled):
"""Sets the specified host's ability to accept new instances."""
if enabled:
return 'enabled'
return 'disabled'
def get_volume_connector(self, instance):
return {'ip': CONF.my_block_storage_ip,
'initiator': 'fake',
'host': 'fakehost'}
def get_available_nodes(self, refresh=False):
return _FAKE_NODES
def instance_on_disk(self, instance):
return False
def quiesce(self, context, instance, image_meta):
pass
def unquiesce(self, context, instance, image_meta):
pass
class FakeVirtAPI(virtapi.VirtAPI):
def provider_fw_rule_get_all(self, context):
return db.provider_fw_rule_get_all(context)
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
error_callback=None):
# NOTE(danms): Don't actually wait for any events, just
# fall through
yield
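# Illustrative sketch: callers use wait_for_instance_event() as a context
# manager around the operation expected to trigger the events
# (plug_the_vifs is a placeholder), e.g.:
#
#     with self.virtapi.wait_for_instance_event(instance, event_names):
#         plug_the_vifs()
#
# With FakeVirtAPI the body simply runs; nothing is actually awaited.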
class SmallFakeDriver(FakeDriver):
    # The API samples expect specific CPU, memory and disk sizes. To allow
    # the fake driver to be used outside of the unit tests, provide a
    # separate class with the values the API samples expect; that way,
    # adjusting the large values above does not require new samples.
vcpus = 1
memory_mb = 8192
local_gb = 1028
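

if __name__ == '__main__':
    # Illustrative smoke test (an assumption, not shipped with nova):
    # exercise the driver with minimal stand-in objects in place of real
    # context/instance/flavor objects.
    class _StubFlavor(object):
        vcpus = 1
        memory_mb = 512
        root_gb = 1

    class _StubInstance(object):
        uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        name = 'instance-00000001'
        flavor = _StubFlavor()

    drv = SmallFakeDriver(virtapi=FakeVirtAPI())
    drv.spawn(None, _StubInstance(), None, [], 'password')
    print(drv.list_instances())                # ['instance-00000001']
    print(drv.resources.dump()['vcpus_used'])  # 1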