Initial code for live migration
Add initial driver methods for live migration. This support is limited to implementing the 'good path' for migration. Subsequent change sets will implement more of the validation checks and error paths.
Change-Id: I7edc4ee4ba478bf74f1030ac72c14ec07b8def78
parent 854f5ea7d4
commit bf76d4c828
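For orientation, the short sketch below shows the order in which Nova's compute manager is expected to drive the new driver entry points on that 'good path'. It is purely illustrative: a mock.Mock stands in for PowerVMDriver, so the snippet only records the call sequence, and the string arguments are placeholders rather than real Nova objects.

import mock

drv, ctxt, inst = mock.Mock(), mock.Mock(), mock.Mock(uuid='inst1')

# Destination host validates capability and returns its migration data.
dest_data = drv.check_can_live_migrate_destination(ctxt, inst, 'src_info', 'dst_info')
# Source host validates against that data (proc compat, RMC/DLPAR, capacity).
src_data = drv.check_can_live_migrate_source(ctxt, inst, dest_data)
# Destination prepares, e.g. installs the source's ssh public key.
drv.pre_live_migration(ctxt, inst, 'bdm_info', 'net_info', 'disk_info', migrate_data=src_data)
# Source spawns _live_migration_thread, which runs the LPM and the post method.
drv.live_migration(ctxt, inst, 'dest_host', 'post_method', 'recover_method')
drv.post_live_migration_at_source(ctxt, inst, 'net_info')
drv.post_live_migration_at_destination(ctxt, inst, 'net_info')

print([c[0] for c in drv.mock_calls])  # prints the method names in call order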
nova_powervm/tests/virt/powervm/data/fake_managedsystem.txt (new file, 317 lines)
@@ -0,0 +1,317 @@
|
||||
####################################################
|
||||
# THIS IS AN AUTOMATICALLY GENERATED FILE
|
||||
# DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE
|
||||
#
|
||||
# To update file, run: create_httpresp.py -refresh KYLE_MGDSYS.txt
|
||||
#
|
||||
####################################################
|
||||
INFO{
|
||||
{'comment': None, 'status': 200, 'pw': 'passw0rd', 'reason': 'OK', 'host': '9.0.0.0', 'user': 'user', 'path': 'ManagedSystem'}
|
||||
END OF SECTION}
|
||||
HEADERS{
|
||||
{'content-length': '19970', 'x-transactionrecord-uuid': '1ced07b7-7813-4126-b509-e82fde2bec0b', 'x-powered-by': 'Servlet/3.1', 'set-cookie': 'JSESSIONID=000078gRl5DySlCnFQmpAru7Whg:27a03188-585f-448c-8c74-36ffe3f8b23a; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_3_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Mon, 10 Aug 2015 15:51:45 GMT', 'x-transaction-id': 'XT10047905', 'etag': '877584344', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Mon, 10 Aug 2015 15:51:45 GMT', 'x-mc-type': 'PVM', 'content-type': 'application/atom+xml'}
|
||||
END OF SECTION}
|
||||
BODY{
|
||||
|
||||
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:ns2="http://a9.com/-/spec/opensearch/1.1/" xmlns:ns3="http://www.w3.org/1999/xhtml">
|
||||
<id>70459846-6c0f-32d2-ac61-fb26a8ed810b</id>
|
||||
<updated>2015-08-10T11:51:45.425-04:00</updated>
|
||||
<link rel="SELF" href="https://9.0.0.0:12443/rest/api/uom/ManagedSystem?group=None"/>
|
||||
<generator>IBM Power Systems Management Console</generator>
|
||||
<entry>
|
||||
<id>c889bf0d-9996-33ac-84c5-d16727083a77</id>
|
||||
<title>ManagedSystem</title>
|
||||
<published>2015-08-10T11:51:45.603-04:00</published>
|
||||
<link rel="SELF" href="https://9.0.0.0:12443/rest/api/uom/ManagedSystem/c889bf0d-9996-33ac-84c5-d16727083a77?group=None"/>
|
||||
<author>
|
||||
<name>IBM Power Systems Management Console</name>
|
||||
</author>
|
||||
<etag:etag xmlns:etag="http://www.ibm.com/xmlns/systems/power/firmware/uom/mc/2012_10/" xmlns="http://www.ibm.com/xmlns/systems/power/firmware/uom/mc/2012_10/">877584313</etag:etag>
|
||||
<content type="application/vnd.ibm.powervm.uom+xml; type=ManagedSystem">
|
||||
<ManagedSystem:ManagedSystem xmlns:ManagedSystem="http://www.ibm.com/xmlns/systems/power/firmware/uom/mc/2012_10/" xmlns="http://www.ibm.com/xmlns/systems/power/firmware/uom/mc/2012_10/" xmlns:ns2="http://www.w3.org/XML/1998/namespace/k2" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom>
|
||||
<AtomID>c889bf0d-9996-33ac-84c5-d16727083a77</AtomID>
|
||||
<AtomCreated>0</AtomCreated>
|
||||
</Atom>
|
||||
</Metadata>
|
||||
<AssociatedIPLConfiguration group="Hypervisor" kb="CUD" kxe="false" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<PowerOffWhenLastLogicalPartitionIsShutdown kb="CUD" kxe="false">false</PowerOffWhenLastLogicalPartitionIsShutdown>
|
||||
</AssociatedIPLConfiguration>
|
||||
<AssociatedLogicalPartitions kxe="false" kb="CUD"/>
|
||||
<AssociatedReservedStorageDevicePool kxe="false" kb="CUD"/>
|
||||
<AssociatedSystemCapabilities kb="CUD" kxe="false" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<ActiveLogicalPartitionMobilityCapable kxe="false" kb="ROO">true</ActiveLogicalPartitionMobilityCapable>
|
||||
<BarrierSynchronizationRegisterCapable kxe="false" kb="ROO">false</BarrierSynchronizationRegisterCapable>
|
||||
<CapacityOnDemandMemoryCapable kxe="false" kb="ROO">true</CapacityOnDemandMemoryCapable>
|
||||
<CapacityOnDemandProcessorCapable kb="ROO" kxe="false">true</CapacityOnDemandProcessorCapable>
|
||||
<HostChannelAdapterCapable kxe="false" kb="ROO">false</HostChannelAdapterCapable>
|
||||
<HugePageMemoryCapable kb="ROO" kxe="false">false</HugePageMemoryCapable>
|
||||
<IBMiCapable kxe="false" kb="ROO">false</IBMiCapable>
|
||||
<InactiveLogicalPartitionMobilityCapable kb="ROO" kxe="false">true</InactiveLogicalPartitionMobilityCapable>
|
||||
<LogicalPartitionProcessorCompatibilityModeCapable kxe="false" kb="ROO">true</LogicalPartitionProcessorCompatibilityModeCapable>
|
||||
<RedundantErrorPathReportingCapable kb="ROO" kxe="false">false</RedundantErrorPathReportingCapable>
|
||||
<SharedProcessorPoolCapable kxe="false" kb="ROO">false</SharedProcessorPoolCapable>
|
||||
<Telnet5250ApplicationCapable kxe="false" kb="ROO">false</Telnet5250ApplicationCapable>
|
||||
<VirtualFiberChannelCapable kxe="false" kb="ROO">true</VirtualFiberChannelCapable>
|
||||
<VirtualSwitchCapable kxe="false" kb="ROO">true</VirtualSwitchCapable>
|
||||
</AssociatedSystemCapabilities>
|
||||
<AssociatedSystemIOConfiguration kxe="false" kb="CUD" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<AvailableWWPNs kb="CUD" kxe="false">0</AvailableWWPNs>
|
||||
<HostChannelAdapters kxe="false" kb="CUD" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
</HostChannelAdapters>
|
||||
<HostEthernetAdapters kxe="false" kb="CUD"/>
|
||||
<IOAdapters kb="CUD" kxe="false" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
</IOAdapters>
|
||||
<IOBuses kxe="false" kb="CUD" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
</IOBuses>
|
||||
<IOSlots kxe="false" kb="CUD" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<IOSlot schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<BusGroupingRequired kb="CUD" kxe="false">true</BusGroupingRequired>
|
||||
<Description kxe="false" kb="CUD">SAS RAID Controller, PCIe2, Dual-port 6Gb</Description>
|
||||
<FeatureCodes kb="ROO" kxe="false"/>
|
||||
<PCIClass kxe="false" kb="ROO">260</PCIClass>
|
||||
<PCISubsystemDeviceID kxe="false" kb="ROO">1023</PCISubsystemDeviceID>
|
||||
<PCIRevisionID kxe="false" kb="ROO">2</PCIRevisionID>
|
||||
<PCIVendorID kb="ROO" kxe="false">842</PCIVendorID>
|
||||
<PCISubsystemVendorID kb="ROO" kxe="false">4116</PCISubsystemVendorID>
|
||||
<SlotDynamicReconfigurationConnectorIndex kxe="false" kb="ROR">553844757</SlotDynamicReconfigurationConnectorIndex>
|
||||
<SlotDynamicReconfigurationConnectorName kxe="false" kb="CUD">U78CB.001.WZS06S2-P1-C14</SlotDynamicReconfigurationConnectorName>
|
||||
</IOSlot>
|
||||
<IOSlot schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<BusGroupingRequired kb="CUD" kxe="false">true</BusGroupingRequired>
|
||||
<Description kxe="false" kb="CUD">1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short</Description>
|
||||
<FeatureCodes kb="ROO" kxe="false">5899</FeatureCodes>
|
||||
<PCIClass kxe="false" kb="ROO">512</PCIClass>
|
||||
<PCISubsystemDeviceID kxe="false" kb="ROO">1056</PCISubsystemDeviceID>
|
||||
<PCIRevisionID kxe="false" kb="ROO">1</PCIRevisionID>
|
||||
<PCIVendorID kb="ROO" kxe="false">5719</PCIVendorID>
|
||||
<PCISubsystemVendorID kb="ROO" kxe="false">4116</PCISubsystemVendorID>
|
||||
<SlotDynamicReconfigurationConnectorIndex kxe="false" kb="ROR">553779220</SlotDynamicReconfigurationConnectorIndex>
|
||||
<SlotDynamicReconfigurationConnectorName kxe="false" kb="CUD">U78CB.001.WZS06S2-P1-C12</SlotDynamicReconfigurationConnectorName>
|
||||
</IOSlot>
|
||||
<IOSlot schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<BusGroupingRequired kb="CUD" kxe="false">true</BusGroupingRequired>
|
||||
<Description kxe="false" kb="CUD">Quad 8 Gigabit Fibre Channel LP Adapter</Description>
|
||||
<FeatureCodes kb="ROO" kxe="false"/>
|
||||
<PCIClass kxe="false" kb="ROO">4</PCIClass>
|
||||
<PCISubsystemDeviceID kxe="false" kb="ROO">1054</PCISubsystemDeviceID>
|
||||
<PCIRevisionID kxe="false" kb="ROO">2</PCIRevisionID>
|
||||
<PCIVendorID kb="ROO" kxe="false">9522</PCIVendorID>
|
||||
<PCISubsystemVendorID kb="ROO" kxe="false">4116</PCISubsystemVendorID>
|
||||
<SlotDynamicReconfigurationConnectorIndex kxe="false" kb="ROR">553713680</SlotDynamicReconfigurationConnectorIndex>
|
||||
<SlotDynamicReconfigurationConnectorName kxe="false" kb="CUD">U78CB.001.WZS06S2-P1-C7</SlotDynamicReconfigurationConnectorName>
|
||||
</IOSlot>
|
||||
<IOSlot schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<BusGroupingRequired kb="CUD" kxe="false">true</BusGroupingRequired>
|
||||
<Description kxe="false" kb="CUD">Empty slot</Description>
|
||||
<FeatureCodes kb="ROO" kxe="false">0</FeatureCodes>
|
||||
<PCIClass kxe="false" kb="ROO">65535</PCIClass>
|
||||
<PCISubsystemDeviceID kxe="false" kb="ROO">65535</PCISubsystemDeviceID>
|
||||
<PCIRevisionID kxe="false" kb="ROO">255</PCIRevisionID>
|
||||
<PCIVendorID kb="ROO" kxe="false">65535</PCIVendorID>
|
||||
<PCISubsystemVendorID kb="ROO" kxe="false">65535</PCISubsystemVendorID>
|
||||
<SlotDynamicReconfigurationConnectorIndex kxe="false" kb="ROR">553713683</SlotDynamicReconfigurationConnectorIndex>
|
||||
<SlotDynamicReconfigurationConnectorName kxe="false" kb="CUD">U78CB.001.WZS06S2-P1-C11</SlotDynamicReconfigurationConnectorName>
|
||||
</IOSlot>
|
||||
<IOSlot schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<BusGroupingRequired kb="CUD" kxe="false">true</BusGroupingRequired>
|
||||
<Description kxe="false" kb="CUD">Empty slot</Description>
|
||||
<FeatureCodes kb="ROO" kxe="false">0</FeatureCodes>
|
||||
<PCIClass kxe="false" kb="ROO">65535</PCIClass>
|
||||
<PCISubsystemDeviceID kxe="false" kb="ROO">65535</PCISubsystemDeviceID>
|
||||
<PCIRevisionID kxe="false" kb="ROO">255</PCIRevisionID>
|
||||
<PCIVendorID kb="ROO" kxe="false">65535</PCIVendorID>
|
||||
<PCISubsystemVendorID kb="ROO" kxe="false">65535</PCISubsystemVendorID>
|
||||
<SlotDynamicReconfigurationConnectorIndex kxe="false" kb="ROR">553844765</SlotDynamicReconfigurationConnectorIndex>
|
||||
<SlotDynamicReconfigurationConnectorName kxe="false" kb="CUD">U78CB.001.WZS06S2-P1-C9</SlotDynamicReconfigurationConnectorName>
|
||||
</IOSlot>
|
||||
<IOSlot schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<BusGroupingRequired kb="CUD" kxe="false">true</BusGroupingRequired>
|
||||
<Description kxe="false" kb="CUD">Quad 8 Gigabit Fibre Channel LP Adapter</Description>
|
||||
<FeatureCodes kb="ROO" kxe="false"/>
|
||||
<PCIClass kxe="false" kb="ROO">4</PCIClass>
|
||||
<PCISubsystemDeviceID kxe="false" kb="ROO">1054</PCISubsystemDeviceID>
|
||||
<PCIRevisionID kxe="false" kb="ROO">2</PCIRevisionID>
|
||||
<PCIVendorID kb="ROO" kxe="false">9522</PCIVendorID>
|
||||
<PCISubsystemVendorID kb="ROO" kxe="false">4116</PCISubsystemVendorID>
|
||||
<SlotDynamicReconfigurationConnectorIndex kxe="false" kb="ROR">553713688</SlotDynamicReconfigurationConnectorIndex>
|
||||
<SlotDynamicReconfigurationConnectorName kxe="false" kb="CUD">U78CB.001.WZS06S2-P1-C6</SlotDynamicReconfigurationConnectorName>
|
||||
</IOSlot>
|
||||
<IOSlot schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<BusGroupingRequired kb="CUD" kxe="false">true</BusGroupingRequired>
|
||||
<Description kxe="false" kb="CUD">Empty slot</Description>
|
||||
<FeatureCodes kb="ROO" kxe="false">0</FeatureCodes>
|
||||
<PCIClass kxe="false" kb="ROO">65535</PCIClass>
|
||||
<PCISubsystemDeviceID kxe="false" kb="ROO">65535</PCISubsystemDeviceID>
|
||||
<PCIRevisionID kxe="false" kb="ROO">255</PCIRevisionID>
|
||||
<PCIVendorID kb="ROO" kxe="false">65535</PCIVendorID>
|
||||
<PCISubsystemVendorID kb="ROO" kxe="false">65535</PCISubsystemVendorID>
|
||||
<SlotDynamicReconfigurationConnectorIndex kxe="false" kb="ROR">553975839</SlotDynamicReconfigurationConnectorIndex>
|
||||
<SlotDynamicReconfigurationConnectorName kxe="false" kb="CUD">U78CB.001.WZS06S2-P1-C15</SlotDynamicReconfigurationConnectorName>
|
||||
</IOSlot>
|
||||
<IOSlot schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<BusGroupingRequired kb="CUD" kxe="false">true</BusGroupingRequired>
|
||||
<Description kxe="false" kb="CUD">1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short</Description>
|
||||
<FeatureCodes kb="ROO" kxe="false">5899</FeatureCodes>
|
||||
<PCIClass kxe="false" kb="ROO">512</PCIClass>
|
||||
<PCISubsystemDeviceID kxe="false" kb="ROO">1056</PCISubsystemDeviceID>
|
||||
<PCIRevisionID kxe="false" kb="ROO">1</PCIRevisionID>
|
||||
<PCIVendorID kb="ROO" kxe="false">5719</PCIVendorID>
|
||||
<PCISubsystemVendorID kb="ROO" kxe="false">4116</PCISubsystemVendorID>
|
||||
<SlotDynamicReconfigurationConnectorIndex kxe="false" kb="ROR">553910302</SlotDynamicReconfigurationConnectorIndex>
|
||||
<SlotDynamicReconfigurationConnectorName kxe="false" kb="CUD">U78CB.001.WZS06S2-P1-C10</SlotDynamicReconfigurationConnectorName>
|
||||
</IOSlot>
|
||||
<IOSlot schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<BusGroupingRequired kb="CUD" kxe="false">true</BusGroupingRequired>
|
||||
<Description kxe="false" kb="CUD">Universal Serial Bus UHC Spec</Description>
|
||||
<FeatureCodes kb="ROO" kxe="false"/>
|
||||
<PCIClass kxe="false" kb="ROO">3075</PCIClass>
|
||||
<PCISubsystemDeviceID kxe="false" kb="ROO">1202</PCISubsystemDeviceID>
|
||||
<PCIRevisionID kxe="false" kb="ROO">2</PCIRevisionID>
|
||||
<PCIVendorID kb="ROO" kxe="false">33345</PCIVendorID>
|
||||
<PCISubsystemVendorID kb="ROO" kxe="false">4116</PCISubsystemVendorID>
|
||||
<SlotDynamicReconfigurationConnectorIndex kxe="false" kb="ROR">553713691</SlotDynamicReconfigurationConnectorIndex>
|
||||
<SlotDynamicReconfigurationConnectorName kxe="false" kb="CUD">U78CB.001.WZS06S2-P1-T2</SlotDynamicReconfigurationConnectorName>
|
||||
</IOSlot>
|
||||
</IOSlots>
|
||||
<SRIOVAdapters kxe="false" kb="CUD" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
</SRIOVAdapters>
|
||||
<AssociatedSystemVirtualNetwork group="SystemNetwork" kxe="false" kb="CUD" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
</AssociatedSystemVirtualNetwork>
|
||||
<WWPNPrefix kxe="false" kb="CUD">13857705832243593216</WWPNPrefix>
|
||||
</AssociatedSystemIOConfiguration>
|
||||
<AssociatedSystemMemoryConfiguration kxe="false" kb="CUD" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<ConfigurableSystemMemory kxe="false" kb="ROR">131072</ConfigurableSystemMemory>
|
||||
<CurrentAvailableSystemMemory kb="ROR" kxe="false">87296</CurrentAvailableSystemMemory>
|
||||
<CurrentLogicalMemoryBlockSize kxe="false" kb="ROR">256</CurrentLogicalMemoryBlockSize>
|
||||
<DefaultHardwarePageTableRatio kb="COR" kxe="false">6</DefaultHardwarePageTableRatio>
|
||||
<InstalledSystemMemory kb="ROR" kxe="false">131072</InstalledSystemMemory>
|
||||
<MaximumMemoryPoolCount kb="ROR" kxe="false">1</MaximumMemoryPoolCount>
|
||||
<MemoryRegionSize kxe="false" kb="CUD">256</MemoryRegionSize>
|
||||
<MemoryUsedByHypervisor kb="ROR" kxe="false">4864</MemoryUsedByHypervisor>
|
||||
<PendingAvailableSystemMemory kxe="false" kb="CUD">87296</PendingAvailableSystemMemory>
|
||||
<PendingLogicalMemoryBlockSize kxe="false" kb="CUD">256</PendingLogicalMemoryBlockSize>
|
||||
<PendingMemoryRegionSize kxe="false" kb="CUD">256</PendingMemoryRegionSize>
|
||||
<SharedMemoryPool kxe="false" kb="CUD"/>
|
||||
</AssociatedSystemMemoryConfiguration>
|
||||
<AssociatedSystemProcessorConfiguration kb="CUD" kxe="false" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<ConfigurableSystemProcessorUnits kxe="false" kb="ROR">10</ConfigurableSystemProcessorUnits>
|
||||
<CurrentAvailableSystemProcessorUnits kb="ROR" kxe="false">0.6</CurrentAvailableSystemProcessorUnits>
|
||||
<CurrentMaximumProcessorsPerAIXOrLinuxPartition kb="ROR" kxe="false">64</CurrentMaximumProcessorsPerAIXOrLinuxPartition>
|
||||
<CurrentMaximumProcessorsPerIBMiPartition kxe="false" kb="ROR">64</CurrentMaximumProcessorsPerIBMiPartition>
|
||||
<CurrentMaximumAllowedProcessorsPerPartition kb="ROR" kxe="false">256</CurrentMaximumAllowedProcessorsPerPartition>
|
||||
<CurrentMaximumProcessorsPerVirtualIOServerPartition kxe="false" kb="ROR">64</CurrentMaximumProcessorsPerVirtualIOServerPartition>
|
||||
<CurrentMaximumVirtualProcessorsPerAIXOrLinuxPartition kb="ROR" kxe="false">64</CurrentMaximumVirtualProcessorsPerAIXOrLinuxPartition>
|
||||
<CurrentMaximumVirtualProcessorsPerIBMiPartition kxe="false" kb="ROR">64</CurrentMaximumVirtualProcessorsPerIBMiPartition>
|
||||
<CurrentMaximumVirtualProcessorsPerVirtualIOServerPartition kb="ROR" kxe="false">64</CurrentMaximumVirtualProcessorsPerVirtualIOServerPartition>
|
||||
<InstalledSystemProcessorUnits kb="ROR" kxe="false">10</InstalledSystemProcessorUnits>
|
||||
<MaximumAllowedVirtualProcessorsPerPartition kb="ROR" kxe="false">256</MaximumAllowedVirtualProcessorsPerPartition>
|
||||
<MinimumProcessorUnitsPerVirtualProcessor kxe="false" kb="ROR">0.05</MinimumProcessorUnitsPerVirtualProcessor>
|
||||
<PendingAvailableSystemProcessorUnits kb="CUD" kxe="false">0.6</PendingAvailableSystemProcessorUnits>
|
||||
<SupportedPartitionProcessorCompatibilityModes kb="ROR" kxe="false">default</SupportedPartitionProcessorCompatibilityModes>
|
||||
<SupportedPartitionProcessorCompatibilityModes kb="ROR" kxe="false">POWER6</SupportedPartitionProcessorCompatibilityModes>
|
||||
<SupportedPartitionProcessorCompatibilityModes kb="ROR" kxe="false">POWER6_Plus</SupportedPartitionProcessorCompatibilityModes>
|
||||
<SupportedPartitionProcessorCompatibilityModes kb="ROR" kxe="false">POWER7</SupportedPartitionProcessorCompatibilityModes>
|
||||
<SupportedPartitionProcessorCompatibilityModes kb="ROR" kxe="false">POWER8</SupportedPartitionProcessorCompatibilityModes>
|
||||
<SharedProcessorPool kb="CUD" kxe="false"/>
|
||||
</AssociatedSystemProcessorConfiguration>
|
||||
<AssociatedSystemVirtualStorage kb="CUD" kxe="false" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
</AssociatedSystemVirtualStorage>
|
||||
<MachineTypeModelAndSerialNumber kxe="false" kb="ROR" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<MachineType kxe="false" kb="CUR">8247</MachineType>
|
||||
<Model kxe="false" kb="CUR">21L</Model>
|
||||
<SerialNumber kb="CUR" kxe="false">9999999</SerialNumber>
|
||||
</MachineTypeModelAndSerialNumber>
|
||||
<ManufacturingDefaultConfigurationEnabled kb="ROR" kxe="false">false</ManufacturingDefaultConfigurationEnabled>
|
||||
<MaximumPartitions kb="CUD" kxe="false">200</MaximumPartitions>
|
||||
<PrimaryIPAddress kxe="false" kb="CUR">9.0.0.0</PrimaryIPAddress>
|
||||
<SecondaryIPAddress kb="CUD" kxe="false">169.254.3.147</SecondaryIPAddress>
|
||||
<State kb="ROR" kxe="false">operating</State>
|
||||
<SystemName kxe="false" kb="CUR">Server-8247-21L-SN9999999</SystemName>
|
||||
<SystemMigrationInformation kb="ROR" kxe="false" schemaVersion="V1_3_0">
|
||||
<Metadata>
|
||||
<Atom/>
|
||||
</Metadata>
|
||||
<MaximumInactiveMigrations kb="ROR" kxe="false">16</MaximumInactiveMigrations>
|
||||
<MaximumActiveMigrations kb="ROR" kxe="false">16</MaximumActiveMigrations>
|
||||
<NumberOfInactiveMigrationsInProgress kxe="false" kb="ROR">0</NumberOfInactiveMigrationsInProgress>
|
||||
<NumberOfActiveMigrationsInProgress kb="ROR" kxe="false">0</NumberOfActiveMigrationsInProgress>
|
||||
</SystemMigrationInformation>
|
||||
<IsMaster ksv="V1_3_0" kxe="false" kb="ROO">false</IsMaster>
|
||||
<IsHMCMaster ksv="V1_3_0" kxe="false" kb="ROO">false</IsHMCMaster>
|
||||
<IsManagementPartitionMaster ksv="V1_3_0" kb="ROO" kxe="false">false</IsManagementPartitionMaster>
|
||||
<AssociatedGroups ksv="V1_2_0" kb="ROO" kxe="false"/>
|
||||
<AssociatedTasks ksv="V1_2_0" kb="ROO" kxe="false"/>
|
||||
</ManagedSystem:ManagedSystem>
|
||||
</content>
|
||||
</entry>
|
||||
</feed>
|
||||
|
||||
END OF SECTION}
|
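The file above is a canned REST response in the INFO/HEADERS/BODY format consumed by pypowervm's pvmhttp test utility. A minimal sketch of loading it outside the fixture follows; the repo-relative path and the System.wrap call are assumptions based on the fixture and driver code in this change.

import os
import mock

import pypowervm.wrappers.managed_system as pvm_ms
from pypowervm.tests.wrappers.util import pvmhttp

# Path is assumed to be relative to the repository root.
file_path = os.path.join('nova_powervm/tests/virt/powervm/data',
                         'fake_managedsystem.txt')
ms_http = pvmhttp.load_pvm_resp(file_path, adapter=mock.Mock()).get_response()
host_w = pvm_ms.System.wrap(ms_http)[0]
print(host_w.system_name)     # Server-8247-21L-SN9999999
print(host_w.migration_data)  # includes the active migration counts and limits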
@@ -19,12 +19,13 @@ from __future__ import absolute_import
|
||||
|
||||
import fixtures
|
||||
import mock
|
||||
import os
|
||||
|
||||
from nova_powervm.virt.powervm import driver
|
||||
|
||||
from nova.virt import fake
|
||||
from pypowervm.tests.wrappers.util import pvmhttp
|
||||
MS_HTTPRESP_FILE = "managedsystem.txt"
|
||||
MS_HTTPRESP_FILE = "fake_managedsystem.txt"
|
||||
|
||||
|
||||
class PyPowerVM(fixtures.Fixture):
|
||||
@@ -100,7 +101,10 @@ class PowerVMComputeDriver(fixtures.Fixture):
|
||||
@mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver._get_adapter')
|
||||
@mock.patch('nova_powervm.virt.powervm.mgmt.get_mgmt_partition')
|
||||
def _init_host(self, *args):
|
||||
ms_http = pvmhttp.load_pvm_resp(MS_HTTPRESP_FILE).get_response()
|
||||
file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
|
||||
'data', MS_HTTPRESP_FILE)
|
||||
ms_http = pvmhttp.load_pvm_resp(
|
||||
file_path, adapter=mock.Mock()).get_response()
|
||||
# Pretend it just returned one host
|
||||
ms_http.feed.entries = [ms_http.feed.entries[0]]
|
||||
self.drv.adapter.read.return_value = ms_http
|
||||
|
@@ -39,6 +39,7 @@ import pypowervm.wrappers.managed_system as pvm_ms
|
||||
from nova_powervm.tests.virt import powervm
|
||||
from nova_powervm.tests.virt.powervm import fixtures as fx
|
||||
from nova_powervm.virt.powervm import driver
|
||||
from nova_powervm.virt.powervm import live_migration as lpm
|
||||
|
||||
MS_HTTPRESP_FILE = "managedsystem.txt"
|
||||
MS_NAME = 'HV4'
|
||||
@@ -76,6 +77,8 @@ class TestPowerVMDriver(test.TestCase):
|
||||
self.drv = self.drv_fix.drv
|
||||
self.apt = self.drv_fix.pypvm.apt
|
||||
|
||||
self._setup_lpm()
|
||||
|
||||
self.disk_dvr = self.drv.disk_dvr
|
||||
self.vol_fix = self.useFixture(fx.VolumeAdapter())
|
||||
self.vol_drv = self.vol_fix.drv
|
||||
@@ -88,6 +91,17 @@ class TestPowerVMDriver(test.TestCase):
|
||||
resp.entry = pvm_lpar.LPAR._bld(None).entry
|
||||
self.crt_lpar.return_value = pvm_lpar.LPAR.wrap(resp)
|
||||
|
||||
def _setup_lpm(self):
|
||||
"""Setup the lpm environment.
|
||||
|
||||
This may have to be called directly by tests since the lpm code
|
||||
cleans up the dict entry on the last expected lpm method.
|
||||
"""
|
||||
self.lpm = mock.Mock()
|
||||
self.lpm_inst = mock.Mock()
|
||||
self.lpm_inst.uuid = 'inst1'
|
||||
self.drv.live_migrations = {'inst1': self.lpm}
|
||||
|
||||
def test_driver_create(self):
|
||||
"""Validates that a driver of the PowerVM type can just be
|
||||
initialized.
|
||||
@@ -928,3 +942,79 @@ class TestPowerVMDriver(test.TestCase):
|
||||
mock_stream.assert_called_with(disk_path='disk_path')
|
||||
mock_rm.assert_called_with(stg_elem='stg_elem', vios_wrap='vios_wrap',
|
||||
disk_path='disk_path')
|
||||
|
||||
@mock.patch('nova_powervm.virt.powervm.live_migration.LiveMigrationDest')
|
||||
def test_can_migrate_dest(self, mock_lpm):
|
||||
mock_lpm.return_value.check_destination.return_value = 'dest_data'
|
||||
dest_data = self.drv.check_can_live_migrate_destination(
|
||||
'context', mock.Mock(), 'src_compute_info', 'dst_compute_info')
|
||||
self.assertEqual('dest_data', dest_data)
|
||||
|
||||
def test_can_live_mig_dest_clnup(self):
|
||||
self.drv.check_can_live_migrate_destination_cleanup(
|
||||
'context', 'dest_data')
|
||||
|
||||
@mock.patch('nova_powervm.virt.powervm.live_migration.LiveMigrationSrc')
|
||||
def test_can_live_mig_src(self, mock_lpm):
|
||||
mock_lpm.return_value.check_source.return_value = (
|
||||
'src_data')
|
||||
src_data = self.drv.check_can_live_migrate_source(
|
||||
'context', mock.Mock(), 'dest_check_data')
|
||||
self.assertEqual('src_data', src_data)
|
||||
|
||||
def test_pre_live_migr(self):
|
||||
self.drv.pre_live_migration(
|
||||
'context', self.lpm_inst, 'block_device_info', 'network_info',
|
||||
'disk_info', migrate_data='migrate_data')
|
||||
|
||||
@mock.patch('nova.utils.spawn_n')
|
||||
def test_live_migration(self, mock_spawn):
|
||||
self.drv.live_migration('context', self.lpm_inst, 'dest',
|
||||
'post_method', 'recover_method')
|
||||
mock_spawn.assert_called_once_with(
|
||||
self.drv._live_migration_thread, 'context', self.lpm_inst, 'dest',
|
||||
'post_method', 'recover_method', False, None)
|
||||
|
||||
def test_live_migr_thread(self):
|
||||
mock_post_meth = mock.Mock()
|
||||
mock_rec_meth = mock.Mock()
|
||||
|
||||
# Good path
|
||||
self.drv._live_migration_thread(
|
||||
'context', self.lpm_inst, 'dest', mock_post_meth, mock_rec_meth,
|
||||
'block_mig', 'migrate_data')
|
||||
|
||||
mock_post_meth.assert_called_once_with(
|
||||
'context', self.lpm_inst, 'dest', mock.ANY, mock.ANY)
|
||||
self.assertEqual(0, mock_rec_meth.call_count)
|
||||
|
||||
# Exception path
|
||||
self._setup_lpm()
|
||||
mock_post_meth.reset_mock()
|
||||
self.lpm.live_migration.side_effect = ValueError()
|
||||
self.assertRaises(
|
||||
lpm.LiveMigrationFailed, self.drv._live_migration_thread,
|
||||
'context', self.lpm_inst, 'dest', mock_post_meth, mock_rec_meth,
|
||||
'block_mig', 'migrate_data')
|
||||
mock_rec_meth.assert_called_once_with(
|
||||
'context', self.lpm_inst, 'dest', mock.ANY, mock.ANY)
|
||||
self.lpm.rollback_live_migration.assert_called_once_with('context')
|
||||
self.assertEqual(0, mock_post_meth.call_count)
|
||||
|
||||
def test_rollbk_lpm_dest(self):
|
||||
self.drv.rollback_live_migration_at_destination(
|
||||
'context', self.lpm_inst, 'network_info', 'block_device_info')
|
||||
self.assertRaises(
|
||||
KeyError, lambda: self.drv.live_migrations[self.lpm_inst.uuid])
|
||||
|
||||
def test_post_live_mig_src(self):
|
||||
self.drv.post_live_migration_at_source('context', self.lpm_inst,
|
||||
'network_info')
|
||||
self.lpm.post_live_migration_at_source.assert_called_once_with(
|
||||
'network_info')
|
||||
|
||||
def test_post_live_mig_dest(self):
|
||||
self.drv.post_live_migration_at_destination(
|
||||
'context', self.lpm_inst, 'network_info')
|
||||
self.lpm.post_live_migration_at_destination.assert_called_once_with(
|
||||
'network_info')
|
||||
|
nova_powervm/tests/virt/powervm/test_live_migration.py (new file, 176 lines)
@@ -0,0 +1,176 @@
|
||||
# Copyright 2015 IBM Corp.
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import mock
|
||||
|
||||
from nova import objects
|
||||
from nova import test
|
||||
|
||||
from nova_powervm.tests.virt import powervm
|
||||
from nova_powervm.tests.virt.powervm import fixtures as fx
|
||||
from nova_powervm.virt.powervm import live_migration as lpm
|
||||
|
||||
|
||||
class TestLPM(test.TestCase):
|
||||
def setUp(self):
|
||||
super(TestLPM, self).setUp()
|
||||
|
||||
self.flags(disk_driver='localdisk', group='powervm')
|
||||
self.drv_fix = self.useFixture(fx.PowerVMComputeDriver())
|
||||
self.drv = self.drv_fix.drv
|
||||
self.apt = self.drv_fix.pypvm.apt
|
||||
|
||||
self.inst = objects.Instance(**powervm.TEST_INSTANCE)
|
||||
self.lpmsrc = lpm.LiveMigrationSrc(self.drv, self.inst, {})
|
||||
self.lpmdst = lpm.LiveMigrationDest(self.drv, self.inst)
|
||||
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM')
|
||||
@mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper')
|
||||
@mock.patch('pypowervm.wrappers.managed_system.System.migration_data',
|
||||
new_callable=mock.PropertyMock, name='MigDataProp')
|
||||
def test_lpm_source(self, mock_migrdata, mock_get_wrap, mock_cd):
|
||||
migr_data = {'active_migrations_supported': 4,
|
||||
'active_migrations_in_progress': 2}
|
||||
mock_migrdata.return_value = migr_data
|
||||
|
||||
with mock.patch.object(
|
||||
self.lpmsrc, '_check_dlpar_rmc', return_value=None):
|
||||
|
||||
# Test the bad path first, then patch in values to make it succeed
|
||||
self.lpmsrc.dest_data = {'dest_proc_compat': 'a,b,c'}
|
||||
mock_wrap = mock.Mock()
|
||||
mock_get_wrap.return_value = mock_wrap
|
||||
|
||||
self.assertRaises(lpm.LiveMigrationProcCompat,
|
||||
self.lpmsrc.check_source, 'context',
|
||||
'block_device_info')
|
||||
|
||||
# Patch the proc compat fields, to get further
|
||||
pm = mock.PropertyMock(return_value='b')
|
||||
type(mock_wrap).proc_compat_mode = pm
|
||||
|
||||
self.assertRaises(lpm.LiveMigrationInvalidState,
|
||||
self.lpmsrc.check_source, 'context',
|
||||
'block_device_info')
|
||||
|
||||
pm = mock.PropertyMock(return_value='Not_Migrating')
|
||||
type(mock_wrap).migration_state = pm
|
||||
|
||||
# Finally, good path.
|
||||
self.lpmsrc.check_source('context', 'block_device_info')
|
||||
# Ensure we tried to remove the vopts.
|
||||
mock_cd.return_value.dlt_vopt.assert_called_once_with(
|
||||
mock.ANY)
|
||||
|
||||
# Ensure migration counts are validated
|
||||
migr_data['active_migrations_in_progress'] = 4
|
||||
self.assertRaises(lpm.LiveMigrationCapacity,
|
||||
self.lpmsrc.check_source, 'context',
|
||||
'block_device_info')
|
||||
|
||||
@mock.patch('pypowervm.wrappers.managed_system.System.migration_data',
|
||||
new_callable=mock.PropertyMock, name='MigDataProp')
|
||||
def test_lpm_dest(self, mock_migrdata):
|
||||
src_compute_info = {'stats': {'memory_region_size': 1}}
|
||||
dst_compute_info = {'stats': {'memory_region_size': 1}}
|
||||
|
||||
migr_data = {'active_migrations_supported': 4,
|
||||
'active_migrations_in_progress': 2}
|
||||
mock_migrdata.return_value = migr_data
|
||||
with mock.patch.object(self.drv.host_wrapper, 'refresh') as mock_rfh:
|
||||
|
||||
self.lpmdst.check_destination(
|
||||
'context', src_compute_info, dst_compute_info)
|
||||
mock_rfh.assert_called_once_with()
|
||||
|
||||
# Ensure migration counts are validated
|
||||
migr_data['active_migrations_in_progress'] = 4
|
||||
self.assertRaises(lpm.LiveMigrationCapacity,
|
||||
self.lpmdst.check_destination, 'context',
|
||||
src_compute_info, dst_compute_info)
|
||||
# Repair the stat
|
||||
migr_data['active_migrations_in_progress'] = 2
|
||||
|
||||
# Ensure diff memory sizes raises an exception
|
||||
dst_compute_info['stats']['memory_region_size'] = 2
|
||||
self.assertRaises(lpm.LiveMigrationMRS,
|
||||
self.lpmdst.check_destination, 'context',
|
||||
src_compute_info, dst_compute_info)
|
||||
|
||||
def test_pre_live_mig(self):
|
||||
self.lpmdst.pre_live_migration('context', 'block_device_info',
|
||||
'network_info', 'disk_info',
|
||||
{})
|
||||
|
||||
@mock.patch('pypowervm.tasks.migration.migrate_lpar')
|
||||
def test_live_migration(self, mock_migr):
|
||||
|
||||
self.lpmsrc.lpar_w = mock.Mock()
|
||||
self.lpmsrc.dest_data = dict(
|
||||
dest_sys_name='a', dest_ip='1', dest_user_id='neo')
|
||||
self.lpmsrc.live_migration('context', 'migrate_data')
|
||||
mock_migr.called_once_with('context')
|
||||
|
||||
# Test that we raise errors received during migration
|
||||
mock_migr.side_effect = ValueError()
|
||||
self.assertRaises(ValueError, self.lpmsrc.live_migration, 'context',
|
||||
'migrate_data')
|
||||
mock_migr.called_once_with('context')
|
||||
|
||||
def test_post_live_mig_src(self):
|
||||
self.lpmsrc.post_live_migration_at_source('network_info')
|
||||
|
||||
def test_post_live_mig_dest(self):
|
||||
self.lpmdst.post_live_migration_at_destination('network_info')
|
||||
|
||||
@mock.patch('pypowervm.tasks.migration.migrate_recover')
|
||||
def test_rollback(self, mock_migr):
|
||||
self.lpmsrc.lpar_w = mock.Mock()
|
||||
|
||||
# Test no need to rollback
|
||||
self.lpmsrc.lpar_w.migration_state = 'Not_Migrating'
|
||||
self.lpmsrc.rollback_live_migration('context')
|
||||
self.assertTrue(self.lpmsrc.lpar_w.refresh.called)
|
||||
self.assertFalse(mock_migr.called)
|
||||
|
||||
# Test calling the rollback
|
||||
self.lpmsrc.lpar_w.reset_mock()
|
||||
self.lpmsrc.lpar_w.migration_state = 'Pretend its Migrating'
|
||||
self.lpmsrc.rollback_live_migration('context')
|
||||
self.assertTrue(self.lpmsrc.lpar_w.refresh.called)
|
||||
mock_migr.assert_called_once_with(self.lpmsrc.lpar_w, force=True)
|
||||
|
||||
# Test exception from rollback
|
||||
mock_migr.reset_mock()
|
||||
self.lpmsrc.lpar_w.reset_mock()
|
||||
mock_migr.side_effect = ValueError()
|
||||
self.lpmsrc.rollback_live_migration('context')
|
||||
self.assertTrue(self.lpmsrc.lpar_w.refresh.called)
|
||||
mock_migr.assert_called_once_with(self.lpmsrc.lpar_w, force=True)
|
||||
|
||||
def test_check_dlpar_rmc(self):
|
||||
lpar_w = mock.Mock()
|
||||
lpar_w.check_dlpar_connectivity.return_value = (1, 'active')
|
||||
self.lpmsrc._check_dlpar_rmc(lpar_w)
|
||||
|
||||
lpar_w.check_dlpar_connectivity.return_value = (0, 'active')
|
||||
self.assertRaises(lpm.LiveMigrationDLPAR,
|
||||
self.lpmsrc._check_dlpar_rmc, lpar_w)
|
||||
|
||||
lpar_w.check_dlpar_connectivity.return_value = (1, 'not active')
|
||||
self.assertRaises(lpm.LiveMigrationRMC,
|
||||
self.lpmsrc._check_dlpar_rmc, lpar_w)
|
@@ -32,6 +32,7 @@ import time
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import importutils
|
||||
import six
|
||||
from taskflow import engines as tf_eng
|
||||
from taskflow.patterns import linear_flow as tf_lf
|
||||
from taskflow.patterns import unordered_flow as tf_uf
|
||||
@@ -48,6 +49,7 @@ from pypowervm.wrappers import managed_system as pvm_ms
|
||||
|
||||
from nova_powervm.virt.powervm.disk import driver as disk_dvr
|
||||
from nova_powervm.virt.powervm import host as pvm_host
|
||||
from nova_powervm.virt.powervm import live_migration as lpm
|
||||
from nova_powervm.virt.powervm import mgmt
|
||||
from nova_powervm.virt.powervm.tasks import image as tf_img
|
||||
from nova_powervm.virt.powervm.tasks import network as tf_net
|
||||
@@ -86,6 +88,8 @@ class PowerVMDriver(driver.ComputeDriver):
|
||||
including catching up with currently running VM's on the given host.
|
||||
"""
|
||||
|
||||
# Live migrations
|
||||
self.live_migrations = {}
|
||||
# Get an adapter
|
||||
self._get_adapter()
|
||||
# First need to resolve the managed host UUID
|
||||
@@ -867,24 +871,67 @@ class PowerVMDriver(driver.ComputeDriver):
|
||||
if power_on:
|
||||
vm.power_on(self.adapter, instance, self.host_uuid)
|
||||
|
||||
def check_can_live_migrate_destination(self, ctxt, instance_ref,
|
||||
def ensure_filtering_rules_for_instance(self, instance, network_info):
|
||||
"""Setting up filtering rules and waiting for its completion.
|
||||
|
||||
To migrate an instance, filtering rules for the hypervisor
and firewall must be in place on the destination host.
(We wait only for the hypervisor filtering rules, since
firewall rules can be set up faster.)
|
||||
|
||||
Concretely, the below method must be called.
|
||||
- setup_basic_filtering (for nova-basic, etc.)
|
||||
- prepare_instance_filter(for nova-instance-instance-xxx, etc.)
|
||||
|
||||
to_xml may have to be called since it defines PROJNET and PROJMASK,
but libvirt migrates those values through migrateToURI(),
so it does not need to be called here.
|
||||
|
||||
Don't use a thread for this method, since migration should not be
started until the filtering-rule setup operations have completed.
|
||||
|
||||
:param instance: nova.objects.instance.Instance object
|
||||
|
||||
"""
|
||||
# No op for PowerVM
|
||||
pass
|
||||
|
||||
def check_can_live_migrate_destination(self, context, instance,
|
||||
src_compute_info, dst_compute_info,
|
||||
block_migration=False,
|
||||
disk_over_commit=False):
|
||||
"""Validate the destination host is capable of live partition
|
||||
migration.
|
||||
"""Check if it is possible to execute live migration.
|
||||
|
||||
:param ctxt: security context
|
||||
:param instance_ref: instance to be migrated
|
||||
:param src_compute_info: source host information
|
||||
:param dst_compute_info: destination host information
|
||||
This runs checks on the destination host, and then calls
|
||||
back to the source host to check the results.
|
||||
|
||||
:param context: security context
|
||||
:param instance: nova.db.sqlalchemy.models.Instance
|
||||
:param src_compute_info: Info about the sending machine
|
||||
:param dst_compute_info: Info about the receiving machine
|
||||
:param block_migration: if true, prepare for block migration
|
||||
:param disk_over_commit: if true, allow disk over commit
|
||||
:return: dictionary containing destination data
|
||||
|
||||
:returns: a dict containing migration info (hypervisor-dependent)
|
||||
"""
|
||||
# dest_check_data = \
|
||||
# TODO(IBM): Implement live migration check
|
||||
pass
|
||||
LOG.info(_LI("Checking live migration capability on destination "
|
||||
"host."), instance=instance)
|
||||
|
||||
mig = lpm.LiveMigrationDest(self, instance)
|
||||
self.live_migrations[instance.uuid] = mig
|
||||
return mig.check_destination(context, src_compute_info,
|
||||
dst_compute_info)
|
||||
|
||||
def check_can_live_migrate_destination_cleanup(self, context,
|
||||
dest_check_data):
|
||||
"""Do required cleanup on dest host after check_can_live_migrate calls
|
||||
|
||||
:param context: security context
|
||||
:param dest_check_data: result of check_can_live_migrate_destination
|
||||
"""
|
||||
LOG.info(_LI("Cleaning up from checking live migration capability "
|
||||
"on destination."))
|
||||
|
||||
def check_can_live_migrate_source(self, context, instance,
|
||||
dest_check_data, block_device_info=None):
|
||||
@@ -897,11 +944,13 @@ class PowerVMDriver(driver.ComputeDriver):
|
||||
:param instance: nova.db.sqlalchemy.models.Instance
|
||||
:param dest_check_data: result of check_can_live_migrate_destination
|
||||
:param block_device_info: result of _get_instance_block_device_info
|
||||
:return: a dict containing migration info (hypervisor-dependent)
|
||||
:returns: a dict containing migration info (hypervisor-dependent)
|
||||
"""
|
||||
# migrate_data = \
|
||||
# TODO(IBM): Implement live migration check
|
||||
pass
|
||||
LOG.info(_LI("Checking live migration capability on source host."),
|
||||
instance=instance)
|
||||
mig = lpm.LiveMigrationSrc(self, instance, dest_check_data)
|
||||
self.live_migrations[instance.uuid] = mig
|
||||
return mig.check_source(context, block_device_info)
|
||||
|
||||
def pre_live_migration(self, context, instance, block_device_info,
|
||||
network_info, disk_info, migrate_data=None):
|
||||
@@ -914,26 +963,94 @@ class PowerVMDriver(driver.ComputeDriver):
|
||||
:param disk_info: instance disk information
|
||||
:param migrate_data: implementation specific data dict.
|
||||
"""
|
||||
# TODO(IBM): Implement migration prerequisites
|
||||
pass
|
||||
LOG.info(_LI("Pre live migration processing."),
|
||||
instance=instance)
|
||||
mig = self.live_migrations[instance.uuid]
|
||||
mig.pre_live_migration(context, block_device_info, network_info,
|
||||
disk_info, migrate_data)
|
||||
|
||||
def live_migration(self, ctxt, instance_ref, dest,
|
||||
post_method, recover_method,
|
||||
block_migration=False, migrate_data=None):
|
||||
"""Live migrates a partition from one host to another.
|
||||
def live_migration(self, context, instance, dest,
|
||||
post_method, recover_method, block_migration=False,
|
||||
migrate_data=None):
|
||||
"""Live migration of an instance to another host.
|
||||
|
||||
:param context: security context
|
||||
:param instance:
|
||||
nova.db.sqlalchemy.models.Instance object
|
||||
instance object that is migrated.
|
||||
:param dest: destination host
|
||||
:param post_method:
|
||||
post operation method.
|
||||
expected nova.compute.manager._post_live_migration.
|
||||
:param recover_method:
|
||||
recovery method when any exception occurs.
|
||||
expected nova.compute.manager._rollback_live_migration.
|
||||
:param block_migration: if true, migrate VM disk.
|
||||
:param migrate_data: implementation specific params.
|
||||
|
||||
:param ctxt: security context
|
||||
:params instance_ref: instance to be migrated.
|
||||
:params dest: destination host
|
||||
:params post_method: post operation method.
|
||||
nova.compute.manager.post_live_migration.
|
||||
:params recover_method: recovery method when any exception occurs.
|
||||
nova.compute.manager.recover_live_migration.
|
||||
:params block_migration: if true, migrate VM disk.
|
||||
:params migrate_data: implementation specific data dictionary.
|
||||
"""
|
||||
self._log_operation('live_migration', instance_ref)
|
||||
# TODO(IBM): Implement live migration
|
||||
self._log_operation('live_migration', instance)
|
||||
# Spawn off a thread to handle this migration
|
||||
n_utils.spawn_n(self._live_migration_thread, context, instance, dest,
|
||||
post_method, recover_method, block_migration,
|
||||
migrate_data)
|
||||
|
||||
def _live_migration_thread(self, context, instance, dest, post_method,
|
||||
recover_method, block_migration, migrate_data):
|
||||
"""Live migration of an instance to another host.
|
||||
|
||||
:param context: security context
|
||||
:param instance:
|
||||
nova.db.sqlalchemy.models.Instance object
|
||||
instance object that is migrated.
|
||||
:param dest: destination host
|
||||
:param post_method:
|
||||
post operation method.
|
||||
expected nova.compute.manager._post_live_migration.
|
||||
:param recover_method:
|
||||
recovery method when any exception occurs.
|
||||
expected nova.compute.manager._rollback_live_migration.
|
||||
:param block_migration: if true, migrate VM disk.
|
||||
:param migrate_data: implementation specific params.
|
||||
|
||||
"""
|
||||
try:
|
||||
mig = self.live_migrations[instance.uuid]
|
||||
try:
|
||||
mig.live_migration(context, migrate_data)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
LOG.debug("Rolling back live migration.", instance=instance)
|
||||
mig.rollback_live_migration(context)
|
||||
recover_method(context, instance, dest,
|
||||
block_migration, migrate_data)
|
||||
raise lpm.LiveMigrationFailed(name=instance.name,
|
||||
reason=six.text_type(e))
|
||||
|
||||
LOG.debug("Calling post live migration method.", instance=instance)
|
||||
# Post method to update host in OpenStack and finish live-migration
|
||||
post_method(context, instance, dest, block_migration, migrate_data)
|
||||
finally:
|
||||
# Remove the migration record on the source side.
|
||||
del self.live_migrations[instance.uuid]
|
||||
|
||||
def rollback_live_migration_at_destination(self, context, instance,
|
||||
network_info,
|
||||
block_device_info,
|
||||
destroy_disks=True,
|
||||
migrate_data=None):
|
||||
"""Clean up destination node after a failed live migration.
|
||||
|
||||
:param context: security context
|
||||
:param instance: instance object that was being migrated
|
||||
:param network_info: instance network information
|
||||
:param block_device_info: instance block device information
|
||||
:param destroy_disks:
|
||||
if true, destroy disks at destination during cleanup
|
||||
:param migrate_data: implementation specific params
|
||||
|
||||
"""
|
||||
del self.live_migrations[instance.uuid]
|
||||
|
||||
def check_instance_shared_storage_local(self, context, instance):
|
||||
"""Check if instance files located on shared storage.
|
||||
@@ -968,19 +1085,50 @@ class PowerVMDriver(driver.ComputeDriver):
|
||||
return self.disk_dvr.check_instance_shared_storage_cleanup(
|
||||
context, data)
|
||||
|
||||
def post_live_migration_at_destination(self, ctxt, instance_ref,
|
||||
def post_live_migration(self, context, instance, block_device_info,
|
||||
migrate_data=None):
|
||||
"""Post operation of live migration at source host.
|
||||
|
||||
:param context: security context
|
||||
:param instance: instance object that was migrated
:param block_device_info: instance block device information
|
||||
:param migrate_data: if not None, it is a dict which has data
|
||||
"""
|
||||
pass
|
||||
|
||||
def post_live_migration_at_source(self, context, instance, network_info):
|
||||
"""Unplug VIFs from networks at source.
|
||||
|
||||
:param context: security context
|
||||
:param instance: instance object reference
|
||||
:param network_info: instance network information
|
||||
"""
|
||||
LOG.info(_LI("Post live migration processing on source host."),
|
||||
instance=instance)
|
||||
mig = self.live_migrations[instance.uuid]
|
||||
mig.post_live_migration_at_source(network_info)
|
||||
|
||||
def post_live_migration_at_destination(self, context, instance,
|
||||
network_info,
|
||||
block_migration=False,
|
||||
block_device_info=None):
|
||||
"""Performs post operations on the destination host
|
||||
following a successful live migration.
|
||||
"""Post operation of live migration at destination host.
|
||||
|
||||
:param ctxt: security context
|
||||
:param instance_ref: migrated instance
|
||||
:param network_info: dictionary of network info for instance
|
||||
:param block_migration: boolean for block migration
|
||||
:param context: security context
|
||||
:param instance: instance object that is migrated
|
||||
:param network_info: instance network information
|
||||
:param block_migration: if true, post operation of block_migration.
|
||||
"""
|
||||
# TODO(IBM): Implement post migration
|
||||
LOG.info(_LI("Post live migration processing on destination host."),
|
||||
instance=instance)
|
||||
mig = self.live_migrations[instance.uuid]
|
||||
mig.instance = instance
|
||||
mig.post_live_migration_at_destination(network_info)
|
||||
del self.live_migrations[instance.uuid]
|
||||
|
||||
def unfilter_instance(self, instance, network_info):
|
||||
"""Stop filtering instance."""
|
||||
# No op for PowerVM
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
|
@@ -72,7 +72,8 @@ def build_host_resource_from_ms(ms_wrapper):
|
||||
data["supported_instances"] = POWERVM_SUPPORTED_INSTANCES
|
||||
|
||||
stats = {'proc_units': '%.2f' % float(proc_units),
|
||||
'proc_units_used': '%.2f' % pu_used
|
||||
'proc_units_used': '%.2f' % pu_used,
|
||||
'memory_region_size': ms_wrapper.memory_region_size
|
||||
}
|
||||
data["stats"] = stats
|
||||
|
||||
|
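The memory_region_size added to the host stats above is the logical memory block (LMB) size; LiveMigrationDest.check_destination, below, refuses to migrate when the source and destination values differ. A minimal standalone sketch of that comparison, with made-up values:

# Not the driver's code path; values are made up for illustration.
src_compute_info = {'stats': {'memory_region_size': 256}}
dst_compute_info = {'stats': {'memory_region_size': 128}}

if (src_compute_info['stats']['memory_region_size'] !=
        dst_compute_info['stats']['memory_region_size']):
    # The driver raises LiveMigrationMRS here.
    raise ValueError('LMB sizes differ; live migration is not allowed')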
nova_powervm/virt/powervm/live_migration.py (new file, 296 lines)
@@ -0,0 +1,296 @@
|
||||
# Copyright 2015 IBM Corp.
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import abc
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE
|
||||
from pypowervm.tasks import management_console as mgmt_task
|
||||
from pypowervm.tasks import migration as mig
|
||||
from pypowervm.wrappers import base_partition as pvm_bp
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
|
||||
from nova_powervm.virt.powervm import media
|
||||
from nova_powervm.virt.powervm import vm
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class LiveMigrationFailed(exception.NovaException):
|
||||
msg_fmt = _("Live migration of instance '%(name)s' failed for reason: "
|
||||
"%(reason)s")
|
||||
|
||||
|
||||
class LiveMigrationInvalidState(exception.NovaException):
|
||||
msg_fmt = _("Live migration of instance '%(name)s' failed because the "
|
||||
"migration state is: %(state)s")
|
||||
|
||||
|
||||
class LiveMigrationRMC(exception.NovaException):
|
||||
msg_fmt = _("Live migration of instance '%(name)s' failed because RMC is "
|
||||
"not active.")
|
||||
|
||||
|
||||
class LiveMigrationDLPAR(exception.NovaException):
|
||||
msg_fmt = _("Live migration of instance '%(name)s' failed because DLPAR "
|
||||
"is not available.")
|
||||
|
||||
|
||||
class LiveMigrationMRS(exception.NovaException):
|
||||
msg_fmt = _("Cannot migrate instance '%(name)s' because the memory region "
|
||||
"size of the source (%(source_mrs)d MB) does not "
|
||||
"match the memory region size of the target "
|
||||
"(%(target_mrs)d MB).")
|
||||
|
||||
|
||||
class LiveMigrationProcCompat(exception.NovaException):
|
||||
msg_fmt = _("Cannot migrate %(name)s because its "
|
||||
"processor compatibility mode %(mode)s "
|
||||
"is not in the list of modes \"%(modes)s\" "
|
||||
"supported by the target host.")
|
||||
|
||||
|
||||
class LiveMigrationCapacity(exception.NovaException):
|
||||
msg_fmt = _("Cannot migrate %(name)s because the host %(host)s only "
|
||||
"allows %(allowed)s concurrent migrations and %(running)s "
|
||||
"migrations are currently running.")
|
||||
|
||||
|
||||
def _verify_migration_capacity(host_w, instance):
|
||||
"""Check that the counts are valid for in progress and supported."""
|
||||
mig_stats = host_w.migration_data
|
||||
if (mig_stats['active_migrations_in_progress'] >=
|
||||
mig_stats['active_migrations_supported']):
|
||||
|
||||
raise LiveMigrationCapacity(
|
||||
name=instance.name, host=host_w.system_name,
|
||||
running=mig_stats['active_migrations_in_progress'],
|
||||
allowed=mig_stats['active_migrations_supported'])
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class LiveMigration(object):
|
||||
|
||||
def __init__(self, drvr, instance, src_data, dest_data):
|
||||
self.drvr = drvr
|
||||
self.instance = instance
|
||||
self.src_data = src_data # migration data from src host
|
||||
self.dest_data = dest_data # migration data from dest host
|
||||
|
||||
|
||||
class LiveMigrationDest(LiveMigration):
|
||||
|
||||
def __init__(self, drvr, instance):
|
||||
super(LiveMigrationDest, self).__init__(drvr, instance, {}, {})
|
||||
|
||||
@staticmethod
|
||||
def _get_dest_user_id():
|
||||
"""Get the user id to use on the target host."""
|
||||
# We'll always use wlp
|
||||
return 'wlp'
|
||||
|
||||
def check_destination(self, context, src_compute_info, dst_compute_info):
|
||||
"""Check the destination host
|
||||
|
||||
Here we check the destination host to see if it's capable of migrating
|
||||
the instance to this host.
|
||||
|
||||
:param context: security context
|
||||
:param src_compute_info: Info about the sending machine
|
||||
:param dst_compute_info: Info about the receiving machine
|
||||
:returns: a dict containing migration info
|
||||
"""
|
||||
|
||||
# Refresh the host wrapper since we're pulling values that may change
|
||||
self.drvr.host_wrapper.refresh()
|
||||
|
||||
src_stats = src_compute_info['stats']
|
||||
dst_stats = dst_compute_info['stats']
|
||||
# Check the LMB sizes for compatibility
|
||||
if (src_stats['memory_region_size'] !=
|
||||
dst_stats['memory_region_size']):
|
||||
raise LiveMigrationMRS(
|
||||
name=self.instance.name,
|
||||
source_mrs=src_stats['memory_region_size'],
|
||||
target_mrs=dst_stats['memory_region_size'])
|
||||
|
||||
_verify_migration_capacity(self.drvr.host_wrapper, self.instance)
|
||||
|
||||
self.dest_data['dest_ip'] = CONF.my_ip
|
||||
self.dest_data['dest_user_id'] = self._get_dest_user_id()
|
||||
self.dest_data['dest_sys_name'] = self.drvr.host_wrapper.system_name
|
||||
self.dest_data['dest_proc_compat'] = (
|
||||
','.join(self.drvr.host_wrapper.proc_compat_modes))
|
||||
|
||||
LOG.debug('src_compute_info: %s' % src_compute_info)
|
||||
LOG.debug('dst_compute_info: %s' % dst_compute_info)
|
||||
LOG.debug('Migration data: %s' % self.dest_data)
|
||||
|
||||
return self.dest_data
|
||||
|
||||
def pre_live_migration(self, context, block_device_info, network_info,
|
||||
disk_info, migrate_data):
|
||||
|
||||
"""Prepare an instance for live migration
|
||||
|
||||
:param context: security context
|
||||
:param instance: nova.objects.instance.Instance object
|
||||
:param block_device_info: instance block device information
|
||||
:param network_info: instance network information
|
||||
:param disk_info: instance disk information
|
||||
:param migrate_data: implementation specific data dict.
|
||||
"""
|
||||
LOG.debug('Running pre live migration on destination.',
|
||||
instance=self.instance)
|
||||
LOG.debug('Migration data: %s' % migrate_data)
|
||||
|
||||
# Set the ssh auth key if needed.
|
||||
src_mig_data = migrate_data.get('migrate_data', {})
|
||||
pub_key = src_mig_data.get('public_key')
|
||||
if pub_key is not None:
|
||||
mgmt_task.add_authorized_key(self.drvr.adapter, pub_key)
|
||||
|
||||
def post_live_migration_at_destination(self, network_info):
|
||||
"""Do post migration cleanup on destination host.
|
||||
|
||||
:param network_info: instance network information
|
||||
"""
|
||||
# The LPAR should be on this host now.
|
||||
LOG.debug("Post live migration at destination.",
|
||||
instance=self.instance)
|
||||
|
||||
|
||||
class LiveMigrationSrc(LiveMigration):
|
||||
|
||||
def __init__(self, drvr, instance, dest_data):
|
||||
super(LiveMigrationSrc, self).__init__(drvr, instance, {}, dest_data)
|
||||
|
||||
def check_source(self, context, block_device_info):
|
||||
"""Check the source host
|
||||
|
||||
Here we check the source host to see if it's capable of migrating
|
||||
the instance to the destination host. There may be conditions
|
||||
that can only be checked on the source side.
|
||||
|
||||
Also, get the instance ready for the migration by removing any
|
||||
virtual optical devices attached to the LPAR.
|
||||
|
||||
:param context: security context
|
||||
:param block_device_info: result of _get_instance_block_device_info
|
||||
:returns: a dict containing migration info
|
||||
"""
|
||||
|
||||
lpar_w = vm.get_instance_wrapper(
|
||||
self.drvr.adapter, self.instance, self.drvr.host_uuid)
|
||||
self.lpar_w = lpar_w
|
||||
|
||||
LOG.debug('Dest Migration data: %s' % self.dest_data)
|
||||
|
||||
# Only 'migrate_data' is sent to the destination on prelive call.
|
||||
mig_data = {'public_key': mgmt_task.get_public_key(self.drvr.adapter)}
|
||||
self.src_data['migrate_data'] = mig_data
|
||||
LOG.debug('Src Migration data: %s' % self.src_data)
|
||||
|
||||
# Check proc compatibility modes
|
||||
if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in
|
||||
self.dest_data['dest_proc_compat'].split(',')):
|
||||
raise LiveMigrationProcCompat(
|
||||
name=self.instance.name, mode=lpar_w.proc_compat_mode,
|
||||
modes=', '.join(self.dest_data['dest_proc_compat'].split(',')))
|
||||
|
||||
# Check for RMC connection
|
||||
self._check_dlpar_rmc(lpar_w)
|
||||
|
||||
if lpar_w.migration_state != 'Not_Migrating':
|
||||
raise LiveMigrationInvalidState(name=self.instance.name,
|
||||
state=lpar_w.migration_state)
|
||||
|
||||
# Check the number of migrations for capacity
|
||||
_verify_migration_capacity(self.drvr.host_wrapper, self.instance)
|
||||
|
||||
# Remove the VOpt devices
|
||||
LOG.debug('Removing VOpt.', instance=self.instance)
|
||||
media.ConfigDrivePowerVM(self.drvr.adapter, self.drvr.host_uuid
|
||||
).dlt_vopt(lpar_w.uuid)
|
||||
LOG.debug('Removing VOpt finished.', instance=self.instance)
|
||||
|
||||
return self.src_data
|
||||
|
||||
def live_migration(self, context, migrate_data):
|
||||
"""Start the live migration.
|
||||
|
||||
:param context: security context
|
||||
:param migrate_data: migration data from src and dest host.
|
||||
"""
|
||||
LOG.debug("Starting migration.", instance=self.instance)
|
||||
LOG.debug("Migrate data: %s" % migrate_data)
|
||||
try:
|
||||
# Migrate the LPAR!
|
||||
mig.migrate_lpar(self.lpar_w, self.dest_data['dest_sys_name'],
|
||||
validate_only=False,
|
||||
tgt_mgmt_svr=self.dest_data['dest_ip'],
|
||||
tgt_mgmt_usr=self.dest_data.get('dest_user_id'))
|
||||
|
||||
except Exception:
|
||||
LOG.error(_LE("Live migration failed."), instance=self.instance)
|
||||
raise
|
||||
finally:
|
||||
LOG.debug("Finished migration.", instance=self.instance)
|
||||
|
||||
def post_live_migration_at_source(self, network_info):
|
||||
"""Do post migration cleanup on source host.
|
||||
|
||||
:param network_info: instance network information
|
||||
"""
|
||||
LOG.debug("Post live migration at source.", instance=self.instance)
|
||||
|
||||
def rollback_live_migration(self, context):
|
||||
"""Roll back a failed migration.
|
||||
|
||||
:param context: security context
|
||||
"""
|
||||
LOG.debug("Rollback live migration.", instance=self.instance)
|
||||
# If an error happened then let's try to recover
|
||||
# In most cases the recovery will happen automatically, but if it
|
||||
# doesn't, then force it.
|
||||
try:
|
||||
self.lpar_w.refresh()
|
||||
if self.lpar_w.migration_state != 'Not_Migrating':
|
||||
mig.migrate_recover(self.lpar_w, force=True)
|
||||
|
||||
except Exception as ex:
|
||||
LOG.error(_LE("Migration recover failed with error: %s"), ex,
|
||||
instance=self.instance)
|
||||
finally:
|
||||
LOG.debug("Finished migration rollback.", instance=self.instance)
|
||||
|
||||
def _check_dlpar_rmc(self, lpar_w):
|
||||
"""See if the lpar is ready for LPM.
|
||||
|
||||
:param lpar_w: LogicalPartition wrapper
|
||||
"""
|
||||
dlpar, rmc = lpar_w.check_dlpar_connectivity()
|
||||
LOG.debug("Check dlpar: %s and RMC: %s." % (dlpar, rmc),
|
||||
instance=self.instance)
|
||||
|
||||
if rmc != pvm_bp.RMCState.ACTIVE:
|
||||
raise LiveMigrationRMC(name=self.instance.name)
|
||||
if not dlpar:
|
||||
raise LiveMigrationDLPAR(name=self.instance.name)
|