plugins/xenserver: Add '.py' extension
All Python files, except executables, should have a '.py' extension. This
wasn't being done for files in the 'plugins/xenserver' folder, likely because
it seems unnatural to call a "plugin" with a file extension. Begin working
towards a resolution for this by renaming all files to include a '.py'
extension.

To avoid forcing operators to upgrade the Nova service and per-host plugins
at the same time, continue to reference the old, non-'.py' filenames, using
symlinks to ensure the references continue to work. Once Ocata has been
released, these symlinks can be removed, the API version updated and the
references updated to include the '.py' extension.

Change-Id: Icf4cf2424c15276a6c01778fb92f76bbdc9b2068
Closes-bug: #1302831
parent 48a4fee10f
commit 6bb1fd776f
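A sketch of the compatibility scheme this commit sets up (illustrative only,
not the committed code): the Nova-side shim strips the new '.py' suffix before
calling into dom0, and the renamed plugins ship with symlinks from the old
names, so old and new Nova keep working against old and new plugin sets until
the symlinks are removed after Ocata.

    def strip_py(plugin):
        # Old hosts only have 'agent'; upgraded hosts have 'agent.py' plus
        # an 'agent' symlink, so the stripped name resolves on both.
        if plugin.endswith('.py'):
            return plugin[:-len('.py')]
        return plugin

    assert strip_py('agent.py') == 'agent'   # new-style caller, old host OK
    assert strip_py('xenhost') == 'xenhost'  # old-style name passes through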
@@ -68,7 +68,7 @@ class TestBittorrentStore(stubs.XenAPITestBaseNoDB):

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.session.call_plugin_serialized(
-           'bittorrent', 'download_vhd', **params)
+           'bittorrent.py', 'download_vhd', **params)
        self.mox.ReplayAll()

        self.store.download_image(self.context, self.session,
@@ -81,7 +81,7 @@ class TestGlanceStore(stubs.XenAPITestBaseNoDB):
                       lambda *a, **kw: ['uuid1'])

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
-       self.session.call_plugin_serialized('glance', 'download_vhd2',
+       self.session.call_plugin_serialized('glance.py', 'download_vhd2',
                                            **params)
        self.mox.ReplayAll()
@@ -102,22 +102,22 @@ class TestGlanceStore(stubs.XenAPITestBaseNoDB):
        self.flags(num_retries=2, group='glance')

        params.pop("endpoint")
-       calls = [mock.call('glance', 'download_vhd2',
+       calls = [mock.call('glance.py', 'download_vhd2',
                           endpoint='http://10.0.1.1:9292',
                           **params),
-                mock.call('glance', 'download_vhd2',
+                mock.call('glance.py', 'download_vhd2',
                           endpoint='http://10.0.0.1:9293',
                           **params)]
        log_calls = [mock.call(mock.ANY,
                               {'callback_result': 'http://10.0.1.1:9292',
                                'attempts': 3, 'attempt': 1,
                                'fn': 'download_vhd2',
-                               'plugin': 'glance'}),
+                               'plugin': 'glance.py'}),
                     mock.call(mock.ANY,
                               {'callback_result': 'http://10.0.0.1:9293',
                                'attempts': 3, 'attempt': 2,
                                'fn': 'download_vhd2',
-                               'plugin': 'glance'})]
+                               'plugin': 'glance.py'})]

        glance_api_servers = ['10.0.1.1:9292',
                              'http://10.0.0.1:9293']
@@ -149,7 +149,8 @@ class TestGlanceStore(stubs.XenAPITestBaseNoDB):
        params = self._get_upload_params(auto_disk_config, expected_os_type)

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
-       self.session.call_plugin_serialized('glance', 'upload_vhd2', **params)
+       self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
+                                           **params)

        self.mox.ReplayAll()
        self.store.upload_image(self.context, self.session, self.instance,
@@ -176,7 +177,7 @@ class TestGlanceStore(stubs.XenAPITestBaseNoDB):
        params = self._get_upload_params()

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
-       self.session.call_plugin_serialized('glance', 'upload_vhd2',
+       self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
                                            **params).AndRaise(RuntimeError)
        self.mox.ReplayAll()
@@ -194,21 +195,21 @@ class TestGlanceStore(stubs.XenAPITestBaseNoDB):
        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
        error_details = ["", "", "RetryableError", ""]
        error = self.session.XenAPI.Failure(details=error_details)
-       self.session.call_plugin_serialized('glance', 'upload_vhd2',
+       self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(self.context, self.instance,
                                                  error, (fake.Failure,
                                                          error,
                                                          mox.IgnoreArg()))
        time.sleep(0.5)
-       self.session.call_plugin_serialized('glance', 'upload_vhd2',
+       self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(self.context, self.instance,
                                                  error, (fake.Failure,
                                                          error,
                                                          mox.IgnoreArg()))
        time.sleep(1)
-       self.session.call_plugin_serialized('glance', 'upload_vhd2',
+       self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(self.context, self.instance,
                                                  error, (fake.Failure,
@@ -231,7 +232,7 @@ class TestGlanceStore(stubs.XenAPITestBaseNoDB):
        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
        error_details = ["", "task signaled", "", ""]
        error = self.session.XenAPI.Failure(details=error_details)
-       self.session.call_plugin_serialized('glance', 'upload_vhd2',
+       self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(self.context, self.instance,
                                                  error, (fake.Failure,
@@ -241,14 +242,14 @@ class TestGlanceStore(stubs.XenAPITestBaseNoDB):
        # Note(johngarbutt) XenServer 6.1 and later has this error
        error_details = ["", "signal: SIGTERM", "", ""]
        error = self.session.XenAPI.Failure(details=error_details)
-       self.session.call_plugin_serialized('glance', 'upload_vhd2',
+       self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(self.context, self.instance,
                                                  error, (fake.Failure,
                                                          error,
                                                          mox.IgnoreArg()))
        time.sleep(1)
-       self.session.call_plugin_serialized('glance', 'upload_vhd2',
+       self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
                                            **params)
        self.mox.ReplayAll()
@@ -19,7 +19,7 @@ from nova.tests.unit.virt.xenapi.plugins import plugin_test
class NovaPluginVersion(plugin_test.PluginTestBase):
    def setUp(self):
        super(NovaPluginVersion, self).setUp()
-       self.nova_plugin_version = self.load_plugin("nova_plugin_version")
+       self.nova_plugin_version = self.load_plugin('nova_plugin_version.py')

    def test_nova_plugin_version(self):
        session = 'fake_session'
@@ -169,10 +169,12 @@ class FakeSessionForVMTests(fake.SessionBase):
            "# Completed on Sun Nov 6 22:49:02 2011\n")

    def host_call_plugin(self, _1, _2, plugin, method, _5):
+       plugin = plugin.rstrip('.py')
+
        if plugin == 'glance' and method in ('download_vhd2'):
            root_uuid = _make_fake_vdi()
            return pickle.dumps(dict(root=dict(uuid=root_uuid)))
-       elif (plugin, method) == ("xenhost", "iptables_config"):
+       elif (plugin, method) == ('xenhost', 'iptables_config'):
            return fake.as_json(out=self._fake_iptables_save_output,
                                err='')
        else:
@@ -214,11 +216,13 @@ class FakeSessionForFirewallTests(FakeSessionForVMTests):
        self._test_case = test_case

    def host_call_plugin(self, _1, _2, plugin, method, args):
-       """Mock method four host_call_plugin to be used in unit tests
+       """Mock method for host_call_plugin to be used in unit tests
           for the dom0 iptables Firewall drivers for XenAPI

        """
-       if plugin == "xenhost" and method == "iptables_config":
+       plugin = plugin.rstrip('.py')
+
+       if plugin == 'xenhost' and method == 'iptables_config':
            # The command to execute is a json-encoded list
            cmd_args = args.get('cmd_args', None)
            cmd = jsonutils.loads(cmd_args)
@@ -362,9 +362,9 @@ class CallAgentTestCase(AgentTestCaseBase):
            'timeout': '300',
        }
        expected_args.update(addl_args)
        session.VM.get_domid.assert_called_once_with("vm_ref")
-       session.call_plugin.assert_called_once_with("agent", "method",
+       session.call_plugin.assert_called_once_with("agent.py", "method",
                                                    expected_args)
        session.VM.get_domid.assert_called_once_with("vm_ref")

    def _call_agent_setup(self, session, mock_uuid,
                          returncode='0', success_codes=None,
@@ -389,7 +389,7 @@ class CallAgentTestCase(AgentTestCaseBase):
            'dom_id': '42',
            'timeout': '30',
        }
-       session.call_plugin.assert_called_once_with("agent", "method",
+       session.call_plugin.assert_called_once_with("agent.py", "method",
                                                    expected_args)
        session.VM.get_domid.assert_called_once_with("vm_ref")
@@ -249,7 +249,11 @@ class FetchVhdImageTestCase(VMUtilsTestBase):
        self.mox.StubOutWithMock(
            self.session, 'call_plugin_serialized_with_retry')
        func = self.session.call_plugin_serialized_with_retry(
-           'glance', 'download_vhd2', 0, mox.IgnoreArg(), mox.IgnoreArg(),
+           'glance.py',
+           'download_vhd2',
+           0,
+           mox.IgnoreArg(),
+           mox.IgnoreArg(),
            extra_headers={'X-Auth-Token': 'auth_token',
                           'X-Roles': '',
                           'X-Tenant-Id': None,
@@ -268,7 +272,7 @@ class FetchVhdImageTestCase(VMUtilsTestBase):
        self.mox.StubOutWithMock(
            self.session, 'call_plugin_serialized')
        func = self.session.call_plugin_serialized(
-           'bittorrent', 'download_vhd',
+           'bittorrent.py', 'download_vhd',
            image_id='image_id',
            uuid_stack=["uuid_stack"],
            sr_path='sr_path',
@@ -1373,14 +1377,14 @@ class CreateKernelRamdiskTestCase(VMUtilsTestBase):
        args_kernel['cached-image'] = kernel_id
        args_kernel['new-image-uuid'] = "fake_uuid1"
        uuid.uuid4().AndReturn("fake_uuid1")
-       self.session.call_plugin('kernel', 'create_kernel_ramdisk',
+       self.session.call_plugin('kernel.py', 'create_kernel_ramdisk',
                                 args_kernel).AndReturn("k")

        args_ramdisk = {}
        args_ramdisk['cached-image'] = ramdisk_id
        args_ramdisk['new-image-uuid'] = "fake_uuid2"
        uuid.uuid4().AndReturn("fake_uuid2")
-       self.session.call_plugin('kernel', 'create_kernel_ramdisk',
+       self.session.call_plugin('kernel.py', 'create_kernel_ramdisk',
                                 args_ramdisk).AndReturn("r")

        self.mox.ReplayAll()
@@ -1396,7 +1400,7 @@ class CreateKernelRamdiskTestCase(VMUtilsTestBase):
        args_kernel['cached-image'] = kernel_id
        args_kernel['new-image-uuid'] = "fake_uuid1"
        uuid.uuid4().AndReturn("fake_uuid1")
-       self.session.call_plugin('kernel', 'create_kernel_ramdisk',
+       self.session.call_plugin('kernel.py', 'create_kernel_ramdisk',
                                 args_kernel).AndReturn("")

        kernel = {"kernel": {"file": "k"}}
@@ -1419,7 +1423,7 @@ class CreateKernelRamdiskTestCase(VMUtilsTestBase):

        if cache_images == 'all':
            uuid.uuid4().AndReturn("fake_uuid1")
-           self.session.call_plugin('kernel', 'create_kernel_ramdisk',
+           self.session.call_plugin('kernel.py', 'create_kernel_ramdisk',
                                     args_kernel).AndReturn("cached_image")
        else:
            kernel = {"kernel": {"file": "new_image", "uuid": None}}
@@ -2034,7 +2038,7 @@ class ImportMigratedDisksTestCase(VMUtilsTestBase):
        expected = {'uuid': "a", 'ref': "vdi_ref"}
        self.assertEqual(expected, result)
        mock_get_sr_path.assert_called_once_with(session)
-       session.call_plugin_serialized.assert_called_once_with('migration',
+       session.call_plugin_serialized.assert_called_once_with('migration.py',
            'move_vhds_into_sr', instance_uuid='chain_label',
            sr_path='sr_path', uuid_stack=mock.ANY)
        mock_scan_sr.assert_called_once_with(session)
@@ -2055,8 +2059,8 @@ class ImportMigratedDisksTestCase(VMUtilsTestBase):
class MigrateVHDTestCase(VMUtilsTestBase):
    def _assert_transfer_called(self, session, label):
        session.call_plugin_serialized.assert_called_once_with(
-           'migration', 'transfer_vhd', instance_uuid=label, host="dest",
-           vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)
+           'migration.py', 'transfer_vhd', instance_uuid=label, host="dest",
+           vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)

    def test_migrate_vhd_root(self):
        session = mock.Mock()
@@ -769,7 +769,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
                   group='xenserver')
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
        self.conn._session.call_plugin_serialized(
-           'ipxe', 'inject', '/sr/path', mox.IgnoreArg(),
+           'ipxe.py', 'inject', '/sr/path', mox.IgnoreArg(),
            'http://boot.example.com', '192.168.1.100', '255.255.255.0',
            '192.168.1.1', '192.168.1.3', '/root/mkisofs')
        self.conn._session.call_plugin_serialized('partition_utils.py',
@@ -4076,7 +4076,7 @@ class XenAPISessionTestCase(test.NoDBTestCase):
        session.PLUGIN_REQUIRED_VERSION = '2.4'

        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
-       session.call_plugin_serialized('nova_plugin_version', 'get_version',
+       session.call_plugin_serialized('nova_plugin_version.py', 'get_version',
                                       ).AndReturn("2.4")

        self.mox.ReplayAll()
@@ -4089,12 +4089,23 @@ class XenAPISessionTestCase(test.NoDBTestCase):
        session.PLUGIN_REQUIRED_VERSION = '2.4'

        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
-       session.call_plugin_serialized('nova_plugin_version', 'get_version',
+       session.call_plugin_serialized('nova_plugin_version.py', 'get_version',
                                       ).AndReturn("2.5")

        self.mox.ReplayAll()
        session._verify_plugin_version()

+   def test_verify_plugin_version_python_extensions(self):
+       """Validate that 2.0 is equivalent to 1.8."""
+       session = self._get_mock_xapisession({})
+       session.XenAPI = xenapi_fake.FakeXenAPI()
+
+       session.PLUGIN_REQUIRED_VERSION = '2.0'
+
+       with mock.patch.object(session, 'call_plugin_serialized',
+                              return_value='1.8'):
+           session._verify_plugin_version()
+
    def test_verify_plugin_version_bad_maj(self):
        session = self._get_mock_xapisession({})
        session.XenAPI = xenapi_fake.FakeXenAPI()
@@ -4102,7 +4113,7 @@ class XenAPISessionTestCase(test.NoDBTestCase):
        session.PLUGIN_REQUIRED_VERSION = '2.4'

        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
-       session.call_plugin_serialized('nova_plugin_version', 'get_version',
+       session.call_plugin_serialized('nova_plugin_version.py', 'get_version',
                                       ).AndReturn("3.0")

        self.mox.ReplayAll()
@@ -4115,7 +4126,7 @@ class XenAPISessionTestCase(test.NoDBTestCase):
        session.PLUGIN_REQUIRED_VERSION = '2.4'

        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
-       session.call_plugin_serialized('nova_plugin_version', 'get_version',
+       session.call_plugin_serialized('nova_plugin_version.py', 'get_version',
                                       ).AndReturn("2.3")

        self.mox.ReplayAll()
@@ -4127,7 +4138,7 @@ class XenAPISessionTestCase(test.NoDBTestCase):
        # Import the plugin to extract its version
        path = os.path.dirname(__file__)
        rel_path_elem = "../../../../../plugins/xenserver/xenapi/etc/xapi.d/" \
-                       "plugins/nova_plugin_version"
+                       "plugins/nova_plugin_version.py"
        for elem in rel_path_elem.split('/'):
            path = os.path.join(path, elem)
        path = os.path.realpath(path)
@@ -69,7 +69,7 @@ def _call_agent(session, instance, vm_ref, method, addl_args=None,
        args.update(addl_args)

    try:
-       ret = session.call_plugin('agent', method, args)
+       ret = session.call_plugin('agent.py', method, args)
    except session.XenAPI.Failure as e:
        err_msg = e.details[-1].splitlines()[-1]
        if 'TIMEOUT:' in err_msg:
@@ -73,7 +73,7 @@ class XenAPISession(object):
    # changed in development environments.
    # MAJOR VERSION: Incompatible changes with the plugins
    # MINOR VERSION: Compatible changes, new plguins, etc
-   PLUGIN_REQUIRED_VERSION = '1.7'
+   PLUGIN_REQUIRED_VERSION = '1.8'

    def __init__(self, url, user, pw):
        version_string = version.version_string_with_package()
@@ -107,7 +107,12 @@ class XenAPISession(object):
    def _verify_plugin_version(self):
        requested_version = self.PLUGIN_REQUIRED_VERSION
        current_version = self.call_plugin_serialized(
-           'nova_plugin_version', 'get_version')
+           'nova_plugin_version.py', 'get_version')
+
+       # v2.0 is the same as v1.8, with no version bumps. Remove this once
+       # Ocata is released
+       if requested_version == '2.0' and current_version == '1.8':
+           return

        if not versionutils.is_compatible(requested_version, current_version):
            raise self.XenAPI.Failure(
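The check above leans on versionutils.is_compatible() from oslo.utils, which
by default treats versions as compatible when the major numbers match and the
current minor is at least the requested one. A rough sketch of those semantics
(an approximation, not the oslo.utils source), consistent with the test cases
later in this diff ('2.4' accepts '2.4' and '2.5' but rejects '2.3' and
'3.0'):

    def is_compatible_sketch(requested, current):
        req = [int(p) for p in requested.split('.')]
        cur = [int(p) for p in current.split('.')]
        if cur[0] != req[0]:
            return False         # major version bump is incompatible
        return cur[1] >= req[1]  # newer minor versions are acceptable

    assert is_compatible_sketch('2.4', '2.5')
    assert not is_compatible_sketch('2.4', '2.3')
    assert not is_compatible_sketch('2.4', '3.0')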
@@ -202,6 +207,17 @@ class XenAPISession(object):
            # the plugin gets executed on the right host when using XS pools
            args['host_uuid'] = self.host_uuid

+       # TODO(sfinucan): Once the required plugin version is bumped to v2.0,
+       # we can assume that all files will have a '.py' extension. Until then,
+       # handle hosts without this extension by rewriting all calls to plugins
+       # to exclude the '.py' extension. This is made possible through the
+       # temporary inclusion of symlinks to plugins.
+       # NOTE(sfinucan): 'partition_utils.py' was the only plugin with a '.py'
+       # extension before this change was enacted, hence this plugin is
+       # excluded
+       if not plugin == 'partition_utils.py':
+           plugin = plugin.rstrip('.py')
+
        with self._get_session() as session:
            return self._unwrap_plugin_exceptions(
                session.xenapi.host.call_plugin,
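One caveat about the shim above: str.rstrip() takes a set of characters, not
a suffix, so plugin.rstrip('.py') strips any trailing run of '.', 'p' and 'y'.
That happens to be harmless for every plugin name touched by this commit
(none of them end in 'p', 'y' or '.' once the extension is gone), but it is
not a general suffix strip. A suffix-safe alternative, shown only for
illustration and still valid Python 2.4:

    assert 'agent.py'.rstrip('.py') == 'agent'  # fine for this name
    assert 'happy.py'.rstrip('.py') == 'ha'     # over-strips!

    def strip_suffix(name, suffix='.py'):
        # Remove the literal suffix, and only the suffix.
        if name.endswith(suffix):
            return name[:-len(suffix)]
        return name

    assert strip_suffix('happy.py') == 'happy'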
@@ -779,16 +779,17 @@ class SessionBase(object):
        return base64.b64encode(zlib.compress("dom_id: %s" % dom_id))

    def _plugin_nova_plugin_version_get_version(self, method, args):
-       return pickle.dumps("1.7")
+       return pickle.dumps("1.8")

    def _plugin_xenhost_query_gc(self, method, args):
        return pickle.dumps("False")

-   def _plugin_partition_utils_dot_py_make_partition(self, method, args):
+   def _plugin_partition_utils_make_partition(self, method, args):
        return pickle.dumps(None)

    def host_call_plugin(self, _1, _2, plugin, method, args):
-       plugin = plugin.replace('.', '_dot_')
+       plugin = plugin.rstrip('.py')
+
        func = getattr(self, '_plugin_%s_%s' % (plugin, method), None)
        if not func:
            raise Exception('No simulation in host_call_plugin for %s,%s' %
@@ -32,7 +32,7 @@ class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
        args = {}
        args.update(map(lambda x: (x, str(kwargs[x])), kwargs))
        args['cmd_args'] = jsonutils.dumps(cmd)
-       ret = self._session.call_plugin('xenhost', 'iptables_config', args)
+       ret = self._session.call_plugin('xenhost.py', 'iptables_config', args)
        json_ret = jsonutils.loads(ret)
        return (json_ret['out'], json_ret['err'])
@@ -188,7 +188,7 @@ class HostState(object):
                          " a pci device for passthrough"))

        type_pci = self._session.call_plugin_serialized(
-           'xenhost', 'get_pci_type', slot_id[0])
+           'xenhost.py', 'get_pci_type', slot_id[0])

        return {'label': '_'.join(['label',
                                   vendor_id[0],
|
||||
# Devices are separated by a blank line. That is why we
|
||||
# use "\n\n" as separator.
|
||||
lspci_out = self._session.call_plugin_serialized(
|
||||
'xenhost', 'get_pci_device_details')
|
||||
'xenhost.py', 'get_pci_device_details')
|
||||
pci_list = lspci_out.split("\n\n")
|
||||
|
||||
# For each device of the list, check if it uses the pciback
|
||||
@ -356,7 +356,7 @@ def call_xenhost(session, method, arg_dict):
|
||||
"""
|
||||
# Create a task ID as something that won't match any instance ID
|
||||
try:
|
||||
result = session.call_plugin('xenhost', method, args=arg_dict)
|
||||
result = session.call_plugin('xenhost.py', method, args=arg_dict)
|
||||
if not result:
|
||||
return ''
|
||||
return jsonutils.loads(result)
|
||||
|
@@ -72,7 +72,7 @@ class BittorrentStore(object):
        params['torrent_url'] = lookup_fn(image_id)

        vdis = session.call_plugin_serialized(
-           'bittorrent', 'download_vhd', **params)
+           'bittorrent.py', 'download_vhd', **params)

        return vdis
@@ -51,7 +51,8 @@ class GlanceStore(object):
        cb = functools.partial(retry_cb, context, instance)

        return session.call_plugin_serialized_with_retry(
-           'glance', fn, CONF.glance.num_retries, pick_glance, cb, **params)
+           'glance.py', fn, CONF.glance.num_retries, pick_glance, cb,
+           **params)

    def _make_params(self, context, session, image_id):
        return {'image_id': image_id,
@@ -169,7 +169,7 @@ class ResourcePool(object):
                    'master_addr': self._host_addr,
                    'master_user': CONF.xenserver.connection_username,
                    'master_pass': CONF.xenserver.connection_password, }
-           self._session.call_plugin('xenhost', 'host_join', args)
+           self._session.call_plugin('xenhost.py', 'host_join', args)
        except self._session.XenAPI.Failure as e:
            LOG.error(_LE("Pool-Join failed: %s"), e)
            raise exception.AggregateError(aggregate_id=aggregate_id,
@@ -508,4 +508,5 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
        args = {'cmd': cmd,
                'args': cmd_args
                }
-       self._session.call_plugin_serialized('xenhost', 'network_config', args)
+       self._session.call_plugin_serialized(
+           'xenhost.py', 'network_config', args)
@@ -490,7 +490,7 @@ def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
    with snapshot_attached_here(
            session, instance, vm_ref, label) as vdi_uuids:
        imported_vhds = session.call_plugin_serialized(
-           'workarounds', 'safe_copy_vdis',
+           'workarounds.py', 'safe_copy_vdis',
            sr_path=get_sr_path(session, sr_ref=sr_ref),
            vdi_uuids=vdi_uuids, uuid_stack=_make_uuid_stack())
@@ -1158,7 +1158,8 @@ def _create_kernel_image(context, session, instance, name_label, image_id,
        args = {}
        args['cached-image'] = image_id
        args['new-image-uuid'] = str(uuid.uuid4())
-       filename = session.call_plugin('kernel', 'create_kernel_ramdisk', args)
+       filename = session.call_plugin('kernel.py', 'create_kernel_ramdisk',
+                                      args)

    if filename == "":
        return _fetch_disk_image(context, session, instance, name_label,
@@ -1195,7 +1196,7 @@ def destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
    if args:
        LOG.debug("Removing kernel/ramdisk files from dom0",
                  instance=instance)
-       session.call_plugin('kernel', 'remove_kernel_ramdisk', args)
+       session.call_plugin('kernel.py', 'remove_kernel_ramdisk', args)


def _get_image_vdi_label(image_id):
@@ -1550,7 +1551,7 @@ def _fetch_disk_image(context, session, instance, name_label, image_id,
    args['image-size'] = str(vdi_size)
    if CONF.xenserver.cache_images != 'none':
        args['cached-image'] = image_id
-   filename = session.call_plugin('kernel', 'copy_vdi', args)
+   filename = session.call_plugin('kernel.py', 'copy_vdi', args)

    # Remove the VDI as it is not needed anymore.
    destroy_vdi(session, vdi_ref)
@@ -1793,7 +1794,7 @@ def compile_diagnostics(vm_rec):


def fetch_bandwidth(session):
-   bw = session.call_plugin_serialized('bandwidth', 'fetch_all_bandwidth')
+   bw = session.call_plugin_serialized('bandwidth.py', 'fetch_all_bandwidth')
    return bw
@@ -2547,7 +2548,7 @@ def _import_migrated_vhds(session, instance, chain_label, disk_type,
    """Move and possibly link VHDs via the XAPI plugin."""
    # TODO(johngarbutt) tidy up plugin params
    imported_vhds = session.call_plugin_serialized(
-       'migration', 'move_vhds_into_sr', instance_uuid=chain_label,
+       'migration.py', 'move_vhds_into_sr', instance_uuid=chain_label,
        sr_path=get_sr_path(session), uuid_stack=_make_uuid_stack())

    # Now we rescan the SR so we find the VHDs
@@ -2573,7 +2574,7 @@ def migrate_vhd(session, instance, vdi_uuid, dest, sr_path, seq_num,
    chain_label = instance['uuid'] + "_ephemeral_%d" % ephemeral_number
    try:
        # TODO(johngarbutt) tidy up plugin params
-       session.call_plugin_serialized('migration', 'transfer_vhd',
+       session.call_plugin_serialized('migration.py', 'transfer_vhd',
            instance_uuid=chain_label, host=dest, vdi_uuid=vdi_uuid,
            sr_path=sr_path, seq_num=seq_num)
    except session.XenAPI.Failure:
@@ -2638,7 +2639,7 @@ def handle_ipxe_iso(session, instance, cd_vdi, network_info):
    dns = subnet['dns'][0]['address']

    try:
-       session.call_plugin_serialized("ipxe", "inject", sr_path,
+       session.call_plugin_serialized('ipxe.py', 'inject', sr_path,
            cd_vdi['uuid'], boot_menu_url, ip_address, netmask,
            gateway, dns, CONF.xenserver.ipxe_mkisofs_cmd)
    except session.XenAPI.Failure as exc:
|
||||
dom_id = self._get_last_dom_id(instance, check_rescue=True)
|
||||
|
||||
try:
|
||||
raw_console_data = self._session.call_plugin('console',
|
||||
raw_console_data = self._session.call_plugin('console.py',
|
||||
'get_console_log', {'dom_id': dom_id})
|
||||
except self._session.XenAPI.Failure:
|
||||
LOG.exception(_LE("Guest does not have a console available"))
|
||||
@ -2222,7 +2222,7 @@ class VMOps(object):
|
||||
except AttributeError:
|
||||
config_value = None
|
||||
try:
|
||||
config_value = self._make_plugin_call('config_file',
|
||||
config_value = self._make_plugin_call('config_file.py',
|
||||
'get_val',
|
||||
key='relax-xsm-sr-check')
|
||||
except Exception:
|
||||
|
plugins/xenserver/xenapi/etc/xapi.d/plugins/_bittorrent_seeder (deleted, 129 lines)
@@ -1,129 +0,0 @@
(contents identical to the new _bittorrent_seeder.py listed below; omitted
here to avoid duplication)
plugins/xenserver/xenapi/etc/xapi.d/plugins/_bittorrent_seeder (new symbolic link, 1 line)
@@ -0,0 +1 @@
_bittorrent_seeder.py

plugins/xenserver/xenapi/etc/xapi.d/plugins/_bittorrent_seeder.py (new executable file, 129 lines)
@@ -0,0 +1,129 @@
#!/usr/bin/env python

# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true

"""Seed a bittorent image. This file should not be executed directly, rather it
should be kicked off by the `bittorent` dom0 plugin."""

import os
import sys
import time

import libtorrent

import pluginlib_nova


pluginlib_nova.configure_logging('_bittorrent_seeder')
logging = pluginlib_nova.logging


def _daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
    """Daemonize the current process.

    Do the UNIX double-fork magic, see Stevens' "Advanced Programming
    in the UNIX Environment" for details (ISBN 0201563177).

    Source: http://www.jejik.com/articles/2007/02/
            a_simple_unix_linux_daemon_in_python/
    """
    # 1st fork
    try:
        pid = os.fork()
        if pid > 0:
            # first parent returns
            return False
    except OSError, e:  # noqa
        logging.error("fork #1 failed: %d (%s)" % (
            e.errno, e.strerror))
        return

    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0)

    # 2nd fork
    try:
        pid = os.fork()
        if pid > 0:
            # second parent exits
            sys.exit(0)
    except OSError, e:  # noqa
        logging.error("fork #2 failed: %d (%s)" % (
            e.errno, e.strerror))
        return

    # redirect standard file descriptors
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(stdin, 'r')
    so = open(stdout, 'a+')
    se = open(stderr, 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    return True


def main(torrent_path, seed_cache_path, torrent_seed_duration,
         torrent_listen_port_start, torrent_listen_port_end):
    seed_time = time.time() + torrent_seed_duration
    logging.debug("Seeding '%s' for %d secs" % (
        torrent_path, torrent_seed_duration))

    child = _daemonize()
    if not child:
        return

    # At this point we're the daemonized child...
    session = libtorrent.session()
    session.listen_on(torrent_listen_port_start, torrent_listen_port_end)

    torrent_file = open(torrent_path, 'rb')
    try:
        torrent_data = torrent_file.read()
    finally:
        torrent_file.close()

    decoded_data = libtorrent.bdecode(torrent_data)

    info = libtorrent.torrent_info(decoded_data)
    torrent = session.add_torrent(
        info, seed_cache_path,
        storage_mode=libtorrent.storage_mode_t.storage_mode_sparse)
    try:
        while time.time() < seed_time:
            time.sleep(5)
    finally:
        session.remove_torrent(torrent)

    logging.debug("Seeding of '%s' finished" % torrent_path)


if __name__ == "__main__":
    (torrent_path, seed_cache_path, torrent_seed_duration,
     torrent_listen_port_start, torrent_listen_port_end) = sys.argv[1:]
    torrent_seed_duration = int(torrent_seed_duration)
    torrent_listen_port_start = int(torrent_listen_port_start)
    torrent_listen_port_end = int(torrent_listen_port_end)

    main(torrent_path, seed_cache_path, torrent_seed_duration,
         torrent_listen_port_start, torrent_listen_port_end)
plugins/xenserver/xenapi/etc/xapi.d/plugins/agent (deleted, 266 lines)
@@ -1,266 +0,0 @@
(contents identical to the new agent.py listed below, apart from the one
added TODO(sfinucan) comment about removing the symlinks; omitted here to
avoid duplication)
plugins/xenserver/xenapi/etc/xapi.d/plugins/agent (new symbolic link, 1 line)
@@ -0,0 +1 @@
agent.py

plugins/xenserver/xenapi/etc/xapi.d/plugins/agent.py (new executable file, 268 lines)
@@ -0,0 +1,268 @@
#!/usr/bin/env python

# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true

# TODO(sfinucan): Remove the symlinks in this folder once Ocata is released

#
# XenAPI plugin for reading/writing information to xenstore
#

import base64
import commands  # noqa
try:
    import json
except ImportError:
    import simplejson as json
import time

import XenAPIPlugin

import pluginlib_nova
pluginlib_nova.configure_logging("agent")
import xenstore


DEFAULT_TIMEOUT = 30
PluginError = pluginlib_nova.PluginError
_ = pluginlib_nova._


class TimeoutError(StandardError):
    pass


class RebootDetectedError(StandardError):
    pass


def version(self, arg_dict):
    """Get version of agent."""
    timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT))
    arg_dict["value"] = json.dumps({"name": "version", "value": "agent"})
    request_id = arg_dict["id"]
    arg_dict["path"] = "data/host/%s" % request_id
    xenstore.write_record(self, arg_dict)
    try:
        resp = _wait_for_agent(self, request_id, arg_dict, timeout)
    except TimeoutError, e:  # noqa
        raise PluginError(e)
    return resp


def key_init(self, arg_dict):
    """Handles the Diffie-Hellman key exchange with the agent to
    establish the shared secret key used to encrypt/decrypt sensitive
    info to be passed, such as passwords. Returns the shared
    secret key value.
    """
    timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT))
    # WARNING: Some older Windows agents will crash if the public key isn't
    # a string
    pub = arg_dict["pub"]
    arg_dict["value"] = json.dumps({"name": "keyinit", "value": pub})
    request_id = arg_dict["id"]
    arg_dict["path"] = "data/host/%s" % request_id
    xenstore.write_record(self, arg_dict)
    try:
        resp = _wait_for_agent(self, request_id, arg_dict, timeout)
    except TimeoutError, e:  # noqa
        raise PluginError(e)
    return resp


def password(self, arg_dict):
    """Writes a request to xenstore that tells the agent to set
    the root password for the given VM. The password should be
    encrypted using the shared secret key that was returned by a
    previous call to key_init. The encrypted password value should
    be passed as the value for the 'enc_pass' key in arg_dict.
    """
    timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT))
    enc_pass = arg_dict["enc_pass"]
    arg_dict["value"] = json.dumps({"name": "password", "value": enc_pass})
    request_id = arg_dict["id"]
    arg_dict["path"] = "data/host/%s" % request_id
    xenstore.write_record(self, arg_dict)
    try:
        resp = _wait_for_agent(self, request_id, arg_dict, timeout)
    except TimeoutError, e:  # noqa
        raise PluginError(e)
    return resp


def resetnetwork(self, arg_dict):
    """Writes a request to xenstore that tells the agent
    to reset networking.
    """
    timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT))
    arg_dict['value'] = json.dumps({'name': 'resetnetwork', 'value': ''})
    request_id = arg_dict['id']
    arg_dict['path'] = "data/host/%s" % request_id
    xenstore.write_record(self, arg_dict)
    try:
        resp = _wait_for_agent(self, request_id, arg_dict, timeout)
    except TimeoutError, e:  # noqa
        raise PluginError(e)
    return resp


def inject_file(self, arg_dict):
    """Expects a file path and the contents of the file to be written. Both
    should be base64-encoded in order to eliminate errors as they are passed
    through the stack. Writes that information to xenstore for the agent,
    which will decode the file and intended path, and create it on the
    instance. The original agent munged both of these into a single entry;
    the new agent keeps them separate. We will need to test for the new agent,
    and write the xenstore records to match the agent version. We will also
    need to test to determine if the file injection method on the agent has
    been disabled, and raise a NotImplemented error if that is the case.
    """
    timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT))
    b64_path = arg_dict["b64_path"]
    b64_file = arg_dict["b64_contents"]
    request_id = arg_dict["id"]
    agent_features = _get_agent_features(self, arg_dict)
    if "file_inject" in agent_features:
        # New version of the agent. Agent should receive a 'value'
        # key whose value is a dictionary containing 'b64_path' and
        # 'b64_file'. See old version below.
        arg_dict["value"] = json.dumps({"name": "file_inject",
                "value": {"b64_path": b64_path, "b64_file": b64_file}})
    elif "injectfile" in agent_features:
        # Old agent requires file path and file contents to be
        # combined into one base64 value.
        raw_path = base64.b64decode(b64_path)
        raw_file = base64.b64decode(b64_file)
        new_b64 = base64.b64encode("%s,%s" % (raw_path, raw_file))
        arg_dict["value"] = json.dumps({"name": "injectfile",
                "value": new_b64})
    else:
        # Either the methods don't exist in the agent, or they
        # have been disabled.
        raise NotImplementedError(_("NOT IMPLEMENTED: Agent does not"
                                    " support file injection."))
    arg_dict["path"] = "data/host/%s" % request_id
    xenstore.write_record(self, arg_dict)
    try:
        resp = _wait_for_agent(self, request_id, arg_dict, timeout)
    except TimeoutError, e:  # noqa
        raise PluginError(e)
    return resp


def agent_update(self, arg_dict):
    """Expects an URL and md5sum of the contents, then directs the agent to
    update itself.
    """
    timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT))
    request_id = arg_dict["id"]
    url = arg_dict["url"]
    md5sum = arg_dict["md5sum"]
    arg_dict["value"] = json.dumps({"name": "agentupdate",
                                    "value": "%s,%s" % (url, md5sum)})
    arg_dict["path"] = "data/host/%s" % request_id
    xenstore.write_record(self, arg_dict)
    try:
        resp = _wait_for_agent(self, request_id, arg_dict, timeout)
    except TimeoutError, e:  # noqa
        raise PluginError(e)
    return resp


def _get_agent_features(self, arg_dict):
    """Return an array of features that an agent supports."""
    timeout = int(arg_dict.pop('timeout', DEFAULT_TIMEOUT))
    tmp_id = commands.getoutput("uuidgen")
    dct = {}
    dct.update(arg_dict)
    dct["value"] = json.dumps({"name": "features", "value": ""})
    dct["path"] = "data/host/%s" % tmp_id
    xenstore.write_record(self, dct)
    try:
        resp = _wait_for_agent(self, tmp_id, dct, timeout)
    except TimeoutError, e:  # noqa
        raise PluginError(e)
    response = json.loads(resp)
    if response['returncode'] != 0:
        return response["message"].split(",")
    else:
        return {}


def _wait_for_agent(self, request_id, arg_dict, timeout):
    """Periodically checks xenstore for a response from the agent.
    The request is always written to 'data/host/{id}', and
    the agent's response for that request will be in 'data/guest/{id}'.
    If no value appears from the agent within the timeout specified,
    the original request is deleted and a TimeoutError is raised.
    """
    arg_dict["path"] = "data/guest/%s" % request_id
    arg_dict["ignore_missing_path"] = True
    start = time.time()
    reboot_detected = False
    while time.time() - start < timeout:
        ret = xenstore.read_record(self, arg_dict)
        # Note: the response for None with be a string that includes
        # double quotes.
        if ret != '"None"':
            # The agent responded
            return ret

        time.sleep(.5)

        # NOTE(johngarbutt) If we can't find this domid, then
        # the VM has rebooted, so we must trigger domid refresh.
        # Check after the sleep to give xenstore time to update
        # after the VM reboot.
        exists_args = {
            "dom_id": arg_dict["dom_id"],
            "path": "name",
        }
        dom_id_is_present = xenstore.record_exists(exists_args)
        if not dom_id_is_present:
            reboot_detected = True
            break

    # No response within the timeout period; bail out
    # First, delete the request record
    arg_dict["path"] = "data/host/%s" % request_id
    xenstore.delete_record(self, arg_dict)

    if reboot_detected:
        raise RebootDetectedError(_("REBOOT: dom_id %s no longer "
                                    "present") % arg_dict["dom_id"])
    else:
        raise TimeoutError(_("TIMEOUT: No response from agent within"
                             " %s seconds.") % timeout)


if __name__ == "__main__":
    XenAPIPlugin.dispatch(
        {"version": version,
         "key_init": key_init,
         "password": password,
         "resetnetwork": resetnetwork,
         "inject_file": inject_file,
         "agentupdate": agent_update})
plugins/xenserver/xenapi/etc/xapi.d/plugins/bandwidth (deleted, 64 lines)
@@ -1,64 +0,0 @@
(contents identical to the new bandwidth.py listed below; omitted here to
avoid duplication)
1
plugins/xenserver/xenapi/etc/xapi.d/plugins/bandwidth
Symbolic link
@ -0,0 +1 @@
bandwidth.py
64
plugins/xenserver/xenapi/etc/xapi.d/plugins/bandwidth.py
Executable file
@ -0,0 +1,64 @@
#!/usr/bin/env python

# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

"""Fetch Bandwidth data from VIF network devices."""

import utils

import pluginlib_nova

import re


pluginlib_nova.configure_logging('bandwidth')


def _read_proc_net():
    f = open('/proc/net/dev', 'r')
    try:
        return f.readlines()
    finally:
        f.close()


def _get_bandwitdth_from_proc():
    devs = [l.strip() for l in _read_proc_net()]
    # ignore headers
    devs = devs[2:]
    vif_pattern = re.compile("^vif(\d+)\.(\d+)")
    dlist = [d.split(':', 1) for d in devs if vif_pattern.match(d)]
    devmap = dict()
    for name, stats in dlist:
        slist = stats.split()
        dom, vifnum = name[3:].split('.', 1)
        dev = devmap.get(dom, {})
        # Note, we deliberately swap in and out, as instance traffic
        # shows up inverted due to going through the bridge. (mdragon)
        dev[vifnum] = dict(bw_in=int(slist[8]), bw_out=int(slist[0]))
        devmap[dom] = dev
    return devmap


def fetch_all_bandwidth(session):
    return _get_bandwitdth_from_proc()


if __name__ == '__main__':
    utils.register_plugin_calls(fetch_all_bandwidth)
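The parsing in _get_bandwitdth_from_proc above keys off the fixed column layout of /proc/net/dev. A worked example against a made-up sample line (the counter values are illustrative):

line = 'vif4.0:  9232  120 0 0 0 0 0 0  4871  63 0 0 0 0 0 0'
name, stats = line.split(':', 1)
slist = stats.split()
# After the colon, field 0 is bytes received by dom0 and field 8 is
# bytes transmitted by dom0; from the instance's perspective these are
# reversed, hence bw_in=slist[8] and bw_out=slist[0] in the plugin.
dom, vifnum = name[3:].split('.', 1)             # -> ('4', '0')
print dom, vifnum, int(slist[8]), int(slist[0])  # -> 4 0 4871 9232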
@ -1,327 +0,0 @@
#!/usr/bin/env python

# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true

"""Download images via BitTorrent."""

import errno
import inspect
import os
import random
import shutil
import tempfile
import time

import libtorrent
import urllib2

import utils

import pluginlib_nova


pluginlib_nova.configure_logging('bittorrent')
logging = pluginlib_nova.logging

# Taken from units since we don't pull down full library
Mi = 1024 ** 2
DEFAULT_TORRENT_CACHE = '/images/torrents'
DEFAULT_SEED_CACHE = '/images/seeds'
SEEDER_PROCESS = '_bittorrent_seeder'
DEFAULT_MMA = int(libtorrent.bandwidth_mixed_algo_t.prefer_tcp)
DEFAULT_MORQ = 400
DEFAULT_MQDB = 8 * Mi
DEFAULT_MQDBLW = 0


def _make_torrent_cache():
    torrent_cache_path = os.environ.get(
        'TORRENT_CACHE', DEFAULT_TORRENT_CACHE)

    if not os.path.exists(torrent_cache_path):
        os.mkdir(torrent_cache_path)

    return torrent_cache_path


def _fetch_torrent_file(torrent_cache_path, image_id, torrent_url):
    torrent_path = os.path.join(
        torrent_cache_path, image_id + '.torrent')

    if not os.path.exists(torrent_path):
        logging.info("Downloading %s" % torrent_url)

        # Write contents to temporary path to ensure we don't have partially
        # completed files in the cache.
        temp_directory = tempfile.mkdtemp(dir=torrent_cache_path)
        try:
            temp_path = os.path.join(
                temp_directory, os.path.basename(torrent_path))
            temp_file = open(temp_path, 'wb')
            try:
                remote_torrent_file = urllib2.urlopen(torrent_url)
                shutil.copyfileobj(remote_torrent_file, temp_file)
            finally:
                temp_file.close()

            os.rename(temp_path, torrent_path)
        finally:
            shutil.rmtree(temp_directory)

    return torrent_path


def _reap_old_torrent_files(torrent_cache_path, torrent_max_last_accessed):
    """Delete any torrent files that haven't been accessed recently."""
    if not torrent_max_last_accessed:
        logging.debug("Reaping old torrent files disabled, skipping...")
        return

    logging.debug("Preparing to reap old torrent files,"
                  " torrent_max_last_accessed=%d" % torrent_max_last_accessed)

    for fname in os.listdir(torrent_cache_path):
        torrent_path = os.path.join(torrent_cache_path, fname)
        last_accessed = time.time() - os.path.getatime(torrent_path)
        if last_accessed > torrent_max_last_accessed:
            logging.debug("Reaping '%s', last_accessed=%d" % (
                torrent_path, last_accessed))
            utils.delete_if_exists(torrent_path)


def _download(torrent_path, save_as_path, torrent_listen_port_start,
              torrent_listen_port_end, torrent_download_stall_cutoff):
    session = libtorrent.session()
    session.listen_on(torrent_listen_port_start, torrent_listen_port_end)

    mixed_mode_algorithm = os.environ.get(
        'DEFAULT_MIXED_MODE_ALGORITHM', DEFAULT_MMA)
    max_out_request_queue = os.environ.get(
        'DEFAULT_MAX_OUT_REQUEST_QUEUE', DEFAULT_MORQ)
    max_queued_disk_bytes = os.environ.get(
        'DEFAULT_MAX_QUEUED_DISK_BYTES', DEFAULT_MQDB)
    max_queued_disk_bytes_low_watermark = os.environ.get(
        'DEFAULT_MAX_QUEUED_DISK_BYTES_LOW_WATERMARK', DEFAULT_MQDBLW)

    session_opts = {'mixed_mode_algorithm': mixed_mode_algorithm,
                    'max_queued_disk_bytes': max_queued_disk_bytes,
                    'max_out_request_queue': max_out_request_queue,
                    'max_queued_disk_bytes_low_watermark':
                        max_queued_disk_bytes_low_watermark}
    session.set_settings(session_opts)
    info = libtorrent.torrent_info(
        libtorrent.bdecode(open(torrent_path, 'rb').read()))

    torrent = session.add_torrent(
        info, save_as_path,
        storage_mode=libtorrent.storage_mode_t.storage_mode_sparse)

    try:
        last_progress = 0
        last_progress_updated = time.time()

        log_time = 0
        while not torrent.is_seed():
            s = torrent.status()

            progress = s.progress * 100

            if progress != last_progress:
                last_progress = progress
                last_progress_updated = time.time()

            stall_duration = time.time() - last_progress_updated
            if stall_duration > torrent_download_stall_cutoff:
                logging.error(
                    "Download stalled: stall_duration=%d,"
                    " torrent_download_stall_cutoff=%d" % (
                        stall_duration, torrent_download_stall_cutoff))
                raise Exception("Bittorrent download stall detected, bailing!")

            log_time += 1
            if log_time % 10 == 0:
                logging.debug(
                    '%.2f%% complete (down: %.1f kb/s up: %.1f kB/s peers: %d)'
                    ' %s %s' % (progress, s.download_rate / 1000,
                                s.upload_rate / 1000, s.num_peers, s.state,
                                torrent_path))
            time.sleep(1)
    finally:
        session.remove_torrent(torrent)

    logging.debug("Download of '%s' finished" % torrent_path)


def _should_seed(seed_path, torrent_seed_duration, torrent_seed_chance,
                 torrent_max_seeder_processes_per_host):
    if not torrent_seed_duration:
        logging.debug("Seeding disabled, skipping...")
        return False

    if os.path.exists(seed_path):
        logging.debug("Seed is already present, skipping....")
        return False

    rand = random.random()
    if rand > torrent_seed_chance:
        logging.debug("%.2f > %.2f, randomly skipping seeding..." % (
            rand, torrent_seed_chance))
        return False

    num_active_seeders = len(list(_active_seeder_processes()))
    if (torrent_max_seeder_processes_per_host >= 0 and
            num_active_seeders >= torrent_max_seeder_processes_per_host):
        logging.debug("max number of seeder processes for this host reached"
                      " (%d), skipping..." %
                      torrent_max_seeder_processes_per_host)
        return False

    return True


def _seed(torrent_path, seed_cache_path, torrent_seed_duration,
          torrent_listen_port_start, torrent_listen_port_end):
    plugin_path = os.path.dirname(inspect.getabsfile(inspect.currentframe()))
    seeder_path = os.path.join(plugin_path, SEEDER_PROCESS)
    seed_cmd = map(str, [seeder_path, torrent_path, seed_cache_path,
                         torrent_seed_duration, torrent_listen_port_start,
                         torrent_listen_port_end])
    utils.run_command(seed_cmd)


def _seed_if_needed(seed_cache_path, tarball_path, torrent_path,
                    torrent_seed_duration, torrent_seed_chance,
                    torrent_listen_port_start, torrent_listen_port_end,
                    torrent_max_seeder_processes_per_host):
    seed_filename = os.path.basename(tarball_path)
    seed_path = os.path.join(seed_cache_path, seed_filename)

    if _should_seed(seed_path, torrent_seed_duration, torrent_seed_chance,
                    torrent_max_seeder_processes_per_host):
        logging.debug("Preparing to seed '%s' for %d secs" % (
            seed_path, torrent_seed_duration))
        utils._rename(tarball_path, seed_path)

        # Daemonize and seed the image
        _seed(torrent_path, seed_cache_path, torrent_seed_duration,
              torrent_listen_port_start, torrent_listen_port_end)
    else:
        utils.delete_if_exists(tarball_path)


def _extract_tarball(tarball_path, staging_path):
    """Extract the tarball into the staging directory."""
    tarball_fileobj = open(tarball_path, 'rb')
    try:
        utils.extract_tarball(tarball_fileobj, staging_path)
    finally:
        tarball_fileobj.close()


def _active_seeder_processes():
    """Yields command-line of active seeder processes.

    Roughly equivalent to performing ps | grep _bittorrent_seeder
    """
    pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
    for pid in pids:
        try:
            cmdline = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
        except IOError, e:  # noqa
            if e.errno != errno.ENOENT:
                raise

        if SEEDER_PROCESS in cmdline:
            yield cmdline


def _reap_finished_seeds(seed_cache_path):
    """Delete any cached seeds where the seeder process has died."""
    logging.debug("Preparing to reap finished seeds")
    missing = {}
    for fname in os.listdir(seed_cache_path):
        seed_path = os.path.join(seed_cache_path, fname)
        missing[seed_path] = None

    for cmdline in _active_seeder_processes():
        for seed_path in missing.keys():
            seed_filename = os.path.basename(seed_path)
            if seed_filename in cmdline:
                del missing[seed_path]

    for seed_path in missing:
        logging.debug("Reaping cached seed '%s'" % seed_path)
        utils.delete_if_exists(seed_path)


def _make_seed_cache():
    seed_cache_path = os.environ.get('SEED_CACHE', DEFAULT_SEED_CACHE)
    if not os.path.exists(seed_cache_path):
        os.mkdir(seed_cache_path)
    return seed_cache_path


def download_vhd(session, image_id, torrent_url, torrent_seed_duration,
                 torrent_seed_chance, torrent_max_last_accessed,
                 torrent_listen_port_start, torrent_listen_port_end,
                 torrent_download_stall_cutoff, uuid_stack, sr_path,
                 torrent_max_seeder_processes_per_host):
    """Download an image from BitTorrent, unbundle it, and then deposit the
    VHDs into the storage repository.
    """
    seed_cache_path = _make_seed_cache()
    torrent_cache_path = _make_torrent_cache()

    # Housekeeping
    _reap_finished_seeds(seed_cache_path)
    _reap_old_torrent_files(torrent_cache_path, torrent_max_last_accessed)

    torrent_path = _fetch_torrent_file(
        torrent_cache_path, image_id, torrent_url)

    staging_path = utils.make_staging_area(sr_path)
    try:
        tarball_filename = os.path.basename(torrent_path).replace(
            '.torrent', '')
        tarball_path = os.path.join(staging_path, tarball_filename)

        # Download tarball into staging area
        _download(torrent_path, staging_path, torrent_listen_port_start,
                  torrent_listen_port_end, torrent_download_stall_cutoff)

        # Extract the tarball into the staging area
        _extract_tarball(tarball_path, staging_path)

        # Move the VHDs from the staging area into the storage repository
        vdi_list = utils.import_vhds(sr_path, staging_path, uuid_stack)

        # Seed image for others in the swarm
        _seed_if_needed(seed_cache_path, tarball_path, torrent_path,
                        torrent_seed_duration, torrent_seed_chance,
                        torrent_listen_port_start, torrent_listen_port_end,
                        torrent_max_seeder_processes_per_host)
    finally:
        utils.cleanup_staging_area(staging_path)

    return vdi_list


if __name__ == '__main__':
    utils.register_plugin_calls(download_vhd)
1
plugins/xenserver/xenapi/etc/xapi.d/plugins/bittorrent
Symbolic link
@ -0,0 +1 @@
bittorrent.py
327
plugins/xenserver/xenapi/etc/xapi.d/plugins/bittorrent.py
Executable file
@ -0,0 +1,327 @@
#!/usr/bin/env python

# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true

"""Download images via BitTorrent."""

import errno
import inspect
import os
import random
import shutil
import tempfile
import time

import libtorrent
import urllib2

import utils

import pluginlib_nova


pluginlib_nova.configure_logging('bittorrent')
logging = pluginlib_nova.logging

# Taken from units since we don't pull down full library
Mi = 1024 ** 2
DEFAULT_TORRENT_CACHE = '/images/torrents'
DEFAULT_SEED_CACHE = '/images/seeds'
SEEDER_PROCESS = '_bittorrent_seeder.py'
DEFAULT_MMA = int(libtorrent.bandwidth_mixed_algo_t.prefer_tcp)
DEFAULT_MORQ = 400
DEFAULT_MQDB = 8 * Mi
DEFAULT_MQDBLW = 0


def _make_torrent_cache():
    torrent_cache_path = os.environ.get(
        'TORRENT_CACHE', DEFAULT_TORRENT_CACHE)

    if not os.path.exists(torrent_cache_path):
        os.mkdir(torrent_cache_path)

    return torrent_cache_path


def _fetch_torrent_file(torrent_cache_path, image_id, torrent_url):
    torrent_path = os.path.join(
        torrent_cache_path, image_id + '.torrent')

    if not os.path.exists(torrent_path):
        logging.info("Downloading %s" % torrent_url)

        # Write contents to temporary path to ensure we don't have partially
        # completed files in the cache.
        temp_directory = tempfile.mkdtemp(dir=torrent_cache_path)
        try:
            temp_path = os.path.join(
                temp_directory, os.path.basename(torrent_path))
            temp_file = open(temp_path, 'wb')
            try:
                remote_torrent_file = urllib2.urlopen(torrent_url)
                shutil.copyfileobj(remote_torrent_file, temp_file)
            finally:
                temp_file.close()

            os.rename(temp_path, torrent_path)
        finally:
            shutil.rmtree(temp_directory)

    return torrent_path


def _reap_old_torrent_files(torrent_cache_path, torrent_max_last_accessed):
    """Delete any torrent files that haven't been accessed recently."""
    if not torrent_max_last_accessed:
        logging.debug("Reaping old torrent files disabled, skipping...")
        return

    logging.debug("Preparing to reap old torrent files,"
                  " torrent_max_last_accessed=%d" % torrent_max_last_accessed)

    for fname in os.listdir(torrent_cache_path):
        torrent_path = os.path.join(torrent_cache_path, fname)
        last_accessed = time.time() - os.path.getatime(torrent_path)
        if last_accessed > torrent_max_last_accessed:
            logging.debug("Reaping '%s', last_accessed=%d" % (
                torrent_path, last_accessed))
            utils.delete_if_exists(torrent_path)


def _download(torrent_path, save_as_path, torrent_listen_port_start,
              torrent_listen_port_end, torrent_download_stall_cutoff):
    session = libtorrent.session()
    session.listen_on(torrent_listen_port_start, torrent_listen_port_end)

    mixed_mode_algorithm = os.environ.get(
        'DEFAULT_MIXED_MODE_ALGORITHM', DEFAULT_MMA)
    max_out_request_queue = os.environ.get(
        'DEFAULT_MAX_OUT_REQUEST_QUEUE', DEFAULT_MORQ)
    max_queued_disk_bytes = os.environ.get(
        'DEFAULT_MAX_QUEUED_DISK_BYTES', DEFAULT_MQDB)
    max_queued_disk_bytes_low_watermark = os.environ.get(
        'DEFAULT_MAX_QUEUED_DISK_BYTES_LOW_WATERMARK', DEFAULT_MQDBLW)

    session_opts = {'mixed_mode_algorithm': mixed_mode_algorithm,
                    'max_queued_disk_bytes': max_queued_disk_bytes,
                    'max_out_request_queue': max_out_request_queue,
                    'max_queued_disk_bytes_low_watermark':
                        max_queued_disk_bytes_low_watermark}
    session.set_settings(session_opts)
    info = libtorrent.torrent_info(
        libtorrent.bdecode(open(torrent_path, 'rb').read()))

    torrent = session.add_torrent(
        info, save_as_path,
        storage_mode=libtorrent.storage_mode_t.storage_mode_sparse)

    try:
        last_progress = 0
        last_progress_updated = time.time()

        log_time = 0
        while not torrent.is_seed():
            s = torrent.status()

            progress = s.progress * 100

            if progress != last_progress:
                last_progress = progress
                last_progress_updated = time.time()

            stall_duration = time.time() - last_progress_updated
            if stall_duration > torrent_download_stall_cutoff:
                logging.error(
                    "Download stalled: stall_duration=%d,"
                    " torrent_download_stall_cutoff=%d" % (
                        stall_duration, torrent_download_stall_cutoff))
                raise Exception("Bittorrent download stall detected, bailing!")

            log_time += 1
            if log_time % 10 == 0:
                logging.debug(
                    '%.2f%% complete (down: %.1f kb/s up: %.1f kB/s peers: %d)'
                    ' %s %s' % (progress, s.download_rate / 1000,
                                s.upload_rate / 1000, s.num_peers, s.state,
                                torrent_path))
            time.sleep(1)
    finally:
        session.remove_torrent(torrent)

    logging.debug("Download of '%s' finished" % torrent_path)


def _should_seed(seed_path, torrent_seed_duration, torrent_seed_chance,
                 torrent_max_seeder_processes_per_host):
    if not torrent_seed_duration:
        logging.debug("Seeding disabled, skipping...")
        return False

    if os.path.exists(seed_path):
        logging.debug("Seed is already present, skipping....")
        return False

    rand = random.random()
    if rand > torrent_seed_chance:
        logging.debug("%.2f > %.2f, randomly skipping seeding..." % (
            rand, torrent_seed_chance))
        return False

    num_active_seeders = len(list(_active_seeder_processes()))
    if (torrent_max_seeder_processes_per_host >= 0 and
            num_active_seeders >= torrent_max_seeder_processes_per_host):
        logging.debug("max number of seeder processes for this host reached"
                      " (%d), skipping..." %
                      torrent_max_seeder_processes_per_host)
        return False

    return True


def _seed(torrent_path, seed_cache_path, torrent_seed_duration,
          torrent_listen_port_start, torrent_listen_port_end):
    plugin_path = os.path.dirname(inspect.getabsfile(inspect.currentframe()))
    seeder_path = os.path.join(plugin_path, SEEDER_PROCESS)
    seed_cmd = map(str, [seeder_path, torrent_path, seed_cache_path,
                         torrent_seed_duration, torrent_listen_port_start,
                         torrent_listen_port_end])
    utils.run_command(seed_cmd)


def _seed_if_needed(seed_cache_path, tarball_path, torrent_path,
                    torrent_seed_duration, torrent_seed_chance,
                    torrent_listen_port_start, torrent_listen_port_end,
                    torrent_max_seeder_processes_per_host):
    seed_filename = os.path.basename(tarball_path)
    seed_path = os.path.join(seed_cache_path, seed_filename)

    if _should_seed(seed_path, torrent_seed_duration, torrent_seed_chance,
                    torrent_max_seeder_processes_per_host):
        logging.debug("Preparing to seed '%s' for %d secs" % (
            seed_path, torrent_seed_duration))
        utils._rename(tarball_path, seed_path)

        # Daemonize and seed the image
        _seed(torrent_path, seed_cache_path, torrent_seed_duration,
              torrent_listen_port_start, torrent_listen_port_end)
    else:
        utils.delete_if_exists(tarball_path)


def _extract_tarball(tarball_path, staging_path):
    """Extract the tarball into the staging directory."""
    tarball_fileobj = open(tarball_path, 'rb')
    try:
        utils.extract_tarball(tarball_fileobj, staging_path)
    finally:
        tarball_fileobj.close()


def _active_seeder_processes():
    """Yields command-line of active seeder processes.

    Roughly equivalent to performing ps | grep _bittorrent_seeder
    """
    pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
    for pid in pids:
        try:
            cmdline = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
        except IOError, e:  # noqa
            if e.errno != errno.ENOENT:
                raise

        if SEEDER_PROCESS in cmdline:
            yield cmdline


def _reap_finished_seeds(seed_cache_path):
    """Delete any cached seeds where the seeder process has died."""
    logging.debug("Preparing to reap finished seeds")
    missing = {}
    for fname in os.listdir(seed_cache_path):
        seed_path = os.path.join(seed_cache_path, fname)
        missing[seed_path] = None

    for cmdline in _active_seeder_processes():
        for seed_path in missing.keys():
            seed_filename = os.path.basename(seed_path)
            if seed_filename in cmdline:
                del missing[seed_path]

    for seed_path in missing:
        logging.debug("Reaping cached seed '%s'" % seed_path)
        utils.delete_if_exists(seed_path)


def _make_seed_cache():
    seed_cache_path = os.environ.get('SEED_CACHE', DEFAULT_SEED_CACHE)
    if not os.path.exists(seed_cache_path):
        os.mkdir(seed_cache_path)
    return seed_cache_path


def download_vhd(session, image_id, torrent_url, torrent_seed_duration,
                 torrent_seed_chance, torrent_max_last_accessed,
                 torrent_listen_port_start, torrent_listen_port_end,
                 torrent_download_stall_cutoff, uuid_stack, sr_path,
                 torrent_max_seeder_processes_per_host):
    """Download an image from BitTorrent, unbundle it, and then deposit the
    VHDs into the storage repository.
    """
    seed_cache_path = _make_seed_cache()
    torrent_cache_path = _make_torrent_cache()

    # Housekeeping
    _reap_finished_seeds(seed_cache_path)
    _reap_old_torrent_files(torrent_cache_path, torrent_max_last_accessed)

    torrent_path = _fetch_torrent_file(
        torrent_cache_path, image_id, torrent_url)

    staging_path = utils.make_staging_area(sr_path)
    try:
        tarball_filename = os.path.basename(torrent_path).replace(
            '.torrent', '')
        tarball_path = os.path.join(staging_path, tarball_filename)

        # Download tarball into staging area
        _download(torrent_path, staging_path, torrent_listen_port_start,
                  torrent_listen_port_end, torrent_download_stall_cutoff)

        # Extract the tarball into the staging area
        _extract_tarball(tarball_path, staging_path)

        # Move the VHDs from the staging area into the storage repository
        vdi_list = utils.import_vhds(sr_path, staging_path, uuid_stack)

        # Seed image for others in the swarm
        _seed_if_needed(seed_cache_path, tarball_path, torrent_path,
                        torrent_seed_duration, torrent_seed_chance,
                        torrent_listen_port_start, torrent_listen_port_end,
                        torrent_max_seeder_processes_per_host)
    finally:
        utils.cleanup_staging_area(staging_path)

    return vdi_list


if __name__ == '__main__':
    utils.register_plugin_calls(download_vhd)
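One subtlety in the libtorrent session tuning above: the DEFAULT_* fallbacks are integers, but an operator-set environment override comes back from os.environ.get as a string. A defensive-cast sketch, reusing the same names for illustration (this cast is not part of the plugin):

import os

DEFAULT_MORQ = 400
# os.environ.get returns a str when the variable is set, so cast before
# handing the value to libtorrent's session settings.
max_out_request_queue = int(
    os.environ.get('DEFAULT_MAX_OUT_REQUEST_QUEUE', DEFAULT_MORQ))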
@ -1,34 +0,0 @@
#!/usr/bin/env python

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

import XenAPIPlugin


def get_val(session, args):
    config_key = args['key']
    config_file = open('/etc/xapi.conf')
    try:
        for line in config_file:
            split = line.split('=')
            if (len(split) == 2) and (split[0].strip() == config_key):
                return split[1].strip()
        return ""
    finally:
        config_file.close()

if __name__ == '__main__':
    XenAPIPlugin.dispatch({"get_val": get_val})
1
plugins/xenserver/xenapi/etc/xapi.d/plugins/config_file
Symbolic link
@ -0,0 +1 @@
config_file.py
34
plugins/xenserver/xenapi/etc/xapi.d/plugins/config_file.py
Executable file
@ -0,0 +1,34 @@
#!/usr/bin/env python

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

import XenAPIPlugin


def get_val(session, args):
    config_key = args['key']
    config_file = open('/etc/xapi.conf')
    try:
        for line in config_file:
            split = line.split('=')
            if (len(split) == 2) and (split[0].strip() == config_key):
                return split[1].strip()
        return ""
    finally:
        config_file.close()

if __name__ == '__main__':
    XenAPIPlugin.dispatch({"get_val": get_val})
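get_val above does a plain 'key = value' scan of /etc/xapi.conf, returning the first match and "" otherwise. A worked example against a hypothetical config entry (the key and value are made up, not from this change):

# Hypothetical /etc/xapi.conf line:
line = 'network-backend = openvswitch\n'
split = line.split('=')
if len(split) == 2 and split[0].strip() == 'network-backend':
    print split[1].strip()   # -> openvswitch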
@ -1,89 +0,0 @@
#!/usr/bin/python

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true

"""
To configure this plugin, you must set the following xenstore key:
/local/logconsole/@ = "/var/log/xen/guest/console.%d"

This can be done by running:
xenstore-write /local/logconsole/@ "/var/log/xen/guest/console.%d"

WARNING:
You should ensure appropriate log rotation to ensure
guests are not able to consume too much Dom0 disk space,
and equally should not be able to stop other guests from logging.
Adding and removing the following xenstore key will reopen the log,
as will be required after a log rotate:
/local/logconsole/<dom_id>
"""

import base64
import logging
import zlib

import XenAPIPlugin

import pluginlib_nova
pluginlib_nova.configure_logging("console")

CONSOLE_LOG_DIR = '/var/log/xen/guest'
CONSOLE_LOG_FILE_PATTERN = CONSOLE_LOG_DIR + '/console.%d'

MAX_CONSOLE_BYTES = 102400
SEEK_SET = 0
SEEK_END = 2


def _last_bytes(file_like_object):
    try:
        file_like_object.seek(-MAX_CONSOLE_BYTES, SEEK_END)
    except IOError, e:  # noqa
        if e.errno == 22:
            file_like_object.seek(0, SEEK_SET)
        else:
            raise
    return file_like_object.read()


def get_console_log(session, arg_dict):
    try:
        raw_dom_id = arg_dict['dom_id']
    except KeyError:
        raise pluginlib_nova.PluginError("Missing dom_id")
    try:
        dom_id = int(raw_dom_id)
    except ValueError:
        raise pluginlib_nova.PluginError("Invalid dom_id")

    logfile = open(CONSOLE_LOG_FILE_PATTERN % dom_id, 'rb')
    try:
        try:
            log_content = _last_bytes(logfile)
        except IOError, e:  # noqa
            msg = "Error reading console: %s" % e
            logging.debug(msg)
            raise pluginlib_nova.PluginError(msg)
    finally:
        logfile.close()

    return base64.b64encode(zlib.compress(log_content))


if __name__ == "__main__":
    XenAPIPlugin.dispatch({"get_console_log": get_console_log})
1
plugins/xenserver/xenapi/etc/xapi.d/plugins/console
Symbolic link
@ -0,0 +1 @@
console.py
89
plugins/xenserver/xenapi/etc/xapi.d/plugins/console.py
Executable file
@ -0,0 +1,89 @@
#!/usr/bin/python

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true

"""
To configure this plugin, you must set the following xenstore key:
/local/logconsole/@ = "/var/log/xen/guest/console.%d"

This can be done by running:
xenstore-write /local/logconsole/@ "/var/log/xen/guest/console.%d"

WARNING:
You should ensure appropriate log rotation to ensure
guests are not able to consume too much Dom0 disk space,
and equally should not be able to stop other guests from logging.
Adding and removing the following xenstore key will reopen the log,
as will be required after a log rotate:
/local/logconsole/<dom_id>
"""

import base64
import logging
import zlib

import XenAPIPlugin

import pluginlib_nova
pluginlib_nova.configure_logging("console")

CONSOLE_LOG_DIR = '/var/log/xen/guest'
CONSOLE_LOG_FILE_PATTERN = CONSOLE_LOG_DIR + '/console.%d'

MAX_CONSOLE_BYTES = 102400
SEEK_SET = 0
SEEK_END = 2


def _last_bytes(file_like_object):
    try:
        file_like_object.seek(-MAX_CONSOLE_BYTES, SEEK_END)
    except IOError, e:  # noqa
        if e.errno == 22:
            file_like_object.seek(0, SEEK_SET)
        else:
            raise
    return file_like_object.read()


def get_console_log(session, arg_dict):
    try:
        raw_dom_id = arg_dict['dom_id']
    except KeyError:
        raise pluginlib_nova.PluginError("Missing dom_id")
    try:
        dom_id = int(raw_dom_id)
    except ValueError:
        raise pluginlib_nova.PluginError("Invalid dom_id")

    logfile = open(CONSOLE_LOG_FILE_PATTERN % dom_id, 'rb')
    try:
        try:
            log_content = _last_bytes(logfile)
        except IOError, e:  # noqa
            msg = "Error reading console: %s" % e
            logging.debug(msg)
            raise pluginlib_nova.PluginError(msg)
    finally:
        logfile.close()

    return base64.b64encode(zlib.compress(log_content))


if __name__ == "__main__":
    XenAPIPlugin.dispatch({"get_console_log": get_console_log})
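get_console_log returns the tail of the guest console log zlib-compressed and base64-encoded. A sketch of the decoding a caller would perform; the helper name is ours, not from this change:

import base64
import zlib

def decode_console_log(encoded):
    # Inverse of the plugin's base64.b64encode(zlib.compress(log_content)).
    return zlib.decompress(base64.b64decode(encoded))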
@ -1,632 +0,0 @@
#!/usr/bin/env python

# Copyright (c) 2012 OpenStack Foundation
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true

"""Handle the uploading and downloading of images via Glance."""

try:
    import httplib
except ImportError:
    from six.moves import http_client as httplib

try:
    import json
except ImportError:
    import simplejson as json

import md5  # noqa
import socket
import urllib2
from urlparse import urlparse

import pluginlib_nova
import utils


pluginlib_nova.configure_logging('glance')
logging = pluginlib_nova.logging
PluginError = pluginlib_nova.PluginError

SOCKET_TIMEOUT_SECONDS = 90


class RetryableError(Exception):
    pass


def _create_connection(scheme, netloc):
    if scheme == 'https':
        conn = httplib.HTTPSConnection(netloc)
    else:
        conn = httplib.HTTPConnection(netloc)
    conn.connect()
    return conn


def _download_tarball_and_verify(request, staging_path):
    # NOTE(johngarbutt) By default, there is no timeout.
    # To ensure the script does not hang if we lose connection
    # to glance, we add this socket timeout.
    # This is here so there is no chance the timeout has
    # been adjusted by other library calls.
    socket.setdefaulttimeout(SOCKET_TIMEOUT_SECONDS)

    try:
        response = urllib2.urlopen(request)
    except urllib2.HTTPError, error:  # noqa
        raise RetryableError(error)
    except urllib2.URLError, error:  # noqa
        raise RetryableError(error)
    except httplib.HTTPException, error:  # noqa
        # httplib.HTTPException and derivatives (BadStatusLine in particular)
        # don't have a useful __repr__ or __str__
        raise RetryableError('%s: %s' % (error.__class__.__name__, error))

    url = request.get_full_url()
    logging.info("Reading image data from %s" % url)

    callback_data = {'bytes_read': 0}
    checksum = md5.new()

    def update_md5(chunk):
        callback_data['bytes_read'] += len(chunk)
        checksum.update(chunk)

    try:
        try:
            utils.extract_tarball(response, staging_path, callback=update_md5)
        except Exception, error:  # noqa
            raise RetryableError(error)
    finally:
        bytes_read = callback_data['bytes_read']
        logging.info("Read %d bytes from %s", bytes_read, url)

    # Use ETag if available, otherwise content-md5(v2) or
    # X-Image-Meta-Checksum(v1)
    etag = response.info().getheader('etag', None)
    if etag is None:
        etag = response.info().getheader('content-md5', None)
    if etag is None:
        etag = response.info().getheader('x-image-meta-checksum', None)

    # Verify checksum using ETag
    checksum = checksum.hexdigest()

    if etag is None:
        msg = "No ETag found for comparison to checksum %(checksum)s"
        logging.info(msg % {'checksum': checksum})
    elif checksum != etag:
        msg = 'ETag %(etag)s does not match computed md5sum %(checksum)s'
        raise RetryableError(msg % {'checksum': checksum, 'etag': etag})
    else:
        msg = "Verified image checksum %(checksum)s"
        logging.info(msg % {'checksum': checksum})


def _download_tarball_v1(sr_path, staging_path, image_id, glance_host,
                         glance_port, glance_use_ssl, extra_headers):
    """Download the tarball image from Glance v1 and extract it into the
    staging area. Retry if there is any failure.
    """
    if glance_use_ssl:
        scheme = 'https'
    else:
        scheme = 'http'

    endpoint = "%(scheme)s://%(glance_host)s:%(glance_port)d" % {
        'scheme': scheme, 'glance_host': glance_host,
        'glance_port': glance_port}
    _download_tarball_by_url_v1(sr_path, staging_path, image_id,
                                endpoint, extra_headers)


def _download_tarball_by_url_v1(
        sr_path, staging_path, image_id, glance_endpoint, extra_headers):
    """Download the tarball image from Glance v1 and extract it into the
    staging area. Retry if there is any failure.
    """

    url = "%(glance_endpoint)s/v1/images/%(image_id)s" % {
        'glance_endpoint': glance_endpoint,
        'image_id': image_id}
    logging.info("Downloading %s with glance v1 api" % url)

    request = urllib2.Request(url, headers=extra_headers)
    try:
        _download_tarball_and_verify(request, staging_path)
    except Exception:
        logging.exception('Failed to retrieve %(url)s' % {'url': url})
        raise


def _download_tarball_by_url_v2(
        sr_path, staging_path, image_id, glance_endpoint, extra_headers):
    """Download the tarball image from Glance v2 and extract it into the
    staging area. Retry if there is any failure.
    """

    url = "%(glance_endpoint)s/v2/images/%(image_id)s/file" % {
        'glance_endpoint': glance_endpoint,
        'image_id': image_id}
    logging.debug("Downloading %s with glance v2 api" % url)

    request = urllib2.Request(url, headers=extra_headers)
    try:
        _download_tarball_and_verify(request, staging_path)
    except Exception:
        logging.exception('Failed to retrieve %(url)s' % {'url': url})
        raise


def _upload_tarball_v1(staging_path, image_id, glance_host, glance_port,
                       glance_use_ssl, extra_headers, properties):
    if glance_use_ssl:
        scheme = 'https'
    else:
        scheme = 'http'

    url = '%s://%s:%s' % (scheme, glance_host, glance_port)
    _upload_tarball_by_url_v1(staging_path, image_id, url,
                              extra_headers, properties)


def _upload_tarball_by_url_v1(staging_path, image_id, glance_endpoint,
                              extra_headers, properties):
    """Create a tarball of the image and then stream that into Glance v1
    using chunked-transfer-encoded HTTP.
    """
    # NOTE(johngarbutt) By default, there is no timeout.
    # To ensure the script does not hang if we lose connection
    # to glance, we add this socket timeout.
    # This is here so there is no chance the timeout has
    # been adjusted by other library calls.
    socket.setdefaulttimeout(SOCKET_TIMEOUT_SECONDS)
    logging.debug("Uploading image %s with glance v1 api"
                  % image_id)

    url = "%(glance_endpoint)s/v1/images/%(image_id)s" % {
        'glance_endpoint': glance_endpoint,
        'image_id': image_id}
    logging.info("Writing image data to %s" % url)

    # NOTE(sdague): this is python 2.4, which means urlparse returns a
    # tuple, not a named tuple.
    # 0 - scheme
    # 1 - host:port (aka netloc)
    # 2 - path
    parts = urlparse(url)

    try:
        conn = _create_connection(parts[0], parts[1])
    except Exception, error:  # noqa
        logging.exception('Failed to connect %(url)s' % {'url': url})
        raise RetryableError(error)

    try:
        validate_image_status_before_upload_v1(conn, url, extra_headers)

        try:
            # NOTE(sirp): httplib under python2.4 won't accept
            # a file-like object to request
            conn.putrequest('PUT', parts[2])

            # NOTE(sirp): There is some confusion around OVF. Here's a summary
            # of where we currently stand:
            #   1. OVF as a container format is misnamed. We really should be
            #      using OVA since that is the name for the container format;
            #      OVF is the standard applied to the manifest file contained
            #      within.
            #   2. We're currently uploading a vanilla tarball. In order to be
            #      OVF/OVA compliant, we'll need to embed a minimal OVF
            #      manifest as the first file.

            # NOTE(dprince): In order to preserve existing Glance properties
            # we set X-Glance-Registry-Purge-Props on this request.
            headers = {
                'content-type': 'application/octet-stream',
                'transfer-encoding': 'chunked',
                'x-image-meta-is-public': 'False',
                'x-image-meta-status': 'queued',
                'x-image-meta-disk-format': 'vhd',
                'x-image-meta-container-format': 'ovf',
                'x-glance-registry-purge-props': 'False'}

            headers.update(**extra_headers)

            for key, value in properties.items():
                header_key = "x-image-meta-property-%s" % key.replace('_', '-')
                headers[header_key] = str(value)

            for header, value in headers.items():
                conn.putheader(header, value)
            conn.endheaders()
        except Exception, error:  # noqa
            logging.exception('Failed to upload %(url)s' % {'url': url})
            raise RetryableError(error)

        callback_data = {'bytes_written': 0}

        def send_chunked_transfer_encoded(chunk):
            chunk_len = len(chunk)
            callback_data['bytes_written'] += chunk_len
            try:
                conn.send("%x\r\n%s\r\n" % (chunk_len, chunk))
            except Exception, error:  # noqa
                logging.exception('Failed to upload when sending chunks')
                raise RetryableError(error)

        compression_level = properties.get('xenapi_image_compression_level')

        utils.create_tarball(
            None, staging_path, callback=send_chunked_transfer_encoded,
            compression_level=compression_level)

        send_chunked_transfer_encoded('')  # Chunked-Transfer terminator

        bytes_written = callback_data['bytes_written']
        logging.info("Wrote %d bytes to %s" % (bytes_written, url))

        resp = conn.getresponse()
        if resp.status == httplib.OK:
            return

        logging.error("Unexpected response while writing image data to %s: "
                      "Response Status: %i, Response body: %s"
                      % (url, resp.status, resp.read()))

        check_resp_status_and_retry(resp, image_id, url)

    finally:
        conn.close()


def _update_image_meta_v2(conn, image_id, extra_headers, properties):
    # NOTE(sirp): There is some confusion around OVF. Here's a summary
    # of where we currently stand:
    #   1. OVF as a container format is misnamed. We really should be
    #      using OVA since that is the name for the container format;
    #      OVF is the standard applied to the manifest file contained
    #      within.
    #   2. We're currently uploading a vanilla tarball. In order to be
    #      OVF/OVA compliant, we'll need to embed a minimal OVF
    #      manifest as the first file.
    body = [
        {"path": "/container_format", "value": "ovf", "op": "add"},
        {"path": "/disk_format", "value": "vhd", "op": "add"},
        {"path": "/visibility", "value": "private", "op": "add"}]

    headers = {'Content-Type': 'application/openstack-images-v2.1-json-patch'}
    headers.update(**extra_headers)

    for key, value in properties.items():
        prop = {"path": "/%s" % key.replace('_', '-'),
                "value": key,
                "op": "add"}
        body.append(prop)
    body = json.dumps(body)
    conn.request('PATCH', '/v2/images/%s' % image_id,
                 body=body, headers=headers)
    resp = conn.getresponse()
    resp.read()

    if resp.status == httplib.OK:
        return
    logging.error("Image meta was not updated. Status: %s, Reason: %s" % (
        resp.status, resp.reason))


def _upload_tarball_by_url_v2(staging_path, image_id, glance_endpoint,
                              extra_headers, properties):
    """Create a tarball of the image and then stream that into Glance v2
    using chunked-transfer-encoded HTTP.
    """
    # NOTE(johngarbutt) By default, there is no timeout.
    # To ensure the script does not hang if we lose connection
    # to glance, we add this socket timeout.
    # This is here so there is no chance the timeout has
    # been adjusted by other library calls.
    socket.setdefaulttimeout(SOCKET_TIMEOUT_SECONDS)
    logging.debug("Uploading image %s with glance v2 api"
                  % image_id)

    url = "%(glance_endpoint)s/v2/images/%(image_id)s/file" % {
        'glance_endpoint': glance_endpoint,
        'image_id': image_id}

    # NOTE(sdague): this is python 2.4, which means urlparse returns a
    # tuple, not a named tuple.
    # 0 - scheme
    # 1 - host:port (aka netloc)
    # 2 - path
    parts = urlparse(url)

    try:
        conn = _create_connection(parts[0], parts[1])
    except Exception, error:  # noqa
        raise RetryableError(error)

    try:
        _update_image_meta_v2(conn, image_id, extra_headers, properties)

        validate_image_status_before_upload_v2(conn, url, extra_headers)

        try:
            conn.connect()
            # NOTE(sirp): httplib under python2.4 won't accept
            # a file-like object to request
            conn.putrequest('PUT', parts[2])

            headers = {
                'content-type': 'application/octet-stream',
                'transfer-encoding': 'chunked'}

            headers.update(**extra_headers)

            for header, value in headers.items():
                conn.putheader(header, value)
            conn.endheaders()
        except Exception, error:  # noqa
            logging.exception('Failed to upload %(url)s' % {'url': url})
            raise RetryableError(error)

        callback_data = {'bytes_written': 0}

        def send_chunked_transfer_encoded(chunk):
            chunk_len = len(chunk)
            callback_data['bytes_written'] += chunk_len
            try:
                conn.send("%x\r\n%s\r\n" % (chunk_len, chunk))
            except Exception, error:  # noqa
                logging.exception('Failed to upload when sending chunks')
                raise RetryableError(error)

        compression_level = properties.get('xenapi_image_compression_level')

        utils.create_tarball(
            None, staging_path, callback=send_chunked_transfer_encoded,
            compression_level=compression_level)

        send_chunked_transfer_encoded('')  # Chunked-Transfer terminator

        bytes_written = callback_data['bytes_written']
        logging.info("Wrote %d bytes to %s" % (bytes_written, url))

        resp = conn.getresponse()
        if resp.status == httplib.NO_CONTENT:
            return

        logging.error("Unexpected response while writing image data to %s: "
                      "Response Status: %i, Response body: %s"
                      % (url, resp.status, resp.read()))

        check_resp_status_and_retry(resp, image_id, url)

    finally:
        conn.close()


def check_resp_status_and_retry(resp, image_id, url):
    # Note(Jesse): This branch sorts errors into those that are permanent,
    # those that are ephemeral, and those that are unexpected.
    if resp.status in (httplib.BAD_REQUEST,                      # 400
                       httplib.UNAUTHORIZED,                     # 401
                       httplib.PAYMENT_REQUIRED,                 # 402
                       httplib.FORBIDDEN,                        # 403
                       httplib.NOT_FOUND,                        # 404
                       httplib.METHOD_NOT_ALLOWED,               # 405
                       httplib.NOT_ACCEPTABLE,                   # 406
                       httplib.PROXY_AUTHENTICATION_REQUIRED,    # 407
                       httplib.CONFLICT,                         # 409
                       httplib.GONE,                             # 410
                       httplib.LENGTH_REQUIRED,                  # 411
                       httplib.PRECONDITION_FAILED,              # 412
                       httplib.REQUEST_ENTITY_TOO_LARGE,         # 413
                       httplib.REQUEST_URI_TOO_LONG,             # 414
                       httplib.UNSUPPORTED_MEDIA_TYPE,           # 415
                       httplib.REQUESTED_RANGE_NOT_SATISFIABLE,  # 416
                       httplib.EXPECTATION_FAILED,               # 417
                       httplib.UNPROCESSABLE_ENTITY,             # 422
                       httplib.LOCKED,                           # 423
                       httplib.FAILED_DEPENDENCY,                # 424
                       httplib.UPGRADE_REQUIRED,                 # 426
                       httplib.NOT_IMPLEMENTED,                  # 501
                       httplib.HTTP_VERSION_NOT_SUPPORTED,       # 505
                       httplib.NOT_EXTENDED,                     # 510
                       ):
        raise PluginError("Got Permanent Error response [%i] while "
                          "uploading image [%s] to glance [%s]"
                          % (resp.status, image_id, url))
    # NOTE(nikhil): Only a sub-set of the 500 errors are retryable. We
    # optimistically retry on 500 errors below.
    elif resp.status in (httplib.REQUEST_TIMEOUT,                # 408
                         httplib.INTERNAL_SERVER_ERROR,          # 500
                         httplib.BAD_GATEWAY,                    # 502
                         httplib.SERVICE_UNAVAILABLE,            # 503
                         httplib.GATEWAY_TIMEOUT,                # 504
                         httplib.INSUFFICIENT_STORAGE,           # 507
                         ):
        raise RetryableError("Got Ephemeral Error response [%i] while "
                             "uploading image [%s] to glance [%s]"
                             % (resp.status, image_id, url))
    else:
        # Note(Jesse): Assume unexpected errors are retryable. If you are
        # seeing this error message, the error should probably be added
        # to either the ephemeral or permanent error list.
        raise RetryableError("Got Unexpected Error response [%i] while "
                             "uploading image [%s] to glance [%s]"
                             % (resp.status, image_id, url))


def validate_image_status_before_upload_v1(conn, url, extra_headers):
    try:
        parts = urlparse(url)
        path = parts[2]
        image_id = path.split('/')[-1]
        # NOTE(nikhil): Attempt to determine if the Image has a status
        # of 'queued'. Because data will continue to be sent to Glance
        # until Glance has a chance to check the Image state, discover
        # that it is not 'active', and send back a 409, the data will be
        # unnecessarily buffered by Glance. This wastes time and bandwidth.
        # LP bug #1202785

        conn.request('HEAD', path, headers=extra_headers)
        head_resp = conn.getresponse()
        # NOTE(nikhil): read the response to re-use the conn object.
        body_data = head_resp.read(8192)
        if len(body_data) > 8:
            err_msg = ('Cannot upload data for image %(image_id)s as the '
                       'HEAD call had more than 8192 bytes of data in '
                       'the response body.' % {'image_id': image_id})
            raise PluginError("Got Permanent Error while uploading image "
                              "[%s] to glance [%s]. "
                              "Message: %s" % (image_id, url,
                                               err_msg))
        else:
            head_resp.read()

    except Exception, error:  # noqa
        logging.exception('Failed to HEAD the image %(image_id)s while '
                          'checking image status before attempting to '
                          'upload %(url)s' % {'image_id': image_id,
                                              'url': url})
        raise RetryableError(error)

    if head_resp.status != httplib.OK:
        logging.error("Unexpected response while doing a HEAD call "
                      "to image %s , url = %s , Response Status: "
                      "%i" % (image_id, url, head_resp.status))

        check_resp_status_and_retry(head_resp, image_id, url)

    else:
        image_status = head_resp.getheader('x-image-meta-status')
        if image_status not in ('queued', ):
            err_msg = ('Cannot upload data for image %(image_id)s as the '
                       'image status is %(image_status)s' %
                       {'image_id': image_id, 'image_status': image_status})
            logging.exception(err_msg)
            raise PluginError("Got Permanent Error while uploading image "
                              "[%s] to glance [%s]. "
                              "Message: %s" % (image_id, url,
                                               err_msg))
        else:
            logging.info('Found image %(image_id)s in status '
                         '%(image_status)s. Attempting to '
                         'upload.' % {'image_id': image_id,
                                      'image_status': image_status})


def validate_image_status_before_upload_v2(conn, url, extra_headers):
    try:
        parts = urlparse(url)
        path = parts[2]
        image_id = path.split('/')[-2]
        # NOTE(nikhil): Attempt to determine if the Image has a status
        # of 'queued'. Because data will continue to be sent to Glance
        # until Glance has a chance to check the Image state, discover
        # that it is not 'active', and send back a 409, the data will be
        # unnecessarily buffered by Glance. This wastes time and bandwidth.
        # LP bug #1202785

        conn.request('GET', '/v2/images/%s' % image_id, headers=extra_headers)
        get_resp = conn.getresponse()
    except Exception, error:  # noqa
        logging.exception('Failed to GET the image %(image_id)s while '
                          'checking image status before attempting to '
                          'upload %(url)s' % {'image_id': image_id,
                                              'url': url})
        raise RetryableError(error)

    if get_resp.status != httplib.OK:
        logging.error("Unexpected response while doing a GET call "
                      "to image %s , url = %s , Response Status: "
                      "%i" % (image_id, url, get_resp.status))

        check_resp_status_and_retry(get_resp, image_id, url)
|
||||
|
||||
else:
|
||||
body = json.loads(get_resp.read())
|
||||
image_status = body['status']
|
||||
if image_status not in ('queued', ):
|
||||
err_msg = ('Cannot upload data for image %(image_id)s as the '
|
||||
'image status is %(image_status)s' %
|
||||
{'image_id': image_id, 'image_status': image_status})
|
||||
logging.exception(err_msg)
|
||||
raise PluginError("Got Permanent Error while uploading image "
|
||||
"[%s] to glance [%s]. "
|
||||
"Message: %s" % (image_id, url,
|
||||
err_msg))
|
||||
else:
|
||||
logging.info('Found image %(image_id)s in status '
|
||||
'%(image_status)s. Attempting to '
|
||||
'upload.' % {'image_id': image_id,
|
||||
'image_status': image_status})
|
||||
get_resp.read()
|
||||
|
||||
|
||||
def download_vhd2(session, image_id, endpoint,
|
||||
uuid_stack, sr_path, extra_headers, api_version=1):
|
||||
"""Download an image from Glance v2, unbundle it, and then deposit the
|
||||
VHDs into the storage repository.
|
||||
"""
|
||||
staging_path = utils.make_staging_area(sr_path)
|
||||
try:
|
||||
# Download tarball into staging area and extract it
|
||||
# TODO(mfedosin): remove this check when v1 is deprecated.
|
||||
if api_version == 1:
|
||||
_download_tarball_by_url_v1(
|
||||
sr_path, staging_path, image_id,
|
||||
endpoint, extra_headers)
|
||||
else:
|
||||
_download_tarball_by_url_v2(
|
||||
sr_path, staging_path, image_id,
|
||||
endpoint, extra_headers)
|
||||
|
||||
# Move the VHDs from the staging area into the storage repository
|
||||
return utils.import_vhds(sr_path, staging_path, uuid_stack)
|
||||
finally:
|
||||
utils.cleanup_staging_area(staging_path)
|
||||
|
||||
|
||||
def upload_vhd2(session, vdi_uuids, image_id, endpoint, sr_path,
|
||||
extra_headers, properties, api_version=1):
|
||||
"""Bundle the VHDs comprising an image and then stream them into
|
||||
Glance.
|
||||
"""
|
||||
staging_path = utils.make_staging_area(sr_path)
|
||||
try:
|
||||
utils.prepare_staging_area(sr_path, staging_path, vdi_uuids)
|
||||
# TODO(mfedosin): remove this check when v1 is deprecated.
|
||||
if api_version == 1:
|
||||
_upload_tarball_by_url_v1(staging_path, image_id,
|
||||
endpoint, extra_headers, properties)
|
||||
else:
|
||||
_upload_tarball_by_url_v2(staging_path, image_id,
|
||||
endpoint, extra_headers, properties)
|
||||
finally:
|
||||
utils.cleanup_staging_area(staging_path)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
utils.register_plugin_calls(download_vhd2, upload_vhd2)
|
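For reference, each call to send_chunked_transfer_encoded above emits one HTTP/1.1 chunked-transfer frame: the chunk length in hex, CRLF, the payload, CRLF, with a zero-length chunk terminating the stream. A minimal standalone sketch of the framing (the sample payloads are illustrative):

def frame_chunk(chunk):
    # HTTP/1.1 chunked framing: "<hex length>\r\n<payload>\r\n";
    # the zero-length frame "0\r\n\r\n" terminates the stream.
    return "%x\r\n%s\r\n" % (len(chunk), chunk)

assert frame_chunk('hello') == "5\r\nhello\r\n"
assert frame_chunk('') == "0\r\n\r\n"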
1
plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
Symbolic link
@ -0,0 +1 @@
glance.py
632
plugins/xenserver/xenapi/etc/xapi.d/plugins/glance.py
Executable file
@ -0,0 +1,632 @@
#!/usr/bin/env python

# Copyright (c) 2012 OpenStack Foundation
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true

"""Handle the uploading and downloading of images via Glance."""

try:
    import httplib
except ImportError:
    from six.moves import http_client as httplib

try:
    import json
except ImportError:
    import simplejson as json

import md5  # noqa
import socket
import urllib2
from urlparse import urlparse

import pluginlib_nova
import utils


pluginlib_nova.configure_logging('glance')
logging = pluginlib_nova.logging
PluginError = pluginlib_nova.PluginError

SOCKET_TIMEOUT_SECONDS = 90


class RetryableError(Exception):
    pass


def _create_connection(scheme, netloc):
    if scheme == 'https':
        conn = httplib.HTTPSConnection(netloc)
    else:
        conn = httplib.HTTPConnection(netloc)
    conn.connect()
    return conn


def _download_tarball_and_verify(request, staging_path):
    # NOTE(johngarbutt) By default, there is no timeout.
    # To ensure the script does not hang if we lose connection
    # to glance, we add this socket timeout.
    # This is here so there is no chance the timeout has
    # been adjusted by other library calls.
    socket.setdefaulttimeout(SOCKET_TIMEOUT_SECONDS)

    try:
        response = urllib2.urlopen(request)
    except urllib2.HTTPError, error:  # noqa
        raise RetryableError(error)
    except urllib2.URLError, error:  # noqa
        raise RetryableError(error)
    except httplib.HTTPException, error:  # noqa
        # httplib.HTTPException and derivatives (BadStatusLine in particular)
        # don't have a useful __repr__ or __str__
        raise RetryableError('%s: %s' % (error.__class__.__name__, error))

    url = request.get_full_url()
    logging.info("Reading image data from %s" % url)

    callback_data = {'bytes_read': 0}
    checksum = md5.new()

    def update_md5(chunk):
        callback_data['bytes_read'] += len(chunk)
        checksum.update(chunk)

    try:
        try:
            utils.extract_tarball(response, staging_path, callback=update_md5)
        except Exception, error:  # noqa
            raise RetryableError(error)
    finally:
        bytes_read = callback_data['bytes_read']
        logging.info("Read %d bytes from %s", bytes_read, url)

    # Use ETag if available, otherwise content-md5(v2) or
    # X-Image-Meta-Checksum(v1)
    etag = response.info().getheader('etag', None)
    if etag is None:
        etag = response.info().getheader('content-md5', None)
    if etag is None:
        etag = response.info().getheader('x-image-meta-checksum', None)

    # Verify checksum using ETag
    checksum = checksum.hexdigest()

    if etag is None:
        msg = "No ETag found for comparison to checksum %(checksum)s"
        logging.info(msg % {'checksum': checksum})
    elif checksum != etag:
        msg = 'ETag %(etag)s does not match computed md5sum %(checksum)s'
        raise RetryableError(msg % {'checksum': checksum, 'etag': etag})
    else:
        msg = "Verified image checksum %(checksum)s"
        logging.info(msg % {'checksum': checksum})
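The verification above prefers the ETag header and falls back to content-md5 (v2) and then X-Image-Meta-Checksum (v1). A minimal sketch of the same comparison in isolation (the input data is illustrative; md5 is the module the plugin already imports):

import md5  # deprecated, but the only option under Python 2.4

def image_checksum_ok(chunks, etag):
    csum = md5.new()
    for chunk in chunks:
        csum.update(chunk)
    # Mirrors the plugin: a missing ETag is merely logged, while a
    # mismatch becomes a RetryableError upstream.
    return etag is None or csum.hexdigest() == etag

print image_checksum_ok(['abc'], '900150983cd24fb0d6963f7d28e17f72')  # True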
def _download_tarball_v1(sr_path, staging_path, image_id, glance_host,
                         glance_port, glance_use_ssl, extra_headers):
    """Download the tarball image from Glance v1 and extract it into the
    staging area. Retry if there is any failure.
    """
    if glance_use_ssl:
        scheme = 'https'
    else:
        scheme = 'http'

    endpoint = "%(scheme)s://%(glance_host)s:%(glance_port)d" % {
        'scheme': scheme, 'glance_host': glance_host,
        'glance_port': glance_port}
    _download_tarball_by_url_v1(sr_path, staging_path, image_id,
                                endpoint, extra_headers)


def _download_tarball_by_url_v1(
        sr_path, staging_path, image_id, glance_endpoint, extra_headers):
    """Download the tarball image from Glance v1 and extract it into the
    staging area. Retry if there is any failure.
    """

    url = "%(glance_endpoint)s/v1/images/%(image_id)s" % {
        'glance_endpoint': glance_endpoint,
        'image_id': image_id}
    logging.info("Downloading %s with glance v1 api" % url)

    request = urllib2.Request(url, headers=extra_headers)
    try:
        _download_tarball_and_verify(request, staging_path)
    except Exception:
        logging.exception('Failed to retrieve %(url)s' % {'url': url})
        raise


def _download_tarball_by_url_v2(
        sr_path, staging_path, image_id, glance_endpoint, extra_headers):
    """Download the tarball image from Glance v2 and extract it into the
    staging area. Retry if there is any failure.
    """

    url = "%(glance_endpoint)s/v2/images/%(image_id)s/file" % {
        'glance_endpoint': glance_endpoint,
        'image_id': image_id}
    logging.debug("Downloading %s with glance v2 api" % url)

    request = urllib2.Request(url, headers=extra_headers)
    try:
        _download_tarball_and_verify(request, staging_path)
    except Exception:
        logging.exception('Failed to retrieve %(url)s' % {'url': url})
        raise


def _upload_tarball_v1(staging_path, image_id, glance_host, glance_port,
                       glance_use_ssl, extra_headers, properties):
    if glance_use_ssl:
        scheme = 'https'
    else:
        scheme = 'http'

    url = '%s://%s:%s' % (scheme, glance_host, glance_port)
    _upload_tarball_by_url_v1(staging_path, image_id, url,
                              extra_headers, properties)


def _upload_tarball_by_url_v1(staging_path, image_id, glance_endpoint,
                              extra_headers, properties):
    """Create a tarball of the image and then stream that into Glance v1
    using chunked-transfer-encoded HTTP.
    """
    # NOTE(johngarbutt) By default, there is no timeout.
    # To ensure the script does not hang if we lose connection
    # to glance, we add this socket timeout.
    # This is here so there is no chance the timeout has
    # been adjusted by other library calls.
    socket.setdefaulttimeout(SOCKET_TIMEOUT_SECONDS)
    logging.debug("Uploading image %s with glance v1 api"
                  % image_id)

    url = "%(glance_endpoint)s/v1/images/%(image_id)s" % {
        'glance_endpoint': glance_endpoint,
        'image_id': image_id}
    logging.info("Writing image data to %s" % url)

    # NOTE(sdague): this is python 2.4, which means urlparse returns a
    # tuple, not a named tuple.
    # 0 - scheme
    # 1 - host:port (aka netloc)
    # 2 - path
    parts = urlparse(url)

    try:
        conn = _create_connection(parts[0], parts[1])
    except Exception, error:  # noqa
        logging.exception('Failed to connect %(url)s' % {'url': url})
        raise RetryableError(error)

    try:
        validate_image_status_before_upload_v1(conn, url, extra_headers)

        try:
            # NOTE(sirp): httplib under python2.4 won't accept
            # a file-like object to request
            conn.putrequest('PUT', parts[2])

            # NOTE(sirp): There is some confusion around OVF. Here's a summary
            # of where we currently stand:
            #   1. OVF as a container format is misnamed. We really should be
            #      using OVA since that is the name for the container format;
            #      OVF is the standard applied to the manifest file contained
            #      within.
            #   2. We're currently uploading a vanilla tarball. In order to be
            #      OVF/OVA compliant, we'll need to embed a minimal OVF
            #      manifest as the first file.

            # NOTE(dprince): In order to preserve existing Glance properties
            # we set X-Glance-Registry-Purge-Props on this request.
            headers = {
                'content-type': 'application/octet-stream',
                'transfer-encoding': 'chunked',
                'x-image-meta-is-public': 'False',
                'x-image-meta-status': 'queued',
                'x-image-meta-disk-format': 'vhd',
                'x-image-meta-container-format': 'ovf',
                'x-glance-registry-purge-props': 'False'}

            headers.update(**extra_headers)

            for key, value in properties.items():
                header_key = "x-image-meta-property-%s" % key.replace('_', '-')
                headers[header_key] = str(value)

            for header, value in headers.items():
                conn.putheader(header, value)
            conn.endheaders()
        except Exception, error:  # noqa
            logging.exception('Failed to upload %(url)s' % {'url': url})
            raise RetryableError(error)

        callback_data = {'bytes_written': 0}

        def send_chunked_transfer_encoded(chunk):
            chunk_len = len(chunk)
            callback_data['bytes_written'] += chunk_len
            try:
                conn.send("%x\r\n%s\r\n" % (chunk_len, chunk))
            except Exception, error:  # noqa
                logging.exception('Failed to upload when sending chunks')
                raise RetryableError(error)

        compression_level = properties.get('xenapi_image_compression_level')

        utils.create_tarball(
            None, staging_path, callback=send_chunked_transfer_encoded,
            compression_level=compression_level)

        send_chunked_transfer_encoded('')  # Chunked-Transfer terminator

        bytes_written = callback_data['bytes_written']
        logging.info("Wrote %d bytes to %s" % (bytes_written, url))

        resp = conn.getresponse()
        if resp.status == httplib.OK:
            return

        logging.error("Unexpected response while writing image data to %s: "
                      "Response Status: %i, Response body: %s"
                      % (url, resp.status, resp.read()))

        check_resp_status_and_retry(resp, image_id, url)

    finally:
        conn.close()


def _update_image_meta_v2(conn, image_id, extra_headers, properties):
    # NOTE(sirp): There is some confusion around OVF. Here's a summary
    # of where we currently stand:
    #   1. OVF as a container format is misnamed. We really should be
    #      using OVA since that is the name for the container format;
    #      OVF is the standard applied to the manifest file contained
    #      within.
    #   2. We're currently uploading a vanilla tarball. In order to be
    #      OVF/OVA compliant, we'll need to embed a minimal OVF
    #      manifest as the first file.
    body = [
        {"path": "/container_format", "value": "ovf", "op": "add"},
        {"path": "/disk_format", "value": "vhd", "op": "add"},
        {"path": "/visibility", "value": "private", "op": "add"}]

    headers = {'Content-Type': 'application/openstack-images-v2.1-json-patch'}
    headers.update(**extra_headers)

    for key, value in properties.items():
        prop = {"path": "/%s" % key.replace('_', '-'),
                "value": str(value),
                "op": "add"}
        body.append(prop)
    body = json.dumps(body)
    conn.request('PATCH', '/v2/images/%s' % image_id,
                 body=body, headers=headers)
    resp = conn.getresponse()
    resp.read()

    if resp.status == httplib.OK:
        return
    logging.error("Image meta was not updated. Status: %s, Reason: %s" % (
        resp.status, resp.reason))
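For illustration, with properties = {'os_type': 'linux'} the PATCH issued by _update_image_meta_v2 above carries an application/openstack-images-v2.1-json-patch body along these lines (the sample property is hypothetical):

[
    {"path": "/container_format", "value": "ovf", "op": "add"},
    {"path": "/disk_format", "value": "vhd", "op": "add"},
    {"path": "/visibility", "value": "private", "op": "add"},
    {"path": "/os-type", "value": "linux", "op": "add"}
]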
def _upload_tarball_by_url_v2(staging_path, image_id, glance_endpoint,
                              extra_headers, properties):
    """Create a tarball of the image and then stream that into Glance v2
    using chunked-transfer-encoded HTTP.
    """
    # NOTE(johngarbutt) By default, there is no timeout.
    # To ensure the script does not hang if we lose connection
    # to glance, we add this socket timeout.
    # This is here so there is no chance the timeout has
    # been adjusted by other library calls.
    socket.setdefaulttimeout(SOCKET_TIMEOUT_SECONDS)
    logging.debug("Uploading image %s with glance v2 api"
                  % image_id)

    url = "%(glance_endpoint)s/v2/images/%(image_id)s/file" % {
        'glance_endpoint': glance_endpoint,
        'image_id': image_id}

    # NOTE(sdague): this is python 2.4, which means urlparse returns a
    # tuple, not a named tuple.
    # 0 - scheme
    # 1 - host:port (aka netloc)
    # 2 - path
    parts = urlparse(url)

    try:
        conn = _create_connection(parts[0], parts[1])
    except Exception, error:  # noqa
        raise RetryableError(error)

    try:
        _update_image_meta_v2(conn, image_id, extra_headers, properties)

        validate_image_status_before_upload_v2(conn, url, extra_headers)

        try:
            conn.connect()
            # NOTE(sirp): httplib under python2.4 won't accept
            # a file-like object to request
            conn.putrequest('PUT', parts[2])

            headers = {
                'content-type': 'application/octet-stream',
                'transfer-encoding': 'chunked'}

            headers.update(**extra_headers)

            for header, value in headers.items():
                conn.putheader(header, value)
            conn.endheaders()
        except Exception, error:  # noqa
            logging.exception('Failed to upload %(url)s' % {'url': url})
            raise RetryableError(error)

        callback_data = {'bytes_written': 0}

        def send_chunked_transfer_encoded(chunk):
            chunk_len = len(chunk)
            callback_data['bytes_written'] += chunk_len
            try:
                conn.send("%x\r\n%s\r\n" % (chunk_len, chunk))
            except Exception, error:  # noqa
                logging.exception('Failed to upload when sending chunks')
                raise RetryableError(error)

        compression_level = properties.get('xenapi_image_compression_level')

        utils.create_tarball(
            None, staging_path, callback=send_chunked_transfer_encoded,
            compression_level=compression_level)

        send_chunked_transfer_encoded('')  # Chunked-Transfer terminator

        bytes_written = callback_data['bytes_written']
        logging.info("Wrote %d bytes to %s" % (bytes_written, url))

        resp = conn.getresponse()
        if resp.status == httplib.NO_CONTENT:
            return

        logging.error("Unexpected response while writing image data to %s: "
                      "Response Status: %i, Response body: %s"
                      % (url, resp.status, resp.read()))

        check_resp_status_and_retry(resp, image_id, url)

    finally:
        conn.close()


def check_resp_status_and_retry(resp, image_id, url):
    # Note(Jesse): This branch sorts errors into those that are permanent,
    # those that are ephemeral, and those that are unexpected.
    if resp.status in (httplib.BAD_REQUEST,                      # 400
                       httplib.UNAUTHORIZED,                     # 401
                       httplib.PAYMENT_REQUIRED,                 # 402
                       httplib.FORBIDDEN,                        # 403
                       httplib.NOT_FOUND,                        # 404
                       httplib.METHOD_NOT_ALLOWED,               # 405
                       httplib.NOT_ACCEPTABLE,                   # 406
                       httplib.PROXY_AUTHENTICATION_REQUIRED,    # 407
                       httplib.CONFLICT,                         # 409
                       httplib.GONE,                             # 410
                       httplib.LENGTH_REQUIRED,                  # 411
                       httplib.PRECONDITION_FAILED,              # 412
                       httplib.REQUEST_ENTITY_TOO_LARGE,         # 413
                       httplib.REQUEST_URI_TOO_LONG,             # 414
                       httplib.UNSUPPORTED_MEDIA_TYPE,           # 415
                       httplib.REQUESTED_RANGE_NOT_SATISFIABLE,  # 416
                       httplib.EXPECTATION_FAILED,               # 417
                       httplib.UNPROCESSABLE_ENTITY,             # 422
                       httplib.LOCKED,                           # 423
                       httplib.FAILED_DEPENDENCY,                # 424
                       httplib.UPGRADE_REQUIRED,                 # 426
                       httplib.NOT_IMPLEMENTED,                  # 501
                       httplib.HTTP_VERSION_NOT_SUPPORTED,       # 505
                       httplib.NOT_EXTENDED,                     # 510
                       ):
        raise PluginError("Got Permanent Error response [%i] while "
                          "uploading image [%s] to glance [%s]"
                          % (resp.status, image_id, url))
    # NOTE(nikhil): Only a sub-set of the 500 errors are retryable. We
    # optimistically retry on 500 errors below.
    elif resp.status in (httplib.REQUEST_TIMEOUT,                # 408
                         httplib.INTERNAL_SERVER_ERROR,          # 500
                         httplib.BAD_GATEWAY,                    # 502
                         httplib.SERVICE_UNAVAILABLE,            # 503
                         httplib.GATEWAY_TIMEOUT,                # 504
                         httplib.INSUFFICIENT_STORAGE,           # 507
                         ):
        raise RetryableError("Got Ephemeral Error response [%i] while "
                             "uploading image [%s] to glance [%s]"
                             % (resp.status, image_id, url))
    else:
        # Note(Jesse): Assume unexpected errors are retryable. If you are
        # seeing this error message, the error should probably be added
        # to either the ephemeral or permanent error list.
        raise RetryableError("Got Unexpected Error response [%i] while "
                             "uploading image [%s] to glance [%s]"
                             % (resp.status, image_id, url))
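A quick sketch of how the triage above plays out for representative statuses: 404 is permanent, 503 is ephemeral, and an unlisted status such as 418 falls through to the optimistic retry branch (FakeResp is a hypothetical stand-in for an httplib response):

class FakeResp(object):
    def __init__(self, status):
        self.status = status

for status, expected in ((404, PluginError),
                         (503, RetryableError),
                         (418, RetryableError)):
    try:
        check_resp_status_and_retry(FakeResp(status), 'img', 'http://glance')
    except expected:
        pass  # each status raises the class documented above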
def validate_image_status_before_upload_v1(conn, url, extra_headers):
    try:
        parts = urlparse(url)
        path = parts[2]
        image_id = path.split('/')[-1]
        # NOTE(nikhil): Attempt to determine if the Image has a status
        # of 'queued'. Because data will continue to be sent to Glance
        # until it has a chance to check the Image state, discover that
        # it is not 'active' and send back a 409. Hence, the data will be
        # unnecessarily buffered by Glance. This wastes time and bandwidth.
        # LP bug #1202785

        conn.request('HEAD', path, headers=extra_headers)
        head_resp = conn.getresponse()
        # NOTE(nikhil): read the response to re-use the conn object.
        body_data = head_resp.read(8192)
        if len(body_data) > 8:
            err_msg = ('Cannot upload data for image %(image_id)s as the '
                       'HEAD call had more than 8192 bytes of data in '
                       'the response body.' % {'image_id': image_id})
            raise PluginError("Got Permanent Error while uploading image "
                              "[%s] to glance [%s]. "
                              "Message: %s" % (image_id, url,
                                               err_msg))
        else:
            head_resp.read()

    except Exception, error:  # noqa
        logging.exception('Failed to HEAD the image %(image_id)s while '
                          'checking image status before attempting to '
                          'upload %(url)s' % {'image_id': image_id,
                                              'url': url})
        raise RetryableError(error)

    if head_resp.status != httplib.OK:
        logging.error("Unexpected response while doing a HEAD call "
                      "to image %s , url = %s , Response Status: "
                      "%i" % (image_id, url, head_resp.status))

        check_resp_status_and_retry(head_resp, image_id, url)

    else:
        image_status = head_resp.getheader('x-image-meta-status')
        if image_status not in ('queued', ):
            err_msg = ('Cannot upload data for image %(image_id)s as the '
                       'image status is %(image_status)s' %
                       {'image_id': image_id, 'image_status': image_status})
            logging.exception(err_msg)
            raise PluginError("Got Permanent Error while uploading image "
                              "[%s] to glance [%s]. "
                              "Message: %s" % (image_id, url,
                                               err_msg))
        else:
            logging.info('Found image %(image_id)s in status '
                         '%(image_status)s. Attempting to '
                         'upload.' % {'image_id': image_id,
                                      'image_status': image_status})


def validate_image_status_before_upload_v2(conn, url, extra_headers):
    try:
        parts = urlparse(url)
        path = parts[2]
        image_id = path.split('/')[-2]
        # NOTE(nikhil): Attempt to determine if the Image has a status
        # of 'queued'. Because data will continue to be sent to Glance
        # until it has a chance to check the Image state, discover that
        # it is not 'active' and send back a 409. Hence, the data will be
        # unnecessarily buffered by Glance. This wastes time and bandwidth.
        # LP bug #1202785

        conn.request('GET', '/v2/images/%s' % image_id, headers=extra_headers)
        get_resp = conn.getresponse()
    except Exception, error:  # noqa
        logging.exception('Failed to GET the image %(image_id)s while '
                          'checking image status before attempting to '
                          'upload %(url)s' % {'image_id': image_id,
                                              'url': url})
        raise RetryableError(error)

    if get_resp.status != httplib.OK:
        logging.error("Unexpected response while doing a GET call "
                      "to image %s , url = %s , Response Status: "
                      "%i" % (image_id, url, get_resp.status))

        check_resp_status_and_retry(get_resp, image_id, url)

    else:
        body = json.loads(get_resp.read())
        image_status = body['status']
        if image_status not in ('queued', ):
            err_msg = ('Cannot upload data for image %(image_id)s as the '
                       'image status is %(image_status)s' %
                       {'image_id': image_id, 'image_status': image_status})
            logging.exception(err_msg)
            raise PluginError("Got Permanent Error while uploading image "
                              "[%s] to glance [%s]. "
                              "Message: %s" % (image_id, url,
                                               err_msg))
        else:
            logging.info('Found image %(image_id)s in status '
                         '%(image_status)s. Attempting to '
                         'upload.' % {'image_id': image_id,
                                      'image_status': image_status})
        get_resp.read()
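Note the different image-id extraction in the two validators above: the v1 upload URL ends in the image id, while the v2 URL ends in /file, so the id is the second-to-last path segment. A quick illustration with sample URLs:

from urlparse import urlparse

v1_path = urlparse('http://glance:9292/v1/images/abc123')[2]
v2_path = urlparse('http://glance:9292/v2/images/abc123/file')[2]
assert v1_path.split('/')[-1] == 'abc123'   # v1: last segment
assert v2_path.split('/')[-2] == 'abc123'   # v2: segment before '/file'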
def download_vhd2(session, image_id, endpoint,
                  uuid_stack, sr_path, extra_headers, api_version=1):
    """Download an image from Glance, unbundle it, and then deposit the
    VHDs into the storage repository.
    """
    staging_path = utils.make_staging_area(sr_path)
    try:
        # Download tarball into staging area and extract it
        # TODO(mfedosin): remove this check when v1 is deprecated.
        if api_version == 1:
            _download_tarball_by_url_v1(
                sr_path, staging_path, image_id,
                endpoint, extra_headers)
        else:
            _download_tarball_by_url_v2(
                sr_path, staging_path, image_id,
                endpoint, extra_headers)

        # Move the VHDs from the staging area into the storage repository
        return utils.import_vhds(sr_path, staging_path, uuid_stack)
    finally:
        utils.cleanup_staging_area(staging_path)


def upload_vhd2(session, vdi_uuids, image_id, endpoint, sr_path,
                extra_headers, properties, api_version=1):
    """Bundle the VHDs comprising an image and then stream them into
    Glance.
    """
    staging_path = utils.make_staging_area(sr_path)
    try:
        utils.prepare_staging_area(sr_path, staging_path, vdi_uuids)
        # TODO(mfedosin): remove this check when v1 is deprecated.
        if api_version == 1:
            _upload_tarball_by_url_v1(staging_path, image_id,
                                      endpoint, extra_headers, properties)
        else:
            _upload_tarball_by_url_v2(staging_path, image_id,
                                      endpoint, extra_headers, properties)
    finally:
        utils.cleanup_staging_area(staging_path)


if __name__ == '__main__':
    utils.register_plugin_calls(download_vhd2, upload_vhd2)
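The functions registered in __main__ above are reachable through the standard XenAPI plugin dispatch. A sketch, assuming dom0 access (the credentials, host selection and 'params' payload are illustrative; register_plugin_calls expects the pickled kwargs blob that Nova's session code normally builds):

import XenAPI  # client bindings shipped with XenServer

session = XenAPI.xapi_local()
session.xenapi.login_with_password('root', 'password')
host = session.xenapi.host.get_all()[0]

# Placeholder only: the real 'params' value is a pickled kwargs dict.
args = {'params': '<pickled kwargs>'}
result = session.xenapi.host.call_plugin(host, 'glance.py',
                                         'download_vhd2', args)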
@ -1,140 +0,0 @@
#!/usr/bin/env python

# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true

"""Inject network configuration into iPXE ISO for boot."""

import logging
import os
import shutil

import utils

# FIXME(sirp): should this use pluginlib from 5.6?
import pluginlib_nova
pluginlib_nova.configure_logging('ipxe')


ISOLINUX_CFG = """SAY iPXE ISO boot image
TIMEOUT 30
DEFAULT ipxe.krn
LABEL ipxe.krn
  KERNEL ipxe.krn
  INITRD netcfg.ipxe
"""

NETCFG_IPXE = """#!ipxe
:start
imgfree
ifclose net0
set net0/ip %(ip_address)s
set net0/netmask %(netmask)s
set net0/gateway %(gateway)s
set dns %(dns)s
ifopen net0
goto menu

:menu
chain %(boot_menu_url)s
goto boot

:boot
sanboot --no-describe --drive 0x80
"""


def _write_file(filename, data):
    # If the ISO was tampered with such that the destination is a symlink,
    # that could allow a malicious user to write to protected areas of the
    # dom0 filesystem. /HT to comstud for pointing this out.
    #
    # Short-term, checking that the destination is not a symlink should be
    # sufficient.
    #
    # Long-term, we probably want to perform all file manipulations within a
    # chroot jail to be extra safe.
    if os.path.islink(filename):
        raise RuntimeError('SECURITY: Cannot write to symlinked destination')

    logging.debug("Writing to file '%s'" % filename)
    f = open(filename, 'w')
    try:
        f.write(data)
    finally:
        f.close()


def _unbundle_iso(sr_path, filename, path):
    logging.debug("Unbundling ISO '%s'" % filename)
    read_only_path = utils.make_staging_area(sr_path)
    try:
        utils.run_command(['mount', '-o', 'loop', filename, read_only_path])
        try:
            shutil.copytree(read_only_path, path)
        finally:
            utils.run_command(['umount', read_only_path])
    finally:
        utils.cleanup_staging_area(read_only_path)


def _create_iso(mkisofs_cmd, filename, path):
    logging.debug("Creating ISO '%s'..." % filename)
    orig_dir = os.getcwd()
    os.chdir(path)
    try:
        utils.run_command([mkisofs_cmd, '-quiet', '-l', '-o', filename,
                           '-c', 'boot.cat', '-b', 'isolinux.bin',
                           '-no-emul-boot', '-boot-load-size', '4',
                           '-boot-info-table', '.'])
    finally:
        os.chdir(orig_dir)


def inject(session, sr_path, vdi_uuid, boot_menu_url, ip_address, netmask,
           gateway, dns, mkisofs_cmd):

    iso_filename = '%s.img' % os.path.join(sr_path, 'iso', vdi_uuid)

    # Create staging area so we have a unique path but remove it since
    # shutil.copytree will recreate it
    staging_path = utils.make_staging_area(sr_path)
    utils.cleanup_staging_area(staging_path)

    try:
        _unbundle_iso(sr_path, iso_filename, staging_path)

        # Write Configs
        _write_file(os.path.join(staging_path, 'netcfg.ipxe'),
                    NETCFG_IPXE % {"ip_address": ip_address,
                                   "netmask": netmask,
                                   "gateway": gateway,
                                   "dns": dns,
                                   "boot_menu_url": boot_menu_url})

        _write_file(os.path.join(staging_path, 'isolinux.cfg'),
                    ISOLINUX_CFG)

        _create_iso(mkisofs_cmd, iso_filename, staging_path)
    finally:
        utils.cleanup_staging_area(staging_path)


if __name__ == "__main__":
    utils.register_plugin_calls(inject)
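With illustrative values, the NETCFG_IPXE template above renders into the netcfg.ipxe script that gets embedded in the ISO (all sample addresses and the menu URL are hypothetical):

print NETCFG_IPXE % {'ip_address': '10.0.0.5',
                     'netmask': '255.255.255.0',
                     'gateway': '10.0.0.1',
                     'dns': '8.8.8.8',
                     'boot_menu_url': 'http://boot.example.com/menu.ipxe'}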
1
plugins/xenserver/xenapi/etc/xapi.d/plugins/ipxe
Symbolic link
@ -0,0 +1 @@
ipxe.py
140
plugins/xenserver/xenapi/etc/xapi.d/plugins/ipxe.py
Executable file
@ -0,0 +1,140 @@
#!/usr/bin/env python

# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true

"""Inject network configuration into iPXE ISO for boot."""

import logging
import os
import shutil

import utils

# FIXME(sirp): should this use pluginlib from 5.6?
import pluginlib_nova
pluginlib_nova.configure_logging('ipxe')


ISOLINUX_CFG = """SAY iPXE ISO boot image
TIMEOUT 30
DEFAULT ipxe.krn
LABEL ipxe.krn
  KERNEL ipxe.krn
  INITRD netcfg.ipxe
"""

NETCFG_IPXE = """#!ipxe
:start
imgfree
ifclose net0
set net0/ip %(ip_address)s
set net0/netmask %(netmask)s
set net0/gateway %(gateway)s
set dns %(dns)s
ifopen net0
goto menu

:menu
chain %(boot_menu_url)s
goto boot

:boot
sanboot --no-describe --drive 0x80
"""


def _write_file(filename, data):
    # If the ISO was tampered with such that the destination is a symlink,
    # that could allow a malicious user to write to protected areas of the
    # dom0 filesystem. /HT to comstud for pointing this out.
    #
    # Short-term, checking that the destination is not a symlink should be
    # sufficient.
    #
    # Long-term, we probably want to perform all file manipulations within a
    # chroot jail to be extra safe.
    if os.path.islink(filename):
        raise RuntimeError('SECURITY: Cannot write to symlinked destination')

    logging.debug("Writing to file '%s'" % filename)
    f = open(filename, 'w')
    try:
        f.write(data)
    finally:
        f.close()


def _unbundle_iso(sr_path, filename, path):
    logging.debug("Unbundling ISO '%s'" % filename)
    read_only_path = utils.make_staging_area(sr_path)
    try:
        utils.run_command(['mount', '-o', 'loop', filename, read_only_path])
        try:
            shutil.copytree(read_only_path, path)
        finally:
            utils.run_command(['umount', read_only_path])
    finally:
        utils.cleanup_staging_area(read_only_path)


def _create_iso(mkisofs_cmd, filename, path):
    logging.debug("Creating ISO '%s'..." % filename)
    orig_dir = os.getcwd()
    os.chdir(path)
    try:
        utils.run_command([mkisofs_cmd, '-quiet', '-l', '-o', filename,
                           '-c', 'boot.cat', '-b', 'isolinux.bin',
                           '-no-emul-boot', '-boot-load-size', '4',
                           '-boot-info-table', '.'])
    finally:
        os.chdir(orig_dir)


def inject(session, sr_path, vdi_uuid, boot_menu_url, ip_address, netmask,
           gateway, dns, mkisofs_cmd):

    iso_filename = '%s.img' % os.path.join(sr_path, 'iso', vdi_uuid)

    # Create staging area so we have a unique path but remove it since
    # shutil.copytree will recreate it
    staging_path = utils.make_staging_area(sr_path)
    utils.cleanup_staging_area(staging_path)

    try:
        _unbundle_iso(sr_path, iso_filename, staging_path)

        # Write Configs
        _write_file(os.path.join(staging_path, 'netcfg.ipxe'),
                    NETCFG_IPXE % {"ip_address": ip_address,
                                   "netmask": netmask,
                                   "gateway": gateway,
                                   "dns": dns,
                                   "boot_menu_url": boot_menu_url})

        _write_file(os.path.join(staging_path, 'isolinux.cfg'),
                    ISOLINUX_CFG)

        _create_iso(mkisofs_cmd, iso_filename, staging_path)
    finally:
        utils.cleanup_staging_area(staging_path)


if __name__ == "__main__":
    utils.register_plugin_calls(inject)
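The os.path.islink() check in _write_file above is the entire defense against the tampered-ISO scenario its comment describes. A small sketch of the condition it refuses (paths are illustrative):

import os
import tempfile

workdir = tempfile.mkdtemp()
target = os.path.join(workdir, 'protected-file')
link = os.path.join(workdir, 'netcfg.ipxe')
open(target, 'w').close()
os.symlink(target, link)

# _write_file would raise RuntimeError here: writing through the link
# would modify 'protected-file' outside the staging area.
assert os.path.islink(link)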
@ -1,143 +0,0 @@
#!/usr/bin/env python

# Copyright (c) 2012 OpenStack Foundation
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true

"""Handle the manipulation of kernel images."""

import errno
import os
import shutil

import XenAPIPlugin

import pluginlib_nova


pluginlib_nova.configure_logging('kernel')
logging = pluginlib_nova.logging
exists = pluginlib_nova.exists
optional = pluginlib_nova.optional
with_vdi_in_dom0 = pluginlib_nova.with_vdi_in_dom0


KERNEL_DIR = '/boot/guest'


def _copy_vdi(dest, copy_args):
    vdi_uuid = copy_args['vdi_uuid']
    vdi_size = copy_args['vdi_size']
    cached_image = copy_args['cached-image']

    logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",
                  dest, vdi_uuid)
    filename = KERNEL_DIR + '/' + vdi_uuid

    # Make sure KERNEL_DIR exists, otherwise create it
    if not os.path.isdir(KERNEL_DIR):
        logging.debug("Creating directory %s", KERNEL_DIR)
        os.makedirs(KERNEL_DIR)

    # Read data from /dev/ and write into a file on /boot/guest
    of = open(filename, 'wb')
    f = open(dest, 'rb')

    # Copy only vdi_size bytes
    data = f.read(vdi_size)
    of.write(data)

    if cached_image:
        # Create a cache file. If caching is enabled, kernel images do not have
        # to be fetched from glance.
        cached_image = KERNEL_DIR + '/' + cached_image
        logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",
                      dest, cached_image)
        cache_file = open(cached_image, 'wb')
        cache_file.write(data)
        cache_file.close()
        logging.debug("Done. Filename: %s", cached_image)

    f.close()
    of.close()
    logging.debug("Done. Filename: %s", filename)
    return filename


def copy_vdi(session, args):
    vdi = exists(args, 'vdi-ref')
    size = exists(args, 'image-size')
    cached_image = optional(args, 'cached-image')

    # Use the uuid as a filename
    vdi_uuid = session.xenapi.VDI.get_uuid(vdi)
    copy_args = {'vdi_uuid': vdi_uuid,
                 'vdi_size': int(size),
                 'cached-image': cached_image}

    filename = with_vdi_in_dom0(session, vdi, False,
                                lambda dev:
                                _copy_vdi('/dev/%s' % dev, copy_args))
    return filename


def create_kernel_ramdisk(session, args):
    """Creates a copy of the kernel/ramdisk image if it is present in the
    cache. If the image is not present in the cache, it does nothing.
    """
    cached_image = exists(args, 'cached-image')
    image_uuid = exists(args, 'new-image-uuid')
    cached_image_filename = KERNEL_DIR + '/' + cached_image
    filename = KERNEL_DIR + '/' + image_uuid

    if os.path.isfile(cached_image_filename):
        shutil.copyfile(cached_image_filename, filename)
        logging.debug("Done. Filename: %s", filename)
    else:
        filename = ""
        logging.debug("Cached kernel/ramdisk image not found")
    return filename


def _remove_file(filepath):
    try:
        os.remove(filepath)
    except OSError, exc:  # noqa
        if exc.errno != errno.ENOENT:
            raise


def remove_kernel_ramdisk(session, args):
    """Removes kernel and/or ramdisk from dom0's file system."""
    kernel_file = optional(args, 'kernel-file')
    ramdisk_file = optional(args, 'ramdisk-file')
    if kernel_file:
        _remove_file(kernel_file)
    if ramdisk_file:
        _remove_file(ramdisk_file)
    return "ok"


if __name__ == '__main__':
    XenAPIPlugin.dispatch({'copy_vdi': copy_vdi,
                           'create_kernel_ramdisk': create_kernel_ramdisk,
                           'remove_kernel_ramdisk': remove_kernel_ramdisk})
1
plugins/xenserver/xenapi/etc/xapi.d/plugins/kernel
Symbolic link
@ -0,0 +1 @@
kernel.py
143
plugins/xenserver/xenapi/etc/xapi.d/plugins/kernel.py
Executable file
@ -0,0 +1,143 @@
#!/usr/bin/env python

# Copyright (c) 2012 OpenStack Foundation
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true

"""Handle the manipulation of kernel images."""

import errno
import os
import shutil

import XenAPIPlugin

import pluginlib_nova


pluginlib_nova.configure_logging('kernel')
logging = pluginlib_nova.logging
exists = pluginlib_nova.exists
optional = pluginlib_nova.optional
with_vdi_in_dom0 = pluginlib_nova.with_vdi_in_dom0


KERNEL_DIR = '/boot/guest'


def _copy_vdi(dest, copy_args):
    vdi_uuid = copy_args['vdi_uuid']
    vdi_size = copy_args['vdi_size']
    cached_image = copy_args['cached-image']

    logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",
                  dest, vdi_uuid)
    filename = KERNEL_DIR + '/' + vdi_uuid

    # Make sure KERNEL_DIR exists, otherwise create it
    if not os.path.isdir(KERNEL_DIR):
        logging.debug("Creating directory %s", KERNEL_DIR)
        os.makedirs(KERNEL_DIR)

    # Read data from /dev/ and write into a file on /boot/guest
    of = open(filename, 'wb')
    f = open(dest, 'rb')

    # Copy only vdi_size bytes
    data = f.read(vdi_size)
    of.write(data)

    if cached_image:
        # Create a cache file. If caching is enabled, kernel images do not have
        # to be fetched from glance.
        cached_image = KERNEL_DIR + '/' + cached_image
        logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",
                      dest, cached_image)
        cache_file = open(cached_image, 'wb')
        cache_file.write(data)
        cache_file.close()
        logging.debug("Done. Filename: %s", cached_image)

    f.close()
    of.close()
    logging.debug("Done. Filename: %s", filename)
    return filename


def copy_vdi(session, args):
    vdi = exists(args, 'vdi-ref')
    size = exists(args, 'image-size')
    cached_image = optional(args, 'cached-image')

    # Use the uuid as a filename
    vdi_uuid = session.xenapi.VDI.get_uuid(vdi)
    copy_args = {'vdi_uuid': vdi_uuid,
                 'vdi_size': int(size),
                 'cached-image': cached_image}

    filename = with_vdi_in_dom0(session, vdi, False,
                                lambda dev:
                                _copy_vdi('/dev/%s' % dev, copy_args))
    return filename


def create_kernel_ramdisk(session, args):
    """Creates a copy of the kernel/ramdisk image if it is present in the
    cache. If the image is not present in the cache, it does nothing.
    """
    cached_image = exists(args, 'cached-image')
    image_uuid = exists(args, 'new-image-uuid')
    cached_image_filename = KERNEL_DIR + '/' + cached_image
    filename = KERNEL_DIR + '/' + image_uuid

    if os.path.isfile(cached_image_filename):
        shutil.copyfile(cached_image_filename, filename)
        logging.debug("Done. Filename: %s", filename)
    else:
        filename = ""
        logging.debug("Cached kernel/ramdisk image not found")
    return filename


def _remove_file(filepath):
    try:
        os.remove(filepath)
    except OSError, exc:  # noqa
        if exc.errno != errno.ENOENT:
            raise


def remove_kernel_ramdisk(session, args):
    """Removes kernel and/or ramdisk from dom0's file system."""
    kernel_file = optional(args, 'kernel-file')
    ramdisk_file = optional(args, 'ramdisk-file')
    if kernel_file:
        _remove_file(kernel_file)
    if ramdisk_file:
        _remove_file(ramdisk_file)
    return "ok"


if __name__ == '__main__':
    XenAPIPlugin.dispatch({'copy_vdi': copy_vdi,
                           'create_kernel_ramdisk': create_kernel_ramdisk,
                           'remove_kernel_ramdisk': remove_kernel_ramdisk})
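Unlike the glance plugin, kernel.py still dispatches through raw XenAPIPlugin.dispatch, so its arguments arrive as a flat dict of strings. A sketch of a direct call, assuming dom0 access (the ref, size and cache key are illustrative):

import XenAPI

session = XenAPI.xapi_local()
session.xenapi.login_with_password('root', 'password')
host = session.xenapi.host.get_all()[0]

args = {'vdi-ref': 'OpaqueRef:1234',  # required by exists()
        'image-size': '8388608',      # string; copy_vdi casts to int
        'cached-image': 'deadbeef'}   # picked up by optional()
filename = session.xenapi.host.call_plugin(host, 'kernel.py',
                                           'copy_vdi', args)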
@ -1,84 +0,0 @@
#!/usr/bin/env python

# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

"""
XenAPI Plugin for transferring data between host nodes
"""
import utils

import pluginlib_nova


pluginlib_nova.configure_logging('migration')
logging = pluginlib_nova.logging


def move_vhds_into_sr(session, instance_uuid, sr_path, uuid_stack):
    """Moves the VHDs from their copied location to the SR."""
    staging_path = "/images/instance%s" % instance_uuid
    imported_vhds = utils.import_vhds(sr_path, staging_path, uuid_stack)
    utils.cleanup_staging_area(staging_path)
    return imported_vhds


def _rsync_vhds(instance_uuid, host, staging_path, user="root"):
    if not staging_path.endswith('/'):
        staging_path += '/'

    dest_path = '/images/instance%s/' % (instance_uuid)

    ip_cmd = ["/sbin/ip", "addr", "show"]
    output = utils.run_command(ip_cmd)
    if ' %s/' % host in output:
        # If copying to localhost, don't use SSH
        rsync_cmd = ["/usr/bin/rsync", "-av", "--progress",
                     staging_path, dest_path]
    else:
        ssh_cmd = 'ssh -o StrictHostKeyChecking=no'
        rsync_cmd = ["/usr/bin/rsync", "-av", "--progress", "-e", ssh_cmd,
                     staging_path, '%s@%s:%s' % (user, host, dest_path)]

    # NOTE(hillad): rsync's progress is carriage returned, requiring
    # universal_newlines for real-time output.

    rsync_proc = utils.make_subprocess(rsync_cmd, stdout=True, stderr=True,
                                       universal_newlines=True)
    while True:
        rsync_progress = rsync_proc.stdout.readline()
        if not rsync_progress:
            break
        logging.debug("[%s] %s" % (instance_uuid, rsync_progress))

    utils.finish_subprocess(rsync_proc, rsync_cmd)


def transfer_vhd(session, instance_uuid, host, vdi_uuid, sr_path, seq_num):
    """Rsyncs a VHD to an adjacent host."""
    staging_path = utils.make_staging_area(sr_path)
    try:
        utils.prepare_staging_area(
            sr_path, staging_path, [vdi_uuid], seq_num=seq_num)
        _rsync_vhds(instance_uuid, host, staging_path)
    finally:
        utils.cleanup_staging_area(staging_path)


if __name__ == '__main__':
    utils.register_plugin_calls(move_vhds_into_sr, transfer_vhd)
1
plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
Symbolic link
@ -0,0 +1 @@
migration.py
84
plugins/xenserver/xenapi/etc/xapi.d/plugins/migration.py
Executable file
@ -0,0 +1,84 @@
#!/usr/bin/env python

# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

"""
XenAPI Plugin for transferring data between host nodes
"""
import utils

import pluginlib_nova


pluginlib_nova.configure_logging('migration')
logging = pluginlib_nova.logging


def move_vhds_into_sr(session, instance_uuid, sr_path, uuid_stack):
    """Moves the VHDs from their copied location to the SR."""
    staging_path = "/images/instance%s" % instance_uuid
    imported_vhds = utils.import_vhds(sr_path, staging_path, uuid_stack)
    utils.cleanup_staging_area(staging_path)
    return imported_vhds


def _rsync_vhds(instance_uuid, host, staging_path, user="root"):
    if not staging_path.endswith('/'):
        staging_path += '/'

    dest_path = '/images/instance%s/' % (instance_uuid)

    ip_cmd = ["/sbin/ip", "addr", "show"]
    output = utils.run_command(ip_cmd)
    if ' %s/' % host in output:
        # If copying to localhost, don't use SSH
        rsync_cmd = ["/usr/bin/rsync", "-av", "--progress",
                     staging_path, dest_path]
    else:
        ssh_cmd = 'ssh -o StrictHostKeyChecking=no'
        rsync_cmd = ["/usr/bin/rsync", "-av", "--progress", "-e", ssh_cmd,
                     staging_path, '%s@%s:%s' % (user, host, dest_path)]

    # NOTE(hillad): rsync's progress is carriage returned, requiring
    # universal_newlines for real-time output.

    rsync_proc = utils.make_subprocess(rsync_cmd, stdout=True, stderr=True,
                                       universal_newlines=True)
    while True:
        rsync_progress = rsync_proc.stdout.readline()
        if not rsync_progress:
            break
        logging.debug("[%s] %s" % (instance_uuid, rsync_progress))

    utils.finish_subprocess(rsync_proc, rsync_cmd)


def transfer_vhd(session, instance_uuid, host, vdi_uuid, sr_path, seq_num):
    """Rsyncs a VHD to an adjacent host."""
    staging_path = utils.make_staging_area(sr_path)
    try:
        utils.prepare_staging_area(
            sr_path, staging_path, [vdi_uuid], seq_num=seq_num)
        _rsync_vhds(instance_uuid, host, staging_path)
    finally:
        utils.cleanup_staging_area(staging_path)


if __name__ == '__main__':
    utils.register_plugin_calls(move_vhds_into_sr, transfer_vhd)
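For context, a minimal sketch of how the compute node might drive this plugin through a XenAPI session. The wrapper function below and its argument plumbing are illustrative assumptions, not code shipped by this change; only the plugin name and the transfer_vhd signature come from the file above.

# Illustrative only: drive the migration plugin from nova-compute.
# 'session' is assumed to be Nova's XenAPI session wrapper; until the
# compatibility symlinks are dropped, callers keep the extension-less name.
def transfer_instance_vhd(session, instance, dest_host, vdi_uuid,
                          sr_path, seq_num):
    # Maps onto transfer_vhd(session, instance_uuid, host, vdi_uuid,
    # sr_path, seq_num) defined in migration.py above.
    session.call_plugin_serialized(
        'migration', 'transfer_vhd',
        instance_uuid=instance['uuid'], host=dest_host,
        vdi_uuid=vdi_uuid, sr_path=sr_path, seq_num=seq_num)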
@ -1,43 +0,0 @@
#!/usr/bin/env python

# Copyright (c) 2013 OpenStack Foundation
# Copyright (c) 2013 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

"""Returns the version of the nova plugins"""

import utils

# MAJOR VERSION: Incompatible changes
# MINOR VERSION: Compatible changes, new plugins, etc

# 1.0 - Initial version.
# 1.1 - New call to check GC status
# 1.2 - Added support for pci passthrough devices
# 1.3 - Add vhd2 functions for doing glance operations by url
# 1.4 - Add support of Glance v2 api
# 1.5 - Added function for network configuration on ovs bridge
# 1.6 - Add function for network configuration on Linux bridge
# 1.7 - Add Partition utilities plugin
PLUGIN_VERSION = "1.7"


def get_version(session):
    return PLUGIN_VERSION

if __name__ == '__main__':
    utils.register_plugin_calls(get_version)
1
plugins/xenserver/xenapi/etc/xapi.d/plugins/nova_plugin_version
Symbolic link
@ -0,0 +1 @@
nova_plugin_version.py
46
plugins/xenserver/xenapi/etc/xapi.d/plugins/nova_plugin_version.py
Executable file
@ -0,0 +1,46 @@
#!/usr/bin/env python

# Copyright (c) 2013 OpenStack Foundation
# Copyright (c) 2013 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

"""Returns the version of the nova plugins"""

import utils

# MAJOR VERSION: Incompatible changes
# MINOR VERSION: Compatible changes, new plugins, etc

# NOTE(sfinucan): 2.0 will be equivalent to the last in the 1.x stream

# 1.0 - Initial version.
# 1.1 - New call to check GC status
# 1.2 - Added support for pci passthrough devices
# 1.3 - Add vhd2 functions for doing glance operations by url
# 1.4 - Add support of Glance v2 api
# 1.5 - Added function for network configuration on ovs bridge
# 1.6 - Add function for network configuration on Linux bridge
# 1.7 - Add Partition utilities plugin
# 1.8 - Add support for calling plug-ins with the .py suffix
PLUGIN_VERSION = "1.8"


def get_version(session):
    return PLUGIN_VERSION

if __name__ == '__main__':
    utils.register_plugin_calls(get_version)
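Since 1.8 is the version that advertises '.py'-suffix support, a caller can gate the new naming on what get_version() reports. A hedged sketch; the helper below is hypothetical and assumes only the version string returned by the plugin above.

# Hypothetical helper: decide whether dom0 plugins accept the '.py' suffix.
def plugin_supports_py_suffix(session):
    version = session.call_plugin_serialized('nova_plugin_version',
                                             'get_version')
    major, minor = [int(part) for part in version.split('.')]
    # '.py'-suffixed calls were added in plugin version 1.8
    return (major, minor) >= (1, 8)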
1
plugins/xenserver/xenapi/etc/xapi.d/plugins/workarounds
Symbolic link
@ -0,0 +1 @@
workarounds.py
53
plugins/xenserver/xenapi/etc/xapi.d/plugins/workarounds.py
Executable file
@ -0,0 +1,53 @@
#!/usr/bin/env python

# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

"""Copy VDIs through a staging area and re-import them into the SR."""

import os
import shutil

import utils

import pluginlib_nova


pluginlib_nova.configure_logging('workarounds')


def _copy_vdis(sr_path, staging_path, vdi_uuids):
    seq_num = 0
    for vdi_uuid in vdi_uuids:
        src = os.path.join(sr_path, "%s.vhd" % vdi_uuid)
        dst = os.path.join(staging_path, "%d.vhd" % seq_num)
        shutil.copyfile(src, dst)
        seq_num += 1


def safe_copy_vdis(session, sr_path, vdi_uuids, uuid_stack):
    staging_path = utils.make_staging_area(sr_path)
    try:
        _copy_vdis(sr_path, staging_path, vdi_uuids)
        return utils.import_vhds(sr_path, staging_path, uuid_stack)
    finally:
        utils.cleanup_staging_area(staging_path)


if __name__ == '__main__':
    utils.register_plugin_calls(safe_copy_vdis)
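Both this plugin and migration.py above wrap their work in the same make/try/finally/cleanup staging-area idiom. A generic sketch of that pattern, assuming only the dom0 'utils' helpers these plugins already import:

# Sketch of the shared staging-area idiom (assumes the dom0 'utils' module).
import utils

def with_staging_area(sr_path, work):
    """Run work(staging_path) inside a staging area that is always removed."""
    staging_path = utils.make_staging_area(sr_path)
    try:
        return work(staging_path)
    finally:
        # The finally block guarantees no stale staging directories remain
        # in the SR, even when 'work' raises.
        utils.cleanup_staging_area(staging_path)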
1
plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
Symbolic link
@ -0,0 +1 @@
xenhost.py
626
plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost.py
Executable file
@ -0,0 +1,626 @@
#!/usr/bin/env python

# Copyright 2011 OpenStack Foundation
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features

# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true

#
# XenAPI plugin for host operations
#

try:
    import json
except ImportError:
    import simplejson as json
import logging
import re
import sys
import time

import utils

import pluginlib_nova as pluginlib
import XenAPI
import XenAPIPlugin

try:
    import xmlrpclib
except ImportError:
    import six.moves.xmlrpc_client as xmlrpclib


pluginlib.configure_logging("xenhost")
_ = pluginlib._


host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)")
config_file_path = "/usr/etc/xenhost.conf"
DEFAULT_TRIES = 23
DEFAULT_SLEEP = 10


def jsonify(fnc):
    def wrapper(*args, **kwargs):
        return json.dumps(fnc(*args, **kwargs))
    return wrapper


class TimeoutError(StandardError):
    pass


def _run_command(cmd, cmd_input=None):
    """Wrap utils.run_command to raise PluginError on failure
    """
    try:
        return utils.run_command(cmd, cmd_input=cmd_input)
    except utils.SubprocessException, e:  # noqa
        raise pluginlib.PluginError(e.err)


def _resume_compute(session, compute_ref, compute_uuid):
    """Resume compute node on slave host after pool join.

    This has to happen regardless of the success or failure of the join
    operation.
    """
    try:
        # session is valid if the join operation has failed
        session.xenapi.VM.start(compute_ref, False, True)
    except XenAPI.Failure:
        # if session is invalid, e.g. xapi has restarted, then the pool
        # join has been successful, wait for xapi to become alive again
        for c in range(0, DEFAULT_TRIES):
            try:
                _run_command(["xe", "vm-start", "uuid=%s" % compute_uuid])
                return
            except pluginlib.PluginError:
                logging.exception('Waited %d seconds for the slave to '
                                  'become available.' % (c * DEFAULT_SLEEP))
                time.sleep(DEFAULT_SLEEP)
        raise pluginlib.PluginError('Unrecoverable error: the host has '
                                    'not come back for more than %d seconds'
                                    % (DEFAULT_SLEEP * (DEFAULT_TRIES + 1)))


@jsonify
def set_host_enabled(self, arg_dict):
    """Sets this host's ability to accept new instances.
    It will otherwise continue to operate normally.
    """
    enabled = arg_dict.get("enabled")
    if enabled is None:
        raise pluginlib.PluginError(
            _("Missing 'enabled' argument to set_host_enabled"))

    host_uuid = arg_dict['host_uuid']
    if enabled == "true":
        result = _run_command(["xe", "host-enable", "uuid=%s" % host_uuid])
    elif enabled == "false":
        result = _run_command(["xe", "host-disable", "uuid=%s" % host_uuid])
    else:
        raise pluginlib.PluginError(_("Illegal enabled status: %s") % enabled)
    # Should be empty string
    if result:
        raise pluginlib.PluginError(result)
    # Return the current enabled status
    cmd = ["xe", "host-param-get", "uuid=%s" % host_uuid, "param-name=enabled"]
    host_enabled = _run_command(cmd)
    if host_enabled == "true":
        status = "enabled"
    else:
        status = "disabled"
    return {"status": status}


def _write_config_dict(dct):
    conf_file = file(config_file_path, "w")
    json.dump(dct, conf_file)
    conf_file.close()


def _get_config_dict():
    """Returns a dict containing the key/values in the config file.

    If the file doesn't exist, it is created, and an empty dict
    is returned.
    """
    try:
        conf_file = file(config_file_path)
        config_dct = json.load(conf_file)
        conf_file.close()
    except IOError:
        # File doesn't exist
        config_dct = {}
        # Create the file
        _write_config_dict(config_dct)
    return config_dct


@jsonify
def get_config(self, arg_dict):
    """Return the value stored for the specified key, or None if no match."""
    conf = _get_config_dict()
    params = arg_dict["params"]
    try:
        dct = json.loads(params)
    except Exception:
        dct = params
    key = dct["key"]
    ret = conf.get(key)
    if ret is None:
        # Can't jsonify None
        return "None"
    return ret


@jsonify
def set_config(self, arg_dict):
    """Write the specified key/value pair, overwriting any existing value."""
    conf = _get_config_dict()
    params = arg_dict["params"]
    try:
        dct = json.loads(params)
    except Exception:
        dct = params
    key = dct["key"]
    val = dct["value"]
    if val is None:
        # Delete the key, if present
        conf.pop(key, None)
    else:
        conf.update({key: val})
    _write_config_dict(conf)


def iptables_config(session, args):
    # command should be either save or restore
    logging.debug("iptables_config:enter")
    logging.debug("iptables_config: args=%s", args)
    cmd_args = pluginlib.exists(args, 'cmd_args')
    logging.debug("iptables_config: cmd_args=%s", cmd_args)
    process_input = pluginlib.optional(args, 'process_input')
    logging.debug("iptables_config: process_input=%s", process_input)
    cmd = json.loads(cmd_args)
    cmd = map(str, cmd)

    # either execute iptables-save or iptables-restore
    # command must be only one of these
    # process_input must be used only with iptables-restore
    if len(cmd) > 0 and cmd[0] in ('iptables-save',
                                   'iptables-restore',
                                   'ip6tables-save',
                                   'ip6tables-restore'):
        result = _run_command(cmd, process_input)
        ret_str = json.dumps(dict(out=result, err=''))
        logging.debug("iptables_config:exit")
        return ret_str
    # else don't do anything and return an error
    else:
        raise pluginlib.PluginError(_("Invalid iptables command"))


def _ovs_add_patch_port(args):
    bridge_name = pluginlib.exists(args, 'bridge_name')
    port_name = pluginlib.exists(args, 'port_name')
    peer_port_name = pluginlib.exists(args, 'peer_port_name')
    cmd_args = ['ovs-vsctl', '--', '--if-exists', 'del-port',
                port_name, '--', 'add-port', bridge_name, port_name,
                '--', 'set', 'interface', port_name,
                'type=patch', 'options:peer=%s' % peer_port_name]
    return _run_command(cmd_args)


def _ovs_del_port(args):
    bridge_name = pluginlib.exists(args, 'bridge_name')
    port_name = pluginlib.exists(args, 'port_name')
    cmd_args = ['ovs-vsctl', '--', '--if-exists', 'del-port',
                bridge_name, port_name]
    return _run_command(cmd_args)


def _ovs_del_br(args):
    bridge_name = pluginlib.exists(args, 'bridge_name')
    cmd_args = ['ovs-vsctl', '--', '--if-exists',
                'del-br', bridge_name]
    return _run_command(cmd_args)


def _ovs_set_if_external_id(args):
    interface = pluginlib.exists(args, 'interface')
    extneral_id = pluginlib.exists(args, 'extneral_id')
    value = pluginlib.exists(args, 'value')
    cmd_args = ['ovs-vsctl', 'set', 'Interface', interface,
                'external-ids:%s=%s' % (extneral_id, value)]
    return _run_command(cmd_args)


def _ovs_add_port(args):
    bridge_name = pluginlib.exists(args, 'bridge_name')
    port_name = pluginlib.exists(args, 'port_name')
    cmd_args = ['ovs-vsctl', '--', '--if-exists', 'del-port', port_name,
                '--', 'add-port', bridge_name, port_name]
    return _run_command(cmd_args)


def _ip_link_get_dev(args):
    device_name = pluginlib.exists(args, 'device_name')
    cmd_args = ['ip', 'link', 'show', device_name]
    return _run_command(cmd_args)


def _ip_link_del_dev(args):
    device_name = pluginlib.exists(args, 'device_name')
    cmd_args = ['ip', 'link', 'delete', device_name]
    return _run_command(cmd_args)


def _ip_link_add_veth_pair(args):
    dev1_name = pluginlib.exists(args, 'dev1_name')
    dev2_name = pluginlib.exists(args, 'dev2_name')
    cmd_args = ['ip', 'link', 'add', dev1_name, 'type', 'veth', 'peer',
                'name', dev2_name]
    return _run_command(cmd_args)


def _ip_link_set_dev(args):
    device_name = pluginlib.exists(args, 'device_name')
    option = pluginlib.exists(args, 'option')
    cmd_args = ['ip', 'link', 'set', device_name, option]
    return _run_command(cmd_args)


def _ip_link_set_promisc(args):
    device_name = pluginlib.exists(args, 'device_name')
    option = pluginlib.exists(args, 'option')
    cmd_args = ['ip', 'link', 'set', device_name, 'promisc', option]
    return _run_command(cmd_args)


def _brctl_add_br(args):
    bridge_name = pluginlib.exists(args, 'bridge_name')
    cmd_args = ['brctl', 'addbr', bridge_name]
    return _run_command(cmd_args)


def _brctl_del_br(args):
    bridge_name = pluginlib.exists(args, 'bridge_name')
    cmd_args = ['brctl', 'delbr', bridge_name]
    return _run_command(cmd_args)


def _brctl_set_fd(args):
    bridge_name = pluginlib.exists(args, 'bridge_name')
    fd = pluginlib.exists(args, 'fd')
    cmd_args = ['brctl', 'setfd', bridge_name, fd]
    return _run_command(cmd_args)


def _brctl_set_stp(args):
    bridge_name = pluginlib.exists(args, 'bridge_name')
    option = pluginlib.exists(args, 'option')
    cmd_args = ['brctl', 'stp', bridge_name, option]
    return _run_command(cmd_args)


def _brctl_add_if(args):
    bridge_name = pluginlib.exists(args, 'bridge_name')
    if_name = pluginlib.exists(args, 'interface_name')
    cmd_args = ['brctl', 'addif', bridge_name, if_name]
    return _run_command(cmd_args)


def _brctl_del_if(args):
    bridge_name = pluginlib.exists(args, 'bridge_name')
    if_name = pluginlib.exists(args, 'interface_name')
    cmd_args = ['brctl', 'delif', bridge_name, if_name]
    return _run_command(cmd_args)


ALLOWED_NETWORK_CMDS = {
    # allowed cmds to config OVS bridge
    'ovs_add_patch_port': _ovs_add_patch_port,
    'ovs_add_port': _ovs_add_port,
    'ovs_del_port': _ovs_del_port,
    'ovs_del_br': _ovs_del_br,
    'ovs_set_if_external_id': _ovs_set_if_external_id,
    'ip_link_add_veth_pair': _ip_link_add_veth_pair,
    'ip_link_del_dev': _ip_link_del_dev,
    'ip_link_get_dev': _ip_link_get_dev,
    'ip_link_set_dev': _ip_link_set_dev,
    'ip_link_set_promisc': _ip_link_set_promisc,
    'brctl_add_br': _brctl_add_br,
    'brctl_add_if': _brctl_add_if,
    'brctl_del_br': _brctl_del_br,
    'brctl_del_if': _brctl_del_if,
    'brctl_set_fd': _brctl_set_fd,
    'brctl_set_stp': _brctl_set_stp
}


def network_config(session, args):
    """network config functions"""
    cmd = pluginlib.exists(args, 'cmd')
    if not isinstance(cmd, basestring):
        msg = _("invalid command '%s'") % str(cmd)
        raise pluginlib.PluginError(msg)
    if cmd not in ALLOWED_NETWORK_CMDS:
        msg = _("Dom0 execution of '%s' is not permitted") % cmd
        raise pluginlib.PluginError(msg)
    cmd_args = pluginlib.exists(args, 'args')
    return ALLOWED_NETWORK_CMDS[cmd](cmd_args)


def _power_action(action, arg_dict):
    # Host must be disabled first
    host_uuid = arg_dict['host_uuid']
    result = _run_command(["xe", "host-disable", "uuid=%s" % host_uuid])
    if result:
        raise pluginlib.PluginError(result)
    # All running VMs must be shutdown
    result = _run_command(["xe", "vm-shutdown", "--multiple",
                           "resident-on=%s" % host_uuid])
    if result:
        raise pluginlib.PluginError(result)
    cmds = {"reboot": "host-reboot",
            "startup": "host-power-on",
            "shutdown": "host-shutdown"}
    result = _run_command(["xe", cmds[action], "uuid=%s" % host_uuid])
    # Should be empty string
    if result:
        raise pluginlib.PluginError(result)
    return {"power_action": action}


@jsonify
def host_reboot(self, arg_dict):
    """Reboots the host."""
    return _power_action("reboot", arg_dict)


@jsonify
def host_shutdown(self, arg_dict):
    """Shuts down the host."""
    return _power_action("shutdown", arg_dict)


@jsonify
def host_start(self, arg_dict):
    """Starts the host.

    Currently not feasible, since the host runs on the same machine as
    Xen.
    """
    return _power_action("startup", arg_dict)


@jsonify
def host_join(self, arg_dict):
    """Join a remote host into a pool.

    The pool's master is the host where the plugin is called from. The
    following constraints apply:

    - The host must have no VMs running, except nova-compute, which
      will be shut down (and restarted upon pool-join) automatically,
    - The host must have no shared storage currently set up,
    - The host must have the same license of the master,
    - The host must have the same supplemental packs as the master.
    """
    session = XenAPI.Session(arg_dict.get("url"))
    session.login_with_password(arg_dict.get("user"),
                                arg_dict.get("password"))
    compute_ref = session.xenapi.VM.get_by_uuid(arg_dict.get('compute_uuid'))
    session.xenapi.VM.clean_shutdown(compute_ref)
    try:
        if arg_dict.get("force"):
            session.xenapi.pool.join_force(arg_dict.get("master_addr"),
                                           arg_dict.get("master_user"),
                                           arg_dict.get("master_pass"))
        else:
            session.xenapi.pool.join(arg_dict.get("master_addr"),
                                     arg_dict.get("master_user"),
                                     arg_dict.get("master_pass"))
    finally:
        _resume_compute(session, compute_ref, arg_dict.get("compute_uuid"))


@jsonify
def host_data(self, arg_dict):
    """Runs the commands on the xenstore host to return the current status
    information.
    """
    host_uuid = arg_dict['host_uuid']
    resp = _run_command(["xe", "host-param-list", "uuid=%s" % host_uuid])
    parsed_data = parse_response(resp)
    # We have the raw dict of values. Extract those that we need,
    # and convert the data types as needed.
    ret_dict = cleanup(parsed_data)
    # Add any config settings
    config = _get_config_dict()
    ret_dict.update(config)
    return ret_dict


def parse_response(resp):
    data = {}
    for ln in resp.splitlines():
        if not ln:
            continue
        mtch = host_data_pattern.match(ln.strip())
        try:
            k, v = mtch.groups()
            data[k] = v
        except AttributeError:
            # Not a valid line; skip it
            continue
    return data


@jsonify
def host_uptime(self, arg_dict):
    """Returns the result of the uptime command on the xenhost."""
    return {"uptime": _run_command(['uptime'])}


def cleanup(dct):
    """Take the raw KV pairs returned and translate them into the
    appropriate types, discarding any we don't need.
    """
    def safe_int(val):
        """Integer values will either be string versions of numbers,
        or empty strings. Convert the latter to nulls.
        """
        try:
            return int(val)
        except ValueError:
            return None

    def strip_kv(ln):
        return [val.strip() for val in ln.split(":", 1)]

    out = {}

    # sbs = dct.get("supported-bootloaders", "")
    # out["host_supported-bootloaders"] = sbs.split("; ")
    # out["host_suspend-image-sr-uuid"] = dct.get("suspend-image-sr-uuid", "")
    # out["host_crash-dump-sr-uuid"] = dct.get("crash-dump-sr-uuid", "")
    # out["host_local-cache-sr"] = dct.get("local-cache-sr", "")
    out["enabled"] = dct.get("enabled", "true") == "true"
    out["host_memory"] = omm = {}
    omm["total"] = safe_int(dct.get("memory-total", ""))
    omm["overhead"] = safe_int(dct.get("memory-overhead", ""))
    omm["free"] = safe_int(dct.get("memory-free", ""))
    omm["free-computed"] = safe_int(
        dct.get("memory-free-computed", ""))

    # out["host_API-version"] = avv = {}
    # avv["vendor"] = dct.get("API-version-vendor", "")
    # avv["major"] = safe_int(dct.get("API-version-major", ""))
    # avv["minor"] = safe_int(dct.get("API-version-minor", ""))

    out["host_uuid"] = dct.get("uuid", None)
    out["host_name-label"] = dct.get("name-label", "")
    out["host_name-description"] = dct.get("name-description", "")
    # out["host_host-metrics-live"] = dct.get(
    #     "host-metrics-live", "false") == "true"
    out["host_hostname"] = dct.get("hostname", "")
    out["host_ip_address"] = dct.get("address", "")
    oc = dct.get("other-config", "")
    out["host_other-config"] = ocd = {}
    if oc:
        for oc_fld in oc.split("; "):
            ock, ocv = strip_kv(oc_fld)
            ocd[ock] = ocv

    capabilities = dct.get("capabilities", "")
    out["host_capabilities"] = capabilities.replace(";", "").split()
    # out["host_allowed-operations"] = dct.get(
    #     "allowed-operations", "").split("; ")
    # lsrv = dct.get("license-server", "")
    # out["host_license-server"] = ols = {}
    # if lsrv:
    #     for lspart in lsrv.split("; "):
    #         lsk, lsv = lspart.split(": ")
    #         if lsk == "port":
    #             ols[lsk] = safe_int(lsv)
    #         else:
    #             ols[lsk] = lsv
    # sv = dct.get("software-version", "")
    # out["host_software-version"] = osv = {}
    # if sv:
    #     for svln in sv.split("; "):
    #         svk, svv = strip_kv(svln)
    #         osv[svk] = svv
    cpuinf = dct.get("cpu_info", "")
    out["host_cpu_info"] = ocp = {}
    if cpuinf:
        for cpln in cpuinf.split("; "):
            cpk, cpv = strip_kv(cpln)
            if cpk in ("cpu_count", "family", "model", "stepping"):
                ocp[cpk] = safe_int(cpv)
            else:
                ocp[cpk] = cpv
    # out["host_edition"] = dct.get("edition", "")
    # out["host_external-auth-service-name"] = dct.get(
    #     "external-auth-service-name", "")
    return out


def query_gc(session, sr_uuid, vdi_uuid):
    result = _run_command(["/opt/xensource/sm/cleanup.py",
                           "-q", "-u", sr_uuid])
    # Example output: "Currently running: True"
    return result[19:].strip() == "True"


def get_pci_device_details(session):
    """Returns a string that is a list of pci devices with details.

    This string is obtained by running the command lspci. With -vmm option,
    it dumps PCI device data in machine readable form. This verbose format
    displays a sequence of records separated by a blank line. We will also
    use option "-n" to get vendor_id and device_id as numeric values and
    the "-k" option to get the kernel driver used if any.
    """
    return _run_command(["lspci", "-vmmnk"])


def get_pci_type(session, pci_device):
    """Returns the type of the PCI device (type-PCI, type-VF or type-PF).

    pci-device -- The address of the pci device
    """
    # We need to add the domain if it is missing
    if pci_device.count(':') == 1:
        pci_device = "0000:" + pci_device
    output = _run_command(["ls", "/sys/bus/pci/devices/" + pci_device + "/"])

    if "physfn" in output:
        return "type-VF"
    if "virtfn" in output:
        return "type-PF"
    return "type-PCI"


if __name__ == "__main__":
    # Support both serialized and non-serialized plugin approaches
    _, methodname = xmlrpclib.loads(sys.argv[1])
    if methodname in ['query_gc', 'get_pci_device_details', 'get_pci_type',
                      'network_config']:
        utils.register_plugin_calls(query_gc,
                                    get_pci_device_details,
                                    get_pci_type,
                                    network_config)

    XenAPIPlugin.dispatch(
        {"host_data": host_data,
         "set_host_enabled": set_host_enabled,
         "host_shutdown": host_shutdown,
         "host_reboot": host_reboot,
         "host_start": host_start,
         "host_join": host_join,
         "get_config": get_config,
         "set_config": set_config,
         "iptables_config": iptables_config,
         "host_uptime": host_uptime})
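xenhost keeps both dispatch styles: serialized calls via utils.register_plugin_calls and the legacy XenAPIPlugin.dispatch table. A hedged sketch of exercising the legacy path through the standard XenAPI bindings; xapi_local() and host.call_plugin() are stock XenAPI SDK calls, while the credentials and control flow here are illustrative assumptions.

# Illustrative only: call xenhost's legacy dispatch path from dom0.
import XenAPI

session = XenAPI.xapi_local()
session.xenapi.login_with_password('root', '')
try:
    host = session.xenapi.host.get_all()[0]
    host_uuid = session.xenapi.host.get_uuid(host)
    # Routed through XenAPIPlugin.dispatch to host_uptime(self, arg_dict);
    # the compatibility symlink means 'xenhost' still resolves to xenhost.py.
    uptime = session.xenapi.host.call_plugin(
        host, 'xenhost', 'host_uptime', {'host_uuid': host_uuid})
    print uptime
finally:
    session.xenapi.session.logout()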
@ -0,0 +1,6 @@
---
upgrade:
  - XenServer plugins have been renamed to include a '.py' extension. Code has
    been included to handle plugins with and without the extension, but this
    will be removed in the next release. The plugins with the extension should
    be deployed on all compute nodes to mitigate any upgrade issues.
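The upgrade note above implies a transitional lookup on the caller's side. One way that fallback could look, as a sketch only; PLUGIN_DIR and the function below are assumptions for illustration, not code this change actually ships.

# Hypothetical fallback: prefer the new '.py' name, keep the old one working.
import os

PLUGIN_DIR = '/etc/xapi.d/plugins'

def resolve_plugin_name(name):
    if os.path.exists(os.path.join(PLUGIN_DIR, name + '.py')):
        return name + '.py'
    # Legacy name; served by the compatibility symlink until Ocata.
    return name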