Add missing cloud/region/az/host_id info to nodes

To the greatest extent possible within the limitations of each
provider, this change adds cloud, region, az, and host_id information
to nodes.

Each of AWS, Azure, GCE, and IBMVPC has its cloud name hard-coded to
a value that makes sense for that driver, since each of these is a
singleton cloud.  Their region and az values are added as
appropriate.
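
For illustration, a minimal sketch of where the AWS values come from,
using boto3 directly (the instance id below is hypothetical; in the
driver the region comes from the provider configuration as
provider.region_name):

    import boto3

    region = 'us-west-2'  # provider.region_name in the driver
    ec2 = boto3.resource('ec2', region_name=region)
    instance = ec2.Instance('i-0123456789abcdef0')  # hypothetical id

    cloud = 'AWS'                           # hard-coded singleton cloud
    az = instance.subnet.availability_zone  # e.g. 'us-west-2b'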

The k8s, openshift, and openshiftpods drivers all have their cloud
names set to the k8s context name, which is the closest approximation
of what the "cloud" attribute means in its existing usage in the
OpenStack driver.  If pods are launched, the host_id value is set to
the name of the k8s node hosting the pod, which approximates its
existing usage in the OpenStack driver (where it is typically an
opaque uuid that uniquely identifies the hypervisor).
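
As a sketch of where those values come from, using the kubernetes
Python client directly (the context, namespace, and pod names below
are illustrative):

    from kubernetes import client, config

    context_name = 'admin-cluster.local'   # becomes node.cloud
    config.load_kube_config(context=context_name)
    core = client.CoreV1Api()

    pod = core.read_namespaced_pod('zuul-pod', 'zuul')
    host_id = pod.spec.node_name           # becomes node.host_id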

Change-Id: I53765fc3914a84d2519f5d4dda4f8dc8feda72f2
James E. Blair 2022-08-24 10:28:13 -07:00
parent 36084ceb2d
commit 6d3b5f3bab
18 changed files with 63 additions and 12 deletions

@@ -82,7 +82,7 @@ CACHE_TTL = 10
 class AwsInstance(statemachine.Instance):
-    def __init__(self, instance, quota):
+    def __init__(self, provider, instance, quota):
         super().__init__()
         self.external_id = instance.id
         self.metadata = tag_list_to_dict(instance.tags)
@@ -90,7 +90,9 @@ class AwsInstance(statemachine.Instance):
         self.private_ipv6 = None
         self.public_ipv4 = instance.public_ip_address
         self.public_ipv6 = None
-        self.az = ''
+        self.cloud = 'AWS'
+        self.region = provider.region_name
+        self.az = instance.subnet.availability_zone
         self.quota = quota
         for iface in instance.network_interfaces[:1]:
@@ -206,7 +208,8 @@ class AwsCreateStateMachine(statemachine.StateMachine):
         if self.state == self.COMPLETE:
             self.complete = True
-            return AwsInstance(self.instance, self.quota)
+            return AwsInstance(self.adapter.provider, self.instance,
+                               self.quota)

 class AwsAdapter(statemachine.Adapter):
@@ -357,7 +360,7 @@ class AwsAdapter(statemachine.Adapter):
             if instance.state["Name"].lower() == "terminated":
                 continue
             quota = self._getQuotaForInstanceType(instance.instance_type)
-            yield AwsInstance(instance, quota)
+            yield AwsInstance(self.provider, instance, quota)

     def getQuotaLimits(self):
         # Get the instance types that this provider handles

@@ -84,8 +84,9 @@ class AzureInstance(statemachine.Instance):
         self.interface_ip = (self.public_ipv4 or self.public_ipv6 or
                              self.private_ipv4 or self.private_ipv6)
+        self.cloud = 'Azure'
         self.region = vm['location']
-        self.az = ''
+        self.az = vm['zones'][0]

     def getQuotaInformation(self):
         return quota_info_from_sku(self.sku)

@@ -44,7 +44,10 @@ class GceInstance(statemachine.Instance):
     def __init__(self, data, quota):
         super().__init__()
         self.external_id = data['name']
-        self.az = data['zone']
+        self.cloud = 'Google'
+        zone = data['zone'].rsplit('/', 1)[1]
+        self.region = zone.rsplit('-', 1)[0]
+        self.az = zone
         iface = data.get('networkInterfaces', [])
         if len(iface):
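
A worked example of the zone parsing above: GCE reports the zone as a
URL path, and the region is the zone name minus its trailing zone
letter (the URL below is illustrative):

    data = {'zone': 'https://www.googleapis.com/compute/v1/'
                    'projects/acme/zones/us-central1-a'}

    zone = data['zone'].rsplit('/', 1)[1]  # 'us-central1-a'
    region = zone.rsplit('-', 1)[0]        # 'us-central1'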

@@ -132,6 +132,7 @@ class IBMVPCInstance(statemachine.Instance):
             self.public_ipv4 = fip['address']
         self.interface_ip = self.public_ipv4 or self.private_ipv4
         self.cloud = 'IBM'
+        self.region = provider.region
         self.az = vm['zone']['name']

@@ -47,10 +47,10 @@ class K8SLauncher(NodeLauncher):
         else:
             self.node.connection_type = "kubectl"
             self.node.interface_ip = resource['pod']
-        pool = self.handler.provider.pools.get(self.node.pool)
-        resources = self.handler.manager.quotaNeededByLabel(
-            self.node.type[0], pool)
-        self.node.resources = resources.get_resources()
+        pool = self.handler.provider.pools.get(self.node.pool)
+        self.node.resources = self.handler.manager.quotaNeededByLabel(
+            self.node.type[0], pool).get_resources()
+        self.node.cloud = self.provider_config.context
         self.zk.storeNode(self.node)
         self.log.info("Resource %s is ready" % resource['name'])

@@ -317,6 +317,7 @@ class KubernetesProvider(Provider, QuotaSupport):
                     "%s: pod failed to initialize (%s)" % (
                         namespace, pod.status.phase))
         resource["pod"] = label.name
+        node.host_id = pod.spec.node_name
         return resource

     def getRequestHandler(self, poolworker, request):

@@ -50,6 +50,7 @@ class OpenshiftLauncher(NodeLauncher):
         self.node.shell_type = self.label.shell_type
         # NOTE: resource access token may be encrypted here
         self.node.connection_port = resource
+        self.node.cloud = self.provider_config.context
         self.zk.storeNode(self.node)
         self.log.info("Resource %s is ready", project)

@@ -257,6 +257,7 @@ class OpenshiftProvider(Provider, QuotaSupport):
                 raise exceptions.LaunchNodepoolException(
                     "%s: pod failed to initialize (%s)" % (
                         project, pod.status.phase))
+        return pod.spec.node_name

     def getRequestHandler(self, poolworker, request):
         return handler.OpenshiftNodeRequestHandler(poolworker, request)

@@ -30,7 +30,7 @@ class OpenshiftPodLauncher(OpenshiftLauncher):
         self.node.interface_ip = pod_name
         self.zk.storeNode(self.node)

-        self.handler.manager.waitForPod(project, pod_name)
+        pod_node_id = self.handler.manager.waitForPod(project, pod_name)

         self.node.state = zk.READY
         self.node.python_path = self.label.python_path
@@ -47,6 +47,8 @@ class OpenshiftPodLauncher(OpenshiftLauncher):
             'user': 'zuul-worker',
         }
         self.node.connection_type = "kubectl"
+        self.node.cloud = self.provider_config.context
+        self.node.host_id = pod_node_id
         self.zk.storeNode(self.node)
         self.log.info("Pod %s is ready" % self.node.external_id)
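
The effect of this plumbing, sketched with the kubernetes client (the
manager's real polling logic is elided and the k8s_client attribute
name is illustrative; only the return value matters here):

    def waitForPod(self, project, pod_name):
        # ... poll until pod.status.phase == 'Running' ...
        pod = self.k8s_client.read_namespaced_pod(pod_name, project)
        return pod.spec.node_name  # stored on the node as host_id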

@@ -157,6 +157,7 @@ class StateMachineNodeLauncher(stats.StatsReporter):
             node.public_ipv4 = instance.public_ipv4
             node.private_ipv4 = instance.private_ipv4
             node.public_ipv6 = instance.public_ipv6
+            node.cloud = instance.cloud
             node.region = instance.region
             node.az = instance.az
             node.driver_data = instance.driver_data
@@ -737,6 +738,7 @@ class Instance:
     * public_ipv4: str
     * public_ipv6: str
     * private_ipv4: str
+    * cloud: str
     * az: str
     * region: str
     * driver_data: any
@@ -759,6 +761,7 @@ class Instance:
         self.public_ipv6 = None
         self.private_ipv4 = None
         self.interface_ip = None
+        self.cloud = None
         self.az = None
         self.region = None
         self.metadata = {}
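
Given the extended interface above, a hedged sketch of what a
statemachine driver's Instance subclass now fills in (ExampleInstance
and its constructor arguments are illustrative, not a real driver):

    class ExampleInstance(Instance):
        def __init__(self, provider, vm):
            super().__init__()
            self.cloud = 'Example'         # hard-coded singleton name
            self.region = provider.region  # from provider configuration
            self.az = vm['zone']           # from the backend API response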

@@ -168,6 +168,7 @@ class VirtualMachinesCRUD(CRUDManager):
             "networkProfile": data['properties']['networkProfile'],
             "provisioningState": "Creating"
         }
+        data['zones'] = ["1"]
         self.items.append(data)
         disk_data = data.copy()
         disk_data['name'] = 'bionic-azure-' + str(uuid.uuid4())

@@ -386,6 +386,10 @@ class TestDriverAws(tests.DBTestCase):
         self.assertTrue(node.private_ipv4.startswith('203.0.113.'))
         self.assertFalse(node.public_ipv4.startswith('203.0.113.'))
         self.assertEqual(node.python_path, 'auto')
+        self.assertEqual(node.cloud, 'AWS')
+        self.assertEqual(node.region, 'us-west-2')
+        # Like us-west-2x where x is random
+        self.assertTrue(len(node.az) == len('us-west-2x'))

         instance = self.ec2.Instance(node.external_id)
         response = instance.describe_attribute(Attribute='ebsOptimized')
@@ -498,10 +502,14 @@ class TestDriverAws(tests.DBTestCase):
         instance.tags = []
         instance.private_ip_address = '10.0.0.1'
         instance.public_ip_address = '1.2.3.4'
+        instance.subnet = Dummy()
+        instance.subnet.availability_zone = 'us-west-2b'
         iface = Dummy()
         iface.ipv6_addresses = [{'Ipv6Address': 'fe80::dead:beef'}]
         instance.network_interfaces = [iface]
-        awsi = AwsInstance(instance, None)
+        provider = Dummy()
+        provider.region_name = 'us-west-2'
+        awsi = AwsInstance(provider, instance, None)
         self.assertEqual(awsi.public_ipv4, '1.2.3.4')
         self.assertEqual(awsi.private_ipv4, '10.0.0.1')
         self.assertEqual(awsi.public_ipv6, 'fe80::dead:beef')

@@ -88,6 +88,9 @@ class TestDriverAzure(tests.DBTestCase):
                          {'key1': 'value1', 'key2': 'value2'})
         self.assertEqual(node.host_keys, ['ssh-rsa FAKEKEY'])
         self.assertEqual(node.python_path, 'auto')
+        self.assertEqual(node.cloud, 'Azure')
+        self.assertEqual(node.region, 'centralus')
+        self.assertEqual(node.az, '1')
         self.assertEqual(
             self.fake_azure.crud['Microsoft.Compute/virtualMachines'].
             items[0]['properties']['osProfile']['customData'],

@@ -303,6 +303,9 @@ class TestDriverGce(tests.DBTestCase):
         self.assertEqual(node.connection_type, 'ssh')
         self.assertEqual(node.attributes,
                          {'key1': 'value1', 'key2': 'value2'})
+        self.assertEqual(node.cloud, 'Google')
+        self.assertEqual(node.region, 'us-central1')
+        self.assertEqual(node.az, 'us-central1-a')
         if host_key_checking:
             nodescan.assert_called_with(
                 node.interface_ip,

@@ -95,6 +95,9 @@ class TestDriverIBMVPC(tests.DBTestCase):
         self.assertEqual(node.connection_type, 'ssh')
         self.assertEqual(node.host_keys, [])
         self.assertEqual(node.python_path, 'auto')
+        self.assertEqual(node.cloud, 'IBM')
+        self.assertEqual(node.region, 'us-south')
+        self.assertEqual(node.az, 'us-south-1')

         node.state = zk.USED
         self.zk.storeNode(node)

@@ -75,6 +75,9 @@ class FakeCoreClient(object):
         class FakePod:
             class status:
                 phase = "Running"
+
+            class spec:
+                node_name = "k8s-default-pool-abcd-1234"
         return FakePod
@@ -124,6 +127,8 @@ class TestDriverKubernetes(tests.DBTestCase):
         self.assertEqual(node.connection_port.get('token'), 'fake-token')
         self.assertEqual(node.attributes,
                          {'key1': 'value1', 'key2': 'value2'})
+        self.assertEqual(node.cloud, 'admin-cluster.local')
+        self.assertEqual(node.host_id, 'k8s-default-pool-abcd-1234')

         node.state = zk.DELETING
         self.zk.storeNode(node)
@@ -150,6 +155,8 @@ class TestDriverKubernetes(tests.DBTestCase):
         self.assertIsNotNone(node.launcher)
         self.assertEqual(node.connection_type, 'namespace')
         self.assertEqual(node.connection_port.get('token'), 'fake-token')
+        self.assertEqual(node.cloud, 'admin-cluster.local')
+        self.assertIsNone(node.host_id)

         node.state = zk.DELETING
         self.zk.storeNode(node)

@@ -114,6 +114,9 @@ class FakeCoreClient(object):
         class FakePod:
             class status:
                 phase = "Running"
+
+            class spec:
+                node_name = "k8s-default-pool-abcd-1234"
         return FakePod
@@ -157,6 +160,8 @@ class TestDriverOpenshift(tests.DBTestCase):
         self.assertEqual(node.shell_type, 'csh')
         self.assertEqual(node.attributes,
                          {'key1': 'value1', 'key2': 'value2'})
+        self.assertEqual(node.cloud, 'admin-cluster.local')
+        self.assertIsNone(node.host_id)

         node.state = zk.DELETING
         self.zk.storeNode(node)

@@ -54,6 +54,9 @@ class FakeCoreClient(object):
         class FakePod:
             class status:
                 phase = "Running"
+
+            class spec:
+                node_name = "k8s-default-pool-abcd-1234"
         return FakePod

     def delete_namespaced_pod(self, name, project):
@@ -104,6 +107,8 @@ class TestDriverOpenshiftPods(tests.DBTestCase):
         self.assertIn('ca_crt', node.connection_port)
         self.assertEqual(node.attributes,
                          {'key1': 'value1', 'key2': 'value2'})
+        self.assertEqual(node.cloud, 'service-account.local')
+        self.assertEqual(node.host_id, 'k8s-default-pool-abcd-1234')

         node.state = zk.DELETING
         self.zk.storeNode(node)