metastatic: Pass tenant_name to backing node request

Currently, we don't store the tenant_name on the backing node of a
metastatic node, which leads to wrong per-tenant resource usage
metrics and incorrect tenant quota calculations (other places may be
affected, too).
To fix this, pass the tenant_name from the original metastatic request
from zuul down to the node request issued by the metastatic driver so
this info eventually lands on the backing node.

One thing to note is that when a backing node is reused, the tenant_name
attribute will not be updated. This is the same behaviour we also have
with other backing node metadata (e.g. instance tags).

Change-Id: Iaaca8c291be50273369f0255690f78851bef26d4
This commit is contained in:
Benjamin Schanzel
2025-03-27 16:15:50 +01:00
parent fa75c998ae
commit 3f4c45b987
2 changed files with 12 additions and 5 deletions

View File

@@ -139,7 +139,10 @@ class MetastaticInstance(statemachine.Instance):
self.slot = slot
def getQuotaInformation(self):
return QuotaInformation(instances=1)
# metastatic nodes shall not be counted into used quota because the
# backing node will do so. To not count used instances twice, return
# QuotaInformation with 0 instances here.
return QuotaInformation(instances=0)
class MetastaticDeleteStateMachine(statemachine.StateMachine):
@@ -166,7 +169,7 @@ class MetastaticCreateStateMachine(statemachine.StateMachine):
COMPLETE = 'complete'
def __init__(self, adapter, hostname, label, image_external_id,
metadata):
metadata, request):
super().__init__()
self.adapter = adapter
self.attempts = 0
@@ -174,13 +177,14 @@ class MetastaticCreateStateMachine(statemachine.StateMachine):
self.metadata = metadata
self.hostname = hostname
self.label = label
self.request = request
self.node_id = metadata['nodepool_node_id']
def advance(self):
if self.state == self.START:
self.backing_node_record, self.slot = \
self.adapter._allocateBackingNode(
self.label, self.node_id)
self.label, self.node_id, self.request)
if self.backing_node_record.node_id is None:
# We need to make a new request
self.state = self.REQUESTING
@@ -280,7 +284,8 @@ class MetastaticAdapter(statemachine.Adapter):
image_external_id, metadata,
request, az, log):
return MetastaticCreateStateMachine(self, hostname, label,
image_external_id, metadata)
image_external_id, metadata,
request)
def getDeleteStateMachine(self, external_id, log):
return MetastaticDeleteStateMachine(self, external_id)
@@ -431,7 +436,7 @@ class MetastaticAdapter(statemachine.Adapter):
def _setProvider(self, provider):
self._provider = provider
def _allocateBackingNode(self, label, node_id):
def _allocateBackingNode(self, label, node_id, request):
self._init()
# if we have room for the label, allocate and return existing slot
# otherwise, make a new backing node
@@ -449,6 +454,7 @@ class MetastaticAdapter(statemachine.Adapter):
req.node_types = [label.backing_label]
req.state = zk.REQUESTED
req.requestor = self.my_id
req.tenant_name = request.tenant_name
self.zk.storeNodeRequest(req, priority='100')
backing_node_record = BackingNodeRecord(
label.name, label.max_parallel_jobs)

View File

@@ -123,6 +123,7 @@ class TestDriverMetastatic(tests.DBTestCase):
'metaattr': 'meta',
'testattr': 'metastatic',
})
self.assertEqual(bn1.tenant_name, 'tenant-1')
# Allocate a second node, should have same backing node
node2 = self._requestNode()