Add extra-resources handling to openshift drivers

This ports the extra-resources handling that was just added to the
k8s driver to the openshift drivers.

Change-Id: I56e5eaf6ec22d10e88420094e92041c0b39b04e5
@@ -49,7 +49,44 @@ Selecting the openshift pods driver adds the following options to the

      :default: infinite
      :type: int

      Maximum number of pods that can be used.
      An alias for `max-servers`.

   .. attr:: max-cores
      :type: int
      :default: unlimited

      Maximum number of cores usable from this provider's pools by
      default. This can be used to limit usage of the openshift
      backend. If not defined, nodepool can use all cores up to the
      limit of the backend.

   .. attr:: max-servers
      :type: int
      :default: unlimited

      Maximum number of pods spawnable from this provider's pools by
      default. This can be used to limit the number of pods. If not
      defined, nodepool can create as many servers as the openshift
      backend allows.

   .. attr:: max-ram
      :type: int
      :default: unlimited

      Maximum ram usable from this provider's pools by default. This
      can be used to limit the amount of ram allocated by nodepool. If
      not defined, nodepool can use as much ram as the openshift
      backend allows.

   .. attr:: max-resources
      :type: dict
      :default: unlimited

      A dictionary of other quota resource limits applicable to this
      provider's pools by default. Arbitrary limits may be supplied
      with the
      :attr:`providers.[openshiftpods].pools.labels.extra-resources`
      attribute.

   .. attr:: pools
      :type: list
@@ -81,6 +118,35 @@ Selecting the openshift pods driver adds the following options to the

         A dictionary of key-value pairs that will be stored with the node data
         in ZooKeeper. The keys and values can be any arbitrary string.

      .. attr:: max-cores
         :type: int

         Maximum number of cores usable from this pool. This can be used
         to limit usage of the openshift backend. If not defined, nodepool can
         use all cores up to the limit of the backend.

      .. attr:: max-servers
         :type: int

         Maximum number of pods spawnable from this pool. This can
         be used to limit the number of pods. If not defined,
         nodepool can create as many servers as the openshift backend allows.

      .. attr:: max-ram
         :type: int

         Maximum ram usable from this pool. This can be used to limit
         the amount of ram allocated by nodepool. If not defined,
         nodepool can use as much ram as the openshift backend allows.

      .. attr:: max-resources
         :type: dict
         :default: unlimited

         A dictionary of other quota resource limits applicable to
         this pool. Arbitrary limits may be supplied with the
         :attr:`providers.[openshiftpods].pools.labels.extra-resources` attribute.

      .. attr:: default-label-cpu
         :type: int
@@ -204,6 +270,13 @@ Selecting the openshift pods driver adds the following options to the

            for the pod. If no limit is specified, this will also be
            used as the limit.

         .. attr:: extra-resources
            :type: dict

            Specifies any extra resources that Nodepool should
            consider in its quota calculation other than the resources
            described above (cpu, memory, storage).
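
   To make the accounting concrete, here is a standalone Python sketch (not
   part of the change, and a simplified stand-in for the driver's real quota
   arithmetic) using the hypothetical ``mygpu`` resource that also appears in
   the test fixtures below: each launched pod consumes its label's
   ``extra-resources``, which must stay under the pool's ``max-resources``.

      import math

      # Pool caps from max-resources; labels' extra-resources draw from them.
      max_resources = {'mygpu': 2}
      used = {}

      def fits(extra_resources):
          # Unconfigured resources are treated as unlimited.
          return all(used.get(k, 0) + n <= max_resources.get(k, math.inf)
                     for k, n in extra_resources.items())

      print(fits({'mygpu': 1}))  # True: 0 + 1 <= 2
      used['mygpu'] = used.get('mygpu', 0) + 1
      print(fits({'mygpu': 1}))  # True: 1 + 1 <= 2
      used['mygpu'] = used.get('mygpu', 0) + 1
      print(fits({'mygpu': 1}))  # False: the pool cap of 2 is reached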

         .. attr:: cpu-limit
            :type: int
@@ -66,7 +66,44 @@ Selecting the openshift driver adds the following options to the

      :default: infinite
      :type: int

      Maximum number of projects that can be used.
      An alias for `max-servers`.

   .. attr:: max-cores
      :type: int
      :default: unlimited

      Maximum number of cores usable from this provider's pools by
      default. This can be used to limit usage of the openshift
      backend. If not defined, nodepool can use all cores up to the
      limit of the backend.

   .. attr:: max-servers
      :type: int
      :default: unlimited

      Maximum number of projects spawnable from this provider's pools by
      default. This can be used to limit the number of projects. If not
      defined, nodepool can create as many servers as the openshift
      backend allows.

   .. attr:: max-ram
      :type: int
      :default: unlimited

      Maximum ram usable from this provider's pools by default. This
      can be used to limit the amount of ram allocated by nodepool. If
      not defined, nodepool can use as much ram as the openshift
      backend allows.

   .. attr:: max-resources
      :type: dict
      :default: unlimited

      A dictionary of other quota resource limits applicable to this
      provider's pools by default. Arbitrary limits may be supplied
      with the
      :attr:`providers.[openshift].pools.labels.extra-resources`
      attribute.

   .. attr:: pools
      :type: list
@@ -98,6 +135,35 @@ Selecting the openshift driver adds the following options to the

         A dictionary of key-value pairs that will be stored with the node data
         in ZooKeeper. The keys and values can be any arbitrary string.

      .. attr:: max-cores
         :type: int

         Maximum number of cores usable from this pool. This can be used
         to limit usage of the openshift backend. If not defined, nodepool can
         use all cores up to the limit of the backend.

      .. attr:: max-servers
         :type: int

         Maximum number of pods spawnable from this pool. This can
         be used to limit the number of pods. If not defined,
         nodepool can create as many servers as the openshift backend allows.

      .. attr:: max-ram
         :type: int

         Maximum ram usable from this pool. This can be used to limit
         the amount of ram allocated by nodepool. If not defined,
         nodepool can use as much ram as the openshift backend allows.

      .. attr:: max-resources
         :type: dict
         :default: unlimited

         A dictionary of other quota resource limits applicable to
         this pool. Arbitrary limits may be supplied with the
         :attr:`providers.[openshift].pools.labels.extra-resources` attribute.

      .. attr:: default-label-cpu
         :type: int
@@ -284,6 +350,15 @@ Selecting the openshift driver adds the following options to the

            MB to request for the pod. If no limit is specified, this
            will also be used as the limit.

         .. attr:: extra-resources
            :type: dict

            Only used by the
            :value:`providers.[openshift].pools.labels.type.pod`
            label type; specifies any extra resources that Nodepool
            should consider in its quota calculation other than the
            resources described above (cpu, memory, storage).

         .. attr:: cpu-limit
            :type: int
@@ -1,4 +1,5 @@
# Copyright 2018 Red Hat
# Copyright 2023 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +15,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict
import math

import voluptuous as v

from nodepool.driver import ConfigPool
@@ -38,9 +41,17 @@ class OpenshiftPool(ConfigPool):
    def load(self, pool_config, full_config):
        super().load(pool_config)
        self.name = pool_config['name']
        self.max_servers = pool_config.get(
            'max-servers', self.provider.max_servers)
        self.max_cores = pool_config.get('max-cores', self.provider.max_cores)
        self.max_ram = pool_config.get('max-ram', self.provider.max_ram)
        self.max_resources = self.provider.max_resources.copy()
        self.max_resources.update(pool_config.get('max-resources', {}))
        self.default_label_cpu = pool_config.get('default-label-cpu')
        self.default_label_memory = pool_config.get('default-label-memory')
        self.default_label_storage = pool_config.get('default-label-storage')
        self.default_label_extra_resources = pool_config.get(
            'default-label-extra-resources', {})
        self.labels = {}
        for label in pool_config.get('labels', []):
            pl = OpenshiftLabel()
@@ -52,11 +63,13 @@ class OpenshiftPool(ConfigPool):
            pl.cpu = label.get('cpu', self.default_label_cpu)
            pl.memory = label.get('memory', self.default_label_memory)
            pl.storage = label.get('storage', self.default_label_storage)
            pl.extra_resources = self.default_label_extra_resources.copy()
            pl.extra_resources.update(label.get('extra-resources', {}))
            # The limits are the first of:
            # 1) label specific configured limit
            # 2) default label configured limit
            # 3) label specific configured request
-            # 4) default label configured default
            # 4) default label configured request
            # 5) None
            default_cpu_limit = pool_config.get(
                'default-label-cpu-limit', pl.cpu)
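
The chained dictionary lookups implement the precedence list in the comment
above: a label-specific value wins, then the pool-wide default, then the
already-resolved request, else None. A standalone sketch of that chain, with
hypothetical values (not part of the change):

    # Sketch of the limit-precedence chain (hypothetical values).
    pool_config = {'default-label-cpu': 1}   # pool-wide default request
    label = {'cpu': 2}                       # label-specific request, no limit

    # Request resolution: label value (3) wins over the pool default (4).
    cpu = label.get('cpu', pool_config.get('default-label-cpu'))         # -> 2

    # Limit resolution: no configured limit (1/2), so fall back to the
    # resolved request, else None (5).
    default_cpu_limit = pool_config.get('default-label-cpu-limit', cpu)  # -> 2
    cpu_limit = label.get('cpu-limit', default_cpu_limit)                # -> 2
    print(cpu, cpu_limit)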
@@ -102,11 +115,20 @@ class OpenshiftProviderConfig(ProviderConfig):
    def load(self, config):
        self.launch_retries = int(self.provider.get('launch-retries', 3))
        self.context = self.provider['context']
-        self.max_projects = self.provider.get('max-projects', math.inf)
        # We translate max-projects to max_servers to re-use quota
        # calculation methods.
        self.max_servers = self.provider.get(
            'max-projects',
            self.provider.get('max-servers', math.inf))
        self.max_cores = self.provider.get('max-cores', math.inf)
        self.max_ram = self.provider.get('max-ram', math.inf)
        self.max_resources = defaultdict(lambda: math.inf)
        for k, val in self.provider.get('max-resources', {}).items():
            self.max_resources[k] = val
        for pool in self.provider.get('pools', []):
            pp = OpenshiftPool()
-            pp.load(pool, config)
            pp.provider = self
            pp.load(pool, config)
            self.pools[pp.name] = pp
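
Why `defaultdict(lambda: math.inf)`: any resource name with no configured cap
reads back as infinity, so it can take part in the same arithmetic without
ever blocking a launch. A small standalone illustration (hypothetical
resource names, not part of the change):

    from collections import defaultdict
    import math

    max_resources = defaultdict(lambda: math.inf)
    max_resources.update({'mygpu': 2})     # only mygpu is capped

    print(max_resources['mygpu'])          # 2
    print(max_resources['hugepages-2Mi'])  # inf -> never limits the quota check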

    def getSchema(self):
@@ -137,18 +159,23 @@ class OpenshiftProviderConfig(ProviderConfig):
            'volume-mounts': list,
            'labels': dict,
            'annotations': dict,
            'extra-resources': {str: int},
        }

        pool = ConfigPool.getCommonSchemaDict()
        pool.update({
            v.Required('name'): str,
            v.Required('labels'): [openshift_label],
-            v.Optional('default-label-cpu'): int,
-            v.Optional('default-label-memory'): int,
-            v.Optional('default-label-storage'): int,
-            v.Optional('default-label-cpu-limit'): int,
-            v.Optional('default-label-memory-limit'): int,
-            v.Optional('default-label-storage-limit'): int,
            'max-cores': int,
            'max-ram': int,
            'max-resources': {str: int},
            'default-label-cpu': int,
            'default-label-memory': int,
            'default-label-storage': int,
            'default-label-cpu-limit': int,
            'default-label-memory-limit': int,
            'default-label-storage-limit': int,
            'default-label-extra-resources': {str: int},
        })

        schema = ProviderConfig.getCommonSchemaDict()
@@ -157,6 +184,9 @@ class OpenshiftProviderConfig(ProviderConfig):
            v.Required('context'): str,
            'launch-retries': int,
            'max-projects': int,
            'max-cores': int,
            'max-ram': int,
            'max-resources': {str: int},
        })
        return v.Schema(schema)
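
The `{str: int}` value schema means voluptuous accepts `max-resources` as a
mapping of arbitrary string keys to integer caps. A quick standalone check
(hypothetical resource name, not part of the change):

    import voluptuous as v

    schema = v.Schema({'max-resources': {str: int}})
    schema({'max-resources': {'mygpu': 2}})          # passes
    try:
        schema({'max-resources': {'mygpu': 'two'}})  # value must be an int
    except v.Invalid as e:
        print(e)  # voluptuous reports the offending key/value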

@@ -1,4 +1,5 @@
# Copyright 2018 Red Hat
# Copyright 2023 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -12,14 +13,15 @@
# License for the specific language governing permissions and limitations
# under the License.

import math
import logging

from kazoo import exceptions as kze

from nodepool import exceptions
from nodepool.zk import zookeeper as zk
-from nodepool.driver.utils import NodeLauncher
from nodepool.driver import NodeRequestHandler
from nodepool.driver.utils import NodeLauncher, QuotaInformation


class OpenshiftLauncher(NodeLauncher):
@@ -133,10 +135,42 @@ class OpenshiftNodeRequestHandler(NodeRequestHandler):

        return True
-    def hasRemainingQuota(self, node_types):
-        if len(self.manager.listNodes()) + 1 > self.provider.max_projects:
-            return False
-        return True
    def hasRemainingQuota(self, ntype):
        '''
        Checks if the predicted quota is enough for an additional node of type
        ntype.

        :param ntype: node type for the quota check
        :return: True if there is enough quota, False otherwise
        '''
        needed_quota = self.manager.quotaNeededByLabel(ntype, self.pool)

        # Calculate remaining quota which is calculated as:
        # quota = <total nodepool quota> - <used quota> - <quota for node>
        cloud_quota = self.manager.estimatedNodepoolQuota()
        cloud_quota.subtract(
            self.manager.estimatedNodepoolQuotaUsed())
        cloud_quota.subtract(needed_quota)
        self.log.debug("Predicted remaining provider quota: %s",
                       cloud_quota)

        if not cloud_quota.non_negative():
            return False

        # Now calculate pool specific quota. Values indicating no quota default
        # to math.inf representing infinity that can be calculated with.
        args = dict(cores=getattr(self.pool, 'max_cores', None),
                    instances=self.pool.max_servers,
                    ram=getattr(self.pool, 'max_ram', None))
        args.update(self.pool.max_resources)
        pool_quota = QuotaInformation(**args, default=math.inf)
        pool_quota.subtract(
            self.manager.estimatedNodepoolQuotaUsed(self.pool))
        self.log.debug("Current pool quota: %s" % pool_quota)
        pool_quota.subtract(needed_quota)
        self.log.debug("Predicted remaining pool quota: %s", pool_quota)

        return pool_quota.non_negative()
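
The pool check above is plain elementwise arithmetic over resource dicts. A
minimal standalone sketch of the idea (a simplified stand-in for nodepool's
QuotaInformation, using the hypothetical pool caps from the fixtures below;
not part of the change):

    import math

    # Elementwise subtract plus a sign check, like QuotaInformation.
    def subtract(quota, used):
        return {k: quota.get(k, math.inf) - used.get(k, 0)
                for k in set(quota) | set(used)}

    def non_negative(quota):
        return all(val >= 0 for val in quota.values())

    pool_quota = {'instances': math.inf, 'cores': 4, 'mygpu': 2}  # pool caps
    used = {'instances': 1, 'cores': 2, 'mygpu': 1}    # one running pod
    needed = {'instances': 1, 'cores': 2, 'mygpu': 1}  # next pod-fedora

    remaining = subtract(subtract(pool_quota, used), needed)
    print(remaining, non_negative(remaining))  # fits exactly -> True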

    def launch(self, node):
        label = self.pool.labels[node.type[0]]
@@ -1,4 +1,5 @@
# Copyright 2018 Red Hat
# Copyright 2023 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -33,14 +34,18 @@ urllib3.disable_warnings()
class OpenshiftProvider(Provider, QuotaSupport):
    log = logging.getLogger("nodepool.driver.openshift.OpenshiftProvider")

-    def __init__(self, provider, *args):
    def __init__(self, provider, *args, _skip_init=False):
        super().__init__()
        self.provider = provider
        self.ready = False
-        _, _, self.k8s_client, self.os_client = get_client(
-            self.log, provider.context, DynamicClient)
-        self.project_names = set()
-        for pool in provider.pools.values():
-            self.project_names.add(pool.name)
        if not _skip_init:
            # The OpenshiftPods driver subclasses this but doesn't
            # want this initialization. TODO: unify the two.
            _, _, self.k8s_client, self.os_client = get_client(
                self.log, provider.context, DynamicClient)
            self.project_names = set()
            for pool in provider.pools.values():
                self.project_names.add(pool.name)

    def start(self, zk_conn):
        self.log.debug("Starting")
@@ -339,7 +344,8 @@ class OpenshiftProvider(Provider, QuotaSupport):
            resources["ram"] = provider_label.memory
        if provider_label.storage:
            resources["ephemeral-storage"] = provider_label.storage
-        return QuotaInformation(instances=1, default=1, **resources)
        resources.update(provider_label.extra_resources)
        return QuotaInformation(instances=1, **resources)
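
As a concrete illustration (hypothetical label with cpu 2, memory 1024 and
extra-resources {'mygpu': 1}), the resources dict handed to QuotaInformation
now carries the extra resources; dropping default=1 also means resource types
a label does not declare no longer count as 1 each:

    resources = {'cores': 2, 'ram': 1024}  # from the label's cpu/memory
    resources.update({'mygpu': 1})         # provider_label.extra_resources
    # -> QuotaInformation(instances=1, cores=2, ram=1024, mygpu=1)
    print(resources)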

    def unmanagedQuotaUsed(self):
        # TODO: return real quota information about quota
@@ -1,4 +1,5 @@
# Copyright 2018 Red Hat
# Copyright 2023 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +15,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict
import math

import voluptuous as v

from nodepool.driver import ConfigPool
@@ -33,14 +36,23 @@ class OpenshiftPodsProviderConfig(OpenshiftProviderConfig):
    def load(self, config):
        self.launch_retries = int(self.provider.get('launch-retries', 3))
        self.context = self.provider['context']
-        self.max_pods = self.provider.get('max-pods', math.inf)
        # We translate max-pods to max_servers to re-use quota
        # calculation methods.
        self.max_servers = self.provider.get(
            'max-pods',
            self.provider.get('max-servers', math.inf))
        self.max_cores = self.provider.get('max-cores', math.inf)
        self.max_ram = self.provider.get('max-ram', math.inf)
        self.max_resources = defaultdict(lambda: math.inf)
        for k, val in self.provider.get('max-resources', {}).items():
            self.max_resources[k] = val
        for pool in self.provider.get('pools', []):
            # Force label type to be pod
            for label in pool.get('labels', []):
                label['type'] = 'pod'
            pp = OpenshiftPool()
-            pp.load(pool, config)
            pp.provider = self
            pp.load(pool, config)
            self.pools[pp.name] = pp
    def getSchema(self):
@@ -70,18 +82,23 @@ class OpenshiftPodsProviderConfig(OpenshiftProviderConfig):
            'volume-mounts': list,
            'labels': dict,
            'annotations': dict,
            'extra-resources': {str: int},
        }

        pool = ConfigPool.getCommonSchemaDict()
        pool.update({
            v.Required('name'): str,
            v.Required('labels'): [openshift_label],
-            v.Optional('default-label-cpu'): int,
-            v.Optional('default-label-memory'): int,
-            v.Optional('default-label-storage'): int,
-            v.Optional('default-label-cpu-limit'): int,
-            v.Optional('default-label-memory-limit'): int,
-            v.Optional('default-label-storage-limit'): int,
            'max-cores': int,
            'max-ram': int,
            'max-resources': {str: int},
            'default-label-cpu': int,
            'default-label-memory': int,
            'default-label-storage': int,
            'default-label-cpu-limit': int,
            'default-label-memory-limit': int,
            'default-label-storage-limit': int,
            'default-label-extra-resources': {str: int},
        })

        schema = OpenshiftProviderConfig.getCommonSchemaDict()
@@ -90,5 +107,8 @@ class OpenshiftPodsProviderConfig(OpenshiftProviderConfig):
            v.Required('context'): str,
            'launch-retries': int,
            'max-pods': int,
            'max-cores': int,
            'max-ram': int,
            'max-resources': {str: int},
        })
        return v.Schema(schema)
@@ -1,4 +1,5 @@
# Copyright 2018 Red Hat
# Copyright 2023 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -58,11 +59,6 @@ class OpenshiftPodRequestHandler(OpenshiftNodeRequestHandler):
    log = logging.getLogger("nodepool.driver.openshiftpods."
                            "OpenshiftPodRequestHandler")

-    def hasRemainingQuota(self, node_types):
-        if len(self.manager.listNodes()) + 1 > self.provider.max_pods:
-            return False
-        return True
-
    def launch(self, node):
        label = self.pool.labels[node.type[0]]
        thd = OpenshiftPodLauncher(self, node, self.provider, label)
@@ -1,4 +1,5 @@
# Copyright 2018 Red Hat
# Copyright 2023 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -29,8 +30,7 @@ class OpenshiftPodsProvider(OpenshiftProvider):
                           "OpenshiftPodsProvider")

    def __init__(self, provider, *args):
-        self.provider = provider
-        self.ready = False
        super().__init__(provider, _skip_init=True)
        self.token, self.ca_crt, self.k8s_client, _ = get_client(
            self.log, provider.context)
        self.pod_names = set()
nodepool/tests/fixtures/openshift-pool-quota-cores.yaml (new file)
@@ -0,0 +1,25 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshift
    context: admin-cluster.local
    pools:
      - name: main
        max-cores: 4
        labels:
          - name: pod-fedora
            type: pod
            image: docker.io/fedora:28
            cpu: 2
nodepool/tests/fixtures/openshift-pool-quota-extra.yaml (new file)
@@ -0,0 +1,27 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshift
    context: admin-cluster.local
    pools:
      - name: main
        max-resources:
          mygpu: 2
        labels:
          - name: pod-fedora
            type: pod
            image: docker.io/fedora:28
            extra-resources:
              mygpu: 1
nodepool/tests/fixtures/openshift-pool-quota-ram.yaml (new file)
@@ -0,0 +1,25 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshift
    context: admin-cluster.local
    pools:
      - name: main
        max-ram: 2048
        labels:
          - name: pod-fedora
            type: pod
            image: docker.io/fedora:28
            memory: 1024
nodepool/tests/fixtures/openshift-pool-quota-servers.yaml (new file)
@@ -0,0 +1,24 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshift
    context: admin-cluster.local
    pools:
      - name: main
        max-servers: 2
        labels:
          - name: pod-fedora
            type: pod
            image: docker.io/fedora:28
nodepool/tests/fixtures/openshift-tenant-quota-cores.yaml (new file)
@@ -0,0 +1,28 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

tenant-resource-limits:
  - tenant-name: tenant-1
    max-cores: 4

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshift
    context: admin-cluster.local
    pools:
      - name: main
        labels:
          - name: pod-fedora
            type: pod
            image: docker.io/fedora:28
            cpu: 2
nodepool/tests/fixtures/openshift-tenant-quota-extra.yaml (new file)
@@ -0,0 +1,29 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

tenant-resource-limits:
  - tenant-name: tenant-1
    mygpu: 2

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshift
    context: admin-cluster.local
    pools:
      - name: main
        labels:
          - name: pod-fedora
            type: pod
            image: docker.io/fedora:28
            extra-resources:
              mygpu: 1
nodepool/tests/fixtures/openshift-tenant-quota-ram.yaml (new file)
@@ -0,0 +1,28 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

tenant-resource-limits:
  - tenant-name: tenant-1
    max-ram: 2048

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshift
    context: admin-cluster.local
    pools:
      - name: main
        labels:
          - name: pod-fedora
            type: pod
            image: docker.io/fedora:28
            memory: 1024
nodepool/tests/fixtures/openshift-tenant-quota-servers.yaml (new file)
@@ -0,0 +1,27 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

tenant-resource-limits:
  - tenant-name: tenant-1
    max-servers: 2

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshift
    context: admin-cluster.local
    pools:
      - name: main
        labels:
          - name: pod-fedora
            type: pod
            image: docker.io/fedora:28
nodepool/tests/fixtures/openshiftpods-pool-quota-cores.yaml (new file)
@@ -0,0 +1,24 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshiftpods
    context: service-account.local
    pools:
      - name: main
        max-cores: 4
        labels:
          - name: pod-fedora
            image: docker.io/fedora:28
            cpu: 2
nodepool/tests/fixtures/openshiftpods-pool-quota-extra.yaml (new file)
@@ -0,0 +1,26 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshiftpods
    context: service-account.local
    pools:
      - name: main
        max-resources:
          mygpu: 2
        labels:
          - name: pod-fedora
            image: docker.io/fedora:28
            extra-resources:
              mygpu: 1
nodepool/tests/fixtures/openshiftpods-pool-quota-ram.yaml (new file)
@@ -0,0 +1,24 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshiftpods
    context: service-account.local
    pools:
      - name: main
        max-ram: 2048
        labels:
          - name: pod-fedora
            image: docker.io/fedora:28
            memory: 1024
nodepool/tests/fixtures/openshiftpods-pool-quota-servers.yaml (new file)
@@ -0,0 +1,23 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshiftpods
    context: service-account.local
    pools:
      - name: main
        max-servers: 2
        labels:
          - name: pod-fedora
            image: docker.io/fedora:28
nodepool/tests/fixtures/openshiftpods-tenant-quota-cores.yaml (new file)
@@ -0,0 +1,27 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

tenant-resource-limits:
  - tenant-name: tenant-1
    max-cores: 4

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshiftpods
    context: service-account.local
    pools:
      - name: main
        labels:
          - name: pod-fedora
            image: docker.io/fedora:28
            cpu: 2
nodepool/tests/fixtures/openshiftpods-tenant-quota-extra.yaml (new file)
@@ -0,0 +1,28 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

tenant-resource-limits:
  - tenant-name: tenant-1
    mygpu: 2

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshiftpods
    context: service-account.local
    pools:
      - name: main
        labels:
          - name: pod-fedora
            image: docker.io/fedora:28
            extra-resources:
              mygpu: 1
nodepool/tests/fixtures/openshiftpods-tenant-quota-ram.yaml (new file)
@@ -0,0 +1,27 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

tenant-resource-limits:
  - tenant-name: tenant-1
    max-ram: 2048

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshiftpods
    context: service-account.local
    pools:
      - name: main
        labels:
          - name: pod-fedora
            image: docker.io/fedora:28
            memory: 1024
nodepool/tests/fixtures/openshiftpods-tenant-quota-servers.yaml (new file)
@@ -0,0 +1,26 @@
zookeeper-servers:
  - host: {zookeeper_host}
    port: {zookeeper_port}
    chroot: {zookeeper_chroot}

zookeeper-tls:
  ca: {zookeeper_ca}
  cert: {zookeeper_cert}
  key: {zookeeper_key}

tenant-resource-limits:
  - tenant-name: tenant-1
    max-servers: 2

labels:
  - name: pod-fedora

providers:
  - name: openshift
    driver: openshiftpods
    context: service-account.local
    pools:
      - name: main
        labels:
          - name: pod-fedora
            image: docker.io/fedora:28
@@ -1,4 +1,5 @@
# Copyright (C) 2018 Red Hat
# Copyright 2023 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,6 +16,7 @@

import fixtures
import logging
import time

from nodepool import tests
from nodepool.zk import zookeeper as zk
@@ -506,3 +508,82 @@ class TestDriverOpenshift(tests.DBTestCase):

        self.assertEqual(len(self.fake_os_client.projects), 0,
                         'Project must be cleaned up')

    def _test_openshift_quota(self, config, pause=True):
        configfile = self.setup_config(config)
        pool = self.useNodepool(configfile, watermark_sleep=1)
        self.startPool(pool)
        # Start two pods to hit max-server limit
        reqs = []
        for _ in [1, 2]:
            req = zk.NodeRequest()
            req.state = zk.REQUESTED
            req.tenant_name = 'tenant-1'
            req.node_types.append('pod-fedora')
            self.zk.storeNodeRequest(req)
            reqs.append(req)

        fulfilled_reqs = []
        for req in reqs:
            self.log.debug("Waiting for request %s", req.id)
            r = self.waitForNodeRequest(req)
            self.assertEqual(r.state, zk.FULFILLED)
            fulfilled_reqs.append(r)

        # Now request a third pod that will hit the limit
        max_req = zk.NodeRequest()
        max_req.state = zk.REQUESTED
        max_req.tenant_name = 'tenant-1'
        max_req.node_types.append('pod-fedora')
        self.zk.storeNodeRequest(max_req)

        # if at pool quota, the handler will get paused
        # but not if at tenant quota
        if pause:
            # The previous request should pause the handler
            pool_worker = pool.getPoolWorkers('openshift')
            while not pool_worker[0].paused_handlers:
                time.sleep(0.1)
        else:
            self.waitForNodeRequest(max_req, (zk.REQUESTED,))

        # Delete the earlier two pods freeing space for the third.
        for req in fulfilled_reqs:
            node = self.zk.getNode(req.nodes[0])
            node.state = zk.DELETING
            self.zk.storeNode(node)
            self.waitForNodeDeletion(node)

        # We should unpause and fulfill this now
        req = self.waitForNodeRequest(max_req, (zk.FULFILLED,))
        self.assertEqual(req.state, zk.FULFILLED)

    def test_openshift_pool_quota_servers(self):
        # This is specified as max-projects, but named servers here for
        # parity with other driver tests.
        self._test_openshift_quota('openshift-pool-quota-servers.yaml')

    def test_openshift_pool_quota_cores(self):
        self._test_openshift_quota('openshift-pool-quota-cores.yaml')

    def test_openshift_pool_quota_ram(self):
        self._test_openshift_quota('openshift-pool-quota-ram.yaml')

    def test_openshift_pool_quota_extra(self):
        self._test_openshift_quota('openshift-pool-quota-extra.yaml')

    def test_openshift_tenant_quota_servers(self):
        self._test_openshift_quota(
            'openshift-tenant-quota-servers.yaml', pause=False)

    def test_openshift_tenant_quota_cores(self):
        self._test_openshift_quota(
            'openshift-tenant-quota-cores.yaml', pause=False)

    def test_openshift_tenant_quota_ram(self):
        self._test_openshift_quota(
            'openshift-tenant-quota-ram.yaml', pause=False)

    def test_openshift_tenant_quota_extra(self):
        self._test_openshift_quota(
            'openshift-tenant-quota-extra.yaml', pause=False)
@@ -1,4 +1,5 @@
# Copyright (C) 2018 Red Hat
# Copyright 2023 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,6 +16,7 @@

import fixtures
import logging
import time

from nodepool import tests
from nodepool.zk import zookeeper as zk
@@ -140,3 +142,82 @@ class TestDriverOpenshiftPods(tests.DBTestCase):
            self.zk.storeNode(node)

        self.waitForNodeDeletion(node)

    def _test_openshift_pod_quota(self, config, pause=True):
        configfile = self.setup_config(config)
        pool = self.useNodepool(configfile, watermark_sleep=1)
        self.startPool(pool)
        # Start two pods to hit max-server limit
        reqs = []
        for _ in [1, 2]:
            req = zk.NodeRequest()
            req.state = zk.REQUESTED
            req.tenant_name = 'tenant-1'
            req.node_types.append('pod-fedora')
            self.zk.storeNodeRequest(req)
            reqs.append(req)

        fulfilled_reqs = []
        for req in reqs:
            self.log.debug("Waiting for request %s", req.id)
            r = self.waitForNodeRequest(req)
            self.assertEqual(r.state, zk.FULFILLED)
            fulfilled_reqs.append(r)

        # Now request a third pod that will hit the limit
        max_req = zk.NodeRequest()
        max_req.state = zk.REQUESTED
        max_req.tenant_name = 'tenant-1'
        max_req.node_types.append('pod-fedora')
        self.zk.storeNodeRequest(max_req)

        # if at pool quota, the handler will get paused
        # but not if at tenant quota
        if pause:
            # The previous request should pause the handler
            pool_worker = pool.getPoolWorkers('openshift')
            while not pool_worker[0].paused_handlers:
                time.sleep(0.1)
        else:
            self.waitForNodeRequest(max_req, (zk.REQUESTED,))

        # Delete the earlier two pods freeing space for the third.
        for req in fulfilled_reqs:
            node = self.zk.getNode(req.nodes[0])
            node.state = zk.DELETING
            self.zk.storeNode(node)
            self.waitForNodeDeletion(node)

        # We should unpause and fulfill this now
        req = self.waitForNodeRequest(max_req, (zk.FULFILLED,))
        self.assertEqual(req.state, zk.FULFILLED)

    def test_openshift_pod_pool_quota_servers(self):
        # This is specified as max-projects, but named servers here for
        # parity with other driver tests.
        self._test_openshift_pod_quota('openshiftpods-pool-quota-servers.yaml')

    def test_openshift_pod_pool_quota_cores(self):
        self._test_openshift_pod_quota('openshiftpods-pool-quota-cores.yaml')

    def test_openshift_pod_pool_quota_ram(self):
        self._test_openshift_pod_quota('openshiftpods-pool-quota-ram.yaml')

    def test_openshift_pod_pool_quota_extra(self):
        self._test_openshift_pod_quota('openshiftpods-pool-quota-extra.yaml')

    def test_openshift_pod_tenant_quota_servers(self):
        self._test_openshift_pod_quota(
            'openshiftpods-tenant-quota-servers.yaml', pause=False)

    def test_openshift_pod_tenant_quota_cores(self):
        self._test_openshift_pod_quota(
            'openshiftpods-tenant-quota-cores.yaml', pause=False)

    def test_openshift_pod_tenant_quota_ram(self):
        self._test_openshift_pod_quota(
            'openshiftpods-tenant-quota-ram.yaml', pause=False)

    def test_openshift_pod_tenant_quota_extra(self):
        self._test_openshift_pod_quota(
            'openshiftpods-tenant-quota-extra.yaml', pause=False)