Merge "Nova - Add ephemeral pool creation"
commit 57124b49dc
@@ -153,25 +153,7 @@ spec:
        - name: config-volume-{{- $root.Values.global.name }}
          mountPath: {{ $mount }}
      {{- end }}
      {{- range $ephemeralPool := $root.Values.ephemeral_pools }}
      - name: storage-init-{{- $ephemeralPool.pool_name }}
        image: {{ $root.Values.images.tags.rbd_provisioner_storage_init | quote }}
        command: [ "/bin/bash", "{{ $mount }}/check_ceph.sh" ]
        env:
        - name: NAMESPACE
          value: {{ $root.Release.Namespace }}
        - name: POOL_NAME
          value: {{ $ephemeralPool.pool_name }}
        - name: POOL_REPLICATION
          value: {{ $ephemeralPool.replication | quote }}
        - name: POOL_CRUSH_RULE_NAME
          value: {{ $ephemeralPool.crush_rule_name | quote }}
        - name: POOL_CHUNK_SIZE
          value: {{ $ephemeralPool.chunk_size | quote }}
        volumeMounts:
        - name: config-volume-{{- $root.Values.global.name }}
          mountPath: {{ $mount }}
      {{- end }}

---
# This ConfigMap is needed because we're not using ceph's helm chart
apiVersion: v1
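For context, the range above iterates over the chart's ephemeral_pools value and exposes each field to check_ceph.sh as an environment variable. A minimal sketch of that structure follows; the pool name, replication factor, rule name and chunk size are illustrative assumptions, not values taken from this change.

# Sketch of the ephemeral_pools value consumed by the template above.
# Each field maps to one env var of the storage-init container:
#   pool_name       -> POOL_NAME
#   replication     -> POOL_REPLICATION
#   crush_rule_name -> POOL_CRUSH_RULE_NAME
#   chunk_size      -> POOL_CHUNK_SIZE
ephemeral_pools = [
    {
        "pool_name": "ephemeral",                   # illustrative
        "replication": 2,                           # illustrative
        "crush_rule_name": "storage_tier_ruleset",  # illustrative
        "chunk_size": 64,
    },
]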
@@ -11,12 +11,14 @@ from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import interface
from sysinv.common import utils
from sysinv.common.storage_backend_conf import StorageBackendConfig
from sysinv.openstack.common import log as logging
from sysinv.helm import common
from sysinv.helm import openstack

LOG = logging.getLogger(__name__)

RBD_POOL_USER = "ephemeral"

DEFAULT_NOVA_PCI_ALIAS = [
    {"vendor_id": constants.NOVA_PCI_ALIAS_QAT_PF_VENDOR,
@@ -73,6 +75,9 @@ class NovaHelm(openstack.OpenstackBaseHelm):
                    }
                },
                'conf': {
                    'ceph': {
                        'ephemeral_storage': self._get_rbd_ephemeral_storage()
                    },
                    'nova': {
                        'libvirt': {
                            'virt_type': self._get_virt_type(),
@@ -111,6 +116,15 @@ class NovaHelm(openstack.OpenstackBaseHelm):
    def _get_images_overrides(self):
        heat_image = self._operator.chart_operators[
            constants.HELM_CHART_HEAT].docker_image

        # TODO: Remove after ceph upgrade
        # Format the name of the stx specific ceph config helper
        ceph_config_helper_image = "{}:{}/{}/{}{}:{}".format(
            self._get_management_address(), common.REGISTRY_PORT,
            common.REPO_LOC,
            common.DOCKER_SRCS[self.docker_repo_source][common.IMG_PREFIX_KEY],
            'ceph-config-helper', self.docker_repo_tag)

        return {
            'tags': {
                'bootstrap': heat_image,
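As a rough illustration of the image reference the format call above produces (the management address, registry port, repo location, image prefix and tag below are placeholders, not values from an actual deployment):

# Sketch only: placeholder inputs standing in for the values the helm
# plugin reads from the system configuration.
mgmt_address = "192.168.204.2"
registry_port = "9001"
repo_loc = "docker.io"
img_prefix = "starlingx/stx-"
repo_tag = "latest"

image = "{}:{}/{}/{}{}:{}".format(
    mgmt_address, registry_port, repo_loc, img_prefix,
    'ceph-config-helper', repo_tag)

print(image)
# 192.168.204.2:9001/docker.io/starlingx/stx-ceph-config-helper:latest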
@@ -132,7 +146,8 @@ class NovaHelm(openstack.OpenstackBaseHelm):
                'nova_placement': self.docker_image,
                'nova_scheduler': self.docker_image,
                'nova_spiceproxy': self.docker_image,
                'nova_spiceproxy_assets': self.docker_image
                'nova_spiceproxy_assets': self.docker_image,
                'nova_storage_init': ceph_config_helper_image,
            }
        }
@@ -451,3 +466,34 @@ class NovaHelm(openstack.OpenstackBaseHelm):

    def get_region_name(self):
        return self._get_service_region_name(self.SERVICE_NAME)

    def _get_rbd_ephemeral_storage(self):
        ephemeral_storage_conf = {}
        ephemeral_pools = []

        # Get the values for replication and min replication from the storage
        # backend attributes.
        replication, min_replication = \
            StorageBackendConfig.get_ceph_pool_replication(self.dbapi)

        # For now, the ephemeral pool will only be on the primary Ceph tier
        # that's using the 0 crush ruleset.
        ruleset = 0

        # Form the dictionary with the info for the ephemeral pool.
        # If needed, multiple pools can be specified.
        ephemeral_pool = {
            'rbd_pool_name': constants.CEPH_POOL_EPHEMERAL_NAME,
            'rbd_user': RBD_POOL_USER,
            'rbd_crush_rule': ruleset,
            'rbd_replication': replication,
            'rbd_chunk_size': constants.CEPH_POOL_EPHEMERAL_PG_NUM
        }
        ephemeral_pools.append(ephemeral_pool)

        ephemeral_storage_conf = {
            'type': 'rbd',
            'rbd_pools': ephemeral_pools
        }

        return ephemeral_storage_conf
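For illustration, the structure _get_rbd_ephemeral_storage() builds would look roughly like the following, assuming a replication factor of 2 and assuming CEPH_POOL_EPHEMERAL_NAME resolves to 'ephemeral' and CEPH_POOL_EPHEMERAL_PG_NUM to 512 (assumed sample values, not confirmed by this diff):

# Sketch of the returned override; this is what lands under
# conf.ceph.ephemeral_storage in the nova chart overrides.
ephemeral_storage_conf = {
    'type': 'rbd',
    'rbd_pools': [
        {
            'rbd_pool_name': 'ephemeral',  # assumed CEPH_POOL_EPHEMERAL_NAME
            'rbd_user': 'ephemeral',       # RBD_POOL_USER
            'rbd_crush_rule': 0,           # primary tier ruleset
            'rbd_replication': 2,          # assumed backend replication
            'rbd_chunk_size': 512,         # assumed CEPH_POOL_EPHEMERAL_PG_NUM
        },
    ],
}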
@@ -41,8 +41,6 @@ class RbdProvisionerHelm(base.BaseHelm):

        # Get tier info.
        tiers = self.dbapi.storage_tier_get_list()
        primary_tier_name = \
            constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH]

        classes = []
        for bk in ceph_bks:
@@ -67,42 +65,10 @@ class RbdProvisionerHelm(base.BaseHelm):
            }
            classes.append(cls)

        # Get all the info for creating the ephemeral pool.
        ephemeral_pools = []
        # Right now the ephemeral pool will only use the primary tier.
        rule_name = "{0}{1}{2}".format(
            primary_tier_name,
            constants.CEPH_CRUSH_TIER_SUFFIX,
            "-ruleset").replace('-', '_')

        sb_list_ext = self.dbapi.storage_backend_get_list_by_type(
            backend_type=constants.SB_TYPE_CEPH_EXTERNAL)

        if sb_list_ext:
            for sb in sb_list_ext:
                if constants.SB_SVC_NOVA in sb.services:
                    rbd_pool = sb.capabilities.get('ephemeral_pool')
                    ephemeral_pool = {
                        "pool_name": rbd_pool,
                        "replication": int(sb.capabilities.get("replication")),
                        "crush_rule_name": rule_name,
                        "chunk_size": 64,
                    }
                    ephemeral_pools.append(ephemeral_pool)
        # Treat internal CEPH.
        ephemeral_pool = {
            "pool_name": constants.CEPH_POOL_EPHEMERAL_NAME,
            "replication": int(ceph_bks[0].capabilities.get("replication")),
            "crush_rule_name": rule_name,
            "chunk_size": 64,
        }
        ephemeral_pools.append(ephemeral_pool)

        overrides = {
            common.HELM_NS_OPENSTACK: {
                "classdefaults": classdefaults,
                "classes": classes,
                "ephemeral_pools": ephemeral_pools,
                "images": self._get_images_overrides(),
                "pods": self._get_pod_overrides()
            }
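As a side note, the crush rule name construction shown above can be reproduced in isolation. Assuming the default primary tier name is 'storage' and the crush tier suffix is '-tier' (both assumptions about the constants, not taken from this diff), it resolves to 'storage_tier_ruleset':

# Worked example of the rule_name expression with assumed inputs.
primary_tier_name = "storage"   # assumed SB_TIER_DEFAULT_NAMES value
crush_tier_suffix = "-tier"     # assumed CEPH_CRUSH_TIER_SUFFIX value

rule_name = "{0}{1}{2}".format(
    primary_tier_name,
    crush_tier_suffix,
    "-ruleset").replace('-', '_')

print(rule_name)  # storage_tier_ruleset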