[feat] upgrading daemonsets

- rolling update for daemonsets
- search pods by labels
This commit is contained in:
gardlt 2017-07-04 21:57:20 -05:00 committed by Tim Heyer
parent dda2a8dab7
commit d175e5ef92
9 changed files with 796 additions and 115 deletions

View File

@ -194,6 +194,7 @@ class Armada(object):
for entry in self.config[KEYWORD_ARMADA][KEYWORD_GROUPS]:
chart_wait = self.wait
desc = entry.get('description', 'A Chart Group')
chart_group = entry.get(KEYWORD_CHARTS, [])
@ -263,31 +264,28 @@ class Armada(object):
continue
# do actual update
self.tiller.update_release(
protoc_chart,
self.dry_run,
chart.release,
chart.namespace,
prefix,
pre_actions,
post_actions,
disable_hooks=chart.upgrade.no_hooks,
values=yaml.safe_dump(values),
wait=chart_wait,
timeout=chart_timeout)
self.tiller.update_release(protoc_chart,
prefix_chart,
chart.namespace,
pre_actions=pre_actions,
post_actions=post_actions,
dry_run=self.dry_run,
disable_hooks=chart.
upgrade.no_hooks,
values=yaml.safe_dump(values),
wait=chart_wait,
timeout=chart_timeout)
# process install
else:
LOG.info("Installing release %s", chart.release)
self.tiller.install_release(
protoc_chart,
self.dry_run,
chart.release,
chart.namespace,
prefix,
values=yaml.safe_dump(values),
wait=chart_wait,
timeout=chart_timeout)
self.tiller.install_release(protoc_chart,
prefix_chart,
chart.namespace,
dry_run=self.dry_run,
values=yaml.safe_dump(values),
wait=chart_wait,
timeout=chart_timeout)
LOG.debug("Cleaning up chart source in %s",
chartbuilder.source_directory)

View File

@ -26,10 +26,12 @@ DOMAIN = "armada"
logging.setup(CONF, DOMAIN)
class K8s(object):
'''
Object to obtain the local kube config file
'''
def __init__(self):
'''
Initialize connection to Kubernetes
@ -37,7 +39,8 @@ class K8s(object):
config.load_kube_config()
self.client = client.CoreV1Api()
self.api_client = client.BatchV1Api()
self.batch_api = client.BatchV1Api()
self.extension_api = client.ExtensionsV1beta1Api()
def delete_job_action(self, name, namespace="default"):
'''
@ -46,12 +49,24 @@ class K8s(object):
'''
try:
body = client.V1DeleteOptions()
self.api_client.delete_namespaced_job(name=name,
namespace=namespace,
body=body)
self.batch_api.delete_namespaced_job(
name=name, namespace=namespace, body=body)
except ApiException as e:
LOG.error("Exception when deleting a job: %s", e)
def get_namespace_job(self, namespace="default",
                      label_selector=''):
    '''
    List jobs in a namespace, optionally filtered by a label selector.

    :param namespace: namespace to search for jobs
    :param label_selector: k8s label selector string (e.g. 'name=value')
    :return: job list response from the Batch API, or None if the
        call raises ApiException (the error is logged, not re-raised)
    '''
    try:
        return self.batch_api.list_namespaced_job(
            namespace, label_selector=label_selector)
    except ApiException as e:
        LOG.error("Exception getting a job: %s", e)
def create_job_action(self, name, namespace="default"):
'''
:params name - name of the job
@ -68,8 +83,8 @@ class K8s(object):
This will return a list of objects req namespace
'''
return self.client \
.list_namespaced_pod(namespace, label_selector=label_selector)
return self.client.list_namespaced_pod(
namespace, label_selector=label_selector)
def get_all_pods(self, label_selector=''):
'''
@ -78,8 +93,39 @@ class K8s(object):
Returns a list of pods from all namespaces
'''
return self.client \
.list_pod_for_all_namespaces(label_selector=label_selector)
return self.client.list_pod_for_all_namespaces(
label_selector=label_selector)
def get_namespace_daemonset(self, namespace='default', label=''):
    '''
    List daemonsets in a namespace, filtered by a label selector.

    :param namespace: namespace of the target daemonsets
    :param label: k8s label selector string identifying the daemonsets
    :return: daemonset list response from the extensions API
    '''
    return self.extension_api.list_namespaced_daemon_set(
        namespace, label_selector=label)
def create_daemon_action(self, namespace, template):
    '''
    Create a daemonset from a manifest template.

    :param namespace: namespace to create the daemonset in
    :param template: daemonset manifest to deploy
    :return: the created daemonset object returned by the API
    '''
    # Return the API response (previously discarded) so callers can
    # inspect the created object, matching delete_daemon_action.
    return self.extension_api.create_namespaced_daemon_set(
        namespace, body=template)
def delete_daemon_action(self, name, namespace="default", body=None):
    '''
    Delete the named daemonset from a namespace.

    :param name: name of the daemonset to delete
    :param namespace: namespace holding the daemonset
    :param body: optional delete options; defaults to a fresh
        V1DeleteOptions when omitted
    '''
    delete_options = body if body is not None else client.V1DeleteOptions()
    return self.extension_api.delete_namespaced_daemon_set(
        name, namespace, delete_options)
def delete_namespace_pod(self, name, namespace="default", body=None):
'''
@ -92,18 +138,26 @@ class K8s(object):
if body is None:
body = client.V1DeleteOptions()
return self.client \
.delete_namespaced_pod(name, namespace, body)
return self.client.delete_namespaced_pod(
name, namespace, body)
def wait_for_pod_redeployment(self, old_pod_name, namespace):
'''
:param old_pod_name - name of pods
:param namespace - kubernetes namespace
'''
base_pod_pattern = re.compile('^(.+)-[a-zA-Z0-9]+$')
if not base_pod_pattern.match(old_pod_name):
LOG.error(
'Could not identify new pod after purging %s', old_pod_name)
LOG.error('Could not identify new pod after purging %s',
old_pod_name)
return
pod_base_name = base_pod_pattern.match(old_pod_name).group(1)
new_pod_name = ''
w = watch.Watch()
for event in w.stream(self.client.list_namespaced_pod, namespace):
event_name = event['object'].metadata.name
@ -117,7 +171,8 @@ class K8s(object):
new_pod_name = event_name
elif new_pod_name:
for condition in pod_conditions:
if (condition.type == 'Ready' and
condition.status == 'True'):
if (condition.type == 'Ready'
and condition.status == 'True'):
LOG.info('New pod %s deployed', new_pod_name)
w.stop()

View File

@ -13,6 +13,7 @@
# limitations under the License.
import grpc
import yaml
from hapi.services.tiller_pb2 import ReleaseServiceStub, ListReleasesRequest, \
InstallReleaseRequest, UpdateReleaseRequest, UninstallReleaseRequest
@ -46,6 +47,7 @@ DOMAIN = "armada"
logging.setup(CONF, DOMAIN)
class Tiller(object):
'''
The Tiller class supports communication and requests to the Tiller Helm
@ -139,45 +141,67 @@ class Tiller(object):
for y in release_list:
releases.extend(y.releases)
return releases
def list_charts(self):
'''
List Helm Charts from Latest Releases
def get_chart_templates(self, template_name, name, release_name, namespace,
chart, disable_hooks, values):
# returns some info
Returns a list of tuples in the form:
(name, version, chart, values, status)
'''
charts = []
for latest_release in self.list_releases():
try:
charts.append(
(latest_release.name, latest_release.version,
latest_release.chart, latest_release.config.raw,
latest_release.info.status.Code.Name(
latest_release.info.status.code)))
except IndexError:
continue
return charts
LOG.info("Template( %s ) : %s ", template_name, name)
def _pre_update_actions(self, release_name, actions, namespace):
stub = ReleaseServiceStub(self.channel)
release_request = InstallReleaseRequest(
chart=chart,
dry_run=True,
values=values,
name=name,
namespace=namespace,
wait=False)
templates = stub.InstallRelease(
release_request, self.timeout, metadata=self.metadata)
for template in yaml.load_all(
getattr(templates.release, 'manifest', [])):
if template_name == template.get('metadata', None).get(
'name', None):
LOG.info(template_name)
return template
def _pre_update_actions(self, actions, release_name, namespace, chart,
disable_hooks, values):
'''
:params actions - array of items actions
:params namespace - name of pod for actions
'''
try:
for action in actions.get('update', []):
name = action.get('name')
LOG.info('Updating %s ', name)
action_type = action.get('type')
labels = action.get('labels')
self.rolling_upgrade_pod_deployment(
name, release_name, namespace, labels,
action_type, chart, disable_hooks, values)
except Exception:
LOG.debug("Pre: Could not update anything, please check yaml")
try:
for action in actions.get('delete', []):
name = action.get('name')
action_type = action.get('type')
labels = action.get('labels', None)
self.delete_resource(release_name, name, action_type,
labels, namespace)
self.delete_resources(
release_name, name, action_type, labels, namespace)
# Ensure pods get deleted when job is deleted
if 'job' in action_type:
self.delete_resource(release_name, name, 'pod',
labels, namespace)
self.delete_resources(
release_name, name, 'pod', labels, namespace)
except Exception:
raise tiller_exceptions.PreUpdateJobDeleteException(name,
namespace)
@ -207,6 +231,7 @@ class Tiller(object):
Apply deletion logic based on type of resource
'''
label_selector = 'release_name={}'.format(release_name)
for label in resource_labels:
label_selector += ', {}={}'.format(label.keys()[0],
@ -241,10 +266,33 @@ class Tiller(object):
raise tiller_exceptions.PreUpdateJobCreateException()
LOG.debug("POST: Could not create anything, please check yaml")
def update_release(self, chart, dry_run, name, namespace, prefix,
pre_actions=None, post_actions=None,
disable_hooks=False, values=None,
wait=False, timeout=None):
def list_charts(self):
    '''
    List Helm charts from the latest releases.

    :return: list of tuples in the form
        (name, version, chart, values, status)
    '''
    charts = []
    for release in self.list_releases():
        try:
            status = release.info.status
            entry = (release.name,
                     release.version,
                     release.chart,
                     release.config.raw,
                     status.Code.Name(status.code))
        except IndexError:
            # Skip releases whose metadata is incomplete.
            continue
        charts.append(entry)
    return charts
def update_release(self, chart, release, namespace,
dry_run=False,
pre_actions=None,
post_actions=None,
disable_hooks=False,
values=None,
wait=False,
timeout=None):
'''
Update a Helm Release
'''
@ -256,11 +304,10 @@ class Tiller(object):
else:
values = Config(raw=values)
release_name = "{}-{}".format(prefix, name)
self._pre_update_actions(release_name, pre_actions, namespace)
self._pre_update_actions(pre_actions, release, namespace, chart,
disable_hooks, values)
# build release install request
try:
stub = ReleaseServiceStub(self.channel)
release_request = UpdateReleaseRequest(
@ -268,18 +315,22 @@ class Tiller(object):
dry_run=dry_run,
disable_hooks=disable_hooks,
values=values,
name="{}-{}".format(prefix, name),
name=release,
wait=wait,
timeout=timeout)
stub.UpdateRelease(release_request, self.timeout,
metadata=self.metadata)
stub.UpdateRelease(
release_request, self.timeout, metadata=self.metadata)
except Exception:
raise tiller_exceptions.ReleaseInstallException(name, namespace)
raise tiller_exceptions.ReleaseInstallException(release, namespace)
self._post_update_actions(post_actions, namespace)
def install_release(self, chart, dry_run, name, namespace, prefix,
values=None, wait=False, timeout=None):
def install_release(self, chart, release, namespace,
dry_run=False,
values=None,
wait=False,
timeout=None):
'''
Create a Helm Release
'''
@ -298,17 +349,16 @@ class Tiller(object):
chart=chart,
dry_run=dry_run,
values=values,
name="{}-{}".format(prefix, name),
name=release,
namespace=namespace,
wait=wait,
timeout=timeout)
return stub.InstallRelease(release_request,
self.timeout,
metadata=self.metadata)
return stub.InstallRelease(
release_request, self.timeout, metadata=self.metadata)
except Exception:
raise tiller_exceptions.ReleaseInstallException(name, namespace)
raise tiller_exceptions.ReleaseInstallException(release, namespace)
def uninstall_release(self, release, disable_hooks=False, purge=True):
'''
@ -321,12 +371,11 @@ class Tiller(object):
# build release install request
try:
stub = ReleaseServiceStub(self.channel)
release_req = UninstallReleaseRequest(name=release,
disable_hooks=disable_hooks,
purge=purge)
return stub.UninstallRelease(release_req,
self.timeout,
metadata=self.metadata)
release_request = UninstallReleaseRequest(
name=release, disable_hooks=disable_hooks, purge=purge)
return stub.UninstallRelease(
release_request, self.timeout, metadata=self.metadata)
except Exception:
raise tiller_exceptions.ReleaseUninstallException(release)
@ -342,9 +391,8 @@ class Tiller(object):
valid_charts = []
for gchart in charts:
for chart in gchart.get('chart_group'):
valid_charts.append(release_prefix(prefix,
chart.get('chart')
.get('name')))
valid_charts.append(release_prefix(
prefix, chart.get('chart').get('name')))
actual_charts = [x.name for x in self.list_releases()]
chart_diff = list(set(actual_charts) - set(valid_charts))
@ -353,3 +401,97 @@ class Tiller(object):
if chart.startswith(prefix):
LOG.debug("Release: %s will be removed", chart)
self.uninstall_release(chart)
def delete_resources(self, release_name, resource_name, resource_type,
                     resource_labels, namespace):
    '''
    Delete resources of a given type belonging to a release.

    :param release_name: release name the specified resource is under
    :param resource_name: name of the specific resource
    :param resource_type: type of resource, e.g. 'job' or 'pod'
    :param resource_labels: list of single-entry label dicts by which
        to identify the resource (may be None)
    :param namespace: namespace of the resource
    '''
    selectors = []
    # Jobs are matched purely by their labels; other resource types are
    # additionally scoped to the release.
    if resource_type != 'job':
        selectors.append('release_name={}'.format(release_name))
    for label in resource_labels or []:
        # list() keeps this working on Python 3, where dict views are
        # not indexable (label.keys()[0] raises TypeError there).
        key = list(label.keys())[0]
        selectors.append('{}={}'.format(key, label[key]))
    label_selector = ', '.join(selectors)

    if 'job' in resource_type:
        LOG.info("Deleting %s in namespace: %s", resource_name, namespace)
        get_jobs = self.k8s.get_namespace_job(namespace, label_selector)
        for jb in get_jobs.items:
            self.k8s.delete_job_action(jb.metadata.name, namespace)
    elif 'pod' in resource_type:
        release_pods = self.k8s.get_namespace_pod(
            namespace, label_selector)
        for pod in release_pods.items:
            pod_name = pod.metadata.name
            LOG.info("Deleting %s in namespace: %s", pod_name, namespace)
            self.k8s.delete_namespace_pod(pod_name, namespace)
            # Block until the replacement pod comes up before moving on.
            self.k8s.wait_for_pod_redeployment(pod_name, namespace)
    else:
        LOG.error("Unable to execute name: %s type: %s ",
                  resource_name, resource_type)
def rolling_upgrade_pod_deployment(self, name, release_name, namespace,
                                   labels, action_type, chart,
                                   disable_hooks, values):
    '''
    Perform a rolling update of pod-controller resources.

    Only daemonsets are currently handled: the matching daemonset is
    deleted, re-created from the chart's rendered template, and its
    pods are deleted so the new daemonset replaces them.

    :param name: name of the daemonset to upgrade
    :param release_name: release owning the daemonset
    :param namespace: namespace of the daemonset
    :param labels: list of single-entry label dicts selecting the
        daemonset (may be None)
    :param action_type: resource kind; only 'daemonset' is supported,
        'statefulset' is a no-op placeholder
    :param chart: chart from which to re-render the daemonset template
    :param disable_hooks: passed through to the template render
    :param values: chart values used for the template render
    '''
    if action_type == 'daemonset':
        LOG.info('Updating: %s', action_type)

        label_selector = 'release_name={}'.format(release_name)
        for label in labels or []:
            # list() keeps this working on Python 3, where dict views
            # are not indexable (label.keys()[0] raises TypeError).
            key = list(label.keys())[0]
            label_selector += ', {}={}'.format(key, label[key])

        get_daemonset = self.k8s.get_namespace_daemonset(
            namespace=namespace, label=label_selector)
        for ds in get_daemonset.items:
            ds_name = ds.metadata.name
            ds_labels = ds.metadata.labels
            if ds_name == name:
                LOG.info("Deleting %s : %s in %s", action_type, ds_name,
                         namespace)
                self.k8s.delete_daemon_action(ds_name, namespace)

                # Re-render the daemonset template and carry over the
                # labels of the deleted daemonset before re-creating it.
                template = self.get_chart_templates(
                    ds_name, name, release_name, namespace, chart,
                    disable_hooks, values)
                template['metadata']['labels'] = ds_labels
                template['spec']['template']['metadata'][
                    'labels'] = ds_labels
                self.k8s.create_daemon_action(
                    namespace=namespace, template=template)

                # Delete the old pods so the new daemonset re-spawns them.
                self.delete_resources(release_name, name, 'pod', labels,
                                      namespace)
    elif action_type == 'statefulset':
        pass

View File

@ -92,10 +92,12 @@ class ArmadaTestCase(unittest.TestCase):
'master')
for group in armada.config.get('armada').get('charts'):
for counter, chart in enumerate(group.get('chart_group')):
self.assertEqual(chart.get('chart').get('source_dir')[0],
CHART_SOURCES[counter][0])
self.assertEqual(chart.get('chart').get('source_dir')[1],
CHART_SOURCES[counter][1])
self.assertEqual(
chart.get('chart').get('source_dir')[0],
CHART_SOURCES[counter][0])
self.assertEqual(
chart.get('chart').get('source_dir')[1],
CHART_SOURCES[counter][1])
@unittest.skip('temp')
@mock.patch('armada.handlers.armada.git')
@ -130,14 +132,12 @@ class ArmadaTestCase(unittest.TestCase):
@mock.patch.object(Armada, 'pre_flight_ops')
@mock.patch('armada.handlers.armada.ChartBuilder')
@mock.patch('armada.handlers.armada.Tiller')
def test_install(self, mock_tiller, mock_chartbuilder,
mock_pre_flight, mock_post_flight):
def test_install(self, mock_tiller, mock_chartbuilder, mock_pre_flight,
mock_post_flight):
'''Test install functionality from the sync() method'''
# instantiate Armada and Tiller objects
armada = Armada('',
wait=True,
timeout=1000)
armada = Armada('', wait=True, timeout=1000)
armada.tiller = mock_tiller
tmp_doc = yaml.safe_load_all(self.test_yaml)
armada.config = Manifest(tmp_doc).get_manifest()
@ -154,20 +154,26 @@ class ArmadaTestCase(unittest.TestCase):
armada.sync()
# check params that should be passed to tiller.install_release()
method_calls = [mock.call(mock_chartbuilder().get_helm_chart(),
armada.dry_run, chart_1['release_name'],
chart_1['namespace'],
armada.config['armada']['release_prefix'],
values=yaml.safe_dump(chart_1['values']),
wait=armada.wait,
timeout=1000),
mock.call(mock_chartbuilder().get_helm_chart(),
armada.dry_run, chart_2['release_name'],
chart_2['namespace'],
armada.config['armada']['release_prefix'],
values=yaml.safe_dump(chart_2['values']),
wait=armada.wait,
timeout=1000)]
method_calls = [
mock.call(
mock_chartbuilder().get_helm_chart(),
"{}-{}".format(armada.config['armada']['release_prefix'],
chart_1['release_name']),
chart_1['namespace'],
dry_run=armada.dry_run,
values=yaml.safe_dump(chart_1['values']),
wait=armada.wait,
timeout=1000),
mock.call(
mock_chartbuilder().get_helm_chart(),
"{}-{}".format(armada.config['armada']['release_prefix'],
chart_2['release_name']),
chart_2['namespace'],
dry_run=armada.dry_run,
values=yaml.safe_dump(chart_2['values']),
wait=armada.wait,
timeout=1000)
]
mock_tiller.install_release.assert_has_calls(method_calls)
@unittest.skip('skipping update')

View File

@ -28,22 +28,21 @@ class TillerTestCase(unittest.TestCase):
dry_run = False
name = None
namespace = None
prefix = None
initial_values = None
updated_values = mock_config(raw=initial_values)
wait = False
timeout = None
tiller.install_release(chart, dry_run, name, namespace, prefix,
values=initial_values, wait=wait,
timeout=timeout)
tiller.install_release(chart, name, namespace,
dry_run=dry_run, values=initial_values,
wait=wait, timeout=timeout)
mock_stub.assert_called_with(tiller.channel)
release_request = mock_install_request(
chart=chart,
dry_run=dry_run,
values=updated_values,
name="{}-{}".format(prefix, name),
release=name,
namespace=namespace,
wait=wait,
timeout=timeout

View File

@ -1,5 +1,5 @@
Armada - Tiller
==============+
===============
Commands

View File

@ -79,7 +79,7 @@ Chart
+-----------------+----------+------------------------------------------------------------------------+
| keyword | type | action |
+=================+==========+========================================================================+
| chart\_name | string | name for the chart |
| chart\_name | string | name for the chart |
+-----------------+----------+------------------------------------------------------------------------+
| release\_name | string | name of the release |
+-----------------+----------+------------------------------------------------------------------------+
@ -98,6 +98,53 @@ Chart
| dependencies | object | reference any chart dependencies before install |
+-----------------+----------+------------------------------------------------------------------------+
Update - Pre or Post
^^^^^^^^^^^^^^^^^^^^
+-------------+----------+---------------------------------------------------------------+
| keyword | type | action |
+=============+==========+===============================================================+
| pre | object | actions prior to updating chart |
+-------------+----------+---------------------------------------------------------------+
| post | object | actions post updating chart |
+-------------+----------+---------------------------------------------------------------+
Update - Actions
^^^^^^^^^^^^^^^^
+-------------+----------+---------------------------------------------------------------+
| keyword | type | action |
+=============+==========+===============================================================+
| update | object | updates daemonsets in pre update actions |
+-------------+----------+---------------------------------------------------------------+
| delete | object | delete jobs in pre delete actions |
+-------------+----------+---------------------------------------------------------------+
.. note::
Update actions are performed in the pre/post sections of update
Update - Actions - Update/Delete
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-------------+----------+---------------------------------------------------------------+
| keyword | type | action |
+=============+==========+===============================================================+
| name | string | name of action |
+-------------+----------+---------------------------------------------------------------+
| type | string | type of K8s kind to execute |
+-------------+----------+---------------------------------------------------------------+
| labels | object | array of labels to query against kinds. (key: value) |
+-------------+----------+---------------------------------------------------------------+
.. note::
Update Actions only support type: 'daemonset'
Source
^^^^^^
@ -113,6 +160,52 @@ Source
| reference | string | branch of the repo |
+-------------+----------+---------------------------------------------------------------+
Example
~~~~~~~
::
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: blog-1
data:
chart_name: blog-1
release_name: blog-1
namespace: default
timeout: 100
install:
no_hook: false
upgrade:
no_hook: false
pre:
update:
- name: test-daemonset
type: daemonset
labels:
foo: bar
component: bar
rak1: enabled
delete:
- name: test-job
type: job
labels:
foo: bar
component: bar
rak1: enabled
values: {}
source:
type: git
location: https://github.com/namespace/repo
subpath: .
reference: master
dependencies: []
Defining a Chart
~~~~~~~~~~~~~~~~

387
examples/armada-aio.yaml Normal file
View File

@ -0,0 +1,387 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: helm-toolkit
data:
chart_name: helm-toolkit
release: helm-toolkit
namespace: helm-toolkit
values: {}
source:
type: git
location: git://git.openstack.org/openstack/openstack-helm
subpath: helm-toolkit
reference: master
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: mariadb
data:
chart_name: mariadb
release: mariadb
namespace: openstack
timeout: 3600
install:
no_hooks: false
upgrade:
no_hooks: false
values: {}
source:
type: git
location: git://git.openstack.org/openstack/openstack-helm
subpath: mariadb
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: memcached
data:
chart_name: memcached
release: memcached
namespace: openstack
timeout: 100
install:
no_hooks: false
upgrade:
no_hooks: false
values: {}
source:
type: git
location: git://git.openstack.org/openstack/openstack-helm
subpath: memcached
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: etcd
data:
chart_name: etcd
release: etcd
namespace: openstack
timeout: 3600
install:
no_hooks: false
upgrade:
no_hooks: false
values: {}
source:
type: git
location: git://git.openstack.org/openstack/openstack-helm
subpath: etcd
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: rabbitmq
data:
chart_name: rabbitmq
release: rabbitmq
namespace: openstack
timeout: 100
install:
no_hooks: false
upgrade:
no_hooks: false
values: {}
source:
type: git
location: git://git.openstack.org/openstack/openstack-helm
subpath: rabbitmq
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ingress
data:
chart_name: ingress
release: ingress
namespace: openstack
timeout: 100
install:
no_hooks: false
upgrade:
no_hooks: false
values: {}
source:
type: git
location: git://git.openstack.org/openstack/openstack-helm
subpath: ingress
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: keystone
data:
chart_name: keystone
release: keystone
namespace: openstack
timeout: 100
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- name: keystone-db-sync
type: job
labels:
- job-name: keystone-db-sync
- name: keystone-db-init
type: job
labels:
- job-name: keystone-db-init
values:
replicas: 3
source:
type: git
location: git://git.openstack.org/openstack/openstack-helm
subpath: keystone
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: horizon
data:
chart_name: horizon
release: horizon
namespace: openstack
timeout: 100
install:
no_hooks: false
upgrade:
no_hooks: false
values: {}
source:
type: git
location: git://git.openstack.org/openstack/openstack-helm
subpath: horizon
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: glance
data:
chart_name: glance
release: glance
namespace: openstack
timeout: 100
install:
no_hooks: false
upgrade:
no_hooks: false
values: {}
source:
type: git
location: git://git.openstack.org/openstack/openstack-helm
subpath: glance
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: neutron
data:
chart_name: neutron
release: neutron
namespace: openstack
timeout: 100
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
update:
- name: neutron-l3-agent
type: daemonset
labels:
- component: l3-agent
- application: neutron
values:
pod:
replica:
server: 3
network:
interface:
tunnel: docker0
conf:
neutron:
default:
oslo:
log:
debug: true
neutron:
db:
l3_ha: False
min_l3_agents_per_router: 1
max_l3_agents_per_router: 1
l3_ha_network_type: vxlan
dhcp_agents_per_network: 1
ml2_conf:
ml2_type_flat:
neutron:
ml2:
flat_networks: public
openvswitch_agent:
agent:
neutron:
ml2:
ovs:
agent:
tunnel_types: vxlan
ovs:
neutron:
ml2:
ovs:
agent:
of_interface: ovs-ofctl
ovsdb_interface: vsctl
bridge_mappings: public:br-ex
source:
type: git
location: git://git.openstack.org/openstack/openstack-helm
subpath: neutron
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: nova
data:
chart_name: nova
release: nova
namespace: openstack
timeout: 100
install:
no_hooks: false
upgrade:
no_hooks: false
values:
ceph:
enabled: false
conf:
nova:
default:
oslo:
log:
debug: false
libvirt:
nova:
conf:
virt_type: qemu
images_type: null
images_rbd_pool: null
images_rbd_ceph_conf: null
rbd_user: null
rbd_secret_uuid: null
disk_cachemodes: null
hw_disk_discard: null
upgrade_levels:
nova:
conf:
compute: null
source:
type: git
location: git://git.openstack.org/openstack/openstack-helm
subpath: nova
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: cinder
data:
chart_name: cinder
release: cinder
namespace: openstack
timeout: 100
install:
no_hooks: false
upgrade:
no_hooks: false
values: {}
source:
type: git
location: git://git.openstack.org/openstack/openstack-helm
subpath: cinder
reference: master
dependencies:
- helm-toolkit
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: deploy-mariadb
data:
description: "Deploy Infra Database"
sequenced: True
chart_group:
- mariadb
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: openstack-infra-services
data:
description: "OpenStack Infra Services"
sequenced: False
chart_group:
- etcd
- rabbitmq
- memcached
- ingress
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: openstack-components
data:
description: "OpenStack Components"
sequenced: False
chart_group:
- keystone
- horizon
- glance
- neutron
- nova
- cinder
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: armada-manifest
data:
release_prefix: armada
chart_groups:
- deploy-mariadb
- openstack-infra-services
- openstack-components

View File

@ -122,7 +122,8 @@ data:
type: job
- name: keystone-db-init
type: job
values: {}
values:
replicas: 2
source:
type: git
location: git://github.com/openstack/openstack-helm