Merge remote-tracking branch 'gerrit/master' into f/centos76

Change-Id: Iea63d1ade4600321dde585d610a1a1b9356705d2
Signed-off-by: Saul Wold <sgw@linux.intel.com>
Saul Wold
2018-12-20 12:09:38 -08:00
31 changed files with 694 additions and 1458 deletions

View File

@@ -160,6 +160,7 @@ openstack-aodh-config
python-panko
openstack-panko-api
openstack-panko-common
+openstack-panko-config
# rabbitmq-server
rabbitmq-server

View File

@@ -16,6 +16,7 @@ openstack-nova-wheels
python-ceilometerclient-wheels
python-cinderclient-wheels
python-django-horizon-wheels
+python-django-openstack-auth-wheels
python-glanceclient-wheels
python-gnocchiclient-wheels
python-ironicclient-wheels

View File

@@ -10,6 +10,7 @@ openstack/openstack-magnum
openstack/openstack-magnum-ui
openstack/openstack-ras
openstack/openstack-panko
+openstack/openstack-panko-config
openstack/openstack-os-vif
openstack/python-aodhclient
openstack/python-ceilometer

View File

@@ -5,4 +5,4 @@ TAR="$TAR_NAME-$SHA.tar.gz"
COPY_LIST="${CGCS_BASE}/downloads/$TAR $PKG_BASE/files/*"
-TIS_PATCH_VER=4
+TIS_PATCH_VER=5

View File

@@ -15,6 +15,8 @@ Source0: %{name}-%{sha}.tar.gz
BuildArch: noarch
Patch01: 0001-gnocchi-chart-updates.patch
+Patch02: Mariadb-Support-adoption-of-running-single-node-mari.patch
+Patch03: Mariadb-Share-container-PID-namespaces-under-docker.patch
BuildRequires: helm
@@ -24,6 +26,8 @@ Openstack Helm Infra charts
%prep
%setup -n openstack-helm-infra
%patch01 -p1
+%patch02 -p1
+%patch03 -p1
%build
# initialize helm and build the toolkit

View File

@@ -0,0 +1,82 @@
From 977c523cef00f7919a82d268da7e55223f1864ce Mon Sep 17 00:00:00 2001
From: Pete Birley <pete@port.direct>
Date: Sat, 8 Dec 2018 16:16:11 -0600
Subject: [PATCH] Mariadb: Share container PID namespaces under docker
This PS shares PID namespaces for containers in pods under docker,
bringing this runtime in line with other runc-based container
backends and allowing the pod's pause process to act as a reaper.
Change-Id: Ib5fc101d930446d848246eb5ca4d554b756cb91f
Signed-off-by: Pete Birley <pete@port.direct>
---
mariadb/templates/deployment-error.yaml | 1 +
mariadb/templates/deployment-ingress.yaml | 1 +
mariadb/templates/monitoring/prometheus/exporter-deployment.yaml | 1 +
mariadb/templates/monitoring/prometheus/exporter-job-create-user.yaml | 1 +
mariadb/templates/statefulset.yaml | 1 +
5 files changed, 5 insertions(+)
diff --git a/mariadb/templates/deployment-error.yaml b/mariadb/templates/deployment-error.yaml
index c310324..78c4b18 100644
--- a/mariadb/templates/deployment-error.yaml
+++ b/mariadb/templates/deployment-error.yaml
@@ -42,6 +42,7 @@ spec:
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
spec:
+ shareProcessNamespace: true
serviceAccountName: {{ $serviceAccountName }}
affinity:
{{ tuple $envAll "mariadb" "ingress-error-pages" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/mariadb/templates/deployment-ingress.yaml b/mariadb/templates/deployment-ingress.yaml
index 053a08f..afe9407 100644
--- a/mariadb/templates/deployment-ingress.yaml
+++ b/mariadb/templates/deployment-ingress.yaml
@@ -137,6 +137,7 @@ spec:
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
spec:
+ shareProcessNamespace: true
serviceAccountName: {{ $serviceAccountName }}
affinity:
{{ tuple $envAll "mariadb" "ingress" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/mariadb/templates/monitoring/prometheus/exporter-deployment.yaml b/mariadb/templates/monitoring/prometheus/exporter-deployment.yaml
index 274a06c..00b3f6e 100644
--- a/mariadb/templates/monitoring/prometheus/exporter-deployment.yaml
+++ b/mariadb/templates/monitoring/prometheus/exporter-deployment.yaml
@@ -38,6 +38,7 @@ spec:
{{ tuple $envAll "prometheus_mysql_exporter" "exporter" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
namespace: {{ .Values.endpoints.prometheus_mysql_exporter.namespace }}
spec:
+ shareProcessNamespace: true
serviceAccountName: {{ $serviceAccountName }}
nodeSelector:
{{ .Values.labels.prometheus_mysql_exporter.node_selector_key }}: {{ .Values.labels.prometheus_mysql_exporter.node_selector_value }}
diff --git a/mariadb/templates/monitoring/prometheus/exporter-job-create-user.yaml b/mariadb/templates/monitoring/prometheus/exporter-job-create-user.yaml
index df7a147..b9f7b56 100644
--- a/mariadb/templates/monitoring/prometheus/exporter-job-create-user.yaml
+++ b/mariadb/templates/monitoring/prometheus/exporter-job-create-user.yaml
@@ -30,6 +30,7 @@ spec:
labels:
{{ tuple $envAll "prometheus_mysql_exporter" "create-sql-user" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
spec:
+ shareProcessNamespace: true
serviceAccountName: {{ $serviceAccountName }}
restartPolicy: OnFailure
nodeSelector:
diff --git a/mariadb/templates/statefulset.yaml b/mariadb/templates/statefulset.yaml
index de0fac2..c6aa451 100644
--- a/mariadb/templates/statefulset.yaml
+++ b/mariadb/templates/statefulset.yaml
@@ -91,6 +91,7 @@ spec:
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
spec:
+ shareProcessNamespace: true
serviceAccountName: {{ $serviceAccountName }}
affinity:
{{ tuple $envAll "mariadb" "server" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
--
1.8.3.1
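The patch above relies on Kubernetes' shared-PID-namespace support. For illustration, a minimal sketch of the same setting expressed with the kubernetes Python client that _start.py.tpl already uses; the pod and image names here are illustrative assumptions, not part of the commit:

from kubernetes import client

# With a shared PID namespace the pod's pause container runs as PID 1 and
# reaps orphaned processes from every container in the pod, which is the
# behaviour the patch enables under the docker runtime.
pod = client.V1Pod(
    metadata=client.V1ObjectMeta(name="mariadb-sketch"),  # hypothetical name
    spec=client.V1PodSpec(
        share_process_namespace=True,  # serializes to shareProcessNamespace: true
        containers=[client.V1Container(name="mariadb", image="mariadb:10.2")],
    ),
)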

View File

@@ -0,0 +1,329 @@
From 896385354e535d68f7ee06074bb8266c0f1b7055 Mon Sep 17 00:00:00 2001
From: Pete Birley <pete@port.direct>
Date: Sat, 1 Dec 2018 18:52:39 -0600
Subject: [PATCH] Mariadb: Support adoption of running single node mariadb
deployment
This PS updates the mariadb chart to support adoption of a single
instance of mariadb deployed by the earlier bash-driven chart, which
did not track state in a configmap and so could not reform a galera
cluster. Additionally, basic logic is added for upgrading the
database as part of the normal rolling-update flow.
Change-Id: I412de507112b38d6d2534e89f2a02f84bef3da63
Signed-off-by: Pete Birley <pete@port.direct>
---
mariadb/templates/bin/_start.py.tpl | 168 +++++++++++++++++++++++----------
mariadb/templates/etc/_00-base.cnf.tpl | 8 +-
2 files changed, 123 insertions(+), 53 deletions(-)
diff --git a/mariadb/templates/bin/_start.py.tpl b/mariadb/templates/bin/_start.py.tpl
index 8a0236b..4d4428c 100644
--- a/mariadb/templates/bin/_start.py.tpl
+++ b/mariadb/templates/bin/_start.py.tpl
@@ -48,6 +48,10 @@ logger.addHandler(ch)
local_hostname = socket.gethostname()
logger.info("This instance hostname: {0}".format(local_hostname))
+# Get the instance number
+instance_number = local_hostname.split("-")[-1]
+logger.info("This instance number: {0}".format(instance_number))
+
# Setup k8s client credentials and check api version
kubernetes.config.load_incluster_config()
kubernetes_version = kubernetes.client.VersionApi().get_code().git_version
@@ -109,6 +113,7 @@ def ensure_state_configmap(pod_namespace, configmap_name, configmap_body):
except:
k8s_api_instance.create_namespaced_config_map(
namespace=pod_namespace, body=configmap_body)
+
return False
@@ -351,13 +356,36 @@ def get_cluster_state():
except:
logger.info("The cluster configmap \"{0}\" does not exist.".format(
state_configmap_name))
+ time.sleep(default_sleep)
+ leader_expiry_raw = datetime.utcnow() + timedelta(
+ seconds=cluster_leader_ttl)
+ leader_expiry = "{0}Z".format(leader_expiry_raw.isoformat("T"))
+ if check_for_active_nodes():
+ # NOTE(portdirect): here we make the assumption that the 1st pod
+ # in an existing statefulset is the one to adopt as leader.
+ leader = "{0}-0".format("-".join(
+ local_hostname.split("-")[:-1]))
+ state = "live"
+ logger.info(
+ "The cluster is running already though unmanaged \"{0}\" will be declared leader in a \"{1}\" state".
+ format(leader, state))
+ else:
+ leader = local_hostname
+ state = "new"
+ logger.info(
+ "The cluster is new \"{0}\" will be declared leader in a \"{1}\" state".
+ format(leader, state))
+
initial_configmap_body = {
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": state_configmap_name,
"annotations": {
- "openstackhelm.openstack.org/cluster.state": "new"
+ "openstackhelm.openstack.org/cluster.state": state,
+ "openstackhelm.openstack.org/leader.node": leader,
+ "openstackhelm.openstack.org/leader.expiry":
+ leader_expiry
}
},
"data": {}
@@ -369,14 +397,11 @@ def get_cluster_state():
return state
-def declare_myself_cluser_leader(ttl):
- """Declare the current pod as the cluster leader.
-
- Keyword arguments:
- ttl -- the ttl for the leader period
- """
+def declare_myself_cluser_leader():
+ """Declare the current pod as the cluster leader."""
logger.info("Declaring myself current cluster leader")
- leader_expiry_raw = datetime.utcnow() + timedelta(seconds=120)
+ leader_expiry_raw = datetime.utcnow() + timedelta(
+ seconds=cluster_leader_ttl)
leader_expiry = "{0}Z".format(leader_expiry_raw.isoformat("T"))
set_configmap_annotation(
key='openstackhelm.openstack.org/leader.node', value=local_hostname)
@@ -393,10 +418,10 @@ def deadmans_leader_election():
if iso8601.parse_date(leader_expiry).replace(
tzinfo=None) < datetime.utcnow().replace(tzinfo=None):
logger.info("Current cluster leader has expired")
- declare_myself_cluser_leader(ttl=cluster_leader_ttl)
+ declare_myself_cluser_leader()
elif local_hostname == leader_node:
logger.info("Renewing cluster leader lease")
- declare_myself_cluser_leader(ttl=cluster_leader_ttl)
+ declare_myself_cluser_leader()
def get_grastate_val(key):
@@ -452,43 +477,47 @@ def update_grastate_configmap():
def update_grastate_on_restart():
"""Update the grastate.dat on node restart."""
logger.info("Updating grastate info for node")
- if get_grastate_val(key='seqno') == '-1':
- logger.info(
- "Node shutdown was not clean, getting position via wsrep-recover")
-
- def recover_wsrep_position():
- """Extract recoved wsrep position from uncleanly exited node."""
- wsrep_recover = subprocess.Popen(
- [
- 'mysqld', '--bind-address=127.0.0.1',
- '--wsrep_cluster_address=gcomm://', '--wsrep-recover'
- ],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- out, err = wsrep_recover.communicate()
- for item in err.split("\n"):
- if "WSREP: Recovered position:" in item:
- line = item.strip().split()
- wsrep_rec_pos = line[-1].split(':')[-1]
- return wsrep_rec_pos
-
- set_grastate_val(key='seqno', value=recover_wsrep_position())
- else:
- logger.info("Node shutdown was clean, using grastate.dat")
+ if os.path.exists('/var/lib/mysql/grastate.dat'):
+ if get_grastate_val(key='seqno') == '-1':
+ logger.info(
+ "Node shutdown was not clean, getting position via wsrep-recover"
+ )
+
+ def recover_wsrep_position():
+ """Extract recoved wsrep position from uncleanly exited node."""
+ wsrep_recover = subprocess.Popen(
+ [
+ 'mysqld', '--bind-address=127.0.0.1',
+ '--wsrep_cluster_address=gcomm://', '--wsrep-recover'
+ ],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = wsrep_recover.communicate()
+ for item in err.split("\n"):
+ if "WSREP: Recovered position:" in item:
+ line = item.strip().split()
+ wsrep_rec_pos = line[-1].split(':')[-1]
+ return wsrep_rec_pos
+
+ set_grastate_val(key='seqno', value=recover_wsrep_position())
+ else:
+ logger.info("Node shutdown was clean, using grastate.dat")
- update_grastate_configmap()
+ update_grastate_configmap()
+ else:
+ logger.info("No grastate.dat exists I am a new node")
-def check_for_active_nodes(endpoints_name=direct_svc_name,
- namespace=pod_namespace):
- """Check K8s endpoints to see if there are active Mariadb Instances.
+
+def get_active_endpoints(endpoints_name=direct_svc_name,
+ namespace=pod_namespace):
+ """Returns a list of active endpoints.
Keyword arguments:
endpoints_name -- endpoints to check for active backends
(default direct_svc_name)
namespace -- namespace to check for endpoints (default pod_namespace)
"""
- logger.info("Checking for active nodes")
endpoints = k8s_api_instance.read_namespaced_endpoints(
name=endpoints_name, namespace=pod_namespace)
endpoints_dict = endpoints.to_dict()
@@ -496,6 +525,20 @@ def check_for_active_nodes(endpoints_name=direct_svc_name,
i for i, s in enumerate(endpoints_dict['subsets']) if 'addresses' in s
][0]
active_endpoints = endpoints_dict['subsets'][addresses_index]['addresses']
+ return active_endpoints
+
+
+def check_for_active_nodes(endpoints_name=direct_svc_name,
+ namespace=pod_namespace):
+ """Check K8s endpoints to see if there are active Mariadb Instances.
+
+ Keyword arguments:
+ endpoints_name -- endpoints to check for active backends
+ (default direct_svc_name)
+ namespace -- namespace to check for endpoints (default pod_namespace)
+ """
+ logger.info("Checking for active nodes")
+ active_endpoints = get_active_endpoints()
if active_endpoints and len(active_endpoints) >= 1:
return True
else:
@@ -608,7 +651,11 @@ def launch_leader_election():
def run_mysqld(cluster='existing'):
- """Launch the mysqld instance for the pod.
+ """Launch the mysqld instance for the pod. This will also run mysql upgrade
+ if we are the 1st replica, and the rest of the cluster is already running.
+ This senario will be triggerd either following a rolling update, as this
+ works in reverse order for statefulset. Or restart of the 1st instance, in
+ which case the comand should be a no-op.
Keyword arguments:
cluster -- whether we going to form a cluster 'new' or joining an existing
@@ -621,18 +668,28 @@ def run_mysqld(cluster='existing'):
mysqld_cmd = ['mysqld']
if cluster == 'new':
mysqld_cmd.append('--wsrep-new-cluster')
+ else:
+ if int(instance_number) == 0:
+ active_endpoints = get_active_endpoints()
+ if active_endpoints and len(active_endpoints) == (
+ int(mariadb_replicas) - 1):
+ run_cmd_with_logging([
+ 'mysql_upgrade',
+ '--defaults-file=/etc/mysql/admin_user.cnf'
+ ], logger)
+
run_cmd_with_logging(mysqld_cmd, logger)
def mysqld_reboot():
"""Reboot a mysqld cluster."""
- declare_myself_cluser_leader(ttl=cluster_leader_ttl)
+ declare_myself_cluser_leader()
set_grastate_val(key='safe_to_bootstrap', value='1')
run_mysqld(cluster='new')
def sigterm_shutdown(x, y):
- """Shutdown the instnace of mysqld on shutdown signal."""
+ """Shutdown the instance of mysqld on shutdown signal."""
logger.info("Got a sigterm from the container runtime, time to go.")
stop_mysqld()
@@ -642,15 +699,26 @@ signal.signal(signal.SIGTERM, sigterm_shutdown)
# Main logic loop
if get_cluster_state() == 'new':
- set_configmap_annotation(
- key='openstackhelm.openstack.org/cluster.state', value='init')
- declare_myself_cluser_leader(ttl=cluster_leader_ttl)
- launch_leader_election()
- mysqld_bootstrap()
- update_grastate_configmap()
- set_configmap_annotation(
- key='openstackhelm.openstack.org/cluster.state', value='live')
- run_mysqld(cluster='new')
+ leader_node = get_configmap_value(
+ type='annotation', key='openstackhelm.openstack.org/leader.node')
+ if leader_node == local_hostname:
+ set_configmap_annotation(
+ key='openstackhelm.openstack.org/cluster.state', value='init')
+ declare_myself_cluser_leader()
+ launch_leader_election()
+ mysqld_bootstrap()
+ update_grastate_configmap()
+ set_configmap_annotation(
+ key='openstackhelm.openstack.org/cluster.state', value='live')
+ run_mysqld(cluster='new')
+ else:
+ logger.info("Waiting for cluster to start running")
+ while not get_cluster_state() == 'live':
+ time.sleep(default_sleep)
+ while not check_for_active_nodes():
+ time.sleep(default_sleep)
+ launch_leader_election()
+ run_mysqld()
elif get_cluster_state() == 'init':
logger.info("Waiting for cluster to start running")
while not get_cluster_state() == 'live':
diff --git a/mariadb/templates/etc/_00-base.cnf.tpl b/mariadb/templates/etc/_00-base.cnf.tpl
index fc0b079..949d867 100644
--- a/mariadb/templates/etc/_00-base.cnf.tpl
+++ b/mariadb/templates/etc/_00-base.cnf.tpl
@@ -21,7 +21,7 @@ collation_server=utf8_unicode_ci
skip-character-set-client-handshake
# Logging
-slow_query_log=on
+slow_query_log=off
slow_query_log_file=/var/log/mysql/mariadb-slow.log
log_warnings=2
@@ -75,9 +75,11 @@ table_definition_cache=1024
# TODO(tomasz.paszkowski): This needs to by dynamic based on available RAM.
innodb_buffer_pool_size=1024M
innodb_doublewrite=0
+innodb_file_format=Barracuda
innodb_file_per_table=1
innodb_flush_method=O_DIRECT
innodb_io_capacity=500
+innodb_locks_unsafe_for_binlog=1
innodb_log_file_size=128M
innodb_old_blocks_time=1000
innodb_read_io_threads=8
@@ -93,9 +95,9 @@ wsrep_on=1
wsrep_provider=/usr/lib/galera/libgalera_smm.so
wsrep_provider_options="gmcast.listen_addr=tcp://0.0.0.0:{{ tuple "oslo_db" "direct" "wsrep" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
wsrep_slave_threads=12
-# FIX_ME(portdirect): https://mariadb.com/kb/en/library/mariabackup-overview/#granting-privileges-for-ssts
wsrep_sst_auth=root:{{ .Values.endpoints.oslo_db.auth.admin.password }}
-wsrep_sst_method=mariabackup
+# FIXME(portdirect): use rsync for compatibility between image variations
+wsrep_sst_method=rsync
[mysqldump]
max-allowed-packet=16M
--
1.8.3.1
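Reduced to pure functions, the two behaviours this patch adds to _start.py.tpl look roughly as follows; a sketch assuming pod hostnames follow the statefulset "<name>-<ordinal>" convention used above:

def choose_initial_leader(local_hostname, cluster_has_active_nodes):
    """Pick leader and state when no state configmap exists yet.

    A running but unmanaged cluster is adopted by declaring pod 0 of the
    statefulset the leader in a 'live' state; otherwise this pod becomes
    the leader of a 'new' cluster.
    """
    if cluster_has_active_nodes:
        leader = "{0}-0".format("-".join(local_hostname.split("-")[:-1]))
        return leader, "live"
    return local_hostname, "new"

def should_run_mysql_upgrade(instance_number, active_endpoints, replicas):
    """mysql_upgrade runs only on pod 0 once all other replicas are up:
    after a rolling update (which proceeds in reverse pod order), or on a
    plain restart of pod 0, where the upgrade is a no-op."""
    return int(instance_number) == 0 and len(active_endpoints) == int(replicas) - 1

# e.g. choose_initial_leader("mariadb-server-2", True) -> ("mariadb-server-0", "live")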

View File

@@ -1,4 +1,4 @@
-From f4d666c7fd832307dccb7f0096a48c917179449d Mon Sep 17 00:00:00 2001
+From 56c73d9c0714f4fb5dd673dc84d4cd4579de2306 Mon Sep 17 00:00:00 2001
From: Angie Wang <angie.wang@windriver.com>
Date: Fri, 19 Oct 2018 14:46:27 -0400
Subject: [PATCH 1/1] ceilometer chart updates
@@ -19,11 +19,11 @@ We should try to upstream above changes.
ceilometer/templates/configmap-bin.yaml | 2 +
ceilometer/templates/configmap-etc.yaml | 6 +
ceilometer/templates/daemonset-compute.yaml | 4 +
-ceilometer/templates/daemonset-ipmi.yaml | 105 ++
+ceilometer/templates/daemonset-ipmi.yaml | 113 ++
ceilometer/templates/deployment-central.yaml | 4 +
ceilometer/templates/deployment-notification.yaml | 20 +
ceilometer/values.yaml | 1532 ++++++++++-----------
-9 files changed, 901 insertions(+), 787 deletions(-)
+9 files changed, 910 insertions(+), 786 deletions(-)
create mode 100644 ceilometer/templates/bin/_ceilometer-ipmi.sh.tpl
create mode 100644 ceilometer/templates/daemonset-ipmi.yaml
@@ -109,10 +109,10 @@ index 686572a..bff2e15 100644
subPath: ceilometer-compute.sh
diff --git a/ceilometer/templates/daemonset-ipmi.yaml b/ceilometer/templates/daemonset-ipmi.yaml
new file mode 100644
-index 0000000..3119ace
+index 0000000..a41d60d
--- /dev/null
+++ b/ceilometer/templates/daemonset-ipmi.yaml
-@@ -0,0 +1,105 @@
+@@ -0,0 +1,113 @@
+{{/*
+Copyright (c) 2018 Wind River Systems, Inc.
+
@@ -163,6 +163,8 @@ index 0000000..3119ace
+ - name: ceilometer-ipmi
+{{ tuple $envAll "ceilometer_ipmi" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.ipmi | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
++ securityContext:
++ privileged: true
+ command:
+ - /tmp/ceilometer-ipmi.sh
+ volumeMounts:
@@ -204,6 +206,9 @@ index 0000000..3119ace
+ mountPath: /tmp/ceilometer-ipmi.sh
+ subPath: ceilometer-ipmi.sh
+ readOnly: true
++ - name: ipmi-device
++ mountPath: {{ .Values.ipmi_device }}
++ readOnly: true
+{{ if $mounts_ceilometer_ipmi.volumeMounts }}{{ toYaml $mounts_ceilometer_ipmi.volumeMounts | indent 12 }}{{ end }}
+ volumes:
+ - name: pod-etc-ceilometer
@@ -216,6 +221,9 @@ index 0000000..3119ace
+ configMap:
+ name: ceilometer-bin
+ defaultMode: 0555
++ - name: ipmi-device
++ hostPath:
++ path: {{ .Values.ipmi_device }}
+{{ if $mounts_ceilometer_ipmi.volumes }}{{ toYaml $mounts_ceilometer_ipmi.volumes | indent 8 }}{{ end }}
+{{- end }}
diff --git a/ceilometer/templates/deployment-central.yaml b/ceilometer/templates/deployment-central.yaml
@@ -279,7 +287,7 @@ index 06fda3d..45d7ecb 100644
{{ if $mounts_ceilometer_notification.volumes }}{{ toYaml $mounts_ceilometer_notification.volumes | indent 8 }}{{ end }}
{{- end }}
diff --git a/ceilometer/values.yaml b/ceilometer/values.yaml
-index 5021967..5908a4e 100644
+index 5021967..7947eb7 100644
--- a/ceilometer/values.yaml
+++ b/ceilometer/values.yaml
@@ -29,6 +29,9 @@ labels:
@@ -292,7 +300,16 @@ index 5021967..5908a4e 100644
collector:
node_selector_key: openstack-control-plane
node_selector_value: enabled
-@@ -113,1041 +116,971 @@ conf:
+@@ -80,6 +83,8 @@ network:
+ enabled: false
+ port: 38777
++ipmi_device: /dev/ipmi0
++
+ conf:
+ ceilometer:
+ DEFAULT:
+@@ -113,1041 +118,971 @@ conf:
auth_type: password
interface: internal
notification:
@@ -749,24 +766,23 @@ index 5021967..5908a4e 100644
- event_type: identity.authenticate
- traits:
- action:
- fields: payload.action
- eventTime:
- fields: payload.eventTime
- eventType:
- fields: payload.eventType
+ traits: &identity_authenticate
+ typeURI:
+ fields: payload.typeURI
id:
fields: payload.id
+ id:
+ fields: payload.id
+ action:
fields: payload.action
- eventTime:
- fields: payload.eventTime
eventType:
fields: payload.eventType
- id:
- fields: payload.id
- initiator_host_addr:
- fields: payload.initiator.host.address
- initiator_host_agent:
- fields: payload.initiator.host.agent
+ action:
+ fields: payload.action
+ eventType:
+ fields: payload.eventType
+ eventTime:
+ fields: payload.eventTime
+ outcome:
@@ -2050,7 +2066,7 @@ index 5021967..5908a4e 100644
event_pipeline:
sinks:
- name: event_sink
-@@ -1620,6 +1553,22 @@ dependencies:
+@@ -1620,6 +1555,22 @@ dependencies:
service: mongodb
- endpoint: internal
service: metric
@@ -2073,7 +2089,7 @@ index 5021967..5908a4e 100644
collector:
jobs:
- ceilometer-db-init-mongodb
-@@ -1928,6 +1877,9 @@ pod:
+@@ -1928,6 +1879,9 @@ pod:
ceilometer_central:
init_container: null
ceilometer_central:
@@ -2083,7 +2099,7 @@ index 5021967..5908a4e 100644
ceilometer_collector:
init_container: null
ceilometer_collector:
-@@ -1996,6 +1948,13 @@ pod:
+@@ -1996,6 +1950,13 @@ pod:
limits:
memory: "1024Mi"
cpu: "2000m"
@@ -2097,7 +2113,7 @@ index 5021967..5908a4e 100644
jobs:
db_init:
requests:
-@@ -2073,6 +2032,7 @@ manifests:
+@@ -2073,6 +2034,7 @@ manifests:
deployment_central: true
deployment_collector: true
daemonset_compute: true
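The daemonset additions above follow the usual pattern for handing a host device to an agent container: a hostPath volume, a read-only mount, and a privileged security context. A sketch with the kubernetes Python client (the chart itself renders equivalent YAML; /dev/ipmi0 is the new ipmi_device default):

from kubernetes import client

ipmi_device = "/dev/ipmi0"  # the chart's .Values.ipmi_device default

# hostPath volume plus read-only mount exposes the node's IPMI device in the pod
volume = client.V1Volume(
    name="ipmi-device",
    host_path=client.V1HostPathVolumeSource(path=ipmi_device),
)
mount = client.V1VolumeMount(name="ipmi-device", mount_path=ipmi_device, read_only=True)

# the agent container runs privileged so it can actually open the device node
security_context = client.V1SecurityContext(privileged=True)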

View File

@@ -0,0 +1,2 @@
SRC_DIR="files"
TIS_PATCH_VER=0

View File

@@ -0,0 +1,35 @@
Summary: openstack-panko-config
Name: openstack-panko-config
Version: 1.0
Release: %{tis_patch_ver}%{?_tis_dist}
License: Apache-2.0
Group: openstack
Packager: StarlingX
URL: unknown
BuildArch: noarch
Source: %name-%version.tar.gz
Requires: openstack-panko-common
Requires: openstack-panko-api
Summary: Package StarlingX configuration files of openstack-panko into system folders
%description
Packages StarlingX configuration files of openstack-panko into system folders.
%prep
%setup
%build
%install
%{__install} -d %{buildroot}%{_bindir}
%{__install} -m 0755 panko-expirer-active %{buildroot}%{_bindir}/panko-expirer-active
%post
if test -s %{_sysconfdir}/logrotate.d/openstack-panko ; then
echo '#See /etc/logrotate.d/syslog for panko rules' > %{_sysconfdir}/logrotate.d/openstack-panko
fi
%files
%{_bindir}/panko-expirer-active

View File

@@ -0,0 +1,60 @@
#!/bin/bash
#
# Wrapper script to run panko-expirer when on active controller only
#
PANKO_EXPIRER_INFO="/var/run/panko-expirer.info"
PANKO_EXPIRER_CMD="/usr/bin/nice -n 2 /usr/bin/panko-expirer"
function is_active_pgserver()
{
# Determine whether we're running on the same controller as the service.
local service=postgres
local enabledactive=$(/usr/bin/sm-query service $service| grep enabled-active)
if [ "x$enabledactive" == "x" ]
then
# enabled-active not found for that service on this controller
return 1
else
# enabled-active found for that resource
return 0
fi
}
if is_active_pgserver
then
if [ ! -f ${PANKO_EXPIRER_INFO} ]
then
echo skip_count=0 > ${PANKO_EXPIRER_INFO}
fi
source ${PANKO_EXPIRER_INFO}
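# Assumption: 100.101 is the platform CPU-usage alarm; if it is currently
# raised against this host, defer the expirer for up to 3 consecutive runs,
# or pin it to the most idle core on All-in-one systems.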
sudo -u postgres psql -d sysinv -c "SELECT alarm_id, entity_instance_id from i_alarm;" | grep -P "^(?=.*100.101)(?=.*${HOSTNAME})" &>/dev/null
if [ $? -eq 0 ]
then
source /etc/platform/platform.conf
if [ "${system_type}" = "All-in-one" ]
then
source /etc/init.d/task_affinity_functions.sh
idle_core=$(get_most_idle_core)
if [ "$idle_core" -ne "0" ]
then
sh -c "exec taskset -c $idle_core ${PANKO_EXPIRER_CMD}"
sed -i "/skip_count/s/=.*/=0/" ${PANKO_EXPIRER_INFO}
exit 0
fi
fi
if [ "$skip_count" -lt "3" ]
then
newval=$(($skip_count+1))
sed -i "/skip_count/s/=.*/=$newval/" ${PANKO_EXPIRER_INFO}
exit 0
fi
fi
eval ${PANKO_EXPIRER_CMD}
sed -i "/skip_count/s/=.*/=0/" ${PANKO_EXPIRER_INFO}
fi
exit 0

View File

@@ -1 +1 @@
-TIS_PATCH_VER=5
+TIS_PATCH_VER=6

View File

@@ -5,9 +5,8 @@ Subject: [PATCH 1/1] panko config
---
SOURCES/panko-dist.conf | 2 +-
SOURCES/panko-expirer-active | 27 +++++++++++++++++++++++++++
-SPECS/openstack-panko.spec | 22 +++++++++++++++++-----
-3 files changed, 45 insertions(+), 6 deletions(-)
+SPECS/openstack-panko.spec | 17 ++++++++++++++++-
+2 files changed, 17 insertions(+), 2 deletions(-)
create mode 100644 SOURCES/panko-expirer-active
diff --git a/SOURCES/panko-dist.conf b/SOURCES/panko-dist.conf
@@ -19,142 +18,66 @@ index c33a2ee..ac6f79f 100644
-log_dir = /var/log/panko
+#log_dir = /var/log/panko
use_stderr = False
diff --git a/SOURCES/panko-expirer-active b/SOURCES/panko-expirer-active
new file mode 100644
index 0000000..7d526e0
--- /dev/null
+++ b/SOURCES/panko-expirer-active
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+#
+# Wrapper script to run panko-expirer when on active controller only
+#
+PANKO_EXPIRER_INFO="/var/run/panko-expirer.info"
+PANKO_EXPIRER_CMD="/usr/bin/nice -n 2 /usr/bin/panko-expirer"
+
+function is_active_pgserver()
+{
+ # Determine whether we're running on the same controller as the service.
+ local service=postgres
+ local enabledactive=$(/usr/bin/sm-query service $service| grep enabled-active)
+ if [ "x$enabledactive" == "x" ]
+ then
+ # enabled-active not found for that service on this controller
+ return 1
+ else
+ # enabled-active found for that resource
+ return 0
+ fi
+}
+
+if is_active_pgserver
+then
+ if [ ! -f ${PANKO_EXPIRER_INFO} ]
+ then
+ echo skip_count=0 > ${PANKO_EXPIRER_INFO}
+ fi
+
+ source ${PANKO_EXPIRER_INFO}
+ sudo -u postgres psql -d sysinv -c "SELECT alarm_id, entity_instance_id from i_alarm;" | grep -P "^(?=.*100.101)(?=.*${HOSTNAME})" &>/dev/null
+ if [ $? -eq 0 ]
+ then
+ source /etc/platform/platform.conf
+ if [ "${system_type}" = "All-in-one" ]
+ then
+ source /etc/init.d/task_affinity_functions.sh
+ idle_core=$(get_most_idle_core)
+ if [ "$idle_core" -ne "0" ]
+ then
+ sh -c "exec taskset -c $idle_core ${PANKO_EXPIRER_CMD}"
+ sed -i "/skip_count/s/=.*/=0/" ${PANKO_EXPIRER_INFO}
+ exit 0
+ fi
+ fi
+
+ if [ "$skip_count" -lt "3" ]
+ then
+ newval=$(($skip_count+1))
+ sed -i "/skip_count/s/=.*/=$newval/" ${PANKO_EXPIRER_INFO}
+ exit 0
+ fi
+ fi
+
+ eval ${PANKO_EXPIRER_CMD}
+ sed -i "/skip_count/s/=.*/=0/" ${PANKO_EXPIRER_INFO}
+fi
+
+exit 0
diff --git a/SPECS/openstack-panko.spec b/SPECS/openstack-panko.spec
index d12da57..90471d9 100644
--- a/SPECS/openstack-panko.spec
+++ b/SPECS/openstack-panko.spec
-@@ -4,20 +4,26 @@
+@@ -4,20 +4,27 @@
Name: openstack-panko
Version: 3.0.0
-Release: 1%{?dist}
+Release: 1%{?_tis_dist}.%{tis_patch_ver}
Summary: Panko provides Event storage and REST API
License: ASL 2.0
URL: http://github.com/openstack/panko
Source0: https://tarballs.openstack.org/%{pypi_name}/%{pypi_name}-%{upstream_version}.tar.gz
Source1: %{pypi_name}-dist.conf
-Source2: %{pypi_name}.logrotate
+# WRS
+Source2: panko-expirer-active
Source2: %{pypi_name}.logrotate
+
+# WRS: Include patches here
+Patch1: 0001-modify-panko-api.patch
+Patch2: 0002-Change-event-list-descending.patch
+Patch3: 0003-Fix-event-query-to-sqlalchemy-with-non-admin-user.patch
+
BuildArch: noarch
BuildRequires: python-setuptools
BuildRequires: python-pbr
BuildRequires: python2-devel
BuildRequires: openstack-macros
+BuildRequires: python-tenacity >= 3.1.0
%description
HTTP API to store events.
-@@ -116,6 +122,9 @@ This package contains documentation files for panko.
+@@ -116,6 +123,11 @@ This package contains documentation files for panko.
%prep
%setup -q -n %{pypi_name}-%{upstream_version}
+# WRS: Apply patches here
+%patch1 -p1
+%patch2 -p1
+%patch3 -p1
+
find . \( -name .gitignore -o -name .placeholder \) -delete
find panko -name \*.py -exec sed -i '/\/usr\/bin\/env python/{d;q}' {} +
-@@ -158,6 +167,8 @@ mkdir -p %{buildroot}/%{_var}/log/%{name}
+@@ -158,6 +170,8 @@ mkdir -p %{buildroot}/%{_var}/log/%{name}
install -p -D -m 640 %{SOURCE1} %{buildroot}%{_datadir}/panko/panko-dist.conf
install -p -D -m 640 etc/panko/panko.conf %{buildroot}%{_sysconfdir}/panko/panko.conf
install -p -D -m 640 etc/panko/api_paste.ini %{buildroot}%{_sysconfdir}/panko/api_paste.ini
+# WRS
+install -p -D -m 640 panko/api/panko-api.py %{buildroot}%{_datadir}/panko/panko-api.py
#TODO(prad): build the docs at run time, once the we get rid of postgres setup dependency
@@ -169,8 +180,8 @@ install -d -m 755 %{buildroot}%{_sharedstatedir}/panko
install -d -m 755 %{buildroot}%{_sharedstatedir}/panko/tmp
install -d -m 755 %{buildroot}%{_localstatedir}/log/panko
-# Install logrotate
-install -p -D -m 644 %{SOURCE2} %{buildroot}%{_sysconfdir}/logrotate.d/%{name}
+# WRS
+install -p -D -m 755 %{SOURCE2} %{buildroot}%{_bindir}/panko-expirer-active
# Remove all of the conf files that are included in the buildroot/usr/etc dir since we installed them above
rm -f %{buildroot}/usr/etc/panko/*
-@@ -201,14 +212,15 @@ exit 0
+@@ -201,14 +215,15 @@ exit 0
%{_bindir}/panko-api
%{_bindir}/panko-dbsync
%{_bindir}/panko-expirer
+%{_bindir}/panko-expirer-active
%files common
%dir %{_sysconfdir}/panko
+%{_datadir}/panko/panko-api.*
@@ -162,10 +85,10 @@ index d12da57..90471d9 100644
%config(noreplace) %attr(-, root, panko) %{_sysconfdir}/panko/policy.json
%config(noreplace) %attr(-, root, panko) %{_sysconfdir}/panko/panko.conf
%config(noreplace) %attr(-, root, panko) %{_sysconfdir}/panko/api_paste.ini
--%config(noreplace) %attr(-, root, panko) %{_sysconfdir}/logrotate.d/%{name}
+ %config(noreplace) %attr(-, root, panko) %{_sysconfdir}/logrotate.d/%{name}
%dir %attr(0755, panko, root) %{_localstatedir}/log/panko
%defattr(-, panko, panko, -)
--
1.8.3.1

View File

@@ -1,32 +0,0 @@
From 4e791be412662ae1f97cfd4ff5a90ea6337e49a4 Mon Sep 17 00:00:00 2001
From: Angie Wang <angie.Wang@windriver.com>
Date: Thu, 16 Nov 2017 15:25:08 -0500
Subject: [PATCH 1/1] spec change event list descending
---
SPECS/openstack-panko.spec | 2 ++
1 file changed, 2 insertions(+)
diff --git a/SPECS/openstack-panko.spec b/SPECS/openstack-panko.spec
index 90471d9..95497b4 100644
--- a/SPECS/openstack-panko.spec
+++ b/SPECS/openstack-panko.spec
@@ -16,6 +16,7 @@ Source2: panko-expirer-active
# WRS: Include patches here
Patch1: 0001-modify-panko-api.patch
+Patch2: 0002-Change-event-list-descending.patch
BuildArch: noarch
@@ -124,6 +125,7 @@ This package contains documentation files for panko.
# WRS: Apply patches here
%patch1 -p1
+%patch2 -p1
find . \( -name .gitignore -o -name .placeholder \) -delete
--
1.8.3.1

View File

@@ -1,32 +0,0 @@
From aad89aa79de1e9f0b35afa1ba587c10591a889e0 Mon Sep 17 00:00:00 2001
From: Angie Wang <angie.Wang@windriver.com>
Date: Mon, 11 Dec 2017 16:29:23 -0500
Subject: [PATCH 1/1] spec fix event query to sqlalchemy with non admin user
---
SPECS/openstack-panko.spec | 2 ++
1 file changed, 2 insertions(+)
diff --git a/SPECS/openstack-panko.spec b/SPECS/openstack-panko.spec
index 95497b4..87a6a5a 100644
--- a/SPECS/openstack-panko.spec
+++ b/SPECS/openstack-panko.spec
@@ -17,6 +17,7 @@ Source2: panko-expirer-active
# WRS: Include patches here
Patch1: 0001-modify-panko-api.patch
Patch2: 0002-Change-event-list-descending.patch
+Patch3: 0003-Fix-event-query-to-sqlalchemy-with-non-admin-user.patch
BuildArch: noarch
@@ -126,6 +127,7 @@ This package contains documentation files for panko.
# WRS: Apply patches here
%patch1 -p1
%patch2 -p1
+%patch3 -p1
find . \( -name .gitignore -o -name .placeholder \) -delete
--
1.8.3.1

View File

@@ -1,3 +1 @@
0001-panko-config.patch
-0002-spec-change-event-list-descending.patch
-0003-spec-fix-event-query-to-sqlalchemy-with-non-admin-us.patch

View File

@@ -28,16 +28,16 @@ start()
fi
. $PLATFORM_CONF_FILE
if [[ "$nodetype" == "compute" || "$subfunction" == *"compute"* ]] ; then
if [ ! -f $VOLATILE_COMPUTE_CONFIG_COMPLETE ]; then
if [[ "$nodetype" == "worker" || "$subfunction" == *"worker"* ]] ; then
if [ ! -f $VOLATILE_WORKER_CONFIG_COMPLETE ]; then
# Do not start polling until compute manifests have been applied
echo "Waiting for $VOLATILE_COMPUTE_CONFIG_COMPLETE"
echo "Waiting for $VOLATILE_WORKER_CONFIG_COMPLETE"
exit 0
elif [ -f $VOLATILE_DISABLE_COMPUTE_SERVICES ]; then
elif [ -f $VOLATILE_DISABLE_WORKER_SERVICES ]; then
# Do not start polling if compute services are disabled. This can
# happen during an upgrade when controller-1 is running a newer
# load than controller-0.
echo "Waiting for $VOLATILE_DISABLE_COMPUTE_SERVICES"
echo "Waiting for $VOLATILE_DISABLE_WORKER_SERVICES"
exit 0
fi
fi

View File

@@ -14,13 +14,13 @@ debounce = 20 ; number of seconds that a process needs to remain
startuptime = 5 ; Seconds to wait after process start before starting the debounce monitor
mode = passive ; Monitoring mode: passive (default) or active
; passive: process death monitoring (default: always)
-; active : heartbeat monitoring, i.e. request / response messaging
+; active : heartbeat monitoring, i.e. request / response messaging
; ignore : do not monitor or stop monitoring
-subfunction = compute ; Optional label.
+subfunction = worker ; Optional label.
; Manage this process in the context of a combo host subfunction
-; Choices: compute or storage.
+; Choices: worker or storage.
; when specified pmond will wait for
-; /var/run/.compute_config_complete or
+; /var/run/.worker_config_complete or
; /var/run/.storage_config_complete
; ... before managing this process with the specified subfunction
; Excluding this label will cause this process to be managed by default on startup

View File

@@ -0,0 +1,58 @@
From 1a5349cf73177d155a3309737635bc1ae22ae051 Mon Sep 17 00:00:00 2001
From: Don Penney <don.penney@windriver.com>
Date: Mon, 17 Dec 2018 09:16:20 -0600
Subject: [PATCH] Build python wheel
---
SPECS/python-django-openstack-auth.spec | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/SPECS/python-django-openstack-auth.spec b/SPECS/python-django-openstack-auth.spec
index 89acf21..5e9e8e0 100644
--- a/SPECS/python-django-openstack-auth.spec
+++ b/SPECS/python-django-openstack-auth.spec
@@ -39,6 +39,8 @@ Summary: Django authentication backend for OpenStack Keystone
%{?python_provide:%python_provide python2-django-openstack-auth}
BuildRequires: python2-devel
BuildRequires: python-setuptools
+BuildRequires: python2-pip
+BuildRequires: python2-wheel
BuildRequires: python-sphinx
BuildRequires: python-keystoneclient
BuildRequires: python-iso8601
@@ -135,6 +137,7 @@ find . -name "django.po" -exec rm -f '{}' \;
%{__python} setup.py build
+%py2_build_wheel
%if 0%{?with_python3}
%{__python3} setup.py build
@@ -145,6 +148,8 @@ find . -name "django.po" -exec rm -f '{}' \;
%install
%{__python2} setup.py install --skip-build --root %{buildroot}
+mkdir -p $RPM_BUILD_ROOT/wheels
+install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/
cp -r openstack_auth/locale %{buildroot}/%{python_sitelib}/openstack_auth
@@ -189,6 +194,15 @@ rm -rf %{buildroot}/%{python3_sitelib}/openstack_auth/tests
%{python3_sitelib}/%{pypi_name}-*.egg-info
%endif
+%package wheels
+Summary: %{name} wheels
+
+%description wheels
+Contains python wheels for %{name}
+
+%files wheels
+/wheels/*
+
%changelog
* Mon Aug 21 2017 Alfredo Moralejo <amoralej@redhat.com> 3.5.0-1
- Update to 3.5.0
--
1.8.3.1

View File

@@ -5,3 +5,4 @@
0005-meta-cache-authorized-tenants-in-cookie-to-improve-performance.patch
0006-meta-Distributed-Keystone.patch
0007-meta-patch-for-distributed-keystone-fix.patch
+0008-Build-python-wheel.patch

File diff suppressed because it is too large

View File

@@ -22,10 +22,9 @@ Source4: openstack-dashboard-httpd-logging.conf
# logrotate config
Source5: python-django-horizon-logrotate.conf
-# WRS
+# STX
Source7: horizon.init
Source8: horizon-clearsessions
Source10: local_settings.py
Source11: horizon-patching-restart
Source12: horizon-region-exclusions.csv
Source13: guni_config.py
@@ -40,7 +39,7 @@ BuildArch: noarch
BuildRequires: python-django
Requires: python-django
-# WRS
+# STX
BuildRequires: cgts-client
Requires: cgts-client
@@ -204,7 +203,7 @@ BuildRequires: python-pint
BuildRequires: pytz
BuildRequires: systemd
-# WRS
+# STX
BuildRequires: systemd-devel
%description -n openstack-dashboard
@@ -214,7 +213,7 @@ mostly consisting of JavaScript and CSS to tie it altogether as a standalone
site.
-# Turn OFF sphinx documentation in WRS environment
+# Turn OFF sphinx documentation in STX environment
# Mock does not have /dev/log so sphinx-build will always fail
%if 0%{?with_doc}
%package doc
@@ -249,15 +248,13 @@ Customization module for OpenStack Dashboard to provide a branded logo.
%prep
%autosetup -n horizon-%{upstream_version} -S git
-# WRS remove troublesome files introduced by tox
+# STX remove troublesome files introduced by tox
rm -f openstack_dashboard/test/.secret_key_store
rm -f openstack_dashboard/test/*.secret_key_store.lock
rm -f openstack_dashboard/local/.secret_key_store
rm -f openstack_dashboard/local/*.secret_key_store.lock
rm -rf horizon.egg-info
cp %{SOURCE10} openstack_dashboard/local/local_settings.py
# drop config snippet
cp -p %{SOURCE4} .
cp -p %{SOURCE13} .
@@ -276,7 +273,7 @@ sed -i "/^COMPRESS_PARSER = .*/a COMPRESS_OFFLINE = True" openstack_dashboard/se
# set COMPRESS_OFFLINE=True
sed -i 's:COMPRESS_OFFLINE.=.False:COMPRESS_OFFLINE = True:' openstack_dashboard/settings.py
-# WRS: MANIFEST needs .eslintrc files for angular
+# STX: MANIFEST needs .eslintrc files for angular
echo "include .eslintrc" >> MANIFEST.in
# MANIFEST needs to include json and pot files under openstack_dashboard
echo "recursive-include openstack_dashboard *.json *.pot .eslintrc" >> MANIFEST.in
@@ -294,17 +291,17 @@ cd openstack_dashboard && django-admin compilemessages && cd ..
# Further reading why not remove upstream egg metadata:
# https://github.com/emonty/python-oslo-messaging/commit/f632684eb2d582253601e8da7ffdb8e55396e924
# https://fedorahosted.org/fpc/ticket/488
-# WRS: 2 problems. 1 we dont have an egg yet. 2 there are no .mo files
+# STX: 2 problems. 1 we dont have an egg yet. 2 there are no .mo files
#echo >> horizon.egg-info/SOURCES.txt
#ls */locale/*/LC_MESSAGES/django*mo >> horizon.egg-info/SOURCES.txt
export PBR_VERSION=%{version}
%{__python} setup.py build
-# WRS: package our own local_setting.py and run compression on the controller
# compress css, js etc.
-#cp openstack_dashboard/local/local_settings.py.example openstack_dashboard/local/local_settings.py
+cp openstack_dashboard/local/local_settings.py.example openstack_dashboard/local/local_settings.py
# get it ready for compressing later in puppet-horizon
-# WRS turn off compression because /dev/log does not exist in mock
+# STX: run compression on the controller
+# STX: turn off compression because /dev/log does not exist in mock
#%{__python} manage.py compress --force
@@ -315,7 +312,7 @@ export PYTHONPATH="$( pwd ):$PYTHONPATH"
sphinx-build -b html doc/source html
# undo hack
-#cp openstack_dashboard/local/local_settings.py.example openstack_dashboard/local/local_settings.py
+cp openstack_dashboard/local/local_settings.py.example openstack_dashboard/local/local_settings.py
# Fix hidden-file-or-dir warnings
rm -fr html/.doctrees html/.buildinfo
@@ -330,7 +327,7 @@ export PBR_VERSION=%{version}
mkdir -p $RPM_BUILD_ROOT/wheels
install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/
-# WRS
+# STX
install -d -m 755 %{buildroot}/opt/branding
mkdir -p %{buildroot}%{_sysconfdir}/rc.d/init.d
install -m 755 -D -p %{SOURCE7} %{buildroot}%{_sysconfdir}/rc.d/init.d/horizon
@@ -354,7 +351,7 @@ cp %{SOURCE3} %{buildroot}%{_unitdir}/httpd.service.d/openstack-dashboard.conf
mv %{buildroot}%{python_sitelib}/openstack_dashboard \
%{buildroot}%{_datadir}/openstack-dashboard
cp manage.py %{buildroot}%{_datadir}/openstack-dashboard
-# WRS
+# STX
cp guni_config.py %{buildroot}%{_datadir}/openstack-dashboard
rm -rf %{buildroot}%{python_sitelib}/openstack_dashboard
@@ -364,6 +361,7 @@ find %{buildroot} -name djangojs.po -exec rm '{}' \;
# Move config to /etc, symlink it back to /usr/share
mv %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/local/local_settings.py.example %{buildroot}%{_sysconfdir}/openstack-dashboard/local_settings
+# STX: we do not want to have this symlink, puppet will overwrite the content of local_settings
#ln -s ../../../../../%{_sysconfdir}/openstack-dashboard/local_settings %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/local/local_settings.py
mv %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/conf/*.json %{buildroot}%{_sysconfdir}/openstack-dashboard
@@ -377,7 +375,7 @@ grep "\/site-packages\/horizon" django.lang > horizon.lang
mkdir -p %{buildroot}%{_datadir}/openstack-dashboard/static
cp -a openstack_dashboard/static/* %{buildroot}%{_datadir}/openstack-dashboard/static
cp -a horizon/static/* %{buildroot}%{_datadir}/openstack-dashboard/static
-# WRS: there is no static folder, since compress step was skipped
+# STX: there is no static folder, since compress step was skipped
#cp -a static/* %{buildroot}%{_datadir}/openstack-dashboard/static
# create /var/run/openstack-dashboard/ and own it
@@ -444,7 +442,7 @@ systemctl daemon-reload >/dev/null 2>&1 || :
%{_datadir}/openstack-dashboard/openstack_dashboard/dashboards/identity
%{_datadir}/openstack-dashboard/openstack_dashboard/dashboards/project
%{_datadir}/openstack-dashboard/openstack_dashboard/dashboards/settings
-# WRS
+# STX
%{_datadir}/openstack-dashboard/openstack_dashboard/dashboards/__init__.py*
%{_datadir}/openstack-dashboard/openstack_dashboard/django_pyscss_fix
%{_datadir}/openstack-dashboard/openstack_dashboard/enabled
@@ -482,7 +480,7 @@ systemctl daemon-reload >/dev/null 2>&1 || :
%attr(755,root,root) %dir %{_unitdir}/httpd.service.d
%config(noreplace) %{_unitdir}/httpd.service.d/openstack-dashboard.conf
-# WRS
+# STX
%dir /opt/branding
%config(noreplace) /opt/branding/horizon-region-exclusions.csv
%{_sysconfdir}/rc.d/init.d/horizon

View File

@@ -5,10 +5,11 @@ PROJECT_REPO=https://github.com/starlingx-staging/stx-horizon.git
PIP_PACKAGES="pycrypto python-ceilometerclient python-cephclient \
sm_client cgtsclient cgcs_patch sysinv nfv_client \
tsconfig coverage pyudev fm_api fm_core configutilities platform_util \
-controllerconfig ldap distributedcloud_client pylint"
+controllerconfig ldap distributedcloud_client starlingx-dashboard pylint"
PROFILES="fluent horizon apache"
CUSTOMIZATION="\
ln -s /bin/true /usr/bin/a2enmod && \
-sed -i 's/Listen 80/#Listen 80/' /etc/httpd/conf/httpd.conf \
+sed -i 's/Listen 80/#Listen 80/' /etc/httpd/conf/httpd.conf && \
+cp -r /var/lib/openstack/lib/python2.7/site-packages/starlingx_dashboard/themes/starlingx/ /var/lib/openstack/lib/python2.7/site-packages/openstack_dashboard/themes/starlingx/ \
"

View File

@@ -12,13 +12,13 @@ debounce = 20 ; number of seconds that a process needs to remain
startuptime = 5 ; Seconds to wait after process start before starting the debounce monitor
mode = passive ; Monitoring mode: passive (default) or active
; passive: process death monitoring (default: always)
-; active : heartbeat monitoring, i.e. request / response messaging
+; active : heartbeat monitoring, i.e. request / response messaging
; ignore : do not monitor or stop monitoring
-subfunction = compute ; Optional label.
-; Manage this process in the context of a combo host subfunction
-; Choices: compute or storage.
+subfunction = worker ; Optional label.
+; Manage this process in the context of a combo host subfunction
+; Choices: worker or storage.
; when specified pmond will wait for
-; /var/run/.compute_config_complete or
-; /var/run/.storage_config_complete
+; /var/run/.worker_config_complete or
+; /var/run/.storage_config_complete
; ... before managing this process with the specified subfunction
; Excluding this label will cause this process to be managed by default on startup

View File

@@ -12,13 +12,13 @@ debounce = 20 ; number of seconds that a process needs to remain
startuptime = 5 ; Seconds to wait after process start before starting the debounce monitor
mode = passive ; Monitoring mode: passive (default) or active
; passive: process death monitoring (default: always)
-; active : heartbeat monitoring, i.e. request / response messaging
+; active : heartbeat monitoring, i.e. request / response messaging
; ignore : do not monitor or stop monitoring
-subfunction = compute ; Optional label.
-; Manage this process in the context of a combo host subfunction
-; Choices: compute or storage.
+subfunction = worker ; Optional label.
+; Manage this process in the context of a combo host subfunction
+; Choices: worker or storage.
; when specified pmond will wait for
-; /var/run/.compute_config_complete or
-; /var/run/.storage_config_complete
+; /var/run/.worker_config_complete or
+; /var/run/.storage_config_complete
; ... before managing this process with the specified subfunction
; Excluding this label will cause this process to be managed by default on startup

View File

@@ -12,13 +12,13 @@ debounce = 20 ; number of seconds that a process needs to remain
startuptime = 5 ; Seconds to wait after process start before starting the debounce monitor
mode = passive ; Monitoring mode: passive (default) or active
; passive: process death monitoring (default: always)
-; active : heartbeat monitoring, i.e. request / response messaging
+; active : heartbeat monitoring, i.e. request / response messaging
; ignore : do not monitor or stop monitoring
-subfunction = compute ; Optional label.
-; Manage this process in the context of a combo host subfunction
-; Choices: compute or storage.
+subfunction = worker ; Optional label.
+; Manage this process in the context of a combo host subfunction
+; Choices: worker or storage.
; when specified pmond will wait for
-; /var/run/.compute_config_complete or
-; /var/run/.storage_config_complete
+; /var/run/.worker_config_complete or
+; /var/run/.storage_config_complete
; ... before managing this process with the specified subfunction
; Excluding this label will cause this process to be managed by default on startup

View File

@@ -1,7 +1,7 @@
[Unit]
Description=KVM Timer Advance Setup
After=openstack-nova-compute-setup.service
-Before=nova-compute.service goenabled-compute.service
+Before=nova-compute.service goenabled-worker.service
[Service]
Type=simple

View File

@@ -29,7 +29,7 @@ declare -i GLOBAL_RC=$PATCH_STATUS_OK
#
# Processes that run with compute subfunction
#
-if is_compute || is_cpe
+if is_worker || is_cpe
then
processes_to_restart="nova-compute"
/usr/local/sbin/patch-restart-processes nova-compute

View File

@@ -3,7 +3,7 @@ LABEL=stx-nova
PROJECT=nova
PROJECT_REPO=https://github.com/starlingx-staging/stx-nova.git
PIP_PACKAGES="pycrypto tsconfig cgtsclient httplib2 pylint"
DIST_PACKAGES="openssh-clients openssh-server libvirt "
DIST_PACKAGES="openssh-clients openssh-server libvirt pam-config"
PROFILES="fluent nova ceph linuxbridge openvswitch configdrive qemu apache"

View File

@@ -1,13 +1,13 @@
#!/bin/bash
#
-# Copyright (c) 2013-2017 Wind River Systems, Inc.
+# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script is intended to set up the cpusets for use by nova-compute.
-# It should only run on compute nodes, and it must be run after the
+# It should only run on worker nodes, and it must be run after the
# /etc/nova/nova.conf file has been modified by packstack since it
# extracts the "vcpu_pin_set" value from that file.
#
@@ -62,9 +62,9 @@ start()
# Do not continue if the host has not been configured. We can't do
# anything until the nova.conf file has been updated.
-if [ ! -f ${INITIAL_COMPUTE_CONFIG_COMPLETE} ]
+if [ ! -f ${INITIAL_WORKER_CONFIG_COMPLETE} ]
then
log "Initial compute configuration is not complete, nothing to do"
log "Initial worker configuration is not complete, nothing to do"
exit 0
fi

View File

@@ -1,7 +1,7 @@
[Unit]
Description=OpenStack Nova Compute Server Pre-Startup
-After=syslog.target compute-config-gate.service
-Before=nova-compute.service goenabled-compute.service
+After=syslog.target worker-config-gate.service
+Before=nova-compute.service goenabled-worker.service
[Service]
Type=oneshot