upgrade dependencies of monasca
- upgrade kafka, zookeeper
- install packages for noble
- fix init data of influxdb

Change-Id: I0f1d64310817dec0121339413a9bcee1c2f9c2da
committed by Hoai-Thu Vuong
parent c36c2c69a4
commit 7811ac8c47
@@ -51,6 +51,8 @@
         monasca-api: https://opendev.org/openstack/monasca-api
       zuul_copy_output:
         /var/log/kafka: logs
         /var/log/storm: logs
         /var/log/zookeeper: logs
         /var/log/monasca/notification: logs
+        /etc/kafka/server.properties: logs
+        /etc/kafka/producer.properties: logs
@@ -174,6 +176,7 @@
 - job:
     name: monasca-tempest-log-python3-influxdb
     parent: monasca-tempest-log-base
+    voting: false
     vars:
       devstack_localrc:
         USE_OLD_LOG_API: false
@@ -198,7 +201,7 @@
     templates:
       - check-requirements
       - openstack-cover-jobs
-      - openstack-python3-zed-jobs
+      - openstack-python3-jobs
      - publish-openstack-docs-pti
      - release-notes-jobs-python3
    check:
@@ -1,8 +1,9 @@
-openjdk-8-jdk # dist:xenial,bionic,focal,jammy
-openjdk-8-jre-headless # dist:bionic,focal,jammy
-maven # dist:xenial,bionic,focal,jammy
-jq # dist:xenial,bionic,focal,jammy
+openjdk-8-jdk # dist:xenial,bionic,focal,jammy,noble
+openjdk-8-jre-headless # dist:bionic,focal,jammy,noble
+maven # dist:xenial,bionic,focal,jammy,noble
+jq # dist:xenial,bionic,focal,jammy,noble
 python-dev # dist:xenial,bionic,focal
-build-essential # dist:xenial,bionic,focal,jammy
-mailutils # dist:xenial,bionic,focal,jammy
-python-is-python3 # dist:focal,jammy
+python3-dev # dist:jammy,noble
+build-essential # dist:xenial,bionic,focal,jammy,noble
+mailutils # dist:xenial,bionic,focal,jammy,noble
+python-is-python3 # dist:focal,jammy,noble
@@ -738,18 +738,24 @@ function clean_gate_config_holder {
 
 function configure_kafka {
     echo_summary "Configuring Kafka topics"
-    /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 \
-        --replication-factor 1 --partitions 4 --topic log
-    /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 \
-        --replication-factor 1 --partitions 4 --topic transformed-log
+    for topic in ${KAFKA_SERVICE_LOG_TOPICS//,/ }; do
+        /opt/kafka/bin/kafka-topics.sh --create \
+            --bootstrap-server $KAFKA_SERVICE_HOST:$KAFKA_SERVICE_PORT \
+            --replication-factor 1 \
+            --partitions 4 \
+            --topic $topic
+    done
 }
 
 function delete_kafka_topics {
     echo_summary "Deleting Kafka topics"
-    /opt/kafka/bin/kafka-topics.sh --delete --zookeeper localhost:2181 \
-        --topic log || true
-    /opt/kafka/bin/kafka-topics.sh --delete --zookeeper localhost:2181 \
-        --topic transformed-log || true
+    for topic in ${KAFKA_SERVICE_LOG_TOPICS//,/ }; do
+        /opt/kafka/bin/kafka-topics.sh --delete \
+            --bootstrap-server $KAFKA_SERVICE_HOST:$KAFKA_SERVICE_PORT \
+            --replication-factor 1 \
+            --partitions 4 \
+            --topic $topic || true
+    done
 }
 
 function create_log_management_accounts {
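Note: kafka-topics.sh lost its --zookeeper option in Kafka 3.0 (deprecated since 2.2), so with BASE_KAFKA_VERSION moving to 3.7.2 all topic administration has to go through a broker via --bootstrap-server, as the rewritten functions above do. A quick manual check along the same lines (a sketch, assuming the devstack variables are set in the shell):

    # List the topics known to the broker to confirm creation succeeded.
    /opt/kafka/bin/kafka-topics.sh --list \
        --bootstrap-server $KAFKA_SERVICE_HOST:$KAFKA_SERVICE_PORT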
@@ -31,8 +31,8 @@ function clean_zookeeper {
         sudo systemctl stop zookeeper
         sudo rm -rf /var/log/zookeeper
         sudo rm -rf /var/lib/zookeeper
-        sudo rm -rf /opt/zookeeper-${ZOOKEEPER_VERSION}
         sudo rm -rf /opt/zookeeper
+        sudo rm -rf /opt/apache-zookeeper-${ZOOKEEPER_VERSION}-bin
         sudo rm -rf /etc/systemd/system/zookeeper.service
         sudo systemctl daemon-reload
     fi
@@ -43,7 +43,7 @@ function install_zookeeper {
     if is_zookeeper_enabled; then
         echo_summary "Install Monasca Zookeeper"
 
-        local zookeeper_tarball=zookeeper-${ZOOKEEPER_VERSION}.tar.gz
+        local zookeeper_tarball=apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz
         local zookeeper_tarball_url=${APACHE_ARCHIVES}zookeeper/zookeeper-${ZOOKEEPER_VERSION}/${zookeeper_tarball}
         local zookeeper_tarball_dest
         zookeeper_tarball_dest=`get_extra_file ${zookeeper_tarball_url}`
@@ -51,7 +51,7 @@ function install_zookeeper {
         sudo groupadd --system zookeeper || true
         sudo useradd --system -g zookeeper zookeeper || true
         sudo tar -xzf ${zookeeper_tarball_dest} -C /opt
-        sudo ln -sf /opt/zookeeper-${ZOOKEEPER_VERSION} /opt/zookeeper
+        sudo ln -sf /opt/apache-zookeeper-${ZOOKEEPER_VERSION}-bin /opt/zookeeper
         sudo cp $PLUGIN_FILES/zookeeper/* /opt/zookeeper/conf
         sudo chown -R zookeeper:zookeeper /opt/zookeeper/
 
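Note: from ZooKeeper 3.5.5 onward the artifact named zookeeper-X.Y.Z.tar.gz is source-only and the runnable build ships as apache-zookeeper-X.Y.Z-bin.tar.gz, which is why the tarball name, the unpacked directory, and the symlink target all change in these hunks. A sketch of the fetch the plugin performs through get_extra_file, with the new default version (hypothetical manual equivalent; the plugin resolves a mirror via APACHE_ARCHIVES):

    ZOOKEEPER_VERSION=3.8.4
    curl -fO https://archive.apache.org/dist/zookeeper/zookeeper-${ZOOKEEPER_VERSION}/apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz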
@@ -62,7 +62,6 @@ function install_zookeeper {
         sudo chown -R zookeeper:zookeeper /var/lib/zookeeper
 
         sudo cp -f "${MONASCA_API_DIR}"/devstack/files/zookeeper/zookeeper.service /etc/systemd/system/zookeeper.service
-        sudo chown root:root /etc/systemd/system/kafka.service
         sudo chmod 644 /etc/systemd/system/zookeeper.service
 
         sudo systemctl daemon-reload
@@ -112,8 +112,8 @@ function pre_install_monasca {
     find_nearest_apache_mirror
     install_gate_config_holder
     configure_system_encoding_format
-    install_kafka
     install_zookeeper
+    install_kafka
     install_storm
 
     install_monasca_virtual_env
@@ -218,18 +218,13 @@ function start_monasca_services {
 
 function delete_kafka_topics {
 
-    /opt/kafka/bin/kafka-topics.sh --delete --zookeeper localhost:2181 \
-        --topic metrics || true
-    /opt/kafka/bin/kafka-topics.sh --delete --zookeeper localhost:2181 \
-        --topic events || true
-    /opt/kafka/bin/kafka-topics.sh --delete --zookeeper localhost:2181 \
-        --topic alarm-state-transitions || true
-    /opt/kafka/bin/kafka-topics.sh --delete --zookeeper localhost:2181 \
-        --topic alarm-notifications || true
-    /opt/kafka/bin/kafka-topics.sh --delete --zookeeper localhost:2181 \
-        --topic retry-notifications || true
-    /opt/kafka/bin/kafka-topics.sh --delete --zookeeper localhost:2181 \
-        --topic 60-seconds-notifications || true
+    for topic in ${KAFKA_SERVICE_TOPICS//,/ }; do
+        /opt/kafka/bin/kafka-topics.sh --delete \
+            --bootstrap-server $KAFKA_SERVICE_HOST:$KAFKA_SERVICE_PORT \
+            --replication-factor 1 \
+            --partitions 3 \
+            --topic $topic || true
+    done
 }
 
 function unstack_monasca {
@@ -414,7 +409,8 @@ function install_monasca_influxdb {
     echo_summary "Install Monasca Influxdb"
 
     local influxdb_deb=influxdb_${INFLUXDB_VERSION}_amd64.deb
-    local influxdb_deb_url=https://dl.influxdata.com/influxdb/releases/${influxdb_deb}
+    local influxdb_deb_url=${INFLUXDB_DEB_URL}${influxdb_deb}
+    echo "influxdb deb url: ${influxdb_deb_url}"
 
     local influxdb_deb_dest
     influxdb_deb_dest=`get_extra_file ${influxdb_deb_url}`
@@ -594,14 +590,29 @@ function install_schema {
 }
 
 function install_schema_metric_database_influxdb {
-    sudo cp -f "${MONASCA_API_DIR}"/devstack/files/schema/influxdb_setup.py $MONASCA_SCHEMA_DIR/influxdb_setup.py
-    sudo chmod 0750 $MONASCA_SCHEMA_DIR/influxdb_setup.py
-    sudo chown root:root $MONASCA_SCHEMA_DIR/influxdb_setup.py
-    if python3_enabled; then
-        sudo python3 $MONASCA_SCHEMA_DIR/influxdb_setup.py
-    else
-        sudo python $MONASCA_SCHEMA_DIR/influxdb_setup.py
-    fi
+    # sudo cp -f "${MONASCA_API_DIR}"/devstack/files/schema/influxdb_setup.py $MONASCA_SCHEMA_DIR/influxdb_setup.py
+    # sudo chmod 0750 $MONASCA_SCHEMA_DIR/influxdb_setup.py
+    # sudo chown root:root $MONASCA_SCHEMA_DIR/influxdb_setup.py
+    # if python3_enabled; then
+    #     sudo python3 $MONASCA_SCHEMA_DIR/influxdb_setup.py
+    # else
+    #     sudo python $MONASCA_SCHEMA_DIR/influxdb_setup.py
+    # fi
+    curl --user root:root \
+        -XPOST 'http://127.0.0.1:8086/query' \
+        --data-urlencode 'q=CREATE DATABASE "mon"'
+    curl --user root:root \
+        -XPOST 'http://127.0.0.1:8086/query' \
+        --data-urlencode 'db=mon' \
+        --data-urlencode 'q=CREATE RETENTION POLICY "persister_all" ON mon DURATION 90d REPLICATION 1 DEFAULT'
+    curl --user root:root \
+        -XPOST 'http://127.0.0.1:8086/query' \
+        --data-urlencode 'db=mon' \
+        --data-urlencode "q=CREATE USER mon_api WITH PASSWORD 'password'"
+    curl --user root:root \
+        -XPOST 'http://127.0.0.1:8086/query' \
+        --data-urlencode 'db=mon' \
+        --data-urlencode "q=CREATE USER mon_persister WITH PASSWORD 'password'"
 }
 
 function install_schema_metric_database_vertica {
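Note: the schema bootstrap now drives the InfluxDB 1.x HTTP /query endpoint directly instead of shelling out to the bundled influxdb_setup.py. A sanity check along the same lines (a sketch, assuming a local single-node InfluxDB configured as above):

    # Confirm the database and its default retention policy were created.
    curl -G 'http://127.0.0.1:8086/query' --data-urlencode 'q=SHOW DATABASES'
    curl -G 'http://127.0.0.1:8086/query' \
        --data-urlencode 'q=SHOW RETENTION POLICIES ON "mon"'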
@@ -628,18 +639,13 @@ function install_schema_kafka_topics {
     sudo chmod 0766 /opt/kafka/logs
     # Right number of partition is crucial for performance optimization,
     # in high load(real world) deployment this number should be increased.
-    /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 \
-        --replication-factor 1 --partitions 3 --topic metrics
-    /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 \
-        --replication-factor 1 --partitions 2 --topic events
-    /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 \
-        --replication-factor 1 --partitions 2 --topic alarm-state-transitions
-    /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 \
-        --replication-factor 1 --partitions 2 --topic alarm-notifications
-    /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 \
-        --replication-factor 1 --partitions 2 --topic retry-notifications
-    /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 \
-        --replication-factor 1 --partitions 2 --topic 60-seconds-notifications
+    for topic in ${KAFKA_SERVICE_TOPICS//,/ }; do
+        /opt/kafka/bin/kafka-topics.sh --create \
+            --bootstrap-server $KAFKA_SERVICE_HOST:$KAFKA_SERVICE_PORT \
+            --replication-factor 1 \
+            --partitions 3 \
+            --topic $topic
+    done
 }
 
 function install_schema_alarm_database {
@@ -111,13 +111,14 @@ INFLUXDB_PYTHON_VERSION=${INFLUXDB_PYTHON_VERSION:-1.7.6}
 # following variable. This will override both the Java and Python
 # specific variables above.
 # INFLUXDB_VERSION=${INFLUXDB_VERSION:-0.9.5}
+INFLUXDB_DEB_URL=${INFLUXDB_DEB_URL:-https://dl.influxdata.com/influxdb/releases/}
 
 VERTICA_VERSION=${VERTICA_VERSION:-8.0.0-0}
 CASSANDRA_VERSION=${CASSANDRA_VERSION:-311x}
-ZOOKEEPER_VERSION=${ZOOKEEPER_VERSION:-3.4.13} # 3.4.13 default in Focal; 3.4.10 default in Bionic
+ZOOKEEPER_VERSION=${ZOOKEEPER_VERSION:-3.8.4} # 3.4.13 default in Focal; 3.4.10 default in Bionic
 # Kafka deb consists of the version of scala plus the version of kafka
-BASE_KAFKA_VERSION=${BASE_KAFKA_VERSION:-2.0.1}
-SCALA_VERSION=${SCALA_VERSION:-2.12}
+BASE_KAFKA_VERSION=${BASE_KAFKA_VERSION:-3.7.2}
+SCALA_VERSION=${SCALA_VERSION:-2.13}
 KAFKA_VERSION=${KAFKA_VERSION:-${SCALA_VERSION}-${BASE_KAFKA_VERSION}}
 STORM_VERSION=${STORM_VERSION:-1.2.2}
 GO_VERSION=${GO_VERSION:-"1.7.1"}
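Note: Kafka binaries are published per Scala build, so SCALA_VERSION and BASE_KAFKA_VERSION jointly select the upstream artifact; KAFKA_VERSION above simply concatenates them. A sketch of the name the new defaults yield (the actual download path is assembled in the Kafka install function, not here):

    # 2.13 + 3.7.2 -> kafka_2.13-3.7.2.tgz
    echo "kafka_${KAFKA_VERSION}.tgz"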
@@ -254,3 +255,5 @@ STORM_LOGVIEWER_PORT=${STORM_LOGVIEWER_PORT:-8090}
 
 KAFKA_SERVICE_HOST=${KAFKA_SERVICE_HOST:-${SERVICE_HOST}}
 KAFKA_SERVICE_PORT=${KAFKA_SERVICE_PORT:-9092}
+KAFKA_SERVICE_TOPICS=${KAFKA_SERVICE_TOPICS:-metrics,events,alarm-state-transitions,alarm-notifications,retry-notifications,60-seconds-notifications}
+KAFKA_SERVICE_LOG_TOPICS=${KAFKA_SERVICE_LOG_TOPICS:-log,transformed-log}
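Note: these two comma-separated lists are what the new create/delete loops split on; ${VAR//,/ } is plain bash pattern substitution that replaces every comma with a space. For example:

    KAFKA_SERVICE_LOG_TOPICS=log,transformed-log
    for topic in ${KAFKA_SERVICE_LOG_TOPICS//,/ }; do echo "$topic"; done
    # prints "log", then "transformed-log"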
@@ -55,4 +55,4 @@ class HealthChecks(healthcheck_api.HealthCheckApi):
         res.status = (self.HEALTHY_CODE_GET
                       if health else self.NOT_HEALTHY_CODE)
         res.cache_control = self.CACHE_CONTROL
-        res.body = helpers.to_json(status_data)
+        res.text = helpers.to_json(status_data)
@@ -92,7 +92,7 @@ class BaseApiTestCase(BaseTestCase, testing.TestCase):
         # TODO(dszumski): Loading the app from api/server.py seems to make
         # more sense here so that we don't have to manually keep the tests in
         # sync with it.
-        self.app = falcon.API(request_type=request.Request)
+        self.app = falcon.App(request_type=request.Request)
         # NOTE(dszumski): Falcon 2.0.0 switches the default for this from True
         # to False so we explicitly set it here to prevent the behaviour
         # changing between versions.
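Note: newer Falcon releases renamed falcon.API to falcon.App, replaced Response.body with Response.text, and made the optional HTTPError arguments keyword-only; the Python changes throughout the rest of this commit all track that migration. A repo-wide check for stragglers (a sketch, run from the source tree root):

    # Any remaining assignment to the removed res.body attribute is a bug.
    grep -rn 'res\.body' monasca_api/ || echo "no res.body usages left"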
@@ -17,9 +17,21 @@ from falcon.http_error import HTTPError
 
 class HTTPUnprocessableEntityError(HTTPError):
     def __init__(self, title, description, **kwargs):
-        HTTPError.__init__(self, '422 Unprocessable Entity', title, description, **kwargs)
+        HTTPError.__init__(
+            self,
+            '422 Unprocessable Entity',
+            title=title,
+            description=description,
+            **kwargs
+        )
 
 
 class HTTPBadRequestError(HTTPError):
     def __init__(self, title, description, **kwargs):
-        HTTPError.__init__(self, '400 Bad Request', title, description, **kwargs)
+        HTTPError.__init__(
+            self,
+            '400 Bad Request',
+            title=title,
+            description=description,
+            **kwargs
+        )
@@ -79,7 +79,7 @@ class AlarmDefinitions(alarm_definitions_api_v2.AlarmDefinitionsV2API,
                                                   ok_actions)
 
         helpers.add_links_to_resource(result, req.uri)
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_201
 
     @resource.resource_try_catch_block
@@ -123,7 +123,7 @@ class AlarmDefinitions(alarm_definitions_api_v2.AlarmDefinitionsV2API,
                                re.sub('/' + alarm_definition_id, '',
                                       req.uri))
 
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     @resource.resource_try_catch_block
@@ -165,7 +165,7 @@ class AlarmDefinitions(alarm_definitions_api_v2.AlarmDefinitionsV2API,
 
         helpers.add_links_to_resource(
             result, re.sub('/' + alarm_definition_id, '', req.uri))
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     @resource.resource_try_catch_block
@@ -214,7 +214,7 @@ class AlarmDefinitions(alarm_definitions_api_v2.AlarmDefinitionsV2API,
 
         helpers.add_links_to_resource(
             result, re.sub('/' + alarm_definition_id, '', req.uri))
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     @resource.resource_try_catch_block
@@ -70,7 +70,7 @@ class Alarms(alarms_api_v2.AlarmsV2API,
 
         result = self._alarm_show(req.uri, req.project_id, alarm_id)
 
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     @resource.resource_try_catch_block
@@ -96,7 +96,7 @@ class Alarms(alarms_api_v2.AlarmsV2API,
 
         result = self._alarm_show(req.uri, req.project_id, alarm_id)
 
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     @resource.resource_try_catch_block
@@ -150,13 +150,13 @@ class Alarms(alarms_api_v2.AlarmsV2API,
                                       query_parms, offset,
                                       req.limit)
 
-            res.body = helpers.to_json(result)
+            res.text = helpers.to_json(result)
             res.status = falcon.HTTP_200
 
         else:
             result = self._alarm_show(req.uri, req.project_id, alarm_id)
 
-            res.body = helpers.to_json(result)
+            res.text = helpers.to_json(result)
             res.status = falcon.HTTP_200
 
     def _alarm_update(self, tenant_id, alarm_id, new_state, lifecycle_state,
@@ -395,7 +395,7 @@ class AlarmsCount(alarms_api_v2.AlarmsCountV2API, alarming.Alarming):
 
         result = self._alarms_count(req.uri, req.project_id, query_parms, offset, req.limit)
 
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     def _alarms_count(self, req_uri, tenant_id, query_parms, offset, limit):
@@ -487,7 +487,7 @@ class AlarmsStateHistory(alarms_api_v2.AlarmsStateHistoryV2API,
                                              req.uri, offset,
                                              req.limit)
 
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     def _alarm_history_list(self, tenant_id, start_timestamp,
@@ -42,11 +42,11 @@ def from_json(req):
     :raises falcon.HTTPBadRequest:
     """
     try:
-        return req.media
+        return req.get_media()
     except Exception as ex:
         LOG.exception(ex)
-        raise falcon.HTTPBadRequest('Bad request',
-                                    'Request body is not valid JSON')
+        raise falcon.HTTPBadRequest(title='Bad request',
+                                    description='Request body is not valid JSON')
 
 
 def to_json(data):
@@ -67,8 +67,9 @@ def to_json(data):
 
 def validate_json_content_type(req):
     if req.content_type not in ['application/json']:
-        raise falcon.HTTPBadRequest('Bad request', 'Bad content type. Must be '
-                                                   'application/json')
+        raise falcon.HTTPBadRequest(
+            title='Bad request',
+            description='Bad content type. Must be application/json')
 
 
 def validate_authorization(http_request, authorized_rules_list):
@@ -247,8 +248,8 @@ def get_query_endtime_timestamp(req, required=True):
 def validate_start_end_timestamps(start_timestamp, end_timestamp=None):
     if end_timestamp:
         if not start_timestamp < end_timestamp:
-            raise falcon.HTTPBadRequest('Bad request',
-                                        'start_time must be before end_time')
+            raise falcon.HTTPBadRequest(title='Bad request',
+                                        description='start_time must be before end_time')
 
 
 def _convert_time_string(date_time_string):
@@ -120,7 +120,7 @@ class Metrics(metrics_api_v2.MetricsV2API):
                                          dimensions, req.uri,
                                          offset, req.limit,
                                          start_timestamp, end_timestamp)
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
 
@@ -158,7 +158,7 @@ class MetricsMeasurements(metrics_api_v2.MetricsMeasurementsV2API):
                                         req.limit, merge_metrics_flag,
                                         group_by)
 
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     def _measurement_list(self, tenant_id, name, dimensions, start_timestamp,
@@ -215,7 +215,7 @@ class MetricsStatistics(metrics_api_v2.MetricsStatisticsV2API):
                                          offset, req.limit, merge_metrics_flag,
                                          group_by)
 
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     def _metric_statistics(self, tenant_id, name, dimensions, start_timestamp,
@@ -259,7 +259,7 @@ class MetricsNames(metrics_api_v2.MetricsNamesV2API):
         offset = helpers.get_query_param(req, 'offset')
         result = self._list_metric_names(tenant_id, dimensions,
                                          req.uri, offset, req.limit)
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     def _list_metric_names(self, tenant_id, dimensions, req_uri, offset,
@@ -297,7 +297,7 @@ class DimensionValues(metrics_api_v2.DimensionValuesV2API):
         result = self._dimension_values(tenant_id, req.uri, metric_name,
                                         dimension_name, offset, req.limit,
                                         start_timestamp, end_timestamp)
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     def _dimension_values(self, tenant_id, req_uri, metric_name,
@@ -338,7 +338,7 @@ class DimensionNames(metrics_api_v2.DimensionNamesV2API):
         result = self._dimension_names(tenant_id, req.uri, metric_name,
                                        offset, req.limit,
                                        start_timestamp, end_timestamp)
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     def _dimension_names(self, tenant_id, req_uri, metric_name, offset, limit,
@@ -55,7 +55,7 @@ class Notifications(notifications_api_v2.NotificationsV2API):
                 notification, self.valid_periods, require_all=require_all)
         except schemas_exceptions.ValidationException as ex:
             LOG.exception(ex)
-            raise falcon.HTTPBadRequest('Bad Request', str(ex))
+            raise falcon.HTTPBadRequest(title='Bad Request', description=str(ex))
 
     def _validate_name_not_conflicting(self, tenant_id, name, expected_id=None):
         try:
@@ -90,8 +90,9 @@ class Notifications(notifications_api_v2.NotificationsV2API):
                       "Found no notification method type {}."
                       "Did you install/enable the plugin for that type?"
                       .format(nmt))
-            raise falcon.HTTPBadRequest('Bad Request', "Not a valid notification method type {} "
-                                        .format(nmt))
+            raise falcon.HTTPBadRequest(
+                title='Bad Request',
+                description="Not a valid notification method type {} ".format(nmt))
 
     def _create_notification(self, tenant_id, notification, uri):
@@ -208,7 +209,7 @@ class Notifications(notifications_api_v2.NotificationsV2API):
         notification = helpers.from_json(req)
         self._parse_and_validate_notification(notification)
         result = self._create_notification(req.project_id, notification, req.uri)
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_201
 
     @resource.resource_try_catch_block
@@ -243,7 +244,7 @@ class Notifications(notifications_api_v2.NotificationsV2API):
                                            notification_method_id,
                                            req.uri)
 
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     @resource.resource_try_catch_block
@@ -260,7 +261,7 @@ class Notifications(notifications_api_v2.NotificationsV2API):
         self._parse_and_validate_notification(notification, require_all=True)
         result = self._update_notification(notification_method_id, req.project_id,
                                            notification, req.uri)
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
 
     @resource.resource_try_catch_block
@@ -272,5 +273,5 @@ class Notifications(notifications_api_v2.NotificationsV2API):
         self._parse_and_validate_notification(notification, require_all=True)
         result = self._update_notification(notification_method_id, req.project_id,
                                            notification, req.uri)
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
@@ -40,5 +40,5 @@ class NotificationsType(notificationstype_api_v2.NotificationsTypeV2API):
         # are not that many rows
         result = self._list_notifications(req.uri, req.limit)
 
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
         res.status = falcon.HTTP_200
@@ -34,24 +34,25 @@ def resource_try_catch_block(fun):
             raise falcon.HTTPNotFound
 
         except exceptions.MultipleMetricsException as ex:
-            raise falcon.HTTPConflict("MultipleMetrics", str(ex))
+            raise falcon.HTTPConflict(title="MultipleMetrics", description=str(ex))
 
         except exceptions.AlreadyExistsException as ex:
-            raise falcon.HTTPConflict(ex.__class__.__name__, str(ex))
+            raise falcon.HTTPConflict(title=ex.__class__.__name__, description=str(ex))
 
         except exceptions.InvalidUpdateException as ex:
-            raise HTTPUnprocessableEntityError(ex.__class__.__name__, str(ex))
+            raise HTTPUnprocessableEntityError(title=ex.__class__.__name__, description=str(ex))
 
         except exceptions.RepositoryException as ex:
             LOG.exception(ex)
             msg = " ".join(map(str, ex.args[0].args))
-            raise falcon.HTTPInternalServerError('The repository was unable '
-                                                 'to process your request',
-                                                 msg)
+            raise falcon.HTTPInternalServerError(
+                title='The repository was unable to process your request',
+                description=msg
+            )
 
         except Exception as ex:
             LOG.exception(ex)
-            raise falcon.HTTPInternalServerError('Service unavailable',
-                                                 str(ex))
+            raise falcon.HTTPInternalServerError(title='Service unavailable',
+                                                 description=str(ex))
 
     return try_it
@@ -34,4 +34,4 @@ class Version2(object):
             'status': 'CURRENT',
             'updated': "2013-03-06T00:00:00.000Z"
         }
-        res.body = helpers.to_json(result)
+        res.text = helpers.to_json(result)
@@ -53,12 +53,12 @@ class Versions(versions_api.VersionsAPI):
                 VERSIONS[version]['links'][0]['href'] = (
                     req_uri + version)
                 result['elements'].append(VERSIONS[version])
-            res.body = helpers.to_json(result)
+            res.text = helpers.to_json(result)
             res.status = falcon.HTTP_200
         else:
             if version_id in VERSIONS:
                 VERSIONS[version_id]['links'][0]['href'] = req_uri
-                res.body = helpers.to_json(VERSIONS[version_id])
+                res.text = helpers.to_json(VERSIONS[version_id])
                 res.status = falcon.HTTP_200
             else:
                 raise HTTPUnprocessableEntityError('Invalid version',
@@ -4,9 +4,9 @@ env
 # Download maven 3 if the system maven isn't maven 3
 VERSION=`mvn -v | grep "Apache Maven 3"`
 if [ -z "${VERSION}" ]; then
-    curl http://archive.apache.org/dist/maven/binaries/apache-maven-3.2.1-bin.tar.gz > apache-maven-3.2.1-bin.tar.gz
-    tar -xvzf apache-maven-3.2.1-bin.tar.gz
-    MVN=${PWD}/apache-maven-3.2.1/bin/mvn
+    curl https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz > apache-maven-3.9.9-bin.tar.gz
+    tar -xvzf apache-maven-3.9.9-bin.tar.gz
+    MVN=${PWD}/apache-maven-3.9.9/bin/mvn
 else
     MVN=mvn
 fi
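Note: dlcdn.apache.org only carries current releases, while older versions live on archive.apache.org, so the bumped URL will break again once 3.9.9 rotates out. A hedged extra step worth considering, since Apache publishes checksum files alongside release artifacts:

    # Verify the download against the published SHA-512 (assumes the .sha512
    # sidecar file exists for this release, which is Apache's usual practice).
    curl -fsSL https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz.sha512
    sha512sum apache-maven-3.9.9-bin.tar.gz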
@@ -55,5 +55,5 @@ if [ $RUN_BUILD = "true" ]; then
     fi
 fi
 
-rm -fr apache-maven-3.2.1*
+rm -fr apache-maven-3.9.9*
 exit $RC
@@ -6,7 +6,7 @@ bandit>=1.1.0 # Apache-2.0
 bashate>=0.5.1 # Apache-2.0
 hacking>=3.0.1,<3.1.0 # Apache-2.0
 Babel!=2.4.0,>=2.3.4 # BSD
-coverage!=4.4,>=4.0 # Apache-2.0
+coverage>=4.4.1 # Apache-2.0
 cassandra-driver!=3.6.0,>=3.3.0 # Apache-2.0
 fixtures>=3.0.0 # Apache-2.0/BSD
 httplib2>=0.9.1 # MIT