From dc55d6322292e9a4393781848ab75779a1e2115d Mon Sep 17 00:00:00 2001
From: Alexander Chadin <a.chadin@servionica.ru>
Date: Fri, 4 Aug 2017 15:37:54 +0300
Subject: [PATCH] Fix specification warnings

This patch set fixes all specification warnings and sets the
warning-is-error flag to True.

Change-Id: I9a6132cfd0d010db21c148be53b6bb5523eb6f5c
---
 doc/source/conf.py                                 |  1 +
 requirements.txt                                   |  6 +-
 setup.cfg                                          |  1 +
 specs/newton/implemented/sd-strategy.rst           |  4 +-
 .../implemented/watcher-notifications-ovo.rst      |  2 +-
 .../approved/build-baremetal-data-model.rst        |  4 +-
 .../workload-characterization-grammar.rst          |  2 +-
 .../add-power-on-and-off-in-watcher.rst            |  1 +
 .../implemented/audit-tag-vm-metadata.rst          | 15 ++-
 specs/pike/implemented/cancel-action-plan.rst      | 15 +--
 .../dynamic-action-description.rst                 |  1 +
 .../implemented/jsonschema-validation.rst          |  1 +
 .../implemented/noisy_neighbor_strategy.rst        |  4 +-
 .../services-versioned-notifications-api.rst       | 98 +++++++++----------
 specs/pike/implemented/stale-action-plan.rst       |  2 +-
 15 files changed, 83 insertions(+), 74 deletions(-)

diff --git a/doc/source/conf.py b/doc/source/conf.py
index bd82021..93b0331 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -32,6 +32,7 @@ extensions = ['redirect',
               'sphinx.ext.autodoc',
               'sphinx.ext.todo',
               'sphinx.ext.viewcode',
+              'sphinx.ext.mathjax',
               'oslosphinx',
               'yasfb',
               ]
diff --git a/requirements.txt b/requirements.txt
index ea04bc1..74e9502 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 oslosphinx
-pbr>=0.6,<1.0
-sphinx>=1.1.2,<1.2
+pbr>=2.0.0,!=2.1.0 # Apache-2.0
+sphinx>=1.6.2 # BSD
 testrepository>=0.0.18
-testtools>=0.9.34
+testtools>=1.4.0
 yasfb>=0.5.1
 doc8 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 7266910..983b5ab 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -15,6 +15,7 @@ classifier =
 all_files = 1
 build-dir = doc/build
 source-dir = doc/source
+warning-is-error = 1
 
 [pbr]
 warnerrors = False
diff --git a/specs/newton/implemented/sd-strategy.rst b/specs/newton/implemented/sd-strategy.rst
index fa0bc70..02458e2 100644
--- a/specs/newton/implemented/sd-strategy.rst
+++ b/specs/newton/implemented/sd-strategy.rst
@@ -89,13 +89,13 @@ probable difference in the number of node vcpus. Therefore we transform
 VM CPU utilization to overall host CPU utilization. The following
 pseudocode shows the right way of transforming:
 
-:math:`cpu_impact = cpu_util * vm_vcpus/host_vcpus`
+:math:`cpu_{impact} = cpu_{util} * vm_{vcpus}/host_{vcpus}`
 
 After that the load values of nodes are normalised and the new standard
 deviation is calculated with them. The total standard deviation is
 calculated as weighted arithmetic mean:
 
-:math:`\sum weight_metric*sd_metric`, where:
+:math:`\sum weight_{metric}*sd_{metric}`, where:
 
 * weight_metric is the weight of metric. The value of weight is to be in
   range(0, 1). Each weight of metric is set by `watcher input parameters`_.
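As an aside, here is a minimal Python sketch of the two formulas in the
sd-strategy hunk above: scaling VM CPU utilization to host scale, then
combining per-metric standard deviations as a weighted arithmetic mean.
Names and sample numbers are illustrative only, not Watcher's actual
implementation.

.. code-block:: python

    import statistics

    def cpu_impact(cpu_util, vm_vcpus, host_vcpus):
        """Scale a VM's CPU utilization to overall host CPU utilization."""
        return cpu_util * vm_vcpus / host_vcpus

    def total_standard_deviation(sd_by_metric, weight_by_metric):
        """Weighted arithmetic mean of per-metric standard deviations.

        Each weight is expected to fall in the range (0, 1).
        """
        return sum(weight_by_metric[m] * sd for m, sd in sd_by_metric.items())

    # Normalised host loads per metric, their deviations, and the total.
    loads = {'cpu_util': [0.2, 0.5, 0.8], 'memory': [0.3, 0.3, 0.9]}
    sds = {m: statistics.pstdev(v) for m, v in loads.items()}
    total = total_standard_deviation(sds, {'cpu_util': 0.7, 'memory': 0.3})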
diff --git a/specs/ocata/implemented/watcher-notifications-ovo.rst b/specs/ocata/implemented/watcher-notifications-ovo.rst
index 54d1201..24b350c 100644
--- a/specs/ocata/implemented/watcher-notifications-ovo.rst
+++ b/specs/ocata/implemented/watcher-notifications-ovo.rst
@@ -70,7 +70,7 @@ number that will facilitate retro-compatibility whenever adding new fields.
 The basic structure for all notifications will be the same as the one that is
 used in Nova for the `versioned-notification-api blueprint`_, i.e.:
 
-.. code-block:: json
+.. code-block:: none
 
     {
         "priority": "INFO",
diff --git a/specs/pike/approved/build-baremetal-data-model.rst b/specs/pike/approved/build-baremetal-data-model.rst
index 179c0a1..aba21ea 100644
--- a/specs/pike/approved/build-baremetal-data-model.rst
+++ b/specs/pike/approved/build-baremetal-data-model.rst
@@ -99,7 +99,7 @@ Proposed change
 2) ironic node-create
 
 ironic node-create [-c <chassis>]
-                   -d <driver> 
+                   -d <driver>
                    [-i <key=value>]
                    [-p <key=value>]
                    [-e <key=value>]
@@ -113,6 +113,7 @@ ironic node-create -d your_driver agent_ipmitool \
    -i ipmi_password=<ipmi_password> \
    -e compute_node=compute.node.id \
    -u ironic_node_uuid
+
 
 compute.node.id is the compute node ID saved in CCDM
 (Compute Cluster Data Model)
@@ -120,6 +121,7 @@ compute.node.id is the compute node ID saved in CCDM
 
 2) On watcher, build the Baremetal Cluster Data Model (BCDM) by periodically
 requesting Ironic service. The Baremetal Cluster Data Model structure is
 shown as followings
+
 { "uuid": "4f37180e-c310-4327-a286-d5ab9ffc6497",
   "power_state": "power on",
diff --git a/specs/pike/approved/workload-characterization-grammar.rst b/specs/pike/approved/workload-characterization-grammar.rst
index 325fcf5..22068e0 100644
--- a/specs/pike/approved/workload-characterization-grammar.rst
+++ b/specs/pike/approved/workload-characterization-grammar.rst
@@ -129,7 +129,7 @@ Example :
 
 **Parse and validate workload grammar**
 
-Use `TOSCA-Parser_ module to parse a TOSCA serialized workload
+Use the `TOSCA-Parser`_ module to parse a TOSCA serialized workload
 grammar and validate it by applying values from telemetry or other
 metrics.
 
diff --git a/specs/pike/implemented/add-power-on-and-off-in-watcher.rst b/specs/pike/implemented/add-power-on-and-off-in-watcher.rst
index 3a108c2..89b8fe1 100644
--- a/specs/pike/implemented/add-power-on-and-off-in-watcher.rst
+++ b/specs/pike/implemented/add-power-on-and-off-in-watcher.rst
@@ -17,6 +17,7 @@ without VMs. And when the workload increases watcher will trigger a "power on"
 request to fullfill the service requirements.
 
 This feature includes four sub-features:
+
 * Build a new baremetal data model in Watcher.
 * Add new actions "power on" and "power off" in Watcher.
 * Implement a new strategy based on 'baremetal' and 'compute' data models,
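As an aside, a rough sketch of the linkage the baremetal hunks above
describe: the ``compute_node`` extra recorded at ``ironic node-create``
time ties a BCDM entry back to its compute node in the CCDM. The
dictionaries and the helper below are illustrative only, not the spec's
real data structures.

.. code-block:: python

    # Illustrative BCDM entries; only fields quoted in the spec are shown.
    baremetal_nodes = [
        {'uuid': '4f37180e-c310-4327-a286-d5ab9ffc6497',
         'power_state': 'power on',
         'extra': {'compute_node': 'compute.node.id'}},
    ]

    # Illustrative CCDM lookup keyed by compute node ID.
    compute_nodes = {'compute.node.id': {'hostname': 'compute-0'}}

    def compute_node_for(bm_node):
        """Resolve the CCDM compute node a baremetal node is linked to."""
        return compute_nodes.get(bm_node['extra'].get('compute_node'))

    for node in baremetal_nodes:
        print(node['uuid'], '->', compute_node_for(node))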
diff --git a/specs/pike/implemented/audit-tag-vm-metadata.rst b/specs/pike/implemented/audit-tag-vm-metadata.rst
index d07deed..14f5066 100644
--- a/specs/pike/implemented/audit-tag-vm-metadata.rst
+++ b/specs/pike/implemented/audit-tag-vm-metadata.rst
@@ -43,7 +43,7 @@ the Audit Scope to exclude VMs by metadata.
 
 Example -
 
-.. code-block:: json
+.. code-block:: none
 
     "scope": [
         {"host_aggregates": [
@@ -135,19 +135,19 @@ Work Items
 ----------
 
 * Platform owners or automations external to watcher will set the VM Metadata
 
-.. code-block:: json
+.. code-block:: none
 
     # nova meta vm_name set optimize=True
 
 * Enhance the current compute cluster data model to now include the VM
-metadata in its representation.
+  metadata in its representation.
 
 * Capability in ``Audit Scope`` to exclude by instance_metadata
-(https://github.com/openstack/watcher/blob/54f0758fc3ac47edb4bc3f6eb5e56bf53d4e02f8/watcher/decision_engine/scope/default.py).
+  (https://github.com/openstack/watcher/blob/54f0758fc3ac47edb4bc3f6eb5e56bf53d4e02f8/watcher/decision_engine/scope/default.py).
 
 * Modify base strategy to filter VMs by metadata based on configuration
-option
+  option
 
 .. code-block:: python
 
@@ -178,7 +178,7 @@ Testing
 
 * Unit tests on watcher `Audit`_ and `Strategy`_
 * Unit tests for audit scope. Also tempest scenarios to create an Audit
-Template and define an Audit scope with "instance_metadata"
+  Template and define an Audit scope with "instance_metadata"
 
 Documentation Impact
 ====================
@@ -196,6 +196,5 @@ History
 N/A
 
 .. _Audit: http://docs.openstack.org/developer/watcher/glossary.html#audit
-.. _Strategy:
-http://docs.openstack.org/developer/watcher/glossary.html#strategies
+.. _Strategy: http://docs.openstack.org/developer/watcher/glossary.html#strategies
 .. _Audit Scope: https://specs.openstack.org/openstack/watcher-specs/specs/newton/approved/define-the-audit-scope.html
diff --git a/specs/pike/implemented/cancel-action-plan.rst b/specs/pike/implemented/cancel-action-plan.rst
index 8755ab1..cdc38c0 100644
--- a/specs/pike/implemented/cancel-action-plan.rst
+++ b/specs/pike/implemented/cancel-action-plan.rst
@@ -127,12 +127,15 @@ Work Items
   method for all the tasks(actions) added in the flow. In the revert
   method of action, current action state is checked and based on the action
   state (PENDING, ONGOING, SUCCEEDED) revert method will execute defined steps.
-  e.g. if action_state is PENDING:
-       # update action_state to CANCELLED
-       if action_state is ONGOING:
-       # call abort_action
-       if action state is SUCCEEDED:
-       # do nothing
+
+  .. code-block:: python
+
+     if action_state is PENDING:
+         # update action_state to CANCELLED
+     if action_state is ONGOING:
+         # call abort_action
+     if action_state is SUCCEEDED:
+         # do nothing
 
 * Before starting any new action watcher-applier will check for the state
   of action plan, if its state is updated to CANCELLING it will trigger an
diff --git a/specs/pike/implemented/dynamic-action-description.rst b/specs/pike/implemented/dynamic-action-description.rst
index d8336a9..04767d0 100644
--- a/specs/pike/implemented/dynamic-action-description.rst
+++ b/specs/pike/implemented/dynamic-action-description.rst
@@ -126,6 +126,7 @@ Alternatives
     "event_type": "action_info.emit",
     "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6"
 }
+
 The implementation is as follows:
 https://review.openstack.org/#/c/454638/
 But in my test, The number of received notifications is often less than
diff --git a/specs/pike/implemented/jsonschema-validation.rst b/specs/pike/implemented/jsonschema-validation.rst
index c46a0bc..a3f96ef 100644
--- a/specs/pike/implemented/jsonschema-validation.rst
+++ b/specs/pike/implemented/jsonschema-validation.rst
@@ -104,6 +104,7 @@ Work Items
 ----------
 
 * Remove voluptuous schemas
+
   As of now, voluptuous is used to:
   - validate the input parameters of a Watcher action when it gets to the
     Applier.
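The revert-based cancellation in the cancel-action-plan hunk above could
be sketched with TaskFlow along these lines; the action object, its state
attribute and its abort() helper are hypothetical stand-ins, not Watcher's
real classes.

.. code-block:: python

    from taskflow import task

    # Hypothetical state constants; Watcher's actual implementation
    # differs in naming and persistence.
    PENDING, ONGOING, SUCCEEDED, CANCELLED = (
        'PENDING', 'ONGOING', 'SUCCEEDED', 'CANCELLED')

    class ActionTask(task.Task):
        """One workflow task per action; revert() implements cancellation."""

        def __init__(self, action, **kwargs):
            super(ActionTask, self).__init__(name=action.uuid, **kwargs)
            self.action = action

        def execute(self):
            self.action.state = ONGOING
            self.action.apply()              # perform the real action
            self.action.state = SUCCEEDED

        def revert(self, *args, **kwargs):
            # Runs when the flow is reverted after the plan turns CANCELLING.
            if self.action.state == PENDING:
                self.action.state = CANCELLED
            elif self.action.state == ONGOING:
                self.action.abort()          # i.e. call abort_action
            elif self.action.state == SUCCEEDED:
                pass                         # per the spec: do nothing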
diff --git a/specs/pike/implemented/noisy_neighbor_strategy.rst b/specs/pike/implemented/noisy_neighbor_strategy.rst
index 97d2ac5..7f906c4 100644
--- a/specs/pike/implemented/noisy_neighbor_strategy.rst
+++ b/specs/pike/implemented/noisy_neighbor_strategy.rst
@@ -52,7 +52,7 @@ Proposed change
   solution"
 
 * Use Ceilometer client to get following metrics for detecting LLC noisy
-neighbor:
+  neighbor:
 
   * cpu_l3_cache -- LLC occupancy of a VM
 
@@ -61,7 +61,7 @@ neighbor:
 * Monitor L3 cache of all VMs in order of their "watcher-priority", that is
   set in VM metadata.
   Example: L3 cache of VM with watcher-priority as "1" will be monitored
-  before VM with watcher-priority as "8". 
+  before VM with watcher-priority as "8".
 
 * If L3 cache of a VM goes down by more than the threshold, mark it as high
   priority. Then start monitoring L3 cache of VMs in reverse order of
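A small sketch of the monitoring order this strategy describes; the
instance records and the threshold test are simplified stand-ins, with
cpu_l3_cache values as would be fetched through the Ceilometer client.

.. code-block:: python

    def by_watcher_priority(instances, reverse=False):
        """Order VMs by the "watcher-priority" set in their metadata."""
        return sorted(instances,
                      key=lambda vm: int(vm['metadata']['watcher-priority']),
                      reverse=reverse)

    # Simplified instance records; cpu_l3_cache is the LLC occupancy metric.
    vms = [
        {'name': 'vm-a', 'metadata': {'watcher-priority': '8'},
         'cpu_l3_cache': 1200},
        {'name': 'vm-b', 'metadata': {'watcher-priority': '1'},
         'cpu_l3_cache': 5400},
    ]

    def llc_dropped(previous, current, threshold=0.25):
        """True when LLC occupancy fell by more than the given fraction."""
        return (previous - current) / float(previous) > threshold

    # Monitor highest-priority VMs first ("1" before "8"); once one is
    # marked high priority, re-scan the rest in reverse order.
    for vm in by_watcher_priority(vms):
        pass  # fetch cpu_l3_cache for vm and apply llc_dropped()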
diff --git a/specs/pike/implemented/services-versioned-notifications-api.rst b/specs/pike/implemented/services-versioned-notifications-api.rst
index e868ac1..969fd26 100644
--- a/specs/pike/implemented/services-versioned-notifications-api.rst
+++ b/specs/pike/implemented/services-versioned-notifications-api.rst
@@ -54,63 +54,63 @@ Here below is suggestion of background job structure:
 
 .. code-block:: python
 
-class APISchedulingService(scheduling.BackgroundSchedulerService):
+    class APISchedulingService(scheduling.BackgroundSchedulerService):
 
-    def __init__(self, gconfig=None, **options):
-        self.services_status = {}
-        gconfig = None or {}
-        super(APISchedulingService, self).__init__(gconfig, **options)
+        def __init__(self, gconfig=None, **options):
+            self.services_status = {}
+            gconfig = gconfig or {}
+            super(APISchedulingService, self).__init__(gconfig, **options)
 
-    def get_services_status(self, context):
-        services = objects.service.Service.list(context)
-        for service in services:
-            result = self.get_service_status(context, service.name)
-            if service.id not in self.services_status.keys():
-                self.services_status[service.id] = result
-                continue
-            if self.services_status[service.id] != result:
-                self.services_status[service.id] = result
-                notifications.service.send_service_update(context, service,
-                                                           state=result)
+        def get_services_status(self, context):
+            services = objects.service.Service.list(context)
+            for service in services:
+                result = self.get_service_status(context, service.name)
+                if service.id not in self.services_status.keys():
+                    self.services_status[service.id] = result
+                    continue
+                if self.services_status[service.id] != result:
+                    self.services_status[service.id] = result
+                    notifications.service.send_service_update(context, service,
+                                                               state=result)
 
-    def get_service_status(self, context, name):
-        service = objects.Service.get(context, id)
-        last_heartbeat = (service.last_seen_up or service.updated_at
-                          or service.created_at)
-        if isinstance(last_heartbeat, six.string_types):
-            last_heartbeat = timeutils.parse_strtime(last_heartbeat)
-        else:
-            last_heartbeat = last_heartbeat.replace(tzinfo=None)
-        elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
-        is_up = abs(elapsed) <= CONF.service_down_time
-        if not is_up:
-            LOG.warning(_LW('Seems service %(name)s on host %(host)s is down. '
-                            'Last heartbeat was %(lhb)s.'
-                            'Elapsed time is %(el)s'),
-                        {'name': service.name,
-                         'host': service.host,
-                         'lhb': str(last_heartbeat), 'el': str(elapsed)})
-            return objects.service.ServiceStatus.FAILED
+        def get_service_status(self, context, name):
+            service = objects.Service.get(context, name)
+            last_heartbeat = (service.last_seen_up or service.updated_at
+                              or service.created_at)
+            if isinstance(last_heartbeat, six.string_types):
+                last_heartbeat = timeutils.parse_strtime(last_heartbeat)
+            else:
+                last_heartbeat = last_heartbeat.replace(tzinfo=None)
+            elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
+            is_up = abs(elapsed) <= CONF.service_down_time
+            if not is_up:
+                LOG.warning(_LW('Seems service %(name)s on host %(host)s is down. '
+                                'Last heartbeat was %(lhb)s. '
+                                'Elapsed time is %(el)s'),
+                            {'name': service.name,
+                             'host': service.host,
+                             'lhb': str(last_heartbeat), 'el': str(elapsed)})
+                return objects.service.ServiceStatus.FAILED
 
-        return objects.service.ServiceStatus.ACTIVE
+            return objects.service.ServiceStatus.ACTIVE
 
-    def start(self):
-        """Start service."""
-        context = watcher_context.make_context(is_admin=True)
-        self.add_job(self.get_services_status, name='service_status',
-                     trigger='interval', jobstore='default', args=[context],
-                     next_run_time=datetime.datetime.now(), seconds=60)
-        super(APISchedulingService, self).start()
+        def start(self):
+            """Start service."""
+            context = watcher_context.make_context(is_admin=True)
+            self.add_job(self.get_services_status, name='service_status',
+                         trigger='interval', jobstore='default', args=[context],
+                         next_run_time=datetime.datetime.now(), seconds=60)
+            super(APISchedulingService, self).start()
 
-    def stop(self):
-        """Stop service."""
-        self.shutdown()
+        def stop(self):
+            """Stop service."""
+            self.shutdown()
 
-    def wait(self):
-        """Wait for service to complete."""
+        def wait(self):
+            """Wait for service to complete."""
 
-    def reset(self):
-        """Reset service."""
+        def reset(self):
+            """Reset service."""
 
 
 Moreover, we will rely on `oslo.versionedobjects`_ to version the payloads of
diff --git a/specs/pike/implemented/stale-action-plan.rst b/specs/pike/implemented/stale-action-plan.rst
index 073d819..b6bebd1 100644
--- a/specs/pike/implemented/stale-action-plan.rst
+++ b/specs/pike/implemented/stale-action-plan.rst
@@ -264,7 +264,7 @@ Work Items
 Here is the list of foreseen work items:
 
 * Add config parameter **action_plan_expiry** and
-  **check_periodic_interval**(SCENARIO_1).
+  **check_periodic_interval** (SCENARIO_1).
 * Modify the **DefaultActionPlanHandler** class, trigger to check the state
   of action plan(SCENARIO_2).
 * Currently, when receives the specific event (such as nova notification
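Finally, the two parameters named in the stale-action-plan hunk could be
declared with oslo.config roughly as follows; the defaults and the option
group are illustrative, as the spec does not fix them.

.. code-block:: python

    from oslo_config import cfg

    stale_plan_opts = [
        cfg.IntOpt('action_plan_expiry',
                   default=24,
                   help='Hours after which an unstarted action plan is '
                        'considered stale (SCENARIO_1).'),
        cfg.IntOpt('check_periodic_interval',
                   default=30 * 60,
                   help='Seconds between two periodic checks for stale '
                        'action plans (SCENARIO_1).'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(stale_plan_opts, group='watcher_decision_engine')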