Fix doc build warnings

Enable treating warnings as errors so that no new broken content gets
merged. Remove the obsolete way of doing it; it's a no-op.

Fix bugs found:
* Add missing Sphinx extension modules to properly generate content.
* Fix misformatting of comments and files.
* Do not index the Windows modules; they import modules that do not
  exist on Linux.
* Place API docs in reference/api according to
  http://specs.openstack.org/openstack/docs-specs/specs/pike/os-manuals-migration.html
  and reference them. Update .gitignore accordingly.
* Include missing files in the tree.

Change-Id: I57d3124ac9571f189cd0e10e4cf7e805b8e09045
Author: Andreas Jaeger, 2017-09-22 09:08:54 +02:00
Parent: e63a2fc853
Commit: 037cf3fa81
14 changed files with 99 additions and 79 deletions
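
For reference, the strict build this commit enables can be reproduced locally with sphinx-build's -W flag; a minimal sketch in Python, assuming the standard doc/source layout used below:

# Reproduce the strict docs build locally: -W turns every Sphinx warning
# into an error, mirroring warning-is-error = 1 in setup.cfg below.
import subprocess
import sys

result = subprocess.run(
    ["sphinx-build", "-W", "-b", "html", "doc/source", "doc/build/html"]
)
sys.exit(result.returncode)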

doc/.gitignore
View File

@@ -1,3 +1,2 @@
build/
source/ref/
source/api/
source/reference/api/

View File

@@ -1,5 +1,39 @@
[DEFAULT]
#
# From freezer-scheduler
#
# Specifies the client_id used when contacting the service.
# If not specified it will be automatically created
# using the tenant-id and the machine hostname. (string value)
#client_id = <None>
# Prevents the scheduler from using the api service (boolean value)
#no_api = false
# Used to store/retrieve files on local storage, including those exchanged with
# the api service. Default value is /etc/freezer/scheduler/conf.d (Env:
# FREEZER_SCHEDULER_CONF_D) (string value)
#jobs_dir = /etc/freezer/scheduler/conf.d
# Specifies the api-polling interval in seconds. Defaults to 60 seconds
# (integer value)
#interval = 60
# Prevents the scheduler from running in daemon mode (boolean value)
#no_daemon = false
# Initialize freezer scheduler with insecure mode (boolean value)
#insecure = false
# Allow Freezer Scheduler to deny jobs that execute commands for security
# reasons (boolean value)
#disable_exec = false
# Number of jobs that can be executed at the same time (integer value)
#concurrent_jobs = 1
#
# From oslo.log
#
@@ -108,37 +142,3 @@
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
#
# From scheduler
#
# Specifies the client_id used when contacting the service.
# If not specified it will be automatically created
# using the tenant-id and the machine hostname. (string value)
#client_id = <None>
# Prevents the scheduler from using the api service (boolean value)
#no_api = false
# Used to store/retrieve files on local storage, including those exchanged with
# the api service. Default value is /etc/freezer/scheduler/conf.d (Env:
# FREEZER_SCHEDULER_CONF_D) (string value)
#jobs_dir = /etc/freezer/scheduler/conf.d
# Specifies the api-polling interval in seconds. Defaults to 60 seconds
# (integer value)
#interval = 60
# Prevents the scheduler from running in daemon mode (boolean value)
#no_daemon = false
# Initialize freezer scheduler with insecure mode (boolean value)
#insecure = false
# Allow Freezer Scheduler to deny jobs that execute commands for security
# reasons (boolean value)
#disable_exec = false
# Number of jobs that can be executed at the same time (integer value)
#concurrent_jobs = 1

View File

@@ -431,7 +431,7 @@ OPTIONS
:Type: string
:Default: ``<None>``
Exclude files,given as a PATTERN.Ex: --exclude '*.log' will exclude any file with name ending with .log. Default no exclude
Exclude files, given as a PATTERN.Ex: --exclude '\*.log' will exclude any file with name ending with .log. Default no exclude
.. oslo.config:option:: dereference_symlink

View File

@@ -41,6 +41,8 @@ import sys
extensions = ['openstackdocstheme',
'oslo_config.sphinxconfiggen',
'oslo_config.sphinxext',
'sphinx.ext.autodoc',
]
config_generator_config_file = (
@@ -99,6 +101,15 @@ bug_tag = 'doc'
# # List of directories, relative to source directory, that shouldn't be searched
# # for source files.
# exclude_trees = []
# A list of glob-style patterns that should be excluded when looking for
# source files. They are matched against the source file names relative to the
# source directory, using slashes as directory separators on all platforms.
exclude_patterns = [
# Missing win32serviceutil module on linux
#'api/freezer.scheduler.win_daemon*',
]
#
# # The reST default role (used for this markup: `text`) to use
# # for all documents.

View File

@@ -1,13 +1,8 @@
.. Freezer documentation master file, created by
sphinx-quickstart on Thu Feb 4 22:27:35 2016.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to Freezer's documentation!
===================================
Install Guides
-----------
--------------
.. toctree::
:maxdepth: 2
@@ -20,6 +15,7 @@ User Guides
:maxdepth: 2
user/index
cli/index
Admin Guides
------------
@@ -34,7 +30,7 @@ Dev Guides
:maxdepth: 2
contributor/index
reference/index
About Freezer
=============

View File

@@ -0,0 +1,7 @@
Reference
=========
.. toctree::
:maxdepth: 2
api/autoindex

View File

@@ -50,24 +50,28 @@ class BackupEngine(object):
This class is an abstraction over all implementations.
Workflow:
1) invoke backup
1.1) try to download metadata for incremental
1.2) create a dataflow between backup_stream and storage.write_backup
Backup_stream is producer of data, for tar backup
it creates a gnutar subprocess and start to read data from stdout
Storage write_backup is consumer of data, it creates a thread
that store data in storage.
Both streams communicate in non-blocking mode
1.3) invoke post_backup - now it uploads metadata file
1) try to download metadata for incremental
2) create a dataflow between backup_stream and storage.write_backup
Backup_stream is producer of data, for tar backup
it creates a gnutar subprocess and start to read data from stdout
Storage write_backup is consumer of data, it creates a thread
that store data in storage.
Both streams communicate in non-blocking mode
3) invoke post_backup - now it uploads metadata file
2) restore backup
2.1) define all incremental backups
2.2) for each incremental backup create a dataflow between
storage.read_backup and restore_stream
Read_backup is data producer, it reads data chunk by chunk from
the specified storage and pushes the chunks into a queue.
Restore stream is a consumer, that is actually does restore (for
tar it is a thread that creates gnutar subprocess and feeds chunks
to stdin of this thread.
1) define all incremental backups
2) for each incremental backup create a dataflow between
storage.read_backup and restore_stream
Read_backup is data producer, it reads data chunk by chunk from
the specified storage and pushes the chunks into a queue.
Restore stream is a consumer, that is actually does restore (for
tar it is a thread that creates gnutar subprocess and feeds chunks
to stdin of this thread.
:type storage: freezer.storage.base.Storage
"""

View File

@@ -601,7 +601,7 @@ class RsyncEngine(engine.BackupEngine):
Return the meta data as a dict structure and a binary string
:param fs_path: file abs path
:param new_level
:param new_level:
:return: file data structure
"""
@@ -741,7 +741,7 @@ class RsyncEngine(engine.BackupEngine):
"""Compute the file or fs tree path signatures.
:param fs_path:
:param manifest_path
:param manifest_path:
:param write_queue:
:return:
"""

View File

@@ -151,7 +151,7 @@ class Rsyncv2Engine(engine.BackupEngine):
"""Restore the provided backup into restore_abs_path.
Decrypt backup content if encrypted.
Freezer rsync header data structure:
Freezer rsync header data structure::
[ {
'path': '' (path to file),
@@ -652,7 +652,7 @@ class Rsyncv2Engine(engine.BackupEngine):
Return blocks of changed data.
:param fs_path:
:param manifest_path
:param manifest_path:
:param write_queue:
:return:
"""

View File

@@ -223,9 +223,10 @@ class RestoreOs(object):
2) Download and upload to glance
3) Create volume from glance
4) Delete
:param restore_from_timestamp:
:type restore_from_timestamp: int
:param volume_id - id of attached cinder volume
:param volume_id: - id of attached cinder volume
"""
(info, image) = self._create_image(volume_id, restore_from_timestamp)
length = int(info["x-object-meta-length"])

View File

@@ -195,7 +195,7 @@ def get_lvm_info(path):
where the path is mounted on.
:param path: the original file system path where backup needs
to be executed
to be executed
:returns: a dict containing the keys 'volgroup', 'srcvol' and 'snap_path'
"""

View File

@@ -194,14 +194,15 @@ class Backup(object):
"""
Internal freezer representation of backup.
Includes:
name (hostname_backup_name) of backup
timestamp of backup (when it was executed)
level of backup (freezer supports incremental backup)
Completed full backup has level 0 and can be restored without any
additional information.
Levels 1, 2, ... means that our backup is incremental and contains
only smart portion of information (that was actually changed
since the last backup)
* name (hostname_backup_name) of backup
* timestamp of backup (when it was executed)
* level of backup (freezer supports incremental backup):
Completed full backup has level 0 and can be restored without any
additional information.
Levels 1, 2, ... means that our backup is incremental and contains
only smart portion of information (that was actually changed
since the last backup)
"""
def __init__(self, engine, hostname_backup_name,

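As a hedged illustration of the level semantics described above (the names are made up for this sketch, not Freezer's API): a restore has to start from the newest level-0 full backup and replay each later increment in order.

# Hypothetical sketch of assembling a restore chain from backup levels.
def restore_chain(backups):
    """backups: iterable of (timestamp, level) for one hostname_backup_name."""
    chain = []
    for timestamp, level in sorted(backups):
        if level == 0:
            chain = [(timestamp, level)]      # a full backup restarts the chain
        elif chain:
            chain.append((timestamp, level))  # an increment extends the chain
    return chain

print(restore_chain([(100, 0), (110, 1), (120, 2), (200, 0), (210, 1)]))
# -> [(200, 0), (210, 1)]: the latest full backup plus its increments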
View File

@@ -163,11 +163,12 @@ class TestBackupFSLocalstorage(common.TestFS):
class TestBackupSSH(common.TestFS):
"""
Tests are executed if the following env vars are defined:
- FREEZER_TEST_SSH_KEY
- FREEZER_TEST_SSH_USERNAME
- FREEZER_TEST_SSH_HOST
- FREEZER_TEST_CONTAINER
(directory on the remote machine used to store backups)
(directory on the remote machine used to store backups)
"""
@unittest.skipIf(not common.TestFS.use_ssh,

View File

@@ -48,6 +48,7 @@ setup-hooks =
source-dir = doc/source
build-dir = doc/build
all_files = 1
warning-is-error = 1
[files]
packages =
@@ -69,7 +70,6 @@ tempest.test_plugins =
[pbr]
# Have pbr generate the module indexes like sphinx autodoc
autodoc_index_modules = True
# Treat sphinx warnings as errors during the docs build; this helps us keep
# the documentation clean.
warnerrors = true
api_doc_dir = reference/api
autodoc_exclude_modules =
freezer.scheduler.win*
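
Assuming pbr matches autodoc_exclude_modules entries as fnmatch-style globs (an assumption made here for illustration, with made-up module names alongside the win_daemon module mentioned in conf.py), the pattern above would filter the win32-only modules like so:

# Illustration only: which module names the glob would exclude.
import fnmatch

modules = [
    "freezer.scheduler.arguments",      # illustrative name
    "freezer.scheduler.win_daemon",
    "freezer.scheduler.win_service",    # illustrative name
]
print([m for m in modules if fnmatch.fnmatch(m, "freezer.scheduler.win*")])
# ['freezer.scheduler.win_daemon', 'freezer.scheduler.win_service']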