Bump hacking to 0.9.x series

Require at least 0.9.1 because 0.9.0 had a minor bug.

This change also fixes all issues flagged by the new hacking checks
(import ordering, `print` statements, and `not ... in` comparisons).

Rewrote some docstrings to fit H405.

Change-Id: Ifeef11b783dbe70b2652d2b9ea29d5b20f69f2ce
This commit is contained in:
Christian Berendt 2014-06-10 16:57:10 +02:00 committed by Gauvain Pocentek
parent 8bfbbf01a1
commit d449a6bcd2
9 changed files with 66 additions and 68 deletions

View File

@ -26,13 +26,12 @@ import os
import pickle import pickle
import re import re
import sys import sys
import git
import stevedore
import xml.sax.saxutils import xml.sax.saxutils
import git
from hooks import HOOKS from hooks import HOOKS
import openstack.common.config.generator as generator import openstack.common.config.generator as generator
import stevedore
TABLE_HEADER = '''<?xml version="1.0" encoding="UTF-8"?> TABLE_HEADER = '''<?xml version="1.0" encoding="UTF-8"?>

View File

@ -31,9 +31,7 @@ PROJECTS = ['ceilometer', 'cinder', 'glance', 'heat', 'keystone', 'neutron',
def setup_venv(branch, novenvupdate): def setup_venv(branch, novenvupdate):
"""Uses the autohelp-wrapper script to generate a virtualenv for a given """Setup a virtual environment for `branch`."""
branch.
"""
dirname = os.path.join('venv', branch.replace('/', '_')) dirname = os.path.join('venv', branch.replace('/', '_'))
if novenvupdate and os.path.exists(dirname): if novenvupdate and os.path.exists(dirname):
return return
@ -46,9 +44,7 @@ def setup_venv(branch, novenvupdate):
def get_options(project, branch, args): def get_options(project, branch, args):
"""Calls the autohelp script in a venv to get the list of known """Get the list of known options for a project."""
options.
"""
print("Working on %(project)s (%(branch)s)" % {'project': project, print("Working on %(project)s (%(branch)s)" % {'project': project,
'branch': branch}) 'branch': branch})
# Checkout the required branch # Checkout the required branch
@ -136,9 +132,7 @@ def dbk_append_header(parent, cells):
def diff(old_list, new_list): def diff(old_list, new_list):
"""Compare the old and new lists of options to generate lists of modified """Compare the old and new lists of options."""
options.
"""
new_opts = [] new_opts = []
changed_default = [] changed_default = []
deprecated_opts = [] deprecated_opts = []

View File

@ -15,7 +15,6 @@
import glob import glob
import os import os
import sys import sys
from xml.dom import minidom from xml.dom import minidom
import xml.sax.saxutils import xml.sax.saxutils
@ -118,25 +117,27 @@ def new_section_file(sample, current_section):
replace(']', '').replace(':', '-') replace(']', '').replace(':', '-')
+ '.xml') + '.xml')
section_file = open(section_filename, 'w') section_file = open(section_filename, 'w')
section_file.write('<?xml version="1.0" encoding="UTF-8"?>\n\ section_file.write(
<!-- The tool that generated this table lives in the\n\ ('<?xml version="1.0" encoding="UTF-8"?>\n'
openstack-doc-tools repository. The editions made in\n\ ' <!-- The tool that generated this table lives in the\n'
this file will *not* be lost if you run the script again. -->\n\ ' openstack-doc-tools repository. The editions made in\n'
<para xmlns="http://docbook.org/ns/docbook" version="5.0">\n\ ' this file will *not* be lost if you run the script '
<table rules="all">\n\ 'again. -->\n'
<caption>Description of configuration options for <literal>' ' <para xmlns="http://docbook.org/ns/docbook" version="5.0">\n'
+ current_section + '</literal> in <literal>' ' <table rules="all">\n'
+ os.path.basename(sample) + ' <caption>Description of configuration options for <literal>'
'</literal></caption>\n\ + current_section + '</literal> in <literal>'
<col width="50%"/>\n\ + os.path.basename(sample) +
<col width="50%"/>\n\ '</literal></caption>\n'
<thead>\n\ ' <col width="50%"/>\n'
<tr>\n\ ' <col width="50%"/>\n'
<th>Configuration option = Default value</th>\n\ ' <thead>\n'
<th>Description</th>\n\ ' <tr>\n'
</tr>\n\ ' <th>Configuration option = Default value</th>\n'
</thead>\n\ ' <th>Description</th>\n'
<tbody>') ' </tr>\n'
' </thead>\n'
' <tbody>'))
return section_file return section_file
@ -167,9 +168,9 @@ def create_new_tables(repo, verbose):
""" """
if current_section != line.strip('#').strip(): if current_section != line.strip('#').strip():
if section_file is not None: if section_file is not None:
section_file.write('\n </tbody>\n\ section_file.write(('\n </tbody>\n'
</table>\n\ ' </table>\n'
</para>') ' </para>'))
section_file.close() section_file.close()
current_section = line.strip('#').strip() current_section = line.strip('#').strip()
section_file = new_section_file(sample, current_section) section_file = new_section_file(sample, current_section)
@ -201,9 +202,9 @@ def create_new_tables(repo, verbose):
'</td><td>' + option_desc + '</td>\n' + '</td><td>' + option_desc + '</td>\n' +
' </tr>') ' </tr>')
if section_file is not None: if section_file is not None:
section_file.write('\n </tbody>\n\ section_file.write(('\n </tbody>\n'
</table>\n\ ' </table>\n'
</para>') '</para>'))
section_file.close() section_file.close()

View File

@ -170,17 +170,17 @@ def _list_opts(obj):
def print_group_opts(group, opts_by_module): def print_group_opts(group, opts_by_module):
print("[%s]" % group) print("[%s]" % group)
print print('')
global OPTION_COUNT global OPTION_COUNT
for mod, opts in opts_by_module: for mod, opts in opts_by_module:
OPTION_COUNT += len(opts) OPTION_COUNT += len(opts)
print('#') print('#')
print('# Options defined in %s' % mod) print('# Options defined in %s' % mod)
print('#') print('#')
print print('')
for opt in opts: for opt in opts:
_print_opt(opt) _print_opt(opt)
print print('')
def _get_my_ip(): def _get_my_ip():
@ -246,7 +246,7 @@ def _print_opt(opt):
opt_default = [''] opt_default = ['']
for default in opt_default: for default in opt_default:
print('#%s=%s' % (opt_name, default)) print('#%s=%s' % (opt_name, default))
print print('')
except Exception: except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name) sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1) sys.exit(1)

View File

@ -36,8 +36,11 @@ import yaml
class DownloadRetfListingFailed(Exception): class DownloadRetfListingFailed(Exception):
"""Exception will be raised when the download of the RETF """Exception for failed downloads of the RETF listing.
Exception will be raised when the download of the RETF
listing failed or the destination file could not be written. listing failed or the destination file could not be written.
""" """
pass pass

View File

@ -13,7 +13,6 @@
# under the License. # under the License.
''' '''
Usage: Usage:
test.py [path] test.py [path]
@ -29,7 +28,6 @@ Requires:
- Python 2.7 or greater - Python 2.7 or greater
- lxml Python library - lxml Python library
- Maven - Maven
''' '''
import gzip import gzip
@ -43,10 +41,10 @@ import subprocess
import sys import sys
from lxml import etree from lxml import etree
from oslo.config import cfg
import os_doc_tools import os_doc_tools
from os_doc_tools.common import check_output # noqa from os_doc_tools.common import check_output # noqa
from oslo.config import cfg
# These are files that are known to not pass syntax or niceness checks # These are files that are known to not pass syntax or niceness checks
@ -120,19 +118,19 @@ def get_wadl_schema():
def validation_failed(schema, doc): def validation_failed(schema, doc):
"""Return True if the parsed doc fails against the schema """Return True if the parsed doc fails against the schema.
This will ignore validation failures of the type: IDREF attribute linkend This will ignore validation failures of the type: IDREF attribute linkend
references an unknown ID. This is because we are validating individual references an unknown ID. This is because we are validating individual
files that are being imported, and sometimes the reference isn't present files that are being imported, and sometimes the reference isn't present
in the current file. in the current file.
""" """
return not schema.validate(doc) and \ return (not schema.validate(doc) and
any(log.type_name != "DTD_UNKNOWN_ID" for log in schema.error_log) any(log.type_name != "DTD_UNKNOWN_ID" for log in schema.error_log))
def verify_section_tags_have_xmid(doc): def verify_section_tags_have_xmid(doc):
"""Check that all section tags have an xml:id attribute """Check that all section tags have an xml:id attribute.
Will throw an exception if there's at least one missing. Will throw an exception if there's at least one missing.
""" """
@ -144,11 +142,13 @@ def verify_section_tags_have_xmid(doc):
def verify_attribute_profiling(doc, attribute, known_values): def verify_attribute_profiling(doc, attribute, known_values):
"""Check for elements with attribute profiling set that conflicts with """Check for conflicts in attribute profiling.
the attribute profiling of nodes below them in the DOM
tree. This picks up cases where content is accidentally Check for elements with attribute profiling set that conflicts with
omitted via conflicting profiling. Checks known_values also for the attribute profiling of nodes below them in the DOM
supported profiling values. tree. This picks up cases where content is accidentally
omitted via conflicting profiling. Checks known_values also for
supported profiling values.
""" """
ns = {"docbook": "http://docbook.org/ns/docbook"} ns = {"docbook": "http://docbook.org/ns/docbook"}
@ -234,8 +234,8 @@ def verify_whitespace_niceness(docfile):
if affected_lines: if affected_lines:
if (msg): if (msg):
msg += "\n " msg += "\n "
msg += "trailing or unnecessary whitespaces found in lines: %s"\ msg += ("trailing or unnecessary whitespaces found in lines: %s"
% (", ".join(affected_lines)) % (", ".join(affected_lines)))
if tab_lines: if tab_lines:
if (msg): if (msg):
msg += "\n " msg += "\n "
@ -349,9 +349,7 @@ def filter_dirs(dirs):
def check_deleted_files(rootdir, file_exceptions, verbose): def check_deleted_files(rootdir, file_exceptions, verbose):
"""Check whether files got deleted and verify that no other file """Checking that no removed files are referenced."""
references them.
"""
print("Checking that no removed files are referenced...") print("Checking that no removed files are referenced...")
deleted_files = get_modified_files(rootdir, "--diff-filter=D") deleted_files = get_modified_files(rootdir, "--diff-filter=D")
@ -480,7 +478,7 @@ def is_testable_xml_file(path, exceptions):
filename = os.path.basename(path) filename = os.path.basename(path)
return (filename.endswith('.xml') and not filename == 'pom.xml' and return (filename.endswith('.xml') and not filename == 'pom.xml' and
not filename in exceptions) filename not in exceptions)
def is_testable_file(path, exceptions): def is_testable_file(path, exceptions):
@ -492,7 +490,7 @@ def is_testable_file(path, exceptions):
filename = os.path.basename(path) filename = os.path.basename(path)
return (filename.endswith(('.xml', '.xsd', '.xsl', '.wadl', return (filename.endswith(('.xml', '.xsd', '.xsl', '.wadl',
'.xjb', '.json')) and '.xjb', '.json')) and
not filename == 'pom.xml' and not filename in exceptions) not filename == 'pom.xml' and filename not in exceptions)
def is_wadl(filename): def is_wadl(filename):
@ -848,11 +846,14 @@ def build_book(book, publish_path, log_path):
def is_book_master(filename): def is_book_master(filename):
"""Returns True if filename is one of the special filenames used for the """Check if a file is a book master file.
Returns True if filename is one of the special filenames used for the
book master files. book master files.
We do not parse pom.xml for the includes directive to determine We do not parse pom.xml for the includes directive to determine
the top-level files and thus have to use a heuristic. the top-level files and thus have to use a heuristic.
""" """
return ((filename.startswith(('bk-', 'bk_', 'st-', 'api-')) return ((filename.startswith(('bk-', 'bk_', 'st-', 'api-'))

View File

@ -49,8 +49,9 @@ class IgnoreDuplicateUrls(object):
class ExportSitemap(object): class ExportSitemap(object):
'''Write found URLs to a sitemap file, based on '''Write found URLs to a sitemap file.
http://doc.scrapy.org/en/latest/topics/exporters.html.
Based on http://doc.scrapy.org/en/latest/topics/exporters.html.
''' '''
def __init__(self): def __init__(self):

View File

@ -14,11 +14,10 @@ import posixpath
import time import time
import urlparse import urlparse
from generator import items
from scrapy.contrib.linkextractors import sgml from scrapy.contrib.linkextractors import sgml
from scrapy.contrib import spiders from scrapy.contrib import spiders
from generator import items
class SitemapSpider(spiders.CrawlSpider): class SitemapSpider(spiders.CrawlSpider):
name = 'sitemap' name = 'sitemap'

View File

@ -1,4 +1,4 @@
# Hacking already pins down pep8, pyflakes and flake8 # Hacking already pins down pep8, pyflakes and flake8
hacking>=0.8.0,<0.9 hacking>=0.9.1,<0.10
pylint==0.25.2 pylint==0.25.2
sphinx>=1.2.1,<1.3 sphinx>=1.2.1,<1.3