Changed ansible directory structure

Moved roles to the parent directory
so that different adapters don't have
to have duplicated roles in their
respective paths.

patch2: Fix some code to follow the new PEP 8 standards.
Change-Id: I748c1730be8045c1cb83f91eaa9f0f551cd20a6f
This commit is contained in:
Xicheng Chang 2015-05-21 20:22:19 -07:00
parent ea07256545
commit 3d4e5d0af6
45 changed files with 362 additions and 327 deletions

View File

@ -205,9 +205,10 @@ class _APIClient(Client):
return self._put(url, data=data)
def execute(self, cluster_data, hosts_data, resp_results):
"""The process including create or update a cluster and the cluster
configuration, add or update a host in the cluster, and deploy
the updated hosts.
"""The process includes creating or updating a cluster.
The cluster configuration, add or update a host in the cluster,
and deploy the updated hosts.
:param cluster_data: the dictionary of cluster data
"""

View File

@ -24,8 +24,10 @@ from compass.apiclient.restful import Client
class AddSwitch(object):
"""A utility class that handles adding a switch and retrieving
corresponding machines associated with the switch.
"""A utility class.
Handles adding a switch and retrieving corresponding machines
associated with the switch.
"""
def __init__(self, server_url):
@ -33,8 +35,7 @@ class AddSwitch(object):
self._client = Client(server_url)
def add_switch(self, queue, ip, snmp_community):
"""Add a switch with SNMP credentials and retrieve attached
server machines.
"""Add a switch with SNMP credentials.
:param queue: The result holder for the machine details.
:type queue: A Queue object(thread-safe).

View File

@ -158,7 +158,9 @@ class ServerPowerMgmt(object):
class HostPowerMgmt(object):
"""Power management for hosts installed OS by OS installer. OS installer
will poweron/poweroff/reset host.
will poweron/poweroff/reset host.
"""
@staticmethod
def poweron(host_id, user):

View File

@ -77,7 +77,6 @@ class HdsCheck(base.BaseCheck):
def check_apt_snmp(self, pkg_module):
"""do apt health check."""
## TODO(xicheng): add ubuntu package check here
return None
def check_snmp_mibs(self):

View File

@ -63,6 +63,7 @@ class ActionHelper(object):
@staticmethod
def get_adapter_info(adapter_id, cluster_id, user):
"""Get adapter information. Return a dictionary as below,
{
"id": 1,
"name": "xxx",
@ -93,6 +94,7 @@ class ActionHelper(object):
...
}
To view a complete output, please refer to backend doc.
"""
adapter_info = adapter_db.get_adapter(adapter_id, user=user)
metadata = cluster_db.get_cluster_metadata(cluster_id, user=user)
@ -111,6 +113,7 @@ class ActionHelper(object):
@staticmethod
def get_cluster_info(cluster_id, user):
"""Get cluster information.Return a dictionary as below,
{
"id": 1,
"adapter_id": 1,
@ -149,33 +152,34 @@ class ActionHelper(object):
@staticmethod
def get_hosts_info(cluster_id, hosts_id_list, user):
"""Get hosts information. Return a dictionary as below,
{
"hosts": {
1($host_id): {
"reinstall_os": True,
"mac": "xxx",
"name": "xxx",
"roles": [xxx, yyy]
},
"networks": {
"eth0": {
"ip": "192.168.1.1",
"netmask": "255.255.255.0",
"is_mgmt": True,
"is_promiscuous": False,
"subnet": "192.168.1.0/24"
},
"eth1": {...}
},
"os_config": {},
"package_config": {},
"deployed_os_config": {},
"deployed_package_config": {}
},
2: {...},
....
}
}
{
"hosts": {
1($host_id): {
"reinstall_os": True,
"mac": "xxx",
"name": "xxx",
"roles": [xxx, yyy]
},
"networks": {
"eth0": {
"ip": "192.168.1.1",
"netmask": "255.255.255.0",
"is_mgmt": True,
"is_promiscuous": False,
"subnet": "192.168.1.0/24"
},
"eth1": {...}
},
"os_config": {},
"package_config": {},
"deployed_os_config": {},
"deployed_package_config": {}
},
2: {...},
....
}
}
"""
hosts_info = {}
for host_id in hosts_id_list:

View File

@ -38,8 +38,9 @@ class ItemNotFound(HTTPException):
class BadRequest(HTTPException):
"""Define the exception for invalid/missing parameters or a user makes
a request in invalid state and cannot be processed at this moment.
"""Define the exception for invalid/missing parameters.
User making a request in invalid state cannot be processed.
"""
def __init__(self, message):
super(BadRequest, self).__init__(message, 400)
@ -52,23 +53,19 @@ class Unauthorized(HTTPException):
class UserDisabled(HTTPException):
"""Define the exception that a disabled user tries to do some operations.
"""
"""Define the exception for disabled users."""
def __init__(self, message):
super(UserDisabled, self).__init__(message, 403)
class Forbidden(HTTPException):
"""Define the exception that a user tries to do some operations without
valid permissions.
"""
"""Define the exception for invalid permissions."""
def __init__(self, message):
super(Forbidden, self).__init__(message, 403)
class BadMethod(HTTPException):
"""Define the exception for invoking unsupprted or unimplemented methods.
"""
"""Define the exception for invoking unsupported methods."""
def __init__(self, message):
super(BadMethod, self).__init__(message, 405)

View File

@ -286,7 +286,7 @@ class Client(object):
return self._delete('/switches/%s/machines/%s' %
(switch_id, machine_id))
## test these
# test these
def poll_switch(self, switch_id):
data = {}
data['find_machines'] = None
@ -306,7 +306,7 @@ class Client(object):
data = {}
data['set_machines'] = group_machines
return self._post('/switches/%s/action' % switch_id, data=data)
## end
# end
def list_switchmachines(self, switch_ip_int=None, port=None, vlans=None,
mac=None, tag=None, location=None):
@ -621,7 +621,7 @@ class Client(object):
def delete_cluster_config(self, cluster_id):
return self._delete('/clusters/%s/config' % cluster_id)
## test these
# test these
def add_hosts_to_cluster(self, cluster_id, hosts):
data = {}
data['add_hosts'] = hosts

View File

@ -28,7 +28,7 @@ COMPASS_SERVER_URL = 'http://127.0.0.1/api'
SWITCH_IP = '10.145.81.220'
SWITCH_SNMP_VERSION = 'v2c'
SWITCH_SNMP_COMMUNITY = 'public'
#MACHINES_TO_ADD = ['00:11:20:30:40:01']
# MACHINES_TO_ADD = ['00:11:20:30:40:01']
CLUSTER_NAME = 'cluster2'
HOST_NAME_PREFIX = 'host'
SERVER_USERNAME = 'root'
@ -38,11 +38,11 @@ SERVICE_PASSWORD = 'service'
CONSOLE_USERNAME = 'console'
CONSOLE_PASSWORD = 'console'
HA_VIP = ''
#NAMESERVERS = '192.168.10.6'
# NAMESERVERS = '192.168.10.6'
SEARCH_PATH = 'ods.com'
#GATEWAY = '192.168.10.6'
#PROXY = 'http://192.168.10.6:3128'
#NTP_SERVER = '192.168.10.6'
# GATEWAY = '192.168.10.6'
# PROXY = 'http://192.168.10.6:3128'
# NTP_SERVER = '192.168.10.6'
MANAGEMENT_IP_START = '192.168.10.130'
MANAGEMENT_IP_END = '192.168.10.254'
MANAGEMENT_IP_GATEWAY = '192.168.10.1'
@ -70,7 +70,7 @@ STORAGE_PROMISC = 0
HOME_PERCENTAGE = 5
TMP_PERCENTAGE = 5
VAR_PERCENTAGE = 10
#ROLES_LIST = [['os-dashboard']]
# ROLES_LIST = [['os-dashboard']]
PRESET_VALUES = {
'NAMESERVERS': '192.168.10.1',

View File

@ -275,8 +275,7 @@ class Client(object):
return self._get('/machines/%s' % machine_id)
def get_clusters(self):
"""Lists the details for all clusters.
"""
"""Lists the details for all clusters."""
return self._get('/clusters')
def get_cluster(self, cluster_id):

View File

@ -1772,8 +1772,7 @@ def update_cluster_host_state_internal(
for clusterhost_in_cluster in cluster.clusterhosts:
if (
clusterhost_in_cluster.clusterhost_id
==
clusterhost.clusterhost_id
== clusterhost.clusterhost_id
):
continue
if not clusterhost_in_cluster.state.ready:
@ -1876,8 +1875,7 @@ def update_clusterhost_state_internal(
for clusterhost_in_cluster in cluster.clusterhosts:
if (
clusterhost_in_cluster.clusterhost_id
==
clusterhost.clusterhost_id
== clusterhost.clusterhost_id
):
continue
if not clusterhost_in_cluster.state.ready:

View File

@ -33,9 +33,11 @@ MAPPER = {
def validate_config(session, config, id_name, id_value, patch=True):
"""Validates the given config value according to the config
metadata of the asscoiated os_id or adapter_id. Returns
a tuple (status, message).
"""Validates config.
Validates the given config value according to the config
metadata of the asscoiated os_id or adapter_id. Returns
a tuple (status, message).
"""
if id_name not in MAPPER.keys():
return (False, "Invalid id type %s" % id_name)

View File

@ -49,6 +49,7 @@ class Unauthorized(DatabaseException):
class UserDisabled(DatabaseException):
"""Define the exception that a disabled user tries to do some operations.
"""
def __init__(self, message):
super(UserDisabled, self).__init__(message)
@ -57,7 +58,9 @@ class UserDisabled(DatabaseException):
class Forbidden(DatabaseException):
"""Define the exception that a user is trying to make some action
without the right permission.
without the right permission.
"""
def __init__(self, message):
super(Forbidden, self).__init__(message)
@ -73,6 +76,7 @@ class NotAcceptable(DatabaseException):
class InvalidParameter(DatabaseException):
"""Define the exception that the request has invalid or missing parameters.
"""
def __init__(self, message):
super(InvalidParameter, self).__init__(message)
@ -81,6 +85,7 @@ class InvalidParameter(DatabaseException):
class InvalidResponse(DatabaseException):
"""Define the exception that the response is invalid.
"""
def __init__(self, message):
super(InvalidResponse, self).__init__(message)

View File

@ -541,6 +541,7 @@ class HostNetwork(BASE, TimestampMixin, HelperMixin):
class ClusterHostLogHistory(BASE, LogHistoryMixin):
"""clusterhost installing log history for each file.
"""
__tablename__ = 'clusterhost_log_history'
@ -577,6 +578,7 @@ class ClusterHostLogHistory(BASE, LogHistoryMixin):
class HostLogHistory(BASE, LogHistoryMixin):
"""host installing log history for each file.
"""
__tablename__ = 'host_log_history'

View File

@ -32,12 +32,12 @@ from flask.ext.login import UserMixin
from itsdangerous import URLSafeTimedSerializer
BASE = declarative_base()
#TODO(grace) SECRET_KEY should be generated when installing compass
#and save to a config file or DB
# TODO(grace) SECRET_KEY should be generated when installing compass
# and save to a config file or DB
SECRET_KEY = "abcd"
#This is used for generating a token by user's ID and
#decode the ID from this token
# This is used for generating a token by user's ID and
# decode the ID from this token
login_serializer = URLSafeTimedSerializer(SECRET_KEY)

View File

@ -92,8 +92,9 @@ class DeployManager(object):
self.clean_progress()
def deploy_os(self):
"""Deploy OS to hosts which need to in the cluster. Return OS deployed
config.
"""Deploy OS to hosts which need to in the cluster.
Return OS deployed config.
"""
if not self.os_installer:
return {}
@ -113,8 +114,9 @@ class DeployManager(object):
return self.os_installer.deploy()
def deploy_target_system(self):
"""Deploy target system to all hosts in the cluster. Return package
deployed config.
"""Deploy target system to all hosts in the cluster.
Return package deployed config.
"""
if not self.pk_installer:
return {}

View File

@ -41,8 +41,9 @@ class BaseConfigManager(object):
return self.__get_cluster_item(const.OS_VERSION)
def get_cluster_baseinfo(self):
"""Get cluster base information, including cluster_id, os_version,
and cluster_name.
"""Get cluster base information.
Including cluster_id, os_version and cluster_name.
"""
attr_names = [const.ID, const.NAME, const.OS_VERSION]
@ -351,21 +352,22 @@ class BaseConfigManager(object):
def _get_cluster_roles_mapping_helper(self):
"""The ouput format will be as below, for example:
{
"controller": [{
"hostname": "xxx",
"management": {
"interface": "eth0",
"ip": "192.168.1.10",
"netmask": "255.255.255.0",
"subnet": "192.168.1.0/24",
"is_mgmt": True,
"is_promiscuous": False
},
...
}],
...
}
{
"controller": [{
"hostname": "xxx",
"management": {
"interface": "eth0",
"ip": "192.168.1.10",
"netmask": "255.255.255.0",
"subnet": "192.168.1.0/24",
"is_mgmt": True,
"is_promiscuous": False
},
...
}],
...
}
"""
mapping = {}
hosts_id_list = self.get_host_id_list()

View File

@ -60,8 +60,9 @@ class BaseInstaller(object):
def get_tmpl_vars_from_metadata(self, metadata, config):
"""Get variables dictionary for rendering templates from metadata.
:param dict metadata: The metadata dictionary.
:param dict config: The
:param dict metadata: The metadata dictionary.
:param dict config: The
"""
template_vars = {}
self._get_tmpl_vars_helper(metadata, config, template_vars)
@ -69,18 +70,19 @@ class BaseInstaller(object):
return template_vars
def _get_key_mapping(self, metadata, key, is_regular_key):
"""Get the keyword which the input key maps to. This keyword will be
added to dictionary used to render templates.
"""Get the keyword which the input key maps to.
If the key in metadata has a mapping to another keyword which is
used for templates, then return this keyword. If the key is started
with '$', which is a variable in metadata, return the key itself as
the mapping keyword. If the key has no mapping, return None.
This keyword will be added to dictionary used to render templates.
:param dict metadata: metadata/submetadata dictionary.
:param str key: The keyword defined in metadata.
:param bool is_regular_key: False when the key defined in metadata
is a variable(starting with '$').
If the key in metadata has a mapping to another keyword which is
used for templates, then return this keyword. If the key is started
with '$', which is a variable in metadata, return the key itself as
the mapping keyword. If the key has no mapping, return None.
:param dict metadata: metadata/submetadata dictionary.
:param str key: The keyword defined in metadata.
:param bool is_regular_key: False when the key defined in metadata
is a variable(starting with '$').
"""
mapping_to = key
if is_regular_key:
@ -92,12 +94,14 @@ class BaseInstaller(object):
return mapping_to
def _get_submeta_by_key(self, metadata, key):
"""Get submetadata dictionary based on current metadata key. And
determines the input key is a regular string keyword or a variable
keyword defined in metadata, which starts with '$'.
"""Get submetadata dictionary.
:param dict metadata: The metadata dictionary.
:param str key: The keyword defined in the metadata.
Based on current metadata key. And
determines the input key is a regular string keyword or a variable
keyword defined in metadata, which starts with '$'.
:param dict metadata: The metadata dictionary.
:param str key: The keyword defined in the metadata.
"""
if key in metadata:
return (True, metadata[key])

View File

@ -89,10 +89,11 @@ class CobblerInstaller(OSInstaller):
def get_supported_oses(self):
"""get supported os versions.
.. note::
In cobbler, we treat profile name as the indicator
of os version. It is just a simple indicator
and not accurate.
note::
In cobbler, we treat profile name as the indicator
of os version. It is just a simple indicator
and not accurate.
"""
profiles = self.remote.get_profiles()
oses = []
@ -102,20 +103,21 @@ class CobblerInstaller(OSInstaller):
def deploy(self):
"""Sync cobbler to catch up the latest update config and start to
install OS. Return both cluster and hosts deploy configs. The return
format:
{
"cluster": {
"id": 1,
"deployed_os_config": {},
},
"hosts": {
1($clusterhost_id): {
"deployed_os_config": {...},
},
....
}
}
install OS. Return both cluster and hosts deploy configs. The return
format:
{
"cluster": {
"id": 1,
"deployed_os_config": {},
},
"hosts": {
1($clusterhost_id): {
"deployed_os_config": {...},
},
....
}
}
"""
host_ids = self.config_manager.get_hosts_id_list_for_os_installation()
if not host_ids:
@ -183,13 +185,14 @@ class CobblerInstaller(OSInstaller):
def set_package_installer_config(self, package_configs):
"""Cobbler can install and configure package installer right after
OS installation compelets by setting package_config info provided
by package installer.
:param dict package_configs: The dict of config generated by package
installer for each clusterhost. The IDs
of clusterhosts are the keys of
package_configs.
OS installation compelets by setting package_config info provided
by package installer.
:param dict package_configs: The dict of config generated by package
installer for each clusterhost. The IDs
of clusterhosts are the keys of
package_configs.
"""
self.pk_installer_config = package_configs
@ -309,7 +312,8 @@ class CobblerInstaller(OSInstaller):
def delete_single_host(self, host_id):
"""Delete the host from cobbler server and clean up the installation
progress.
progress.
"""
hostname = self.config_manager.get_hostname(host_id)
try:
@ -321,8 +325,7 @@ class CobblerInstaller(OSInstaller):
logging.exception(ex)
def _get_host_tmpl_vars_dict(self, host_id, global_vars_dict, **kwargs):
"""Generate template variables dictionary.
"""
"""Generate template variables dictionary."""
vars_dict = {}
if global_vars_dict:
# Set cluster template vars_dict from cluster os_config.

View File

@ -95,15 +95,15 @@ class AnsibleInstaller(PKInstaller):
self.__class__.__name__, self.NAME, self.installer_url)
def generate_installer_config(self):
"""Render ansible config file by OS installing right after
OS is installed successfully.
The output format:
{
'1'($host_id/clusterhost_id):{
'tool': 'ansible',
},
.....
}
"""Render ansible config file by OS installing.
The output format:
{
'1'($host_id/clusterhost_id):{
'tool': 'ansible',
},
.....
}
"""
host_ids = self.config_manager.get_host_id_list()
os_installer_configs = {}
@ -119,21 +119,23 @@ class AnsibleInstaller(PKInstaller):
return "-".join((dist_sys_name, cluster_name))
def _get_cluster_tmpl_vars(self):
"""Generate template variables dict based on cluster level config.
The vars_dict will be:
{
"baseinfo": {
"id":1,
"name": "cluster01",
...
},
"package_config": {
.... //mapped from original package config based on metadata
},
"role_mapping": {
....
}
}
"""Generate template variables dict
Generates based on cluster level config.
The vars_dict will be:
{
"baseinfo": {
"id":1,
"name": "cluster01",
...
},
"package_config": {
.... //mapped from original package config based on metadata
},
"role_mapping": {
....
}
}
"""
cluster_vars_dict = {}
# set cluster basic information to vars_dict
@ -242,7 +244,7 @@ class AnsibleInstaller(PKInstaller):
files = self.runner_files
for dir in dirs:
shutil.copytree(
os.path.join(self.adapter_dir, dir),
os.path.join(self.ansible_dir, dir),
os.path.join(
ansible_run_destination,
dir
@ -289,24 +291,26 @@ class AnsibleInstaller(PKInstaller):
self.serialize_config(cfg_config, cfg_destination)
def deploy(self):
"""Start to deploy a distributed system. Return both cluster and hosts
deployed configs. The return format:
{
"cluster": {
"id": 1,
"deployed_package_config": {
"roles_mapping": {...},
"service_credentials": {...},
....
}
},
"hosts": {
1($clusterhost_id): {
"deployed_package_config": {...}
},
....
}
}
"""Start to deploy a distributed system.
Return both cluster and hosts deployed configs.
The return format:
{
"cluster": {
"id": 1,
"deployed_package_config": {
"roles_mapping": {...},
"service_credentials": {...},
....
}
},
"hosts": {
1($clusterhost_id): {
"deployed_package_config": {...}
},
....
}
}
"""
host_list = self.config_manager.get_host_id_list()
if not host_list:

View File

@ -103,8 +103,9 @@ class ChefInstaller(PKInstaller):
return "-".join((dist_sys_name, cluster_name))
def get_create_databag(self, databag_name):
"""Get databag object from chef server. Create the databag if it
does not exist.
"""Get databag object from chef server.
Creates the databag if it does not exist.
"""
import chef
databag = None
@ -116,11 +117,13 @@ class ChefInstaller(PKInstaller):
return databag
def get_create_node(self, node_name, env_name=None):
"""Get chef node if existing, otherwise create one and set its
environment.
"""Get chef node
:param str node_name: The name for this node.
:param str env_name: The environment name for this node.
Gets the node if existing, otherwise create one and set its
environment.
:param str node_name: The name for this node.
:param str env_name: The environment name for this node.
"""
import chef
if not self.chef_api:
@ -194,8 +197,9 @@ class ChefInstaller(PKInstaller):
def add_roles(self, node, roles):
"""Add roles to the node.
:param object node: The node object.
:param list roles: The list of roles for this node.
:param object node: The node object.
:param list roles: The list of roles for this node.
"""
if node is None:
raise Exception("Node is None!")
@ -217,13 +221,15 @@ class ChefInstaller(PKInstaller):
logging.debug('Runlist for node %s is %s', node.name, node.run_list)
def _generate_node_attributes(self, roles, host_vars_dict):
"""Generate node attributes from templates according to its roles. The
templates are named by roles without '-'. Return the dictionary
of attributes defined in the templates.
"""Generate node attributes.
:param list roles: The roles for this node, used to load the
specific template.
:param dict host_vars_dict: The dict used in cheetah searchList to
Generates from templates according to its roles. The
templates are named by roles without '-'. Return the dictionary
of attributes defined in the templates.
:param list roles: The roles for this node, used to load the
specific template.
:param dict host_vars_dict: The dict used in cheetah searchList to
render attributes from templates.
"""
if not roles:
@ -296,12 +302,11 @@ class ChefInstaller(PKInstaller):
env.save()
def upload_environment(self, env_name, global_vars_dict):
"""Generate environment attributes based on the template file and
upload it to chef server.
"""Generate environment attributes
:param str env_name: The environment name.
:param dict vars_dict: The dictionary used in cheetah searchList to
render attributes from templates.
:param str env_name: The environment name.
:param dict vars_dict: The dictionary used in cheetah searchList to
render attributes from templates.
"""
env_config = self._generate_env_attributes(global_vars_dict)
env = self.get_create_environment(env_name)
@ -341,14 +346,15 @@ class ChefInstaller(PKInstaller):
databagitem.save()
def _get_host_tmpl_vars(self, host_id, global_vars_dict):
"""Generate templates variables dictionary for cheetah searchList based
on host package config.
"""Generate templates variables dictionary.
:param int host_id: The host ID.
:param dict global_vars_dict: The vars_dict got from cluster level
package_config.
For cheetah searchList based on host package config.
The output format is the same as cluster_vars_dict.
:param int host_id: The host ID.
:param dict global_vars_dict: The vars_dict got from cluster level
package_config.
The output format is the same as cluster_vars_dict.
"""
host_vars_dict = {}
if global_vars_dict:
@ -378,20 +384,21 @@ class ChefInstaller(PKInstaller):
def _get_cluster_tmpl_vars(self):
"""Generate template variables dict based on cluster level config.
The vars_dict will be:
{
"baseinfo": {
"id":1,
"name": "cluster01",
...
},
"package_config": {
.... //mapped from original package config based on metadata
},
"role_mapping": {
....
}
}
The vars_dict will be:
{
"baseinfo": {
"id":1,
"name": "cluster01",
...
},
"package_config": {
.... //mapped from original package config based on metadata
},
"role_mapping": {
....
}
}
"""
cluster_vars_dict = {}
# set cluster basic information to vars_dict
@ -422,23 +429,24 @@ class ChefInstaller(PKInstaller):
def deploy(self):
"""Start to deploy a distributed system. Return both cluster and hosts
deployed configs. The return format:
{
"cluster": {
"id": 1,
"deployed_package_config": {
"roles_mapping": {...},
"service_credentials": {...},
....
}
},
"hosts": {
1($clusterhost_id): {
"deployed_package_config": {...}
},
....
}
}
deployed configs. The return format:
{
"cluster": {
"id": 1,
"deployed_package_config": {
"roles_mapping": {...},
"service_credentials": {...},
....
}
},
"hosts": {
1($clusterhost_id): {
"deployed_package_config": {...}
},
....
}
}
"""
host_list = self.config_manager.get_host_id_list()
if not host_list:
@ -498,19 +506,20 @@ class ChefInstaller(PKInstaller):
def generate_installer_config(self):
"""Render chef config file (client.rb) by OS installing right after
OS is installed successfully.
The output format:
{
'1'($host_id/clusterhost_id):{
'tool': 'chef',
'chef_url': 'https://xxx',
'chef_client_name': '$host_name',
'chef_node_name': '$host_name',
'chef_server_ip': 'xxx',(op)
'chef_server_dns': 'xxx' (op)
},
.....
}
OS is installed successfully.
The output format:
{
'1'($host_id/clusterhost_id):{
'tool': 'chef',
'chef_url': 'https://xxx',
'chef_client_name': '$host_name',
'chef_node_name': '$host_name',
'chef_server_ip': 'xxx',(op)
'chef_server_dns': 'xxx' (op)
},
.....
}
"""
host_ids = self.config_manager.get_host_id_list()
os_installer_configs = {}
@ -552,7 +561,8 @@ class ChefInstaller(PKInstaller):
def get_supported_dist_systems(self):
"""get target systems from chef. All target_systems for compass will
be stored in the databag called "compass".
be stored in the databag called "compass".
"""
databag = self.__get_compass_databag()
target_systems = {}

View File

@ -31,7 +31,8 @@ class BaseVendor(object):
def is_this_vendor(self, sys_info, **kwargs):
"""Determine if the host is associated with this vendor.
This function must be implemented by vendor itself
This function must be implemented by vendor itself
"""
raise NotImplementedError
@ -48,10 +49,11 @@ class BaseSnmpVendor(BaseVendor):
self._matched_names = matched_names
def is_this_vendor(self, sys_info, **kwargs):
"""Determine if the switch belongs to this vendor by matching the
system information retrieved from the switch.
:param str sys_info: the system information retrieved from a switch
Return True
"""Determine if the switch belongs to this vendor.
Matching the system information retrieved from the switch.
:param str sys_info: the system information retrieved from a switch
Return True
"""
if sys_info:
for name in self._matched_names:
@ -62,14 +64,16 @@ class BaseSnmpVendor(BaseVendor):
class BasePlugin(object):
"""Extended by vendor's plugin, which processes request and
retrieve info directly from the switch.
"""Extended by vendor's plugin.
This plugin processes request and retrieve info directly from the switch.
"""
__metaclass__ = ABCMeta
def process_data(self, oper='SCAN', **kwargs):
"""Each vendors will have some plugins to do some operations.
Plugin will process request data and return expected result.
Plugin will process request data and return expected result.
:param oper: operation function name.
:param kwargs: key-value pairs of arguments

View File

@ -39,8 +39,9 @@ class HDManager(object):
self.snmp_sysdescr = 'sysDescr.0'
def learn(self, host, credential, vendor, req_obj, oper="SCAN", **kwargs):
"""Insert/update record of switch_info. Get expected results from
switch according to sepcific operation.
"""Insert/update record of switch_info.
Get expected results from switch according to sepcific operation.
:param req_obj: the object of a machine
:param host: switch IP address

View File

@ -278,7 +278,8 @@ def snmpwalk_by_cl(host, credential, oid, timeout=5, retries=3):
def exec_command(command):
"""Execute command.
Return a tuple: returncode, output and error message(None if no error).
Return a tuple: returncode, output and error message(None if no error).
"""
sub_p = subprocess.Popen(command,
shell=True,

View File

@ -17,7 +17,7 @@
from compass.hdsdiscovery import base
#Vendor_loader will load vendor instance by CLASS_NAME
# Vendor_loader will load vendor instance by CLASS_NAME
CLASS_NAME = 'Appliance'

View File

@ -28,8 +28,8 @@ class Mac(base.BaseSnmpMacPlugin):
def __init__(self, host, credential):
self.host = host
#self.credential = credential
#return
# self.credential = credential
# return
def scan(self):
"""Implemnets the scan method in BasePlugin class.

View File

@ -16,7 +16,7 @@
from compass.hdsdiscovery import base
#Vendor_loader will load vendor instance by CLASS_NAME
# Vendor_loader will load vendor instance by CLASS_NAME
CLASS_NAME = 'Arista'

View File

@ -16,7 +16,7 @@
from compass.hdsdiscovery import base
#Vendor_loader will load vendor instance by CLASS_NAME
# Vendor_loader will load vendor instance by CLASS_NAME
CLASS_NAME = 'Hp'

View File

@ -16,7 +16,7 @@
from compass.hdsdiscovery import base
#Vendor_loader will load vendor instance by CLASS_NAME
# Vendor_loader will load vendor instance by CLASS_NAME
CLASS_NAME = "Huawei"

View File

@ -20,7 +20,7 @@ from compass.hdsdiscovery import base
from compass.hdsdiscovery import utils
#Vendor_loader will load vendor instance by CLASS_NAME
# Vendor_loader will load vendor instance by CLASS_NAME
CLASS_NAME = "OVSwitch"

View File

@ -16,7 +16,7 @@
from compass.hdsdiscovery import base
#Vendor_loader will load vendor instance by CLASS_NAME
# Vendor_loader will load vendor instance by CLASS_NAME
CLASS_NAME = 'Pica8'

View File

@ -211,43 +211,37 @@ def clean_package_installer(
@celery.task(name='compass.tasks.poweron_host')
def poweron_host(host_id):
"""Deploy the given cluster.
"""
"""Deploy the given cluster."""
pass
@celery.task(name='compass.tasks.poweroff_host')
def poweroff_host(host_id):
"""Deploy the given cluster.
"""
"""Deploy the given cluster."""
pass
@celery.task(name='compass.tasks.reset_host')
def reset_host(host_id):
"""Deploy the given cluster.
"""
"""Deploy the given cluster."""
pass
@celery.task(name='compass.tasks.poweron_machine')
def poweron_machine(machine_id):
"""Deploy the given cluster.
"""
"""Deploy the given cluster."""
pass
@celery.task(name='compass.tasks.poweroff_machine')
def poweroff_machine(machine_id):
"""Deploy the given cluster.
"""
"""Deploy the given cluster."""
pass
@celery.task(name='compass.tasks.reset_machine')
def reset_machine(machine_id):
"""Deploy the given cluster.
"""
"""Deploy the given cluster."""
pass
@ -256,8 +250,7 @@ def os_installed(
host_id, clusterhosts_ready,
clusters_os_ready
):
"""callback when os is installed.
"""
"""callback when os is installed."""
try:
install_callback.os_installed(
host_id, clusterhosts_ready,
@ -271,8 +264,7 @@ def os_installed(
def package_installed(
cluster_id, host_id, cluster_ready, host_ready
):
"""callback when package is installed.
"""
"""callback when package is installed."""
try:
install_callback.package_installed(
cluster_id, host_id, cluster_ready, host_ready
@ -285,8 +277,7 @@ def package_installed(
def cluster_installed(
cluster_id, clusterhosts_ready
):
"""callback when package is installed.
"""
"""callback when package is installed."""
try:
install_callback.cluster_installed(
cluster_id, clusterhosts_ready
@ -297,8 +288,7 @@ def cluster_installed(
@celery.task(name='compass.tasks.update_progress')
def update_clusters_progress():
"""Calculate the installing progress of the given cluster.
"""
"""Calculate the installing progress of the given cluster."""
logging.info('update_clusters_progress')
try:
update_progress.update_progress()

View File

@ -123,7 +123,7 @@ class TestProgressCalculator(unittest2.TestCase):
if not self.flavor_id:
raise Exception('flavor id not found')
#add cluster
# add cluster
cluster.add_cluster(
adapter_id=self.adapter_id,
os_id=self.os_id,
@ -139,7 +139,7 @@ class TestProgressCalculator(unittest2.TestCase):
for list_cluster in list_clusters:
self.cluster_id = list_cluster['id']
#add switch
# add switch
switch.add_switch(
ip=SWITCH_IP,
user=self.user_object,
@ -154,12 +154,12 @@ class TestProgressCalculator(unittest2.TestCase):
port='1'
)
#get machine information
# get machine information
list_machines = machine.list_machines(user=self.user_object)
for list_machine in list_machines:
self.machine_id = list_machine['id']
#add cluster host
# add cluster host
cluster.add_cluster_host(
self.cluster_id,
user=self.user_object,
@ -171,7 +171,7 @@ class TestProgressCalculator(unittest2.TestCase):
self.host_id = list_clusterhost['host_id']
self.clusterhost_id = list_clusterhost['clusterhost_id']
#add subnet
# add subnet
network.add_subnet(
subnet=SUBNET,
user=self.user_object,
@ -182,7 +182,7 @@ class TestProgressCalculator(unittest2.TestCase):
for list_subnet in list_subnets:
self.subnet_id = list_subnet['id']
#add host network
# add host network
host.add_host_network(
self.host_id,
user=self.user_object,
@ -192,14 +192,14 @@ class TestProgressCalculator(unittest2.TestCase):
is_mgmt=True
)
#get clusterhost
# get clusterhost
list_clusterhosts = cluster.list_clusterhosts(
user=self.user_object
)
for list_clusterhost in list_clusterhosts:
self.clusterhost_id = list_clusterhost['id']
#update host state
# update host state
self.list_hosts = host.list_hosts(user=self.user_object)
for list_host in self.list_hosts:
self.host_id = list_host['id']
@ -209,14 +209,14 @@ class TestProgressCalculator(unittest2.TestCase):
state='INSTALLING'
)
#update cluster state
# update cluster state
cluster.update_cluster_state(
self.cluster_id,
user=self.user_object,
state='INSTALLING'
)
#update clusterhost state
# update clusterhost state
cluster.update_clusterhost_state(
self.clusterhost_id,
user=self.user_object,

View File

@ -85,7 +85,7 @@ class ApiTestCase(unittest2.TestCase):
resp = json.loads(resp)
self.token = resp['token']
#create a cluster
# create a cluster
adapter_name, adapter_id, os_id, flavor_id = (
self._get_adapter_info()
)
@ -105,7 +105,7 @@ class ApiTestCase(unittest2.TestCase):
data['flavor_id'] = flavor_id
self.post(url, data)
#create a switch
# create a switch
url = '/switches'
datas = [
{
@ -537,7 +537,7 @@ class TestSubnetAPI(ApiTestCase):
self.assertEqual(return_value.status_code, 200)
self.assertTrue(item in data.items() for item in resp.items())
# give a non-existed id
# give a non-existed id
url = '/subnets/99'
data = {
'subnet': '192.168.100.0/24',
@ -813,7 +813,7 @@ class TestHostAPI(ApiTestCase):
self.assertEqual([], resp)
def test_show_host(self):
#show a host successfully
# show a host successfully
url = '/hosts/1'
return_value = self.get(url)
resp = json.loads(return_value.get_data())

View File

@ -408,7 +408,7 @@ class TestUpdateCluster(ClusterTestCase):
name='cluster_editable'
)
#reinstall
# reinstall
self.assertRaises(
exception.Forbidden,
cluster.update_cluster,
@ -441,7 +441,7 @@ class TestDelCluster(ClusterTestCase):
self.assertNotEqual(1, del_cluster['id'])
def test_is_cluster_editable(self):
#state is INSTALLING
# state is INSTALLING
cluster.update_cluster_state(
self.cluster_id,
user=self.user_object,
@ -1596,7 +1596,7 @@ class TestUpdateClusterHosts(ClusterTestCase):
result = item
self.assertNotIn(self.host_id[0], result)
#add host
# add host
cluster.update_cluster_hosts(
self.cluster_id,
user=self.user_object,

View File

@ -55,7 +55,7 @@ class MetadataTestCase(unittest2.TestCase):
adapter.load_adapters()
metadata.load_metadatas()
#Get a os_id and adapter_id
# Get a os_id and adapter_id
self.user_object = (
user_api.get_user_object(
setting.COMPASS_ADMIN_EMAIL

View File

@ -203,8 +203,9 @@ class TestPatchSwitch(BaseTest):
)
expected = {
'credentials': {
'version': '2c',
'community': 'public'}
'version': '2c',
'community': 'public'
}
}
self.assertTrue(
all(item in patch_switch.items() for item in expected.items())

View File

@ -55,12 +55,12 @@ class HuaweiTest(unittest2.TestCase):
def test_is_this_vendor(self):
"""test device vendor is haiwei."""
#Incorrect system information
# Incorrect system information
incorrect_sys_info = "xxx"
self.assertFalse(
self.huawei.is_this_vendor(incorrect_sys_info))
#Correct vendor
# Correct vendor
self.assertTrue(
self.huawei.is_this_vendor(self.sys_info))
@ -86,7 +86,7 @@ class HuaweiMacTest(unittest2.TestCase):
self.assertIsNone(self.mac_plugin.process_data('GET'))
# SNMP Walk Timeout
#utils.snmpwalk_by_cl = Mock(return_value=None)
# utils.snmpwalk_by_cl = Mock(return_value=None)
mock_snmpwalk.return_value = None
self.assertIsNone(self.mac_plugin.process_data())
@ -101,7 +101,7 @@ class HuaweiMacTest(unittest2.TestCase):
{"mac": "28:6e:d4:64:c7:4a", "port": "2", "vlan": "88"},
{"mac": "00:0c:29:35:dc:02", "port": "3", "vlan": "88"}
]
#utils.snmpwalk_by_cl = Mock(return_value=mock_snmp_walk_result)
# utils.snmpwalk_by_cl = Mock(return_value=mock_snmp_walk_result)
mock_snmpwalk.return_value = mock_snmp_walk_result
self.mac_plugin.get_port = Mock()
self.mac_plugin.get_port.side_effect = ["1", "2", "3"]
@ -196,14 +196,14 @@ class HDManagerTest(unittest2.TestCase):
@patch('compass.hdsdiscovery.hdmanager.HDManager.get_sys_info')
def test_is_valid_vendor(self, sys_info_mock):
"""test is_valid_vendor."""
#non-exsiting vendor under vendors directory
# non-exsiting vendor under vendors directory
self.assertFalse(
self.manager.is_valid_vendor(self.correct_host,
self.correct_credential,
'xxxx')
)
#No system description retrieved
# No system description retrieved
sys_info_mock.return_value = (None, 'TIMEOUT')
self.assertFalse(
self.manager.is_valid_vendor(self.correct_host,
@ -211,7 +211,7 @@ class HDManagerTest(unittest2.TestCase):
'pica8')
)
#Incorrect vendor name
# Incorrect vendor name
sys_info = 'Pica8 XorPlus Platform Software'
sys_info_mock.return_value = (sys_info, '')
self.assertFalse(
@ -220,7 +220,7 @@ class HDManagerTest(unittest2.TestCase):
'huawei')
)
#Correct vendor name
# Correct vendor name
self.assertTrue(
self.manager.is_valid_vendor(self.correct_host,
self.correct_credential,
@ -229,12 +229,12 @@ class HDManagerTest(unittest2.TestCase):
def test_learn(self):
"""test learn."""
#non-exsiting plugin
# non-exsiting plugin
self.assertIsNone(self.manager.learn(self.correct_host,
self.correct_credential,
'huawei', 'xxx'))
#non-existing vendor
# non-existing vendor
self.assertIsNone(self.manager.learn(self.correct_host,
self.correct_credential,
'xxxx', 'mac'))

View File

@ -61,8 +61,7 @@ OPTIONS = Flags()
def init():
"""Init flag parsing.
"""
"""Init flag parsing."""
OPTIONS.parse_args()

View File

@ -263,10 +263,12 @@ def get_switch_machines_from_file(filename):
def execute_cli_by_ssh(cmd, host, username, password=None,
keyfile='/root/.ssh/id_rsa', nowait=False):
"""SSH to execute script on remote machine
:param host: ip of the remote machine
:param username: username to access the remote machine
:param password: password to access the remote machine
:param cmd: command to execute
"""
if not cmd:
logging.error("No command found!")

View File

@ -70,7 +70,7 @@ EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
FLOATING_IP_START: 203.0.113.101
FLOATING_IP_END: 203.0.113.200
build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
build_in_image_name: cirros-0.3.3-x86_64-disk.img
physical_device: /dev/sdb

View File

@ -131,7 +131,7 @@ EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
FLOATING_IP_START: 203.0.113.101
FLOATING_IP_END: 203.0.113.200
build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
build_in_image_name: cirros-0.3.3-x86_64-disk.img
physical_device: /dev/sdb

View File

@ -83,7 +83,7 @@ EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
FLOATING_IP_START: 203.0.113.101
FLOATING_IP_END: 203.0.113.200
build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
build_in_image_name: cirros-0.3.3-x86_64-disk.img
physical_device: /dev/sdb

View File

@ -208,11 +208,13 @@ def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
def _extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""Extract all members.
From the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
import copy
import operator

View File

@ -5,5 +5,5 @@ testtools>=0.9.32
testrepository>=0.0.17
mimeparse
coverage>=3.6
hacking>=0.8.0,<0.9
hacking
pycrypto<=2.0.1

View File

@ -31,7 +31,7 @@ commands = python setup.py testr --coverage --testr-args='{posargs}'
downloadcache = ~/cache/pip
[flake8]
ignore = H302,H304,H233,H803,F401
ignore = H302,H304,H233,H803,F401,H104,H236,H237,H238
show-source = true
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools,build