Deprecate fuel_upgrade_system/fuel_upgrade directory

Change-Id: I822c25d5b98d0b09d5a4baee3a3cf48c620b9033
Related-Bug: #1475219
Vladimir Kozhukalov 2015-10-29 16:36:05 +03:00
parent ff842dd813
commit 10b14a719d
68 changed files with 0 additions and 10362 deletions


@@ -31,15 +31,6 @@ maintainers:
     email: mmosesohn@mirantis.com
     IRC: mattymo
 
-- fuel_upgrade_system/fuel_upgrade/:
-  - name: Evgeny Li
-    email: eli@mirantis.com
-    IRC: evgeniyl
-
-  - name: Igor Kalnitsky
-    email: ikalnitsky@mirantis.com
-    IRC: ikalnitsky
-
 - nailgun/:
   - name: Aleksandr Kislitskii
     email: akislitsky@mirantis.com


@@ -1,3 +0,0 @@
include fuel_upgrade/templates/*
include fuel_upgrade/*.yaml
include requirements.txt


@@ -1,13 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@@ -1,256 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
import requests
import six
from fuel_upgrade import errors
from fuel_upgrade import utils
from fuel_upgrade.clients import NailgunClient
from fuel_upgrade.clients import OSTFClient
logger = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseBeforeUpgradeChecker(object):
"""Base class for before ugprade checkers"""
@abc.abstractmethod
def check(self):
"""Run check"""
class CheckNoRunningTasks(BaseBeforeUpgradeChecker):
"""Checks that there is no running tasks
:param config: config object where property endpoints
returns dict with nailgun host and port
"""
def __init__(self, context):
nailgun = context.config.endpoints['nginx_nailgun']
self.nailgun_client = NailgunClient(**nailgun)
def check(self):
"""Checks in nailgun that there are no running tasks"""
logger.info('Check nailgun tasks')
try:
tasks = self.nailgun_client.get_tasks()
except requests.ConnectionError:
raise errors.NailgunIsNotRunningError(
'Cannot connect to rest api service')
logger.debug('Nailgun tasks %s', tasks)
running_tasks = filter(
lambda t: t['status'] == 'running', tasks)
if running_tasks:
tasks_msg = ['id={0} cluster={1} name={2}'.format(
t.get('id'),
t.get('cluster'),
t.get('name')) for t in running_tasks]
error_msg = 'Cannot run upgrade, tasks are running: {0}'.format(
' '.join(tasks_msg))
raise errors.CannotRunUpgrade(error_msg)
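# Editorial sketch, not part of the original module: a checker only needs a
# context object that exposes ``config``. CheckerManager builds it roughly
# like this and then simply calls check(), which raises CannotRunUpgrade if
# nailgun reports a running task:
#
#     context = AttrDict(config=config, required_free_spaces=[])
#     CheckNoRunningTasks(context).check()
#
# AttrDict lives in fuel_upgrade.checker_manager; ``config`` is assumed to be
# the object returned by fuel_upgrade.config.build_config().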
class CheckNoRunningOstf(BaseBeforeUpgradeChecker):
"""Checks that there's no running OSTF tasks.
:param context: a context object with config and required space info
"""
def __init__(self, context):
self.ostf = OSTFClient(**context.config.endpoints['ostf'])
def check(self):
logger.info('Check OSTF tasks')
try:
tasks = self.ostf.get_tasks()
except requests.ConnectionError:
raise errors.OstfIsNotRunningError(
'Cannot connect to OSTF service.')
logger.debug('OSTF tasks: %s', tasks)
running_tasks = filter(
lambda t: t['status'] == 'running', tasks)
if running_tasks:
raise errors.CannotRunUpgrade(
'Cannot run upgrade since there are OSTF running tasks.')
class CheckFreeSpace(BaseBeforeUpgradeChecker):
"""Checks that there is enough free space on devices
:param context: a context object with config and required space info
"""
def __init__(self, context):
self.required_spaces = context.required_free_spaces
def check(self):
"""Check free space"""
logger.info('Check if devices have enough free space')
logger.debug(
'Required spaces from upgrade '
'engines %s', self.required_spaces)
mount_points = self.space_required_for_mount_points()
logger.debug(
'Mount points and sum of required spaces '
'%s', mount_points)
error_mount_point = self.list_of_error_mount_points(mount_points)
logger.debug(
"Mount points which don't have "
"enough free space %s", error_mount_point)
self.check_result(error_mount_point)
def space_required_for_mount_points(self):
"""Generates list of mount points with sum of required space
:returns: dict where key is mount point
and value is required free space
"""
sum_of_spaces = {}
for required_space in self.required_spaces:
if not required_space:
continue
for path, size in sorted(required_space.items()):
mount_path = utils.find_mount_point(path)
sum_of_spaces.setdefault(mount_path, 0)
sum_of_spaces[mount_path] += size
return sum_of_spaces
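# Editorial sketch, not part of the original module: given required_spaces
# like [{'/var/lib/docker': 5000}, {'/var/www/nailgun': 2000}] with both
# paths resolving to the same mount point '/', this method returns
# {'/': 7000}, i.e. the sizes (in megabytes) are summed per mount point.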
def list_of_error_mount_points(self, mount_points):
"""Returns list of devices which don't have enough free space
:param list mount_points: elements are dicts
where key is path to mount point
and value is required space for
this mount point
:returns: list where elements are dicts
{'path': 'path to mount point',
'size': 'required free space'
'available': 'available free space'}
"""
free_space_error_devices = []
for path, required_size in sorted(mount_points.items()):
free_space = utils.calculate_free_space(path)
if free_space < required_size:
free_space_error_devices.append({
'path': path,
'size': required_size,
'available': free_space})
return free_space_error_devices
def check_result(self, error_devices):
"""Checks if there are devices with not enough free space for upgrades
:raises: NotEnoughFreeSpaceOnDeviceError
"""
if not error_devices:
return
devices_msg = [
'device {0} ('
'required {1}MB, '
'available {2}MB, '
'not enough {3}MB'
')'.format(
d['path'],
d['size'],
d['available'],
d['size'] - d['available']) for d in error_devices]
err_msg = 'Not enough free space on device: {0}'.format(
', '.join(devices_msg))
raise errors.NotEnoughFreeSpaceOnDeviceError(err_msg)
class CheckUpgradeVersions(BaseBeforeUpgradeChecker):
"""Checks that it is possible to upgarde from current version to new one.
:param config: config object
"""
def __init__(self, context):
config = context.config
#: version of fuel which user wants to upgrade from
self.from_version = config.from_version
#: version of fuel which user wants to upgrade to
self.to_version = config.new_version
def check(self):
"""Compares two versions previous and new
:raises: WrongVersionError
"""
logger.info('Check upgrade versions')
result = utils.compare_version(self.from_version, self.to_version)
err_msg = None
if result == 0:
err_msg = 'Cannot upgrade to the same version of fuel ' \
'{0} -> {1}'.format(
self.from_version, self.to_version)
elif result == -1:
err_msg = 'Cannot upgrade from higher version of fuel ' \
'to lower {0} -> {1}'.format(
self.from_version, self.to_version)
if err_msg:
raise errors.WrongVersionError(err_msg)
class CheckRequiredVersion(BaseBeforeUpgradeChecker):
"""Checks that user's going to upgrade Fuel from the required version.
:param context: a context object with config and required space info
"""
def __init__(self, context):
#: version of fuel which user wants to upgrade from
self.from_version = context.config.from_version
#: a list of versions from which user can upgrade
self.can_upgrade_from = context.config.can_upgrade_from
def check(self):
logger.info('Check required Fuel version')
if self.from_version not in self.can_upgrade_from:
raise errors.WrongVersionError(
'Cannot upgrade from Fuel {0}. You can upgrade only from '
'one of the following versions: {1}'.format(
self.from_version, ', '.join(self.can_upgrade_from)))


@@ -1,111 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from fuel_upgrade.engines.docker_engine import DockerInitializer
from fuel_upgrade.engines.docker_engine import DockerUpgrader
from fuel_upgrade.engines.host_system import HostSystemUpgrader
from fuel_upgrade.engines.openstack import OpenStackUpgrader
from fuel_upgrade.before_upgrade_checker import CheckFreeSpace
from fuel_upgrade.before_upgrade_checker import CheckNoRunningOstf
from fuel_upgrade.before_upgrade_checker import CheckNoRunningTasks
from fuel_upgrade.before_upgrade_checker import CheckRequiredVersion
from fuel_upgrade.before_upgrade_checker import CheckUpgradeVersions
logger = logging.getLogger(__name__)
class AttrDict(dict):
"""Dict as object where keys are object parameters"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class CheckerManager(object):
"""Checker manager
:param list upgraders: list of :class:`BaseUpgrader` implementations
:param config: :class:`Config` object
"""
#: Mapping of checkers to upgrade engines
CHECKERS_MAPPING = {
DockerUpgrader: [
CheckUpgradeVersions,
CheckRequiredVersion,
CheckFreeSpace,
CheckNoRunningTasks,
CheckNoRunningOstf],
OpenStackUpgrader: [
CheckFreeSpace,
CheckNoRunningTasks],
HostSystemUpgrader: [
CheckUpgradeVersions,
CheckFreeSpace],
DockerInitializer: []}
def __init__(self, upgraders, config):
#: list of upgraders
self.upgraders = upgraders
required_free_spaces = [
upgrader.required_free_space
for upgrader in self.upgraders]
#: context which checkers initialized with
self.context = AttrDict(
config=config,
required_free_spaces=required_free_spaces)
def check(self):
"""Runs checks"""
for checker in self._checkers():
logger.debug('Start checker %s...', checker.__class__.__name__)
checker.check()
def _checkers(self):
"""Returns list initialized of checkers
:returns: list of :class:`BaseBeforeUpgradeChecker` objects
"""
checkers_classes = []
for engine, checkers in six.iteritems(self.CHECKERS_MAPPING):
if self._is_engine_enabled(engine):
checkers_classes.extend(checkers)
return [checker(self.context) for checker in set(checkers_classes)]
def _is_engine_enabled(self, engine_class):
"""Checks if engine in the list
:param list engines_list: list of engines
:param engine_class: engine class
:returns: True if engine in the list
False if engine not in the list
"""
engines = filter(
lambda engine: isinstance(engine, engine_class),
self.upgraders)
if engines:
return True
return False


@@ -1,184 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import getpass
import logging
import requests
import sys
from fuel_upgrade.logger import configure_logger
from fuel_upgrade import errors
from fuel_upgrade import messages
from fuel_upgrade import utils
from fuel_upgrade.checker_manager import CheckerManager
from fuel_upgrade.config import build_config
from fuel_upgrade.upgrade import UpgradeManager
from fuel_upgrade.engines.docker_engine import DockerInitializer
from fuel_upgrade.engines.docker_engine import DockerUpgrader
from fuel_upgrade.engines.host_system import HostSystemUpgrader
from fuel_upgrade.engines.openstack import OpenStackUpgrader
from fuel_upgrade.engines.raise_error import RaiseErrorUpgrader
from fuel_upgrade.pre_upgrade_hooks import PreUpgradeHookManager
logger = logging.getLogger(__name__)
#: A dict with supported systems.
#: The key is used for system option in CLI.
SUPPORTED_SYSTEMS = {
'host-system': HostSystemUpgrader,
'docker-init': DockerInitializer,
'docker': DockerUpgrader,
'openstack': OpenStackUpgrader,
'raise-error': RaiseErrorUpgrader,
}
#: A list of tuples of incompatible systems.
#: That means if two of these systems appear in the user input,
#: we show an error because they cannot be used together.
INCOMPATIBLE_SYSTEMS = (
('docker-init', 'docker'),
)
def handle_exception(exc):
logger.exception('%s', exc)
print(messages.header)
# TODO(ikalnitsky): use some kind of map instead of condition stairs
if isinstance(exc, requests.ConnectionError):
print(messages.docker_is_dead)
elif isinstance(exc, errors.UpgradeVerificationError):
print(messages.health_checker_failed)
elif isinstance(exc, errors.NailgunIsNotRunningError):
print(messages.nailgun_is_not_running)
elif isinstance(exc, errors.OstfIsNotRunningError):
print(messages.ostf_is_not_running)
elif isinstance(exc, errors.CommandError):
print(exc)
sys.exit(-1)
def parse_args(args):
"""Parse arguments and return them"""
parser = argparse.ArgumentParser(
description='fuel-upgrade is an upgrade system for fuel-master node')
parser.add_argument(
'systems', choices=SUPPORTED_SYSTEMS.keys(), nargs='+',
help='systems to upgrade')
parser.add_argument(
'--src', required=True, help='path to update file')
parser.add_argument(
'--no-checker', action='store_true',
help='do not check before upgrade')
parser.add_argument(
'--no-rollback', action='store_true',
help='do not rollback in case of errors')
parser.add_argument(
'--password', help="admin user password")
rv = parser.parse_args(args)
# check input systems for compatibility
for incompatible_systems in INCOMPATIBLE_SYSTEMS:
if all(u_system in rv.systems for u_system in incompatible_systems):
parser.error(
'the following systems are incompatible and cannot be '
'used at the same time: "{0}"'.format(
', '.join(incompatible_systems)
)
)
# check input systems have no duplicates
if len(rv.systems) != len(set(rv.systems)):
parser.error(
'the following systems are listed more than once: "{0}"'
.format(', '.join(sorted(utils.get_non_unique(rv.systems))))
)
return rv
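# Editorial sketch, not part of the original module: a typical invocation of
# this CLI (the tarball path is a hypothetical example):
#
#     fuel-upgrade host-system docker --src /var/tmp/fuel-upgrade.tar
#
# parse_args() then returns a namespace with
# systems=['host-system', 'docker'], src='/var/tmp/fuel-upgrade.tar',
# no_checker=False, no_rollback=False and password=None.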
def is_engine_in_list(engines_list, engine_class):
"""Checks if engine in the list
:param list engines_list: list of engines
:param engine_class: engine class
:returns: True if engine in the list
False if engine not in the list
"""
engines = filter(
lambda engine: isinstance(engine, engine_class),
engines_list)
if engines:
return True
return False
def run_upgrade(args):
"""Run upgrade on master node
:param args: argparse object
"""
# Get admin password
if not args.password:
args.password = getpass.getpass('Admin Password: ')
# check the password once again
if not args.password:
raise errors.CommandError(messages.no_password_provided)
# Initialize config
config = build_config(args.src, args.password)
logger.debug('Configuration data: %s', config)
# Initialize upgrade engines
upgraders_to_use = [
SUPPORTED_SYSTEMS[system](config)
for system in args.systems]
# Initialize checkers
if not args.no_checker:
checker_manager = CheckerManager(upgraders_to_use, config)
checker_manager.check()
# Initialize pre upgrade hook manager
hook_manager = PreUpgradeHookManager(upgraders_to_use, config)
hook_manager.run()
# Initialize upgrade manager with engines and checkers
upgrade_manager = UpgradeManager(
upgraders_to_use, config, args.no_rollback)
upgrade_manager.run()
def main():
"""Entry point"""
configure_logger('/var/log/fuel_upgrade.log')
try:
run_upgrade(parse_args(sys.argv[1:]))
except Exception as exc:
handle_exception(exc)


@@ -1,20 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuel_upgrade.clients.keystone_client import KeystoneClient
from fuel_upgrade.clients.nailgun_client import NailgunClient
from fuel_upgrade.clients.ostf_client import OSTFClient
from fuel_upgrade.clients.supervisor_client import SupervisorClient


@@ -1,100 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import socket
import requests
from fuel_upgrade import errors
from fuel_upgrade import utils
logger = logging.getLogger(__name__)
class KeystoneClient(object):
"""Simple keystone authentification client
:param str username: is user name
:param str password: is user password
:param str auth_url: authentification url
:param str tenant_name: tenant name
"""
def __init__(self, username=None, password=None,
auth_url=None, tenant_name=None):
self.auth_url = auth_url
self.tenant_name = tenant_name
self.username = username
self.password = password
@property
def request(self):
"""Creates authentification session if required
:returns: :class:`requests.Session` object
"""
session = requests.Session()
# NOTE(ikalnitsky):
# After starting a new Keystone container, the first attempt to
# get a token fails. Unfortunately, the logs keep silent and say
# nothing. As a workaround, we can just increase retries number.
#
# See https://bugs.launchpad.net/fuel/+bug/1399144 for details.
try:
token = utils.wait_for_true(self.get_token, timeout=10)
session.headers.update({'X-Auth-Token': token})
except errors.TimeoutError:
logger.exception(
'Cannot retrieve an auth token - an unauthenticated '
'request will be performed.')
return session
def get_token(self):
"""Retrieves auth token from keystone
:returns: authentication token or None in case of error
NOTE(eli): 5.0.x versions of fuel don't have
keystone and the fuel access control feature;
as a result this client should work both with and
without authentication. To do this, we try to
retrieve a token and, if that fails, we don't
use authentication
"""
try:
resp = requests.post(
self.auth_url,
headers={'content-type': 'application/json'},
data=json.dumps({
'auth': {
'tenantName': self.tenant_name,
'passwordCredentials': {
'username': self.username,
'password': self.password}}})).json()
return (isinstance(resp, dict) and
resp.get('access', {}).get('token', {}).get('id'))
except (ValueError,
socket.timeout,
requests.exceptions.RequestException) as exc:
logger.debug('Cannot authenticate in keystone: %s', exc)
return None
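# Editorial sketch, not part of the original module: the client is normally
# built from the keystone_credentials dict assembled in
# fuel_upgrade.config.get_endpoints(); the host below is an example value:
#
#     client = KeystoneClient(
#         username='admin', password='secret',
#         auth_url='http://10.20.0.2:5000/v2.0/tokens', tenant_name='admin')
#     session = client.request   # requests.Session, X-Auth-Token set if
#                                # a token could be obtained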


@@ -1,153 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from fuel_upgrade.clients import KeystoneClient
from fuel_upgrade.utils import http_retry
class NailgunClient(object):
"""NailgunClient is a simple wrapper around Nailgun API.
:param str host: nailgun's host address
:param (str|int) port: nailgun's port number
:param dict keystone_credentials: keystone credentials where
`username` is user name
`password` is user password
`auth_url` is authentication url
`tenant_name` tenant name
"""
api_url = 'http://{host}:{port}/api/v1'
def __init__(self, host=None, port=None, keystone_credentials={}):
#: a url to nailgun's REST API service
self.api_url = self.api_url.format(host=host, port=port)
#: keystone credentials for authentication
self.keystone_client = KeystoneClient(**keystone_credentials)
@http_retry(status_codes=[500, 502])
def get_releases(self):
"""Returns a list with all releases."""
r = self.request.get(
'{api_url}/releases/'.format(api_url=self.api_url))
if r.status_code not in (200, ):
r.raise_for_status()
return r.json()
@http_retry(status_codes=[500, 502])
def create_release(self, release):
"""Add a new release to nailgun database.
:param release: a given release information, as dict
"""
r = self.request.post(
'{api_url}/releases/'.format(api_url=self.api_url),
data=json.dumps(release))
if r.status_code not in (201, ):
r.raise_for_status()
return r.json()
@http_retry(status_codes=[500, 502])
def remove_release(self, release_id):
"""Remove release from Nailgun with a given ID.
:param release_id: a release id to be removed, as int
"""
r = self.request.delete(
'{api_url}/releases/{id}/'.format(
api_url=self.api_url,
id=release_id))
if r.status_code not in (200, 204, ):
r.raise_for_status()
# generally, the delete request should return 204 No Content
# so we don't want to parse the response as json
return r.text
@http_retry(status_codes=[500, 502])
def create_notification(self, notification):
"""Add a new notification to nailgun database.
:param notification: a given notification information, as dict
"""
r = self.request.post(
'{api_url}/notifications/'.format(api_url=self.api_url),
data=json.dumps(notification))
if r.status_code not in (201, ):
r.raise_for_status()
return r.json()
@http_retry(status_codes=[500, 502])
def remove_notification(self, notification_id):
"""Remove notification from Nailgun with a given ID.
:param notification_id: a notification id to be removed, as int
"""
r = self.request.delete(
'{api_url}/notifications/{id}/'.format(
api_url=self.api_url,
id=notification_id))
if r.status_code not in (200, 204):
r.raise_for_status()
# generally, the delete request should return 204 No Content
# so we don't want to parse the response as json
return r.text
@http_retry(status_codes=[500, 502])
def get_tasks(self):
"""Retrieve list of tasks from nailgun
:returns: list of tasks
"""
r = self.request.get('{api_url}/tasks'.format(api_url=self.api_url))
if r.status_code not in (200, ):
r.raise_for_status()
return r.json()
@property
def request(self):
"""Creates authentification session if required
:returns: :class:`requests.Session` object
"""
return self.keystone_client.request
@http_retry(status_codes=[500, 502])
def put_deployment_tasks(self, release, tasks):
"""Update deployment tasks for certain release
:param release: release as dict
:param tasks: deployment tasks as lists of dicts
"""
r = self.request.put(
'{api_url}/releases/{release_id}/deployment_tasks'.format(
api_url=self.api_url, release_id=release['id']),
data=json.dumps(tasks))
if r.status_code not in (200, ):
r.raise_for_status()
return r.json()
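# Editorial sketch, not part of the original module: CheckNoRunningTasks in
# fuel_upgrade.before_upgrade_checker constructs this client from the
# 'nginx_nailgun' endpoint, roughly:
#
#     client = NailgunClient(**config.endpoints['nginx_nailgun'])
#     tasks = client.get_tasks()
#
# Every helper raises requests.HTTPError via raise_for_status() on an
# unexpected status code and retries on 500/502 thanks to http_retry.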


@@ -1,69 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuel_upgrade.clients import KeystoneClient
class OSTFClient(object):
"""OSTFClient is a simple wrapper around OSTF API.
:param str host: ostf's host address
:param (str|int) port: ostf's port number
:param dict keystone_credentials: keystone credentials where
`username` is user name
`password` is user password
`auth_url` is authentication url
`tenant_name` tenant name
"""
api_url = 'http://{host}:{port}'
def __init__(self, host=None, port=None, keystone_credentials={}):
#: a url to OSTF's REST API service
self.api_url = self.api_url.format(host=host, port=port)
#: keystone credentials for authentication
self.keystone_client = KeystoneClient(**keystone_credentials)
@property
def request(self):
"""Creates authentification session if required
:returns: :class:`requests.Session` object
"""
return self.keystone_client.request
def get(self, path):
"""Retrieve list of tasks from nailgun
:returns: list of tasks
"""
result = self.request.get('{api_url}{path}'.format(
api_url=self.api_url, path=path))
return result
def get_tasks(self):
"""Retrieve list of tasks from OSTF
:returns: list of tasks
"""
r = self.request.get(
'{api_url}/v1/testruns'.format(api_url=self.api_url))
if r.status_code not in (200, ):
r.raise_for_status()
return r.json()


@@ -1,204 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import logging
import os
import socket
import xmlrpclib
from xmlrpclib import Fault
from fuel_upgrade import utils
logger = logging.getLogger(__name__)
class UnixSocketHTTPConnection(httplib.HTTPConnection):
def connect(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(self.host)
class UnixSocketHTTP(httplib.HTTP):
_connection_class = UnixSocketHTTPConnection
class UnixSocketTransport(xmlrpclib.Transport, object):
"""Http transport for UNIX socket"""
def __init__(self, socket_path):
"""Create object
:param socket_path: path to the socket
"""
self.socket_path = socket_path
super(UnixSocketTransport, self).__init__()
def make_connection(self, host):
return UnixSocketHTTP(self.socket_path)
class SupervisorClient(object):
"""RPC Client for supervisor"""
templates_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'templates'))
def __init__(self, config, from_version):
"""Create supervisor client
:param config: config object
"""
self.config = config
self.from_version = from_version
self.supervisor_template_path = os.path.join(
self.templates_dir, 'supervisor.conf')
self.supervisor_common_template_path = os.path.join(
self.templates_dir, 'common.conf')
self.supervisor_config_dir = self.get_config_path(
self.config.new_version)
self.previous_supervisor_config_path = self.get_config_path(
self.from_version)
utils.create_dir_if_not_exists(self.supervisor_config_dir)
self.supervisor = self.get_supervisor()
def get_config_path(self, version):
"""Creates path to supervisor config with specific version
:param version: version of config
:returns: path to supervisor config
"""
return os.path.join(
self.config.supervisor['configs_prefix'], version)
def get_supervisor(self):
"""Returns supervisor rpc object"""
server = xmlrpclib.Server(
'http://unused_variable',
transport=UnixSocketTransport(
self.config.supervisor['endpoint']))
return server.supervisor
def switch_to_new_configs(self):
"""Switch to new version of configs for supervisor
Creates symlink on special directory.
"""
current_cfg_path = self.config.supervisor['current_configs_prefix']
utils.symlink(self.supervisor_config_dir, current_cfg_path)
self.supervisor.reloadConfig()
def switch_to_previous_configs(self):
"""Switch to previous version of fuel"""
current_cfg_path = self.config.supervisor['current_configs_prefix']
utils.symlink(
self.previous_supervisor_config_path,
current_cfg_path)
self.supervisor.reloadConfig()
def start_all_services(self):
"""Stops all processes"""
logger.info('Start all services')
self.supervisor.startAllProcesses()
def stop_all_services(self):
"""Stops all processes"""
logger.info('Stop all services')
self.supervisor.stopAllProcesses()
def restart_and_wait(self):
"""Restart supervisor and wait untill it will be available"""
logger.info('Restart supervisor')
self.supervisor.restart()
all_processes = utils.wait_for_true(
lambda: self.get_all_processes_safely() is not None,
timeout=self.config.supervisor['restart_timeout'])
logger.debug('List of supervisor processes %s', all_processes)
def start(self, service_name):
"""Start the process under supervisor
:param str service_name: name of supervisor's process
"""
logger.debug('Start supervisor process %s', service_name)
self.supervisor.startProcess(service_name)
def get_all_processes_safely(self):
"""Retrieves list of processes from supervisor
Doesn't raise errors if there is no running supervisor.
:returns: list of processes in case of success or
None in case of error
"""
try:
return self.supervisor.getAllProcessInfo()
except (IOError, Fault):
return None
def generate_configs(self, services):
"""Generates supervisor configs for services
:param services: list of dicts where
`config_name` - is the name of the config
`service_name` - is the name of the service
`command` - command to run
`autostart` - run the service on supervisor start
"""
logger.info(
'Generate supervisor configs for services %s', services)
for service in services:
self.generate_config(
service['config_name'],
service['service_name'],
service['command'],
autostart=service['autostart'])
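# Editorial sketch, not part of the original module: DockerUpgrader feeds
# this method dicts shaped like the following (the values are illustrative):
#
#     [{'config_name': 'nailgun',
#       'service_name': 'docker-nailgun',
#       'command': 'docker start -a fuel-core-8.0-nailgun',
#       'autostart': True}]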
def generate_config(self, config_name, service_name,
command, autostart=True):
"""Generates config for each service
:param str config_name: is the name of the config
:param str service_name: is the name of the service
:param str command: command to run
:param bool autostart: run the service on supervisor start
"""
config_path = os.path.join(
self.supervisor_config_dir,
'{0}.conf'.format(config_name))
log_path = '/var/log/{0}.log'.format(service_name)
params = {
'service_name': service_name,
'command': command,
'log_path': log_path,
'autostart': 'true' if autostart else 'false'}
utils.render_template_to_file(
self.supervisor_template_path, config_path, params)
def remove_new_configs(self):
"""Remove new version of configs from the filesystem"""
utils.remove(self.supervisor_config_dir)


@@ -1,621 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Module with config generation logic
Why a python based config?
* the first versions used a yaml based config, which
over time became really hard to support, because
a yaml config makes it impossible to share
values between parameters
* we also decided not to use any template language,
because that means learning yet another sublanguage,
and it's hard to nest variables more than one level deep
"""
import glob
import logging
from os.path import basename
from os.path import exists
from os.path import join
import six
import yaml
from fuel_upgrade.utils import normversion
logger = logging.getLogger(__name__)
class Config(object):
"""Config object, allow to call first level keys as object attributes.
:param dict config_dict: config dict
"""
def __init__(self, config_dict):
# NOTE(eli): initialize _config
# with __setattr__ to prevent maximum
# recursion depth exceeded error
super(Config, self).__setattr__('_config', config_dict)
def __getattr__(self, name):
return self._config[name]
def __setattr__(self, name, value):
self._config[name] = value
def __repr__(self):
return str(self._config)
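# Editorial sketch, not part of the original module: first level keys are
# exposed as attributes, so code elsewhere can write, for example:
#
#     cfg = Config({'new_version': '8.0'})
#     cfg.new_version            # -> '8.0'
#     cfg.from_version = '7.0'   # same as cfg._config['from_version'] = '7.0'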
def read_yaml_config(path):
"""Reads yaml config
:param str path: path to config
:returns: deserialized object
"""
with open(path, 'r') as f:
return yaml.load(f)
def get_version_from_config(path):
"""Retrieves version from config file
:param str path: path to config
"""
return read_yaml_config(path)['VERSION']['release']
def build_config(update_path, admin_password):
"""Builds config
:param str update_path: path to upgrade
:param str admin_password: admin user password
:returns: :class:`Config` object
"""
return Config(config(update_path, admin_password))
def from_fuel_version(current_version_path, from_version_path):
"""Get version of fuel which user run upgrade from"""
# NOTE(eli): If this file exists, then user
# already ran this upgrade script which was
# for some reasons interrupted
if exists(from_version_path):
from_version = get_version_from_config(from_version_path)
logger.debug('Retrieve version from %s, '
'version is %s', from_version_path, from_version)
return from_version
return get_version_from_config(current_version_path)
def get_endpoints(astute_config, admin_password):
"""Returns services endpoints
:returns: dict where key is the name of an endpoint
value is dict with host, port and authentication
information
"""
master_ip = astute_config['ADMIN_NETWORK']['ipaddress']
# Set default user/password because in
# 5.0.X releases we didn't have this data
# in astute file
fuel_access = astute_config.get(
'FUEL_ACCESS', {'user': 'admin'})
rabbitmq_access = astute_config.get(
'astute', {'user': 'naily', 'password': 'naily'})
rabbitmq_mcollective_access = astute_config.get(
'mcollective', {'user': 'mcollective', 'password': 'marionette'})
keystone_credentials = {
'username': fuel_access['user'],
'password': admin_password,
'auth_url': 'http://{0}:5000/v2.0/tokens'.format(master_ip),
'tenant_name': 'admin'}
return {
'nginx_nailgun': {
'port': 8000,
'host': '0.0.0.0',
'keystone_credentials': keystone_credentials},
'nginx_repo': {
'port': 8080,
'host': '0.0.0.0'},
'ostf': {
'port': 8777,
'host': '127.0.0.1',
'keystone_credentials': keystone_credentials},
'cobbler': {
'port': 80,
'host': '127.0.0.1'},
'postgres': {
'port': 5432,
'host': '127.0.0.1'},
'rsync': {
'port': 873,
'host': '127.0.0.1'},
'rsyslog': {
'port': 514,
'host': '127.0.0.1'},
'keystone': {
'port': 5000,
'host': '127.0.0.1'},
'keystone_admin': {
'port': 35357,
'host': '127.0.0.1'},
'rabbitmq': {
'user': rabbitmq_access['user'],
'password': rabbitmq_access['password'],
'port': 15672,
'host': '127.0.0.1'},
'rabbitmq_mcollective': {
'port': 15672,
'host': '127.0.0.1',
'user': rabbitmq_mcollective_access['user'],
'password': rabbitmq_mcollective_access['password']}}
def get_host_system(update_path, new_version):
"""Returns host-system settings.
The function was designed to build a dictionary with settings for
host-system upgrader. Why can't we just use static settings? Because
we need to build paths to the latest centos repos (the tarball could contain
a few openstack releases, so we need to pick the right centos repo) and
to the latest puppet manifests.
:param update_path: path to update folder
:param new_version: fuel version to install
:returns: a host-system upgrade settings
"""
openstack_versions = glob.glob(
join(update_path, 'repos', '[0-9.-]*{0}'.format(new_version)))
openstack_versions = [basename(v) for v in openstack_versions]
openstack_version = sorted(openstack_versions, reverse=True)[0]
return {
'install_packages': [
'fuel-{0}'.format(normversion(new_version)),
],
'manifest_path': join(
'/etc/puppet', openstack_version,
'modules/nailgun/examples/host-upgrade.pp'),
'puppet_modules_path': join(
'/etc/puppet', openstack_version, 'modules'),
'repo_config_path': join(
'/etc/yum.repos.d',
'{0}_nailgun.repo'.format(new_version)),
'repo_aux_config_path': '/etc/yum.repos.d/auxiliary.repo',
'repos': {
'src': join(update_path, 'repos', '[0-9.-]*'),
'dst': join('/var', 'www', 'nailgun')},
'repo_master': join(
'file:/var/www/nailgun', openstack_version, 'centos/x86_64'),
}
def config(update_path, admin_password):
"""Generates configuration data for upgrade
:param str update_path: path to upgrade
:param str admin_password: admin user password
:returns: huge dict with all parameters
required for the upgrade
"""
fuel_config_path = '/etc/fuel/'
can_upgrade_from = ['7.0']
current_fuel_version_path = '/etc/fuel/version.yaml'
new_upgrade_version_path = join(update_path, 'config/version.yaml')
current_version = get_version_from_config(current_fuel_version_path)
new_version = get_version_from_config(new_upgrade_version_path)
new_version_path = join('/etc/fuel', new_version, 'version.yaml')
version_files_mask = '/var/lib/fuel_upgrade/*/version.yaml'
working_directory = join('/var/lib/fuel_upgrade', new_version)
from_version_path = join(working_directory, 'version.yaml')
from_version = from_fuel_version(
current_fuel_version_path, from_version_path)
previous_version_path = join('/etc/fuel', from_version, 'version.yaml')
container_data_path = join('/var/lib/fuel/container_data', new_version)
astute_keys_path = join(working_directory, 'astute')
cobbler_container_config_path = '/var/lib/cobbler/config'
cobbler_config_path = join(working_directory, 'cobbler_configs')
cobbler_config_files_for_verifier = join(
cobbler_config_path, 'config/systems.d/*.json')
# Keep only 3 latest database files
keep_db_backups_count = 3
db_backup_timeout = 120
db_backup_interval = 4
current_fuel_astute_path = '/etc/fuel/astute.yaml'
astute = read_yaml_config(current_fuel_astute_path)
# unix pattern that is used to match deployment tasks stored in library
deployment_tasks_file_pattern = '*tasks.yaml'
supervisor = {
'configs_prefix': '/etc/supervisord.d/',
'current_configs_prefix': '/etc/supervisord.d/current',
'endpoint': '/var/run/supervisor.sock',
'restart_timeout': 600}
checker = {
'timeout': 900,
'interval': 3}
endpoints = get_endpoints(astute, admin_password)
# Configuration data for docker client
docker = {
'url': 'unix://var/run/docker.sock',
'api_version': '1.10',
'http_timeout': 160,
'stop_container_timeout': 20,
'dir': '/var/lib/docker'}
# Docker image description section
image_prefix = 'fuel/'
# Path to the Docker images to be loaded
images = '/var/www/nailgun/docker/images/fuel-images.tar'
# Docker containers description section
container_prefix = 'fuel-core-'
master_ip = astute['ADMIN_NETWORK']['ipaddress']
volumes = {
'volume_logs': [
('/var/log/docker-logs', {'bind': '/var/log', 'ro': False})],
'volume_repos': [
('/var/www/nailgun', {'bind': '/var/www/nailgun', 'ro': False}),
('/etc/yum.repos.d', {'bind': '/etc/yum.repos.d', 'ro': False})],
'volume_ssh_keys': [
('/root/.ssh', {'bind': '/root/.ssh', 'ro': False})],
'volume_fuel_configs': [
('/etc/fuel', {'bind': '/etc/fuel', 'ro': False})],
'volume_upgrade_directory': [
(working_directory, {'bind': '/tmp/upgrade', 'ro': True})],
'volume_dump': [
('/dump', {'bind': '/var/www/nailgun/dump', 'ro': False})],
'volume_puppet_manifests': [
('/etc/puppet', {'bind': '/etc/puppet', 'ro': True})],
'volume_keys': [
('/var/lib/fuel/keys', {'bind': '/var/lib/fuel/keys',
'ro': False})],
'volume_fuel_ibp': [
('/var/lib/fuel/ibp', {'bind': '/var/lib/fuel/ipb',
'ro': False})],
'volume_postgres_data': [
('{0}/postgres'.format(container_data_path), {
'bind': '/var/lib/pgsql',
'ro': False})],
'volume_cobbler_data': [
('{0}/cobbler'.format(container_data_path), {
'bind': '/var/lib/cobbler',
'ro': False})],
}
containers = [
{'id': 'nailgun',
'supervisor_config': True,
'from_image': 'nailgun',
'privileged': True,
'network_mode': 'host',
'port_bindings': {
'8001': [
('127.0.0.1', 8001),
(master_ip, 8001)]},
'ports': [8001],
'links': [
{'id': 'postgres', 'alias': 'db'},
{'id': 'rabbitmq', 'alias': 'rabbitmq'}],
'binds': [
'volume_logs',
'volume_repos',
'volume_ssh_keys',
'volume_fuel_configs'],
'volumes': [
'/usr/share/nailgun/static']},
{'id': 'astute',
'supervisor_config': True,
'from_image': 'astute',
'network_mode': 'host',
'links': [
{'id': 'rabbitmq', 'alias': 'rabbitmq'}],
'binds': [
'volume_logs',
'volume_repos',
'volume_ssh_keys',
'volume_fuel_configs',
'volume_upgrade_directory',
'volume_keys']},
{'id': 'cobbler',
'supervisor_config': True,
'after_container_creation_command': (
"bash -c 'cp -rn /tmp/upgrade/cobbler_configs/config/* "
"/var/lib/cobbler/config/'"),
'from_image': 'cobbler',
'privileged': True,
'network_mode': 'host',
'port_bindings': {
'80': ('0.0.0.0', 80),
'443': ('0.0.0.0', 443),
'53/udp': [
('127.0.0.1', 53),
(master_ip, 53)],
'67/udp': [
('127.0.0.1', 67),
(master_ip, 67)],
'69/udp': [
('127.0.0.1', 69),
(master_ip, 69)]},
'ports': [
[53, 'udp'],
[53, 'tcp'],
67,
[69, 'udp'],
[69, 'tcp'],
80,
443],
'binds': [
'volume_logs',
'volume_repos',
'volume_ssh_keys',
'volume_fuel_configs',
'volume_cobbler_data',
'volume_upgrade_directory']},
{'id': 'mcollective',
'supervisor_config': True,
'from_image': 'mcollective',
'privileged': True,
'network_mode': 'host',
'binds': [
'volume_logs',
'volume_repos',
'volume_ssh_keys',
'volume_dump',
'volume_fuel_configs',
'volume_fuel_ibp',
'volume_keys']},
{'id': 'rsync',
'supervisor_config': True,
'from_image': 'rsync',
'network_mode': 'host',
'port_bindings': {
'873': [
('127.0.0.1', 873),
(master_ip, 873)]},
'ports': [873],
'binds': [
'volume_logs',
'volume_repos',
'volume_fuel_configs',
'volume_puppet_manifests']},
{'id': 'rsyslog',
'supervisor_config': True,
'from_image': 'rsyslog',
'network_mode': 'host',
'port_bindings': {
'514': [
('127.0.0.1', 514),
(master_ip, 514)],
'514/udp': [
('127.0.0.1', 514),
(master_ip, 514)],
'25150': [
('127.0.0.1', 25150),
(master_ip, 25150)]},
'ports': [[514, 'udp'], 514],
'binds': [
'volume_logs',
'volume_repos',
'volume_fuel_configs']},
{'id': 'keystone',
'supervisor_config': True,
'from_image': 'keystone',
'network_mode': 'host',
'port_bindings': {
'5000': ('0.0.0.0', 5000),
'35357': ('0.0.0.0', 35357)},
'ports': [5000, 35357],
'links': [
{'id': 'postgres', 'alias': 'postgres'}],
'binds': [
'volume_logs',
'volume_repos',
'volume_fuel_configs']},
{'id': 'nginx',
'supervisor_config': True,
'from_image': 'nginx',
'network_mode': 'host',
'port_bindings': {
'8000': ('0.0.0.0', 8000),
'8080': ('0.0.0.0', 8080)},
'ports': [8000, 8080],
'links': [
{'id': 'nailgun', 'alias': 'nailgun'},
{'id': 'ostf', 'alias': 'ostf'}],
'binds': [
'volume_logs',
'volume_repos',
'volume_dump',
'volume_fuel_configs'],
'volumes_from': ['nailgun']},
{'id': 'rabbitmq',
'supervisor_config': True,
'from_image': 'rabbitmq',
'network_mode': 'host',
'port_bindings': {
'4369': [
('127.0.0.1', 4369),
(master_ip, 4369)],
'5672': [
('127.0.0.1', 5672),
(master_ip, 5672)],
'15672': [
('127.0.0.1', 15672),
(master_ip, 15672)],
'61613': [
('127.0.0.1', 61613),
(master_ip, 61613)]},
'ports': [5672, 4369, 15672, 61613],
'binds': [
'volume_logs',
'volume_repos',
'volume_fuel_configs']},
{'id': 'ostf',
'supervisor_config': True,
'from_image': 'ostf',
'network_mode': 'host',
'port_bindings': {
'8777': [
('127.0.0.1', 8777),
(master_ip, 8777)]},
'ports': [8777],
'links': [
{'id': 'postgres', 'alias': 'db'},
{'id': 'rabbitmq', 'alias': 'rabbitmq'}],
'binds': [
'volume_logs',
'volume_repos',
'volume_ssh_keys',
'volume_fuel_configs']},
{'id': 'postgres',
'after_container_creation_command': (
"su postgres -c ""\"psql -f /tmp/upgrade/pg_dump_all.sql "
"postgres\""),
'supervisor_config': True,
'from_image': 'postgres',
'network_mode': 'host',
'port_bindings': {
'5432': [
('127.0.0.1', 5432),
(master_ip, 5432)]},
'ports': [5432],
'binds': [
'volume_logs',
'volume_repos',
'volume_fuel_configs',
'volume_postgres_data',
'volume_upgrade_directory']}]
# Since we dropped fuel storage containers we should provide an
# alternative DRY mechanism for mounting volumes directly into
# containers. So the code below does that job and unfolds a container's
# abstract declarative "binds" format into docker-py's "binds"
# format.
for container in containers:
binds = {}
for volume in container.get('binds', []):
binds.update(volumes[volume])
container['binds'] = binds
# unfortunately, docker-py has bad design and we must add to
# containers' "volumes" list those folders that will be mounted
# into container
if 'volumes' not in container:
container['volumes'] = []
for _, volume in six.iteritems(binds):
container['volumes'].append(volume['bind'])
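# Editorial sketch, not part of the original module: for a container with
# 'binds': ['volume_logs'] the loop above produces
# binds == {'/var/log/docker-logs': {'bind': '/var/log', 'ro': False}}
# and appends '/var/log' to that container's 'volumes' list.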
# Openstack Upgrader settings. Please note, that "[0-9.-]*" is
# a glob pattern for matching our os versions
openstack = {
'releases': join(update_path, 'releases', '[0-9.-]*.yaml'),
'metadata': join(update_path, 'releases', 'metadata.yaml'),
'puppets': {
'src': join(update_path, 'puppet', '[0-9.-]*'),
'dst': join('/etc', 'puppet')},
'release_versions': {
'src': join(update_path, 'release_versions', '*.yaml'),
'dst': '/etc/fuel/release_versions'}}
# Config for the host system upgrade engine
host_system = get_host_system(update_path, new_version)
# the repos that will be added if the upgrade succeeds
master_node_repos = [
{
'name': 'mos{0}-updates'.format(new_version),
'baseurl': 'http://mirror.fuel-infra.org/mos-repos/'
'centos/mos{0}-centos6-fuel/updates/'
'x86_64/'.format(new_version),
'gpgcheck': 0,
'skip_if_unavailable': 1,
},
{
'name': 'mos{0}-security'.format(new_version),
'baseurl': 'http://mirror.fuel-infra.org/mos-repos/'
'centos/mos{0}-centos6-fuel/security/'
'x86_64/'.format(new_version),
'gpgcheck': 0,
'skip_if_unavailable': 1,
}
]
return locals()


@@ -1,15 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@@ -1,56 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class UpgradeEngine(object):
"""Base class for all upgraders.
The main purpose of this class is to declare an interface, which must be
respected by all upgraders.
"""
def __init__(self, config):
"""Extract some base parameters and save it internally."""
self.config = config
@abc.abstractmethod
def upgrade(self):
"""Run upgrade process."""
@abc.abstractmethod
def rollback(self):
"""Rollback all the changes, usually used in case of failed upgrade"""
def backup(self):
"""Perform backup actions"""
return NotImplemented
@abc.abstractproperty
def required_free_space(self):
"""Required free space for upgrade
Must return dict where key is path to directory
and value is required free space in megabytes.
Example:
{
"/var/www/nailgun": 2000,
"/var/lib/docker": 5000,
"/etc/supervisor.d": 10,
}
"""


@@ -1,631 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import logging
import os
import time
from copy import deepcopy
import docker
import requests
from fuel_upgrade.clients import SupervisorClient
from fuel_upgrade.engines.base import UpgradeEngine
from fuel_upgrade.health_checker import FuelUpgradeVerify
from fuel_upgrade import errors
from fuel_upgrade import utils
logger = logging.getLogger(__name__)
class DockerUpgrader(UpgradeEngine):
"""Docker management system for upgrades"""
def __init__(self, *args, **kwargs):
super(DockerUpgrader, self).__init__(*args, **kwargs)
self.working_directory = self.config.working_directory
utils.create_dir_if_not_exists(self.working_directory)
self.docker_client = docker.Client(
base_url=self.config.docker['url'],
version=self.config.docker['api_version'],
timeout=self.config.docker['http_timeout'])
self.new_release_containers = self.make_new_release_containers_list()
self.cobbler_config_path = self.config.cobbler_config_path.format(
working_directory=self.working_directory)
self.upgrade_verifier = FuelUpgradeVerify(self.config)
self.from_version = self.config.from_version
self.supervisor = SupervisorClient(self.config, self.from_version)
def backup(self):
self.save_db()
self.save_cobbler_configs()
def upgrade(self):
"""Method with upgrade logic"""
# Point to new supervisor configs and restart supervisor in
# order to apply them
self.switch_to_new_configs()
self.supervisor.restart_and_wait()
# Stop docker containers (it's safe, since at this time supervisor's
# configs are empty).
self.stop_fuel_containers()
# Upload new docker images and create containers
self.upload_images()
self.create_and_start_new_containers()
# Generate supervisor configs for new containers and restart
# supervisor in order to apply them. Note, supervisor's processes
# will be attached to running docker containers automatically.
self.generate_configs(autostart=True)
self.supervisor.restart_and_wait()
# Verify that all services are up and running
self.upgrade_verifier.verify()
def rollback(self):
"""Method which contains rollback logic"""
self.supervisor.switch_to_previous_configs()
self.supervisor.stop_all_services()
self.stop_fuel_containers()
self.supervisor.restart_and_wait()
self.supervisor.remove_new_configs()
@property
def required_free_space(self):
"""Required free space to run upgrade
* space for docker
* several megabytes for configs
* reserve several megabytes for working directory
where we keep postgresql dump and cobbler configs
:returns: dict where key is path to directory
and value is required free space
"""
return {
self.config.docker['dir']: self._calculate_images_size(),
self.config.supervisor['configs_prefix']: 10,
self.config.fuel_config_path: 10,
self.working_directory: 150}
def _calculate_images_size(self):
return utils.files_size([self.config.images])
def save_db(self):
"""Saves postgresql database into the file"""
logger.debug('Backup database')
pg_dump_path = os.path.join(self.working_directory, 'pg_dump_all.sql')
pg_dump_files = utils.VersionedFile(pg_dump_path)
pg_dump_tmp_path = pg_dump_files.next_file_name()
utils.wait_for_true(
lambda: self.make_pg_dump(pg_dump_tmp_path, pg_dump_path),
timeout=self.config.db_backup_timeout,
interval=self.config.db_backup_interval)
valid_dumps = filter(utils.verify_postgres_dump,
pg_dump_files.sorted_files())
if valid_dumps:
utils.hardlink(valid_dumps[0], pg_dump_path, overwrite=True)
map(utils.remove_if_exists,
valid_dumps[self.config.keep_db_backups_count:])
else:
raise errors.DatabaseDumpError(
'Failed to make database dump, there '
'are no valid database backup '
'files, {0}'.format(pg_dump_path))
def make_pg_dump(self, pg_dump_tmp_path, pg_dump_path):
"""Run postgresql dump in container
:param str pg_dump_tmp_path: path to temporary dump file
:param str pg_dump_path: path to dump which will be restored
in the new container; if this file
exists, it means the user has already
run the upgrade and it failed for
some reason
:returns: True if db was successfully dumped or if dump exists
False if container isn't running or the dump didn't succeed
"""
try:
container_name = self.make_container_name(
'postgres', self.from_version)
self.exec_cmd_in_container(
container_name,
"su postgres -c 'pg_dumpall --clean' > {0}".format(
pg_dump_tmp_path))
except (errors.ExecutedErrorNonZeroExitCode,
errors.CannotFindContainerError) as exc:
utils.remove_if_exists(pg_dump_tmp_path)
if not utils.file_exists(pg_dump_path):
logger.debug('Failed to make database dump %s', exc)
return False
logger.debug(
'Failed to make database dump, '
'the dump from the previous run will be used: %s', exc)
return True
def save_cobbler_configs(self):
"""Copy config files from container"""
container_name = self.make_container_name(
'cobbler', self.from_version)
try:
utils.exec_cmd('docker cp {0}:{1} {2}'.format(
container_name,
self.config.cobbler_container_config_path,
self.cobbler_config_path))
except errors.ExecutedErrorNonZeroExitCode:
utils.rmtree(self.cobbler_config_path)
raise
self.verify_cobbler_configs()
def verify_cobbler_configs(self):
"""Verify that cobbler config directory contains valid data"""
configs = glob.glob(
self.config.cobbler_config_files_for_verifier.format(
cobbler_config_path=self.cobbler_config_path))
# NOTE(eli): cobbler config directory should
# contain at least one file (default.json)
if len(configs) < 1:
raise errors.WrongCobblerConfigsError(
'Cannot find json files in directory {0}'.format(
self.cobbler_config_path))
for config in configs:
if not utils.check_file_is_valid_json(config):
raise errors.WrongCobblerConfigsError(
'Invalid json config {0}'.format(config))
def upload_images(self):
"""Uploads images to docker"""
logger.info('Start image uploading')
if not os.path.exists(self.config.images):
logger.warn('Cannot find docker images "%s"', self.config.images)
return
# NOTE(eli): docker-py binding
# doesn't have equal call for
# image importing which equals to
# `docker load`
utils.exec_cmd('docker load -i "{0}"'.format(self.config.images))
def create_and_start_new_containers(self):
"""Create containers in the right order"""
logger.info('Started containers creation')
graph = self.build_dependencies_graph(self.new_release_containers)
logger.debug('Built dependencies graph %s', graph)
containers_to_creation = utils.topological_sorting(graph)
logger.debug('Resolved creation order %s', containers_to_creation)
for container_id in containers_to_creation:
container = self.container_by_id(container_id)
logger.debug('Start container %s', container)
links = self.get_container_links(container)
created_container = self.create_container(
container['image_name'],
name=container.get('container_name'),
volumes=container.get('volumes'),
ports=container.get('ports'),
detach=False)
volumes_from = []
for container_id in container.get('volumes_from', []):
volume_container = self.container_by_id(container_id)
volumes_from.append(volume_container['container_name'])
# NOTE(ikalnitsky):
# Conflicting options: --net=host can't be used with links.
# Still, we need links at least for resolving containers
# start order.
if container.get('network_mode') == 'host':
links = None
self.start_container(
created_container,
port_bindings=container.get('port_bindings'),
links=links,
volumes_from=volumes_from,
binds=container.get('binds'),
network_mode=container.get('network_mode'),
privileged=container.get('privileged', False))
if container.get('after_container_creation_command'):
self.run_after_container_creation_command(container)
def run_after_container_creation_command(self, container):
"""Runs command in container with retries in case of error
:param container: dict with container information
"""
command = container['after_container_creation_command']
def execute():
self.exec_cmd_in_container(container['container_name'], command)
self.exec_with_retries(
execute, errors.ExecutedErrorNonZeroExitCode,
'', retries=30, interval=4)
def exec_cmd_in_container(self, container_name, cmd):
"""Execute command in running container
:param name: name of the container, like fuel-core-5.1-nailgun
"""
db_container_id = self.container_docker_id(container_name)
utils.exec_cmd("dockerctl shell {0} {1}".format(db_container_id, cmd))
def get_ports(self, container):
"""Docker binding accepts ports as tuple, convert from list to tuple
FIXME(eli): https://github.com/dotcloud/docker-py/blob/
73434476b32136b136e1cdb0913fd123126f2a52/
docker/client.py#L111-L114
"""
ports = container.get('ports')
if ports is None:
return
return [port if not isinstance(port, list) else tuple(port)
for port in ports]
def exec_with_retries(
self, func, exceptions, message, retries=0, interval=0):
# TODO(eli): refactor it and make retries
# as a decorator
intervals = retries * [interval]
for interval in intervals:
try:
return func()
except exceptions as exc:
if str(exc).endswith(message):
time.sleep(interval)
continue
raise
return func()
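# Illustrative sketch only (not part of the original module) of how the TODO
# above could look as a decorator; the name `with_retries` is an assumption
# and the retry semantics simply mirror exec_with_retries. It relies on the
# module-level `time` import already used above.
def with_retries(exceptions, message='', retries=0, interval=0):
    def decorator(func):
        def wrapper(*args, **kwargs):
            for _ in range(retries):
                try:
                    return func(*args, **kwargs)
                except exceptions as exc:
                    # retry only when the error text matches the expected tail
                    if str(exc).endswith(message):
                        time.sleep(interval)
                        continue
                    raise
            # the last attempt is made outside the retry loop, as above
            return func(*args, **kwargs)
        return wrapper
    return decorator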
def get_container_links(self, container):
links = []
if container.get('links'):
for container_link in container.get('links'):
link_container = self.container_by_id(
container_link['id'])
links.append((
link_container['container_name'],
container_link['alias']))
return links
@classmethod
def build_dependencies_graph(cls, containers):
"""Builds graph which based on container's `volumes_from` and `link`
:returns: dict where keys are nodes and
values are lists of dependencies
"""
graph = {}
for container in containers:
graph[container['id']] = sorted(set(
container.get('volumes_from', []) +
[link['id'] for link in container.get('links', [])]))
return graph
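# Illustrative example (not part of the original module): for a hypothetical
# nailgun container that mounts volumes from and links a postgres container,
#
#     build_dependencies_graph([
#         {'id': 'nailgun',
#          'volumes_from': ['postgres'],
#          'links': [{'id': 'postgres', 'alias': 'db'}]},
#         {'id': 'postgres'},
#     ])
#
# yields {'nailgun': ['postgres'], 'postgres': []}, and topological sorting
# of that graph is assumed to put dependencies first: ['postgres', 'nailgun'].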
def generate_configs(self, autostart=True):
"""Generates supervisor configs and saves them to configs directory"""
configs = []
for container in self.new_release_containers:
params = {
'config_name': container['id'],
'service_name': self.make_service_name(container['id']),
'command': 'docker start -a {0}'.format(
container['container_name']),
'autostart': autostart
}
if container['supervisor_config']:
configs.append(params)
self.supervisor.generate_configs(configs)
def make_service_name(self, container_name):
return 'docker-{0}'.format(container_name)
def switch_to_new_configs(self):
"""Switches supervisor to new configs"""
self.supervisor.switch_to_new_configs()
def volumes_dependencies(self, container):
"""Get list of `volumes` dependencies
:param container: dict with information about container
"""
return self.dependencies_names(container, 'volumes_from')
def link_dependencies(self, container):
"""Get list of `link` dependencies
:param container: dict with information about container
"""
return self.dependencies_names(container, 'link')
def dependencies_names(self, container, key):
"""Returns list of dependencies for specified key
:param container: dict with information about container
:param key: key which will be used for dependencies retrieving
:returns: list of container names
"""
names = []
if container.get(key):
for container_id in container.get(key):
container = self.container_by_id(container_id)
names.append(container['container_name'])
return names
def stop_fuel_containers(self):
"""Use docker API to shutdown containers"""
containers = self.docker_client.containers(limit=-1)
containers_to_stop = filter(
lambda c: c['Image'].startswith(self.config.image_prefix),
containers)
for container in containers_to_stop:
logger.debug('Stop container: %s', container)
self.stop_container(container['Id'])
def _get_docker_container_public_ports(self, containers):
"""Returns public ports
:param containers: list of dicts with information about
containers; each dict has a `Ports` list
whose items contain a `PublicPort` field
:returns: list of public ports
"""
container_ports = []
for container in containers:
container_ports.extend(container['Ports'])
return [container_port['PublicPort']
for container_port in container_ports]
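# Illustrative input (not part of the original module): each item is assumed
# to follow the structure docker-py's `containers()` returns for published
# ports, e.g.
#
#     [{'Ports': [{'PrivatePort': 8001, 'PublicPort': 8001, 'Type': 'tcp'}]},
#      {'Ports': [{'PrivatePort': 5432, 'PublicPort': 5432, 'Type': 'tcp'}]}]
#
# for which this helper returns [8001, 5432].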
def stop_container(self, container_id):
"""Stop docker container
:param container_id: container id
"""
logger.debug('Stop container: %s', container_id)
try:
self.docker_client.stop(
container_id, self.config.docker['stop_container_timeout'])
except requests.exceptions.Timeout:
# NOTE(eli): docker uses the SIGTERM signal
# to stop a container; if the timeout
# expires, docker uses SIGKILL instead.
# Here we just want to make sure that
# the container was stopped.
logger.warn(
'Couldn\'t stop container, trying '
'to stop it again: %s', container_id)
self.docker_client.stop(
container_id, self.config.docker['stop_container_timeout'])
def start_container(self, container, **params):
"""Start containers
:param container: container name
:param params: dict of arguments for container starting
"""
logger.debug('Start container "%s": %s', container['Id'], params)
self.docker_client.start(container['Id'], **params)
def create_container(self, image_name, **params):
"""Create container
:param image_name: name of image
:param params: parameters format equals to
create_container call of docker
client
"""
# We have to delete container because we cannot
# have several containers with the same name
container_name = params.get('name')
if container_name is not None:
self._delete_container_if_exist(container_name)
new_params = deepcopy(params)
new_params['ports'] = self.get_ports(new_params)
logger.debug('Create container from image %s: %s',
image_name, new_params)
def func_create():
return self.docker_client.create_container(
image_name,
**new_params)
return self.exec_with_retries(
func_create,
docker.errors.APIError,
"Can't set cookie",
retries=3,
interval=2)
def make_new_release_containers_list(self):
"""Returns list of dicts with information for new containers"""
new_containers = []
for container in self.config.containers:
new_container = deepcopy(container)
new_container['image_name'] = self.make_image_name(
container['from_image'])
new_container['container_name'] = self.make_container_name(
container['id'])
new_containers.append(new_container)
return new_containers
def make_container_name(self, container_id, version=None):
"""Returns container name
:param container_id: container's id
:param version: optional version; defaults to the new release version
:returns: name of the container
"""
if version is None:
version = self.config.new_version
return '{0}{1}-{2}'.format(
self.config.container_prefix, version, container_id)
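# Illustrative result (not part of the original module): with a container
# prefix such as 'fuel-core-' (as in the 'fuel-core-5.1-nailgun' example
# mentioned above) and new_version '6.1', make_container_name('nailgun')
# yields 'fuel-core-6.1-nailgun'; passing version=self.from_version instead
# refers to the container of the currently installed release.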
def make_image_name(self, image_id):
"""Makes full image name
:param image_id: image id from config file
:returns: full name
"""
return '{0}{1}_{2}'.format(
self.config.image_prefix,
image_id,
self.config.new_version)
def container_by_id(self, container_id):
"""Get container from new release by id
:param container_id: id of container
"""
filtered_containers = filter(
lambda c: c['id'] == container_id,
self.new_release_containers)
if not filtered_containers:
raise errors.CannotFindContainerError(
'Cannot find container with id {0}'.format(container_id))
return filtered_containers[0]
def container_docker_id(self, name):
"""Returns running container with specified name
:param name: name of the container
:returns: id of the container or None if not found
:raises CannotFindContainerError:
"""
containers_with_name = self._get_containers_by_name(name)
running_containers = filter(
lambda c: c['Status'].startswith('Up'),
containers_with_name)
if not running_containers:
raise errors.CannotFindContainerError(
'Cannot find running container with name "{0}"'.format(name))
return running_containers[0]['Id']
def _delete_container_if_exist(self, container_name):
"""Deletes docker container if it exists
:param container_name: name of container
"""
found_containers = self._get_containers_by_name(container_name)
for container in found_containers:
self.stop_container(container['Id'])
logger.debug('Delete container %s', container)
# TODO(eli): refactor it and make retries
# as a decorator
def func_remove():
self.docker_client.remove_container(container['Id'])
self.exec_with_retries(
func_remove,
docker.errors.APIError,
'Error running removeDevice',
retries=3,
interval=2)
def _get_containers_by_name(self, container_name):
return filter(
lambda c: '/{0}'.format(container_name) in c['Names'],
self.docker_client.containers(all=True))
def _delete_containers_for_image(self, image):
"""Deletes docker containers for specified image
:param image: name of image
"""
all_containers = self.docker_client.containers(all=True)
containers = filter(
# NOTE(eli): We must use conversion to
# str because in some cases Image is an integer
lambda c: str(c.get('Image')).startswith(image),
all_containers)
for container in containers:
logger.debug('Try to stop container %s which '
'depends on image %s', container['Id'], image)
self.docker_client.stop(container['Id'])
logger.debug('Delete container %s which '
'depends on image %s', container['Id'], image)
self.docker_client.remove_container(container['Id'])
class DockerInitializer(DockerUpgrader):
"""Initial implementation of docker initializer
Used for master node initialization
"""
def upgrade(self):
self.upload_images()
self.stop_fuel_containers()
self.create_containers()
self.stop_fuel_containers()
self.generate_configs()
self.switch_to_new_configs()
# Reload configs and run new services
self.supervisor.restart_and_wait()
def rollback(self):
logger.warn("DockerInitializer doesn't support rollback")

View File

@ -1,174 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import os
from fuel_upgrade.clients import SupervisorClient
from fuel_upgrade.engines.base import UpgradeEngine
from fuel_upgrade import utils
class HostSystemUpgrader(UpgradeEngine):
"""Upgrader for master node host system.
Required for upgrading packages which
are not managed by docker, for example fuelclient
and dockerctl.
* add local repo with new packages
* run puppet apply
"""
templates_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../templates'))
def __init__(self, *args, **kwargs):
super(HostSystemUpgrader, self).__init__(*args, **kwargs)
#: host system upgrader specific configs
self.host_system_config = self.config.host_system
#: path to puppet manifests
self.manifest_path = self.host_system_config['manifest_path']
#: path to puppet modules
self.puppet_modules_path = self.host_system_config[
'puppet_modules_path']
#: path to repo template
self.repo_template_path = os.path.join(
self.templates_dir, 'nailgun.repo')
#: new version of fuel
self.version = self.config.new_version
#: path to repository config
self.repo_config_path = self.host_system_config['repo_config_path']
#: packages to be installed before running puppet
self.packages = self.host_system_config['install_packages']
self.supervisor = SupervisorClient(
self.config, self.config.from_version)
@property
def required_free_space(self):
"""Required free space to run upgrade
Requires a lot of disk space for new repos and the repo config.
:returns: dict where key is path to directory
and value is required free space in megabytes
"""
sources = glob.glob(self.host_system_config['repos']['src'])
repos_size = sum(map(utils.dir_size, sources))
return {
self.host_system_config['repos']['dst']: repos_size,
self.repo_config_path: 10,
}
def upgrade(self):
"""Run host system upgrade process"""
# The workaround we need in order to fix [1]. In a few words,
# when new Docker is installed the containers MUST NOT start
# again because in this case puppet inside them will install
# the latest packages and break dependencies in some software.
#
# [1]: https://bugs.launchpad.net/fuel/+bug/1455419
self.supervisor.stop_all_services()
# The workaround we need in order to fix [1]. In a few words,
# when the repositories are being installed, the auxiliary repo is
# removed, so its config should be removed as well.
#
# [1]: https://bugs.launchpad.net/fuel/+bug/1495481
self.remove_repo_config()
self.install_repos()
self.update_repo()
self.install_packages()
self.run_puppet()
def rollback(self):
"""The only thing which we can rollback here is yum config"""
self.remove_repo_config()
self.remove_repos()
self.supervisor.start_all_services()
def install_repos(self):
sources = glob.glob(self.host_system_config['repos']['src'])
for source in sources:
destination = os.path.join(
self.host_system_config['repos']['dst'],
os.path.basename(source))
utils.copy(source, destination)
def remove_repos(self):
sources = glob.glob(self.host_system_config['repos']['src'])
for source in sources:
destination = os.path.join(
self.host_system_config['repos']['dst'],
os.path.basename(source))
utils.remove(destination)
def update_repo(self):
"""Add new centos repository"""
utils.render_template_to_file(
self.repo_template_path,
self.repo_config_path,
{
'name': '{0}_nailgun'.format(self.version),
'baseurl': self.host_system_config['repo_master'],
'gpgcheck': 0,
'skip_if_unavailable': 0,
})
utils.exec_cmd('yum clean all')
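# For illustration only (the nailgun.repo template itself is not shown here):
# with the parameters above the rendered yum config is assumed to look
# roughly like
#
#     [6.1_nailgun]
#     name=6.1_nailgun
#     baseurl=<repo_master URL from the upgrade config>
#     gpgcheck=0
#     skip_if_unavailable=0
#
# i.e. the section name comes from `name` and the URL from `repo_master`.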
def install_packages(self):
"""Install packages for new release"""
for package in self.packages:
utils.exec_cmd('yum install -v -y {0}'.format(package))
def run_puppet(self):
"""Run puppet to upgrade host system"""
utils.exec_cmd(
'puppet apply -d -v '
'{0} --modulepath={1}'.format(
self.manifest_path, self.puppet_modules_path))
def remove_repo_config(self):
"""Remove yum repository config"""
utils.remove_if_exists(self.repo_config_path)
# One more damn hack! We have to remove the auxiliary repo config
# if we're rolling back to a Fuel version that doesn't have
# an auxiliary repo at all.
if utils.compare_version(self.config.from_version, '6.1') > 0:
utils.remove_if_exists(
self.host_system_config['repo_aux_config_path'])
else:
# For some unfortunate reason we're managing the auxiliary repo in
# puppet manifests, but there's no management code for rolling back.
# Therefore, we need to clean up its artifacts here as part of the
# upgrade rollback procedure; otherwise another upgrade attempt
# will fail.
path, name = os.path.split(
self.host_system_config['repo_aux_config_path'])
utils.remove_if_exists(os.path.join(path, '{0}_{1}'.format(
self.config.new_version,
name)))

View File

@ -1,230 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import io
import logging
import os
import requests
import six
from fuel_upgrade.clients import NailgunClient
from fuel_upgrade.engines.base import UpgradeEngine
from fuel_upgrade import utils
logger = logging.getLogger(__name__)
class OpenStackUpgrader(UpgradeEngine):
"""OpenStack Upgrader.
The class is designed to do the following tasks:
* install manifests in the system
* add new releases to nailgun's database
* add notification about new releases
"""
def __init__(self, *args, **kwargs):
super(OpenStackUpgrader, self).__init__(*args, **kwargs)
#: a list of releases to install
self.releases = self._read_releases()
#: a nailgun object - api wrapper
self.nailgun = NailgunClient(**self.config.endpoints['nginx_nailgun'])
self._reset_state()
def upgrade(self):
self._reset_state()
self.install_puppets()
self.install_releases()
self.install_versions()
def rollback(self):
self.remove_releases()
self.remove_puppets()
self.remove_versions()
def install_puppets(self):
logger.info('Installing puppet manifests...')
sources = glob.glob(self.config.openstack['puppets']['src'])
for source in sources:
destination = os.path.join(
self.config.openstack['puppets']['dst'],
os.path.basename(source))
utils.copy(source, destination)
def remove_puppets(self):
logger.info('Removing puppet manifests...')
sources = glob.glob(self.config.openstack['puppets']['src'])
for source in sources:
destination = os.path.join(
self.config.openstack['puppets']['dst'],
os.path.basename(source))
utils.remove(destination)
def install_versions(self):
"""Copy openstack release versions"""
logger.info('Copy openstack release versions...')
release_versions_cfg = self.config.openstack['release_versions']
versions = glob.glob(release_versions_cfg['src'])
utils.create_dir_if_not_exists(release_versions_cfg['dst'])
for version_file in versions:
dst = os.path.join(
release_versions_cfg['dst'],
os.path.basename(version_file))
utils.copy(version_file, dst)
def remove_versions(self):
"""Copy openstack release versions"""
logger.info('Copy openstack release versions...')
release_versions_cfg = self.config.openstack['release_versions']
versions = glob.glob(release_versions_cfg['src'])
for version_file in versions:
dst = os.path.join(
release_versions_cfg['dst'],
os.path.basename(version_file))
utils.remove(dst)
def install_releases(self):
# add only new releases to nailgun and inject paths to
# base repo if needed
existing_releases = self.nailgun.get_releases()
releases = self._get_unique_releases(self.releases, existing_releases)
# upload releases that don't exist yet
for release in releases:
# register new release
logger.debug('Register a new release: %s (%s)',
release['name'],
release['version'])
response = self.nailgun.create_release(release)
# save release id for a possible future rollback
self._rollback_ids['release'].append(response['id'])
self.upload_release_deployment_tasks(response)
if not release.get('state', 'available') == 'available':
continue
# add notification about successful releases
logger.debug('Add notification about new release: %s (%s)',
release['name'],
release['version'])
response = self.nailgun.create_notification({
'topic': 'release',
'message': 'New release available: {0} ({1})'.format(
release['name'],
release['version'],
),
})
# save notification id for a possible future rollback
self._rollback_ids['notification'].append(response['id'])
def upload_release_deployment_tasks(self, release):
"""Upload deployment tasks for release
Walks the puppet src directory, matches all task files against
the configured pattern and uploads them for the release.
:param release: dict representation of release
"""
tasks = []
release_puppet_path = os.path.join(
self.config.openstack['puppets']['dst'], release['version'])
for file_path in utils.iterfiles_filter(
release_puppet_path,
self.config.deployment_tasks_file_pattern):
tasks.extend(utils.read_from_yaml(file_path))
self.nailgun.put_deployment_tasks(release, tasks)
def remove_releases(self):
"""Remove all releases that are created by current session."""
for release_id in reversed(self._rollback_ids['release']):
try:
logger.debug('Removing release with ID=%s', release_id)
self.nailgun.remove_release(release_id)
except (
requests.exceptions.HTTPError
) as exc:
logger.exception('%s', six.text_type(exc))
for notif_id in reversed(self._rollback_ids['notification']):
try:
logger.debug('Removing notification with ID=%s', notif_id)
self.nailgun.remove_notification(notif_id)
except (
requests.exceptions.HTTPError
) as exc:
logger.exception('%s', six.text_type(exc))
def _reset_state(self):
"""Remove rollback IDs from the arrays."""
#: a list of ids that have to be removed in case of rollback
self._rollback_ids = {
'release': [],
'notification': [],
}
@classmethod
def _get_unique_releases(cls, releases, existing_releases):
"""Returns a list of releases that aren't exist yet.
:param releases: a list of releases to filter
:param existing_releases: a list of existing releases
:returns: a list of unique releases
"""
existing_releases = [
(r['name'], r['version']) for r in existing_releases
]
unique = lambda r: (r['name'], r['version']) not in existing_releases
return [r for r in releases if unique(r)]
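# Illustrative example (not part of the original module): releases already
# registered in nailgun are filtered out by their (name, version) pair, so
# with hypothetical data
#
#     existing = [{'name': 'Kilo on CentOS', 'version': '2015.1.0-7.0'}]
#     new = [{'name': 'Kilo on CentOS', 'version': '2015.1.0-7.0'},
#            {'name': 'Kilo on Ubuntu', 'version': '2015.1.0-7.0'}]
#
# _get_unique_releases(new, existing) keeps only the 'Kilo on Ubuntu' entry.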
def _read_releases(self):
"""Returns a list of releases in a dict representation."""
releases = []
# read releases from a set of files
for release_yaml in glob.glob(self.config.openstack['releases']):
with io.open(release_yaml, 'r', encoding='utf-8') as f:
releases.extend(utils.load_fixture(f))
return releases
@property
def required_free_space(self):
spaces = {
self.config.openstack['puppets']['dst']:
glob.glob(self.config.openstack['puppets']['src']), }
for dst, srcs in six.iteritems(spaces):
size = 0
for src in srcs:
size += utils.dir_size(src)
spaces[dst] = size
return spaces

View File

@ -1,50 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuel_upgrade.engines.base import UpgradeEngine
from fuel_upgrade import errors
class RaiseErrorUpgrader(UpgradeEngine):
"""The test upgrader intended to use in system tests.
In order to test the rollback feature we used to inject error-raising
code into one of our upgraders in-place::
self.fuel_web.modify_python_file(self.env.get_admin_remote(),
"61i \ \ \ \ \ \ \ \ raise errors."
"ExecutedErrorNonZeroExitCode('{0}')"
.format('Some bad error'),
'/var/upgrade/site-packages/'
'fuel_upgrade/engines/'
'openstack.py')
It's a bad design decision which leads to occasional test failures due
to changes in the upgrader's code. So this class solves the
issue by providing a special upgrader which always fails.
"""
error_message = 'Something Goes Wrong'
def upgrade(self):
raise errors.FuelUpgradeException(self.error_message)
def rollback(self):
return NotImplemented
@property
def required_free_space(self):
return {}

View File

@ -1,85 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class FuelUpgradeException(Exception):
pass
class ExecutedErrorNonZeroExitCode(FuelUpgradeException):
pass
class CannotRunUpgrade(FuelUpgradeException):
pass
class CommandError(FuelUpgradeException):
pass
class DockerExecutedErrorNonZeroExitCode(FuelUpgradeException):
pass
class DockerFailedToBuildImageError(FuelUpgradeException):
pass
class CyclicDependenciesError(FuelUpgradeException):
pass
class CannotFindContainerError(FuelUpgradeException):
pass
class CannotFindImageError(FuelUpgradeException):
pass
class TimeoutError(FuelUpgradeException):
pass
class DatabaseDumpError(FuelUpgradeException):
pass
class UpgradeVerificationError(FuelUpgradeException):
pass
class UnsupportedImageTypeError(FuelUpgradeException):
pass
class WrongCobblerConfigsError(FuelUpgradeException):
pass
class NotEnoughFreeSpaceOnDeviceError(FuelUpgradeException):
pass
class WrongVersionError(FuelUpgradeException):
pass
class NailgunIsNotRunningError(FuelUpgradeException):
pass
class OstfIsNotRunningError(FuelUpgradeException):
pass

View File

@ -1,434 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
import socket
import xmlrpclib
import requests
import six
from fuel_upgrade import errors
from fuel_upgrade import utils
from fuel_upgrade.clients import NailgunClient
from fuel_upgrade.clients import OSTFClient
logger = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseChecker(object):
"""Base class for all checkers
:param endpoints: dict of endpoints
"""
def __init__(self, endpoints):
self.endpoints = endpoints
@abc.abstractproperty
def checker_name(self):
"""Name of the checker"""
@abc.abstractmethod
def check(self):
"""Check if server alive."""
def safe_get(self, url, auth=None, timeout=0.5):
"""Make get request to specified url
In case of errors returns None and doesn't
raise exceptions
:param url: url to service
:param auth: tuple where first item is username second is password
:param timeout: connection timeout
:returns: dict with `body` and `code` keys,
or None in case of error
"""
def get_request():
result = requests.get(url, auth=auth, timeout=timeout)
try:
body = result.json()
except ValueError:
body = result.text
return {'body': body, 'code': result.status_code}
return self.make_safe_request(get_request)
def make_safe_request(self, method):
"""Execute passed method and supress HTTP related errors
:param method: callable object
:returns: result of method call or None in case of error
"""
try:
return method()
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
requests.exceptions.HTTPError,
ValueError,
socket.timeout):
return None
def check_if_port_open(self, ip, port):
"""Checks if port is open
:param ip: ip address
:param port: port
:returns: True if the port is open,
False otherwise
"""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.5)
result = sock.connect_ex((ip, port))
except socket.timeout:
return False
if result == 0:
return True
return False
def get_xmlrpc(self, url):
"""Creates xmlrpc object
:param url: path to rpc server
:returns: ServerProxy object
"""
try:
server = xmlrpclib.ServerProxy(url)
except socket.error:
return None
return server
class OSTFChecker(BaseChecker):
@property
def checker_name(self):
return 'ostf'
def check(self):
resp = self.safe_get('http://{host}:{port}/'.format(
**self.endpoints['ostf']))
return resp and (resp['code'] == 401 or resp['code'] == 200)
class RabbitChecker(BaseChecker):
@property
def checker_name(self):
return 'rabbitmq'
def check(self):
resp = self.safe_get(
'http://{host}:{port}/api/nodes'.format(
**self.endpoints['rabbitmq']),
auth=(self.endpoints['rabbitmq']['user'],
self.endpoints['rabbitmq']['password']))
return resp and resp['code'] == 200 and resp['body']
class CobblerChecker(BaseChecker):
@property
def checker_name(self):
return 'cobbler'
def check(self):
server = self.get_xmlrpc('http://{host}:{port}/cobbler_api'.format(
**self.endpoints['cobbler']))
if server is None:
return False
try:
profiles = server.get_profiles()
except (xmlrpclib.Fault, xmlrpclib.ProtocolError, socket.error):
return False
# Check that there are bootstrap, ubuntu, centos profiles
return len(profiles) >= 3
class PostgresChecker(BaseChecker):
@property
def checker_name(self):
return 'postgres'
def check(self):
return self.check_if_port_open(
self.endpoints['postgres']['host'],
self.endpoints['postgres']['port'])
class RsyncChecker(BaseChecker):
@property
def checker_name(self):
return 'rsync'
def check(self):
return self.check_if_port_open(
self.endpoints['rsync']['host'],
self.endpoints['rsync']['port'])
class RsyslogChecker(BaseChecker):
@property
def checker_name(self):
return 'rsyslog'
def check(self):
return self.check_if_port_open(
self.endpoints['rsyslog']['host'],
self.endpoints['rsyslog']['port'])
class MCollectiveChecker(BaseChecker):
@property
def checker_name(self):
return 'mcollective'
def check(self):
resp = self.safe_get(
'http://{host}:{port}/api/exchanges'.format(
**self.endpoints['rabbitmq_mcollective']),
auth=(self.endpoints['rabbitmq_mcollective']['user'],
self.endpoints['rabbitmq_mcollective']['password']))
if not resp or \
not isinstance(resp['body'], list) or \
resp['code'] != 200:
return False
exchanges = filter(lambda e: isinstance(e, dict), resp['body'])
mcollective_broadcast = filter(
lambda e: e.get('name') == 'mcollective_broadcast', exchanges)
mcollective_directed = filter(
lambda e: e.get('name') == 'mcollective_directed', exchanges)
return mcollective_directed and mcollective_broadcast
class NginxChecker(BaseChecker):
@property
def checker_name(self):
return 'nginx'
def check(self):
resp_nailgun = self.safe_get(
'http://{host}:{port}/'.format(**self.endpoints['nginx_nailgun']))
resp_repo = self.safe_get(
'http://{host}:{port}/'.format(**self.endpoints['nginx_repo']))
return resp_nailgun is not None and resp_repo is not None
class IntegrationCheckerNginxNailgunChecker(BaseChecker):
@property
def checker_name(self):
return 'integration_nginx_nailgun'
def check(self):
resp = self.safe_get(
'http://{host}:{port}/api/v1/version'.format(
**self.endpoints['nginx_nailgun']))
return resp and resp['code'] == 200
class IntegrationOSTFKeystoneChecker(BaseChecker):
@property
def checker_name(self):
return 'integration_ostf_keystone'
def check(self):
ostf_client = OSTFClient(**self.endpoints['ostf'])
def get_request():
resp = ostf_client.get('/')
return resp.status_code
code = self.make_safe_request(get_request)
return code == 200
class KeystoneChecker(BaseChecker):
@property
def checker_name(self):
return 'keystone'
def check(self):
resp_keystone = self.safe_get(
'http://{host}:{port}/v2.0'.format(
**self.endpoints['keystone']))
resp_admin_keystone = self.safe_get(
'http://{host}:{port}/v2.0'.format(
**self.endpoints['keystone_admin']))
return (resp_keystone and
resp_admin_keystone and
resp_keystone['code'] == 200 and
resp_admin_keystone['code'] == 200)
class IntegrationCheckerPostgresqlNailgunNginx(BaseChecker):
@property
def checker_name(self):
return 'integration_postgres_nailgun_nginx'
def check(self):
nailgun_client = NailgunClient(**self.endpoints['nginx_nailgun'])
def get_releases():
releases = nailgun_client.get_releases()
return releases
releases = self.make_safe_request(get_releases)
return isinstance(releases, list) and len(releases) > 1
class IntegrationCheckerRabbitMQAstuteNailgun(BaseChecker):
@property
def checker_name(self):
return 'integration_rabbitmq_astute_nailgun'
def check(self):
resp = self.safe_get(
'http://{host}:{port}/api/exchanges'.format(
**self.endpoints['rabbitmq']),
auth=(self.endpoints['rabbitmq']['user'],
self.endpoints['rabbitmq']['password']))
if not resp or \
not isinstance(resp['body'], list) or \
resp['code'] != 200:
return False
exchanges = filter(lambda e: isinstance(e, dict), resp['body'])
naily = filter(lambda e: e.get('name') == 'naily_service', exchanges)
nailgun = filter(lambda e: e.get('name') == 'nailgun', exchanges)
return naily and nailgun
class FuelUpgradeVerify(object):
"""Verifies that fuel upgrade is succeed
:param config: config object
:param checkers: list of classes which implement :class:`BaseChecker`
"""
def __init__(self, config, checkers=None):
self.config = config
# Set default checkers
if checkers is None:
check_classes = [
OSTFChecker,
RabbitChecker,
CobblerChecker,
PostgresChecker,
RsyncChecker,
RsyslogChecker,
MCollectiveChecker,
KeystoneChecker,
NginxChecker,
IntegrationOSTFKeystoneChecker,
IntegrationCheckerNginxNailgunChecker,
IntegrationCheckerPostgresqlNailgunNginx,
IntegrationCheckerRabbitMQAstuteNailgun]
self.checkers = [check_class(config.endpoints)
for check_class in check_classes]
else:
self.checkers = checkers
self.expected_services = [
checker.checker_name for checker in self.checkers]
def verify(self):
"""Run fuel verification"""
try:
utils.wait_for_true(
self.check_if_all_services_ready,
timeout=self.config.checker['timeout'],
interval=self.config.checker['interval'])
except errors.TimeoutError:
raise errors.UpgradeVerificationError(
'Failed to run services {0}'.format(
self._get_non_running_services()))
def check_if_all_services_ready(self):
"""Checks if all services are ready
:returns: True if all services are ready
False if there are some services which are not ready
"""
not_running_services = self._get_non_running_services()
if not_running_services:
logger.info('Failed checkers: %s', not_running_services)
return False
return True
def _get_non_running_services(self):
"""Get list of services which are not running
:returns: list
"""
return list(set(self.expected_services) -
set(self._get_running_services()))
def _get_running_services(self):
"""Get list of services which are running
:returns: list
"""
running_services = []
for checker in self.checkers:
logger.debug('Start %s checker', checker.checker_name)
if checker.check():
running_services.append(checker.checker_name)
return running_services
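# Minimal usage sketch (not part of the original module); `config` is assumed
# to be a fuel_upgrade.config.Config instance with `endpoints` and `checker`
# settings filled in:
#
#     verifier = FuelUpgradeVerify(config)
#     try:
#         verifier.verify()
#     except errors.UpgradeVerificationError as exc:
#         logger.error('Upgrade verification failed: %s', exc)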

View File

@ -1,82 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
from fuel_upgrade.config import Config
from fuel_upgrade.utils import sanitize
class SanitizingLogger(logging.Logger):
"""Logger subclass which sanitizes passed positional arguments.
It traverses the arguments one by one and recursively looks them up for
dictionaries. If a key of the dictionary contains a keyword listed in
`SanitizingLogger.keywords`, the corresponding value is masked.
Instances of the following types are sanitized:
- dict
- list containing dicts
- fuel_upgrade.config.Config
arguments of other types are not changed.
Example:
>>> auth_params = {'password': 'secure_password'}
>>> auth_info = [{'admin_token': 'secure_token'}]
>>> logging.setLoggerClass(SanitizingLogger)
>>> logger = logging.getLogger()
>>> logger.info("%s %s %s %s", 'Auth', 'password:', auth_params, auth_info)
Auth password: {'password': '******'} [{'admin_token': '******'}]
"""
keywords = ('password', 'token')
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None,
extra=None):
_args = []
for arg in args:
if isinstance(arg, Config):
_arg = sanitize(arg._config, self.keywords)
else:
_arg = sanitize(arg, self.keywords)
_args.append(_arg)
return logging.Logger.makeRecord(self, name, level, fn, lno, msg,
tuple(_args), exc_info, func, extra)
def configure_logger(path):
logging.setLoggerClass(SanitizingLogger)
logger = logging.getLogger('fuel_upgrade')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s %(levelname)s %(process)d (%(module)s) %(message)s',
"%Y-%m-%d %H:%M:%S")
if sys.stdout.isatty():
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
file_handler = logging.FileHandler(path)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger

View File

@ -1,80 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
header = '=' * 50
docker_is_dead = """
Docker died during the upgrade. Run these commands and then restart the upgrade.
Make sure that docker is dead:
# docker ps
2014/08/19 14:28:15 Cannot connect to the Docker daemon.
Is 'docker -d' running on this host?
# umount -l $(grep '/dev/mapper/docker-' /proc/mounts | awk '{ print $2}')
# rm /var/run/docker.pid
# service docker start
Run the upgrade again from the directory with the unarchived upgrade tarball
# ./upgrade.sh
You can track the issue here
https://bugs.launchpad.net/fuel/+bug/1359725
"""
nailgun_is_not_running = """
REST API service (nailgun) is not running. Unable to verify that
there are no running tasks.
Use `--no-check` as a parameter for upgrade script to skip all
pre-upgrade checks.
"""
no_password_provided = """
Expecting a password provided via --password or prompted response
"""
health_checker_failed = """
Couldn't start some of the services, try to run upgrade again.
"""
ostf_is_not_running = """
Health Checker (OSTF) is not running. Unable to verify that
there are no running tasks.
Use `--no-check` as a parameter for upgrade script to skip all
pre-upgrade checks.
"""
update_your_master_node = """
Both updates and security repos were set up. If you want to
retrieve the latest updates, make sure you have access to these repos
and run:
# yum update
# dockerctl destroy all
# dockerctl start all
For more details, check out Fuel documentation at
http://docs.mirantis.com/fuel
"""

View File

@ -1,17 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuel_upgrade.pre_upgrade_hooks.manager import PreUpgradeHookManager

View File

@ -1,97 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import six
from fuel_upgrade.config import read_yaml_config
from fuel_upgrade import utils
@six.add_metaclass(abc.ABCMeta)
class PreUpgradeHookBase(object):
"""Abstract class for pre upgrade hooks
:param list upgraders: list of :class:`BaseUpgrader` implementations
:param config: :class:`Config` object
"""
def __init__(self, upgraders, config):
#: config for upgrade
self.config = config
#: list of upgrade engines
self.upgraders = upgraders
@abc.abstractmethod
def check_if_required(self):
"""Return True if check is required and False if is not required"""
@abc.abstractmethod
def run(self):
"""Run pre upgrade hook"""
@abc.abstractproperty
def enable_for_engines(self):
"""Return list of upgrade engines which the hook is required for"""
@property
def is_required(self):
"""Checks if it's required to run the hook
:returns: True if required, False if is not required
"""
return self.is_enabled_for_engines and self.check_if_required()
@property
def is_enabled_for_engines(self):
"""Checks if engine in the list
:returns: True if engine in the list
False if engine not in the list
"""
for engine in self.enable_for_engines:
for upgrade in self.upgraders:
if isinstance(upgrade, engine):
return True
return False
def update_astute_config(self, defaults=None, overwrites=None):
"""Update astute config and backup old one
Read astute.yaml config file, update it with new config,
copy old file to backup location and save new astute.yaml.
"""
# NOTE(ikalnitsky): we need to re-read astute.yaml in order to protect
# us from losing a useful injection made by another hook
astute_config = copy.deepcopy(defaults or {})
astute_config = utils.dict_merge(
astute_config,
read_yaml_config(self.config.current_fuel_astute_path))
astute_config = utils.dict_merge(
astute_config,
overwrites or {})
# NOTE(eli): Just save the file as a backup in case
# the user wants to restore it manually
utils.copy_file(
self.config.current_fuel_astute_path,
'{0}_{1}'.format(self.config.current_fuel_astute_path,
self.config.from_version),
overwrite=False)
utils.save_as_yaml(self.config.current_fuel_astute_path, astute_config)
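# Illustrative merge order (not part of the original module): values from the
# current astute.yaml win over `defaults`, and `overwrites` wins over both,
# assuming utils.dict_merge performs a right-biased recursive merge. E.g.
#
#     defaults   = {'FUEL_ACCESS': {'user': 'admin', 'password': 'admin'}}
#     current    = {'FUEL_ACCESS': {'user': 'admin', 'password': 'secret'}}
#     overwrites = {'DNS_DOMAIN': 'example.com'}
#
# produce {'FUEL_ACCESS': {'user': 'admin', 'password': 'secret'},
#          'DNS_DOMAIN': 'example.com'}, which is then saved back to
# astute.yaml while the previous file is kept as a backup.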

View File

@ -1,72 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from fuel_upgrade.engines.host_system import HostSystemUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
from fuel_upgrade import utils
logger = logging.getLogger(__name__)
class FixHostSystemRepoHook(PreUpgradeHookBase):
"""Fix repo for host system
During the 5.0.1 upgrade we add a repository
whose path points to the repository shipped
inside the upgrade tarball.
When the user deletes that data he also deletes
the repo. As a result we can get a broken repo
which fails during the next upgrade [1].
[1] https://bugs.launchpad.net/fuel/+bug/1358686
"""
#: this hook is required only for host-system engine
enable_for_engines = [HostSystemUpgrader]
#: path to 5.0.1 repository which is created by upgrade script
repo_path = '/var/www/nailgun/5.0.1/centos/x86_64'
#: path to the file for yum repo
yum_repo_file = '/etc/yum.repos.d/5.0.1_nailgun.repo'
#: path to repo template
repo_template = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', 'templates', 'nailgun.repo'))
def __init__(self, *args, **kwargs):
super(FixHostSystemRepoHook, self).__init__(*args, **kwargs)
def check_if_required(self):
"""Check if hack is required
The hack is required if we're going to upgrade from 5.0.1
and the 5.0.1 repo path and its yum config file exist
"""
return (self.config.from_version == '5.0.1' and
utils.file_exists(self.repo_path) and
utils.file_exists(self.yum_repo_file))
def run(self):
"""Change repo path"""
utils.render_template_to_file(
self.repo_template,
self.yum_repo_file,
{'repo_path': self.repo_path, 'version': '5.0.1'})

View File

@ -1,83 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from fuel_upgrade import utils
from fuel_upgrade.engines.docker_engine import DockerUpgrader
from fuel_upgrade.engines.host_system import HostSystemUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
logger = logging.getLogger(__name__)
class AddCredentialsHook(PreUpgradeHookBase):
"""Feature `access control on master node` was introduced in 5.1 release
In this feature [1] fuelmenu generates credentials
and saves them in the /etc/astute.yaml file.
Before upgrading for this feature we need to
add default credentials to the file.
[1] https://blueprints.launchpad.net/fuel/+spec/access-control-master-node
"""
#: This hook required only for docker and host system engines
enable_for_engines = [DockerUpgrader, HostSystemUpgrader]
#: Default credentials
credentials = {
"astute": {
"user": "naily",
"password": "naily"},
"cobbler": {
"user": "cobbler",
"password": "cobbler"},
"mcollective": {
"user": "mcollective",
"password": "marionette"},
"postgres": {
"keystone_dbname": "keystone",
"keystone_user": "keystone",
"keystone_password": "keystone",
"nailgun_dbname": "nailgun",
"nailgun_user": "nailgun",
"nailgun_password": "nailgun",
"ostf_dbname": "ostf",
"ostf_user": "ostf",
"ostf_password": "ostf"},
"keystone": {
"admin_token": utils.generate_uuid_string()},
"FUEL_ACCESS": {
"user": "admin",
"password": "admin"}}
def check_if_required(self):
"""Checks if it's required to run upgrade
:returns: True - if it is required to run this hook
False - if it is not required to run this hook
"""
is_required = not all(key in self.config.astute
for key in self.credentials.keys())
return is_required
def run(self):
"""Adds default credentials to config file"""
self.update_astute_config(defaults=self.credentials)

View File

@ -1,83 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from fuel_upgrade.engines.openstack import OpenStackUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
from fuel_upgrade.utils import copy
from fuel_upgrade.utils import iterfiles
class FixPuppetManifests(PreUpgradeHookBase):
"""Install new puppets for some releases in order to deliver fixes.
Openstack Patching was introduced in Fuel 5.1, but we need to distribute
some fixes for old puppet manifests of both 5.0 and 5.0.1 releases in
order to provide a working rollback feature.
"""
#: this hook is required only for openstack engine
enable_for_engines = [OpenStackUpgrader]
#: a path to puppet scripts with fixes
src_path = os.path.join('{update_path}', 'config', '{version}')
#: a path to puppet destination
dst_path = os.path.join('/etc', 'puppet')
def __init__(self, *args, **kwargs):
super(FixPuppetManifests, self).__init__(*args, **kwargs)
# get source/destination directory pairs to install manifests
if os.path.exists(os.path.join(self.dst_path, '5.0.1')):
# we've detected that the master node was previously upgraded
# from 5.0 to 5.0.1, so we have to install patched puppets
# for both 5.0 and 5.0.1 releases
self._copypairs = [
(
self.src_path.format(
update_path=self.config.update_path, version='5.0'),
self.dst_path
),
(
self.src_path.format(
update_path=self.config.update_path, version='5.0.1'),
os.path.join(self.dst_path, '5.0.1')
)]
else:
# we've detected that the master node's previous installation
# was fresh, so we have to install patched puppets only for
# the current release
self._copypairs = [
(
self.src_path.format(
update_path=self.config.update_path,
version=self.config.from_version),
self.dst_path
)]
def check_if_required(self):
"""The hack is required if we're going to upgrade from 5.0 or 5.0.1."""
return self.config.from_version in ('5.0', '5.0.1')
def run(self):
"""Install patched manifests to the master node."""
for srcpath, dstpath in self._copypairs:
# we can't just copy the folder as is, since it's not a full tree
# and overwrite mode would erase the entire old content
for srcfile in iterfiles(srcpath):
copy(srcfile, srcfile.replace(srcpath, dstpath))

View File

@ -1,67 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from fuel_upgrade.engines.docker_engine import DockerUpgrader
from fuel_upgrade.engines.host_system import HostSystemUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
logger = logging.getLogger(__name__)
class SyncDnsHook(PreUpgradeHookBase):
"""Synchronize DNS hook
The fix for bug `Fix dns domain and search settings on Fuel Master` was
introduced in the 5.1 release [1].
In this feature fuelmenu parses existing DNS
settings and applies them as a default instead
of its own in /etc/fuel/astute.yaml.
Before upgrade for this feature, we need to
correct /etc/fuel/astute.yaml to match
/etc/resolv.conf.
[1] Fix dns domain and search settings on Fuel Master
"""
#: This hook required only for docker and host system engines
enable_for_engines = [DockerUpgrader, HostSystemUpgrader]
def check_if_required(self):
"""Checks if it's required to run upgrade
:returns: True - if it is required to run this hook
False - if it is not required to run this hook
"""
astute_domain = self.config.astute['DNS_DOMAIN']
astute_search = self.config.astute['DNS_SEARCH']
hostname, sep, realdomain = os.uname()[1].partition('.')
is_required = not all([astute_domain == realdomain,
realdomain in astute_search])
return is_required
def run(self):
"""Replaces config file with current DNS domain"""
hostname, sep, realdomain = os.uname()[1].partition('.')
self.update_astute_config(overwrites={
'DNS_DOMAIN': realdomain,
'DNS_SEARCH': realdomain,
})

View File

@ -1,70 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os.path import join
from fuel_upgrade.engines.openstack import OpenStackUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
from fuel_upgrade import utils
class CopyOpenstackReleaseVersions(PreUpgradeHookBase):
"""Copy openstack release version files.
In previous versions of fuel, openstack packages
and manifests had the same version as the rest
of the system (nailgun, astute).
Patching was introduced in 5.1; as a result openstack
packages and manifests can be delivered separately,
and this bundle has its own version.
Release versions are stored in `/etc/fuel/release_versions/`
directory.
"""
#: this hook is required only for openstack engine
enable_for_engines = [OpenStackUpgrader]
#: path to release versions directory
release_dir = '/etc/fuel/release_versions'
#: version file path for 5.0
version_path_5_0 = '/etc/fuel/5.0/version.yaml'
dst_version_path_5_0 = join(release_dir, '2014.1-5.0.yaml')
#: version file path for 5.0.1
version_path_5_0_1 = '/etc/fuel/5.0.1/version.yaml'
dst_version_path_5_0_1 = join(release_dir, '2014.1.1-5.0.1.yaml')
def check_if_required(self):
"""The hack is required if we're going to upgrade from 5.0 or 5.0.1."""
return self.config.from_version in ('5.0', '5.0.1')
def run(self):
"""Copy version files"""
utils.create_dir_if_not_exists(self.release_dir)
utils.copy_if_exists(self.version_path_5_0, self.dst_version_path_5_0)
# NOTE(eli): in case of a failed upgrade
# from 5.0 to 5.0.1 the file for 5.0.1 can
# be there, even though 5.0.1 was not
# actually installed
if self.config.from_version == '5.0.1':
utils.copy_if_exists(
self.version_path_5_0_1,
self.dst_version_path_5_0_1)

View File

@ -1,68 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from fuel_upgrade import utils
from fuel_upgrade.engines.docker_engine import DockerUpgrader
from fuel_upgrade.engines.host_system import HostSystemUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
logger = logging.getLogger(__name__)
class AddKeystoneCredentialsHook(PreUpgradeHookBase):
"""`Fuel master access control improvements` was introduced in 6.0 release
In this feature [1] fuelmenu generates keystone credentials
and saves them in the /etc/astute.yaml file.
Before upgrading for this feature we need to
add new keystone credentials to the file.
[1] https://blueprints.launchpad.net/fuel/+spec/
access-control-master-node-improvments
"""
#: This hook required only for docker and host system engines
enable_for_engines = [DockerUpgrader, HostSystemUpgrader]
#: New credentials
keystone_config = {
'keystone': {
"nailgun_user": "nailgun",
"nailgun_password": utils.generate_uuid_string(),
"ostf_user": "ostf",
"ostf_password": utils.generate_uuid_string(),
}
}
def check_if_required(self):
"""Checks if it's required to run upgrade
:returns: True - if it is required to run this hook
False - if it is not required to run this hook
"""
is_required = not all(
key in self.config.astute.get('keystone', {})
for key in self.keystone_config['keystone'].keys())
return is_required
def run(self):
"""Adds default credentials to config file"""
self.update_astute_config(defaults=self.keystone_config)

View File

@ -1,61 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from fuel_upgrade import utils
from fuel_upgrade.engines.docker_engine import DockerUpgrader
from fuel_upgrade.engines.host_system import HostSystemUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
logger = logging.getLogger(__name__)
class AddFuelwebX8664LinkForUbuntu(PreUpgradeHookBase):
"""Add link for repo/ubuntu/x86_64 -> repo/ubuntu/fuelweb/x86_64
In Fuel 6.0 we have dropped the legacy 'fuelweb' folder from the repos,
but we can't do this for old (already installed) repos. Unfortunately,
this leads to an issue where cobbler, using the new puppets/configs, tries
to load the Ubuntu installer from repo/ubuntu/x86_64 while it's actually
located in repo/ubuntu/fuelweb/x86_64.
Using old installers for both centos and ubuntu is a separate issue,
but it's not fixed yet, so we need to introduce
such a hack.
"""
#: this hook required only for docker and host system engines
enable_for_engines = [DockerUpgrader, HostSystemUpgrader]
#: link to old ubuntu x86_64
ubuntu_x86_64_old = '/var/www/nailgun/ubuntu/fuelweb/x86_64'
ubuntu_x86_64_new = '/var/www/nailgun/ubuntu/x86_64'
def check_if_required(self):
"""Checks if it's required to run upgrade
:returns: True - if it is required to run this hook
False - if it is not required to run this hook
"""
return all([
utils.file_exists(self.ubuntu_x86_64_old),
not utils.file_exists(self.ubuntu_x86_64_new)])
def run(self):
"""Add link for repo/ubuntu/x86_64 -> repo/ubuntu/fuelweb/x86_64"""
utils.symlink(self.ubuntu_x86_64_old, self.ubuntu_x86_64_new)
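Note: the hook acts only when the old 'fuelweb' path exists and the new path does not, and then creates a symlink. A hedged sketch of the same check-then-link pattern using the standard library directly (the paths are placeholders, and utils.file_exists/utils.symlink are assumed to be thin wrappers around the equivalent os calls):

import os

# hypothetical sandbox paths, standing in for /var/www/nailgun/ubuntu/...
old_path = '/tmp/demo-repo/ubuntu/fuelweb/x86_64'
new_path = '/tmp/demo-repo/ubuntu/x86_64'

# same condition as check_if_required(): the legacy layout is present
# and the new location is still free
if os.path.exists(old_path) and not os.path.lexists(new_path):
    os.symlink(old_path, new_path)  # new path now points at the legacy folder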

View File

@ -1,57 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from fuel_upgrade.engines.docker_engine import DockerUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
logger = logging.getLogger(__name__)
class AddDhcpGateway(PreUpgradeHookBase):
"""Inject dhcp_gateway setting into astute.yaml
Since Fuel 6.1 we have a new field in astute.yaml - "dhcp_gateway".
It's mandatory to have that field because it will be used by native
provisioning as a gateway. Without it, we won't be able to use native
provisioning with external repos.
"""
#: this hook is required only for the docker engine
enable_for_engines = [DockerUpgrader]
#: network settings to be injected into astute.yaml
_admin_network = {
'ADMIN_NETWORK': {
'dhcp_gateway': None,
}
}
def __init__(self, *args, **kwargs):
super(AddDhcpGateway, self).__init__(*args, **kwargs)
gw = self.config.master_ip
self._admin_network['ADMIN_NETWORK']['dhcp_gateway'] = gw
def check_if_required(self):
inject = set(self._admin_network['ADMIN_NETWORK'])
exists = set(self.config.astute.get('ADMIN_NETWORK', {}))
return inject - exists
def run(self):
"""Adds dhcp gateway to astute.yaml"""
self.update_astute_config(defaults=self._admin_network)
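Note: check_if_required() here returns a set difference rather than a boolean; the hook manager only needs a truthy value, and the difference is non-empty exactly while 'dhcp_gateway' is still missing. A small illustration (the sample ADMIN_NETWORK dicts are hypothetical):

# set(dict) yields the dict's keys
inject = set({'dhcp_gateway': None})             # keys we want to add
without_gw = set({'interface': 'eth0'})          # ADMIN_NETWORK before the hook
with_gw = set({'interface': 'eth0', 'dhcp_gateway': '10.20.0.2'})

print(bool(inject - without_gw))  # True  -> hook will run
print(bool(inject - with_gw))     # False -> hook is skipped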

View File

@ -1,59 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from fuel_upgrade import utils
from fuel_upgrade.engines.host_system import HostSystemUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
logger = logging.getLogger(__name__)
class AddMonitordKeystoneCredentialsHook(PreUpgradeHookBase):
"""Monitoring service Keystone credentials: [1].
This patch updates the astute.yaml file by adding the 'monitord' user credentials.
This user is required to create Fuel notifications when disk space on
the master node is getting low. We don't want to use the standard 'admin' user
because when the user changes the password via the UI it is not reflected in the
astute.yaml file.
[1] https://bugs.launchpad.net/fuel/+bug/1371757
"""
#: This hook is required only for the host system engine
enable_for_engines = [HostSystemUpgrader]
#: New credentials
keystone_config = {
'keystone': {
"monitord_user": "monitord",
"monitord_password": utils.generate_uuid_string(),
}
}
def check_if_required(self):
return len(
set(self.keystone_config['keystone']).difference(
self.config.astute.get('keystone', {})
)
)
def run(self):
"""Adds default credentials to config file"""
self.update_astute_config(defaults=self.keystone_config)

View File

@ -1,71 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from fuel_upgrade.engines.docker_engine import DockerUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
from fuel_upgrade import errors
from fuel_upgrade import utils
logger = logging.getLogger(__name__)
class MoveKeysHook(PreUpgradeHookBase):
"""Move keys from astute container to new path mounted in all containers
In 6.1 key generation for granular deployment was moved
to tasks, and keys now live in a directory mounted into
all containers, so we need to move the old keys to the new
directory to have all keys in one place.
"""
# this hook is required only for docker upgrade engine
enable_for_engines = [DockerUpgrader]
# source directory where keys are kept for fuel < 6.1
src_path = '/var/lib/astute/'
# new keys destination
dst_path = '/var/lib/fuel/keys/'
def check_if_required(self):
"""The hack is required if we're going to upgrade from version<6.1."""
return utils.compare_version(self.config.from_version, '6.1') > 0
def run(self):
"""Move files to new directory"""
if not utils.file_exists(self.dst_path):
os.makedirs(self.dst_path)
container_name = '{0}{1}-astute'.format(
self.config.container_prefix, self.config.from_version)
try:
utils.exec_cmd('docker cp {0}:{1} {2}'.format(
container_name,
self.src_path,
self.dst_path))
# we need to move the files and remove the copied folder, because
# docker cp copies the folder itself, not only its content
utils.exec_cmd('mv {0}astute/* {0}'.format(self.dst_path))
utils.exec_cmd('rm -r {0}astute/'.format(self.dst_path))
except errors.ExecutedErrorNonZeroExitCode as exc:
# An error means that the user has never run a deployment in this
# env, because the directory is created only during deployment
logger.warning(
'Cannot copy astute keys %s',
exc)
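Note: 'docker cp' copies the directory itself, which is why the hook flattens the result afterwards with mv/rm. A hedged sketch of that flatten step using the standard library instead of shell commands (the destination path is a placeholder):

import os
import shutil

dst_path = '/tmp/demo-keys/'                     # stands in for /var/lib/fuel/keys/
copied_dir = os.path.join(dst_path, 'astute')    # what 'docker cp' leaves behind

if os.path.isdir(copied_dir):
    # move the copied files up one level, then drop the now-empty directory,
    # mirroring the 'mv {dst}astute/* {dst}' and 'rm -r {dst}astute/' calls above
    for name in os.listdir(copied_dir):
        shutil.move(os.path.join(copied_dir, name), dst_path)
    shutil.rmtree(copied_dir)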

View File

@ -1,69 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from fuel_upgrade.engines.docker_engine import DockerUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
from fuel_upgrade.utils import copy_file
from fuel_upgrade.utils import remove
from fuel_upgrade.utils import safe_exec_cmd
class FixDhcrelayConf(PreUpgradeHookBase):
"""Fix supervisor's dhcrelay.conf
Since Fuel 6.1 we're going to use docker with host networking, so
we don't require dhcrelay anymore. Still, if something goes wrong
and a rollback is performed, we need to launch dhcrelay again
(because it was shut down by the host manifests).
In order to run it when we want to use the old containers, we need to:
* add dhcrelay.conf to the versioned supervisor folder
* remove dhcrelay.conf from the global supervisor scope
"""
#: this hook is required only for --net=host containers
enable_for_engines = [DockerUpgrader]
#: copy from
_save_from = os.path.join('/etc', 'supervisord.d', 'dhcrelay.conf')
#: save path
_save_to = os.path.join(
'/etc', 'supervisord.d', '{version}', 'dhcrelay.conf')
def __init__(self, *args, **kwargs):
super(FixDhcrelayConf, self).__init__(*args, **kwargs)
self._save_to = self._save_to.format(version=self.config.from_version)
def check_if_required(self):
if os.path.exists(self._save_from) and \
not os.path.exists(self._save_to):
return True
return False
def run(self):
# save dhcrelay.conf to versioned folder
copy_file(self._save_from, self._save_to)
# remove dhcrelay.conf from global supervisor scope
remove(self._save_from)
# stop dhcrelay in supervisord, otherwise it will be re-ran
# automatically
safe_exec_cmd('supervisorctl stop dhcrelay_monitor')
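Note: run() above is a copy-then-remove of a single config file into the versioned supervisor directory. A hedged sketch of the same steps with the standard library (the sandbox path is a placeholder for /etc/supervisord.d):

import os
import shutil

base = '/tmp/demo-supervisord.d'                 # placeholder for /etc/supervisord.d
from_version = '6.0'                             # illustrative pre-upgrade version
save_from = os.path.join(base, 'dhcrelay.conf')
save_to = os.path.join(base, from_version, 'dhcrelay.conf')

if os.path.exists(save_from) and not os.path.exists(save_to):
    if not os.path.isdir(os.path.dirname(save_to)):
        os.makedirs(os.path.dirname(save_to))
    shutil.copy(save_from, save_to)   # keep the config for the old containers
    os.remove(save_from)              # drop it from the global supervisor scope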

View File

@ -1,58 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import stat
from fuel_upgrade.engines.docker_engine import DockerUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
from fuel_upgrade import utils
class FixDhcrelayMonitor(PreUpgradeHookBase):
"""Fix dhcrelay_monitor wrapper for dhcrelay
Since Fuel 6.1 we're going to use docker with host networking, so
we don't require dhcrelay anymore. Still, if something goes wrong
and a rollback is performed, we need to launch dhcrelay again
(because it was shut down by the host manifests). In order to do it
properly, we need to get the cobbler container's ip address, but
we don't have such a hook in dockerctl anymore.
This hook is intended to inject the "retrieve ip address" code
into dhcrelay_monitor directly.
"""
enable_for_engines = [DockerUpgrader]
_save_from = os.path.join(
os.path.dirname(__file__), '..', 'templates', 'dhcrelay_monitor')
_save_to = '/usr/local/bin/dhcrelay_monitor'
def check_if_required(self):
# not required if fuel is already 6.1 or higher
if utils.compare_version('6.1', self.config.from_version) >= 0:
return False
return True
def run(self):
utils.copy(self._save_from, self._save_to, overwrite=True)
# make sure that the file is still executable
st = os.stat(self._save_to)
os.chmod(self._save_to, st.st_mode | stat.S_IEXEC)

View File

@ -1,103 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import logging
import os
import re
from io import open
from fuel_upgrade.engines.docker_engine import DockerUpgrader
from fuel_upgrade.engines.host_system import HostSystemUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
from fuel_upgrade import utils
logger = logging.getLogger(__name__)
class SetFixedVersionInSupervisor(PreUpgradeHookBase):
"""Set fixed version in containers' supervisor configs.
Currently, containers' supervisor configs don't have the Fuel version
in the command line. It means that when supervisor tries to start a
container, its version is retrieved on the fly from '/etc/fuel/version.yaml'.
Since Fuel 6.1, the '/etc/fuel/version.yaml' has to be new, because
otherwise 'host-upgrade.pp' will use an incorrect current version.
Unfortunately, if '/etc/fuel/version.yaml' is new and puppet
upgrades the Docker package, the Docker containers will be stopped and
they won't come up again because detecting the container version on the
fly will give us a wrong result (e.g. 6.1 instead of 6.0). So, the only
thing we can do is to set a fixed container version in supervisor's
configs, so it won't rely on the current state of '/etc/fuel/version.yaml'.
"""
#: this hook is required for the host system and docker upgrade engines
enable_for_engines = [HostSystemUpgrader, DockerUpgrader]
#: a list of containers for which we have to change supervisor configs.
#: please note, it's better to have an explicit list, because the user
#: may have custom supervisor configs and we don't want to touch them.
_containers = [
'astute',
'cobbler',
'keystone',
'mcollective',
'nailgun',
'nginx',
'ostf',
'postgres',
'rabbitmq',
'rsync',
'rsyslog',
]
def __init__(self, *args, **kwargs):
super(SetFixedVersionInSupervisor, self).__init__(*args, **kwargs)
#: a function that receives input text, replaces the command string
#: and returns the result
self._replace = functools.partial(
re.compile(r'command=dockerctl start (\w+) --attach').sub,
r'command=docker start -a fuel-core-{version}-\1'.format(
version=self.config.from_version))
def _set_version_in(self, confname):
with open(confname, 'rt', encoding='utf-8') as f:
data = self._replace(f.read())
with open(confname, 'wt', encoding='utf-8') as f:
f.write(data)
def check_if_required(self):
# should be applied if from_version < 6.1
return utils.compare_version(self.config.from_version, '6.1') > 0
def run(self):
for container in self._containers:
confname = '/etc/supervisord.d/{version}/{container}.conf'.format(
version=self.config.from_version,
container=container)
if os.path.exists(confname):
self._set_version_in(confname)
else:
logger.info('Could not find supervisor conf: "%s"', confname)
# apply updated configurations without actual restart
utils.safe_exec_cmd('supervisorctl update')
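Note: the core of this hook is the regex rewrite of the supervisor command line: a generic 'dockerctl start <name> --attach' entry becomes a version-pinned 'docker start -a fuel-core-<version>-<name>'. A self-contained demonstration of that substitution on a sample config line (the version value is illustrative):

import functools
import re

from_version = '6.0'  # illustrative pre-upgrade release

replace = functools.partial(
    re.compile(r'command=dockerctl start (\w+) --attach').sub,
    r'command=docker start -a fuel-core-{version}-\1'.format(
        version=from_version))

print(replace('command=dockerctl start nailgun --attach'))
# -> command=docker start -a fuel-core-6.0-nailgun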

View File

@ -1,106 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
from fuel_upgrade.engines.host_system import HostSystemUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
from fuel_upgrade.utils import compare_version
from fuel_upgrade.utils import exec_cmd_iterator
from fuel_upgrade.utils import safe_exec_cmd
class RecreateNailgunInPriveleged(PreUpgradeHookBase):
"""Recreate Nailgun container in priveleged mode.
Since Docker 0.11 all access to both /proc and /sys are restricted
and requires a privileged mode. Unfortunately, it affects us because
Nailgun container fails to execute the following line:
sysctl -w net.core.somaxconn=4096
So we have to recreate Nailgun container in privileged mode in order
to be compatible with both old and new Docker.
https://github.com/docker/docker/issues/5703
"""
#: this hook is required only when updating the docker package
enable_for_engines = [HostSystemUpgrader]
#: regexp that extracts version from 'docker --version' output
_docker_version = re.compile('Docker version ([0-9.]+)')
def __init__(self, *args, **kwargs):
super(RecreateNailgunInPriveleged, self).__init__(*args, **kwargs)
from_version = self.config.from_version
self._container = 'fuel-core-{0}-nailgun'.format(from_version)
self._image = 'fuel/nailgun_{0}'.format(from_version)
def check_if_required(self):
# not required if fuel is already higher than 6.1
if compare_version('6.1', self.config.from_version) > 0:
return False
# not required if container is in privileged mode already
container = json.loads('\n'.join(
exec_cmd_iterator('docker inspect {0}'.format(self._container))))
if container[0].get('HostConfig', {}).get('Privileged'):
return False
# not required if docker is already higher than 0.11
output = '\n'.join(exec_cmd_iterator('docker --version'))
match = self._docker_version.match(output)
if match:
version = match.group(1)
return compare_version('0.11.0', version) < 0
return False
def _stop_container(self):
safe_exec_cmd('docker stop {0}'.format(self._container))
def _destroy_container(self):
safe_exec_cmd('docker rm -f {0}'.format(self._container))
def _create_container(self):
command = ' '.join([
'docker run -d -t --privileged',
'-p {BIND_ADMIN}:8001:8001',
'-p {BIND_LOCAL}:8001:8001',
'-v /etc/nailgun',
'-v /var/log/docker-logs:/var/log',
'-v /var/www/nailgun:/var/www/nailgun:rw',
'-v /etc/yum.repos.d:/etc/yum.repos.d:rw',
'-v /etc/fuel:/etc/fuel:ro',
'-v /root/.ssh:/root/.ssh:ro',
'--name={CONTAINER}',
'{IMAGE}'])
command = command.format(
BIND_ADMIN=self.config.master_ip,
BIND_LOCAL='127.0.0.1',
CONTAINER=self._container,
IMAGE=self._image)
safe_exec_cmd(command)
def run(self):
self._stop_container()
self._destroy_container()
self._create_container()
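Note: check_if_required() above parses two external outputs: the JSON from 'docker inspect' and the 'docker --version' banner. A small sketch of both parsing steps on canned strings, so no docker daemon is needed (the sample outputs are illustrative and heavily trimmed):

import json
import re

# canned 'docker inspect <container>' output (trimmed)
inspect_output = '[{"HostConfig": {"Privileged": false}}]'
container = json.loads(inspect_output)
print(container[0].get('HostConfig', {}).get('Privileged'))  # False

# canned 'docker --version' banner, parsed with the same regexp as above
docker_version = re.compile('Docker version ([0-9.]+)')
match = docker_version.match('Docker version 0.10.0, build 0000000')
if match:
    print(match.group(1))  # '0.10.0' -> older than 0.11.0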

View File

@ -1,91 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from fuel_upgrade.pre_upgrade_hooks.from_5_0_1_to_any_fix_host_system_repo \
import FixHostSystemRepoHook
from fuel_upgrade.pre_upgrade_hooks.from_5_0_to_any_add_credentials \
import AddCredentialsHook
from fuel_upgrade.pre_upgrade_hooks.from_5_0_to_any_fix_puppet_manifests \
import FixPuppetManifests
from fuel_upgrade.pre_upgrade_hooks.from_5_0_to_any_sync_dns \
import SyncDnsHook
from fuel_upgrade.pre_upgrade_hooks. \
from_5_0_x_to_any_copy_openstack_release_versions \
import CopyOpenstackReleaseVersions
from fuel_upgrade.pre_upgrade_hooks.from_5_1_to_any_add_keystone_credentials \
import AddKeystoneCredentialsHook
from fuel_upgrade.pre_upgrade_hooks.from_5_1_to_any_ln_fuelweb_x86_64 \
import AddFuelwebX8664LinkForUbuntu
from fuel_upgrade.pre_upgrade_hooks.from_6_0_to_any_add_dhcp_gateway \
import AddDhcpGateway
from fuel_upgrade.pre_upgrade_hooks.from_6_0_to_any_add_monitord_credentials \
import AddMonitordKeystoneCredentialsHook
from fuel_upgrade.pre_upgrade_hooks.from_6_0_to_any_copy_keys \
import MoveKeysHook
from fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_dhcrelay_conf \
import FixDhcrelayConf
from fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_dhcrelay_monitor \
import FixDhcrelayMonitor
from fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_fix_version_in_supervisor \
import SetFixedVersionInSupervisor
from fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_recreate_containers \
import RecreateNailgunInPriveleged
logger = logging.getLogger(__name__)
class PreUpgradeHookManager(object):
"""Runs hooks before upgrade if required
:param list upgraders: list of :class:`BaseUpgrader` implementations
:param config: :class:`Config` object
"""
#: List of hook classes
hook_list = [
AddCredentialsHook,
AddDhcpGateway,
AddFuelwebX8664LinkForUbuntu,
AddKeystoneCredentialsHook,
AddMonitordKeystoneCredentialsHook,
FixPuppetManifests,
FixHostSystemRepoHook,
SyncDnsHook,
CopyOpenstackReleaseVersions,
MoveKeysHook,
RecreateNailgunInPriveleged,
FixDhcrelayConf,
FixDhcrelayMonitor,
SetFixedVersionInSupervisor,
]
def __init__(self, upgraders, config):
#: Pre upgrade hook objects
self.pre_upgrade_hooks = [hook_class(upgraders, config)
for hook_class in self.hook_list]
def run(self):
"""Run hooks if required"""
for hook in self.pre_upgrade_hooks:
hook_name = hook.__class__.__name__
if hook.is_required:
logger.debug('Run pre upgrade hook %s', hook_name)
hook.run()
else:
logger.debug('Skip pre upgrade hook %s', hook_name)
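Note: the manager is constructed with the engine instances and the config, and each hook decides for itself (via is_required, which in turn consults enable_for_engines) whether it applies. A stripped-down stand-in for that dispatch pattern, with hypothetical hook objects instead of the real classes:

class FakeHook(object):
    # hypothetical stand-in for a PreUpgradeHookBase subclass
    def __init__(self, name, required):
        self.name = name
        self.is_required = required

    def run(self):
        print('running {0}'.format(self.name))


hooks = [FakeHook('add_credentials', True), FakeHook('copy_keys', False)]
for hook in hooks:
    if hook.is_required:
        hook.run()
    else:
        print('skipping {0}'.format(hook.name))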

View File

@ -1,33 +0,0 @@
#!/bin/bash
function setup_dhcrelay_for_cobbler {
VERSION=$(awk '/release/{gsub(/"/, "");print $2}' /etc/fuel/version.yaml)
container="fuel-core-${VERSION}-cobbler"
cobbler_ip=$(docker inspect --format='{{.NetworkSettings.IPAddress}}' $container)
admin_interface=$(grep interface: /etc/fuel/astute.yaml | cut -d':' -f2 | tr -d ' ')
cat > /etc/sysconfig/dhcrelay << EOF
# Command line options here
DHCRELAYARGS=""
# DHCPv4 only
INTERFACES="$admin_interface docker0"
# DHCPv4 only
DHCPSERVERS="$cobbler_ip"
EOF
rpm -q dhcp > /dev/null 2>&1 || yum --quiet -y install dhcp
chkconfig dhcrelay on
service dhcrelay restart
}
if ! grep -q docker0 /proc/net/dev; then
sleep 10
fi
setup_dhcrelay_for_cobbler
( ps -p "`cat /var/run/dhcrelay.pid 2>/dev/null`" &>/dev/null || /etc/init.d/dhcrelay start )
if [ $? -eq 0 ]; then
pid=$(cat /var/run/dhcrelay.pid)
while test -d "/proc/$pid/fd"; do
sleep 30
done
fi

View File

@ -1,5 +0,0 @@
[${name}]
name=${name}
baseurl=${baseurl}
gpgcheck=${gpgcheck}
skip_if_unavailable=${skip_if_unavailable}

View File

@ -1,17 +0,0 @@
[program:${service_name}]
command=${command}
process_name=%(program_name)s
numprocs=1
numprocs_start=0
autostart=${autostart}
autorestart=true
startsecs=10
startretries=1000000
exitcodes=0,2
stopsignal=INT
stopwaitsecs=60
redirect_stderr=true
stdout_logfile=${log_path}
stdout_capture_maxbytes=0
stdout_events_enabled=false
serverurl=AUTO

View File

@ -1,15 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,102 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from unittest.case import TestCase
except ImportError:
# Required for python 2.6
from unittest2.case import TestCase
from StringIO import StringIO
import mock
import requests
from fuel_upgrade import config
class FakeFile(StringIO):
"""Context manager that fakes a file with StringIO object
NOTE(eli): We cannot use mock_open from mock library
here, because it hangs when we use 'with' statement,
and when we want to read file by chunks.
"""
def __enter__(self):
return self
def __exit__(self, *args):
pass
class BaseTestCase(TestCase):
"""Base class for test cases"""
def method_was_not_called(self, method):
"""Checks that mocked method was not called"""
self.assertEqual(method.call_count, 0)
def called_once(self, method):
"""Checks that mocked method was called once"""
self.assertEqual(method.call_count, 1)
def called_times(self, method, count):
"""Checks that mocked method was called `count` times"""
self.assertEqual(method.call_count, count)
@property
@mock.patch('fuel_upgrade.config.glob.glob', return_value=['2014.1.1-5.1'])
@mock.patch('fuel_upgrade.config.get_version_from_config',
side_effect=['0', '9999'])
@mock.patch('fuel_upgrade.config.from_fuel_version', return_value='0')
@mock.patch('fuel_upgrade.config.read_yaml_config',
return_value={'ADMIN_NETWORK': {'ipaddress': '0.0.0.0'}})
def fake_config(self, _, __, ___, ____):
update_path = '/tmp/upgrade_path'
admin_password = 'admin'
conf = config.build_config(update_path, admin_password)
conf.astute = {
'ADMIN_NETWORK': {
'ipaddress': '0.0.0.0'
}
}
return conf
def mock_open(self, text, filename='some.yaml'):
"""Mocks builtin open function.
Usage example:
with mock.patch(
'__builtin__.open',
self.mock_open('file content')
):
# call some methods that use open() to read some
# stuff internally
"""
fileobj = FakeFile(text)
setattr(fileobj, 'name', filename)
return mock.MagicMock(return_value=fileobj)
def mock_requests_response(self, status_code, body):
"""Creates a response object with custom status code and body."""
rv = requests.Response()
rv.status_code = status_code
rv.encoding = 'utf-8'
rv.raw = FakeFile(body)
return rv
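Note: these helpers are meant to be used from BaseTestCase subclasses. A hedged sketch of how mock_requests_response() would typically be consumed in a test (the patched target and the test itself are hypothetical, not one of the real test cases):

import mock


class ExampleResponseTest(BaseTestCase):
    # illustrative only

    def test_treats_500_as_failure(self):
        response = self.mock_requests_response(500, '{"error": "boom"}')
        with mock.patch('requests.get', return_value=response):
            # code under test would call requests.get(...) here and is
            # expected to treat the 500 response as a failed check
            self.assertEqual(response.status_code, 500)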

View File

@ -1,229 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import requests
import six
from fuel_upgrade.before_upgrade_checker import CheckFreeSpace
from fuel_upgrade.before_upgrade_checker import CheckNoRunningOstf
from fuel_upgrade.before_upgrade_checker import CheckNoRunningTasks
from fuel_upgrade.before_upgrade_checker import CheckRequiredVersion
from fuel_upgrade.before_upgrade_checker import CheckUpgradeVersions
from fuel_upgrade import errors
from fuel_upgrade.tests.base import BaseTestCase
class TestCheckNoRunningTasks(BaseTestCase):
def setUp(self):
config = mock.MagicMock()
config.endpoints = {
'nginx_nailgun': {'host': '127.0.0.1', 'port': 1234}}
self.config = config
@mock.patch('fuel_upgrade.before_upgrade_checker.NailgunClient.get_tasks',
return_value=[{
'status': 'running', 'id': 'id',
'cluster': 123, 'name': 'task_name'}])
def test_check_raises_error(self, get_tasks_mock):
checker = CheckNoRunningTasks(self.config)
self.assertRaisesRegexp(
errors.CannotRunUpgrade,
'Cannot run upgrade, tasks are running: '
'id=id cluster=123 name=task_name',
checker.check)
self.called_once(get_tasks_mock)
@mock.patch('fuel_upgrade.before_upgrade_checker.NailgunClient.get_tasks',
return_value=[{
'status': 'ready', 'id': 'id',
'cluster': 123, 'name': 'task_name'}])
def test_check_upgrade_opportunity_does_not_raise_error(
self, get_tasks_mock):
checker = CheckNoRunningTasks(self.config)
checker.check()
self.called_once(get_tasks_mock)
@mock.patch('fuel_upgrade.before_upgrade_checker.NailgunClient.get_tasks',
side_effect=requests.ConnectionError(''))
def test_check_raises_error_if_nailgun_is_not_running(
self, get_tasks_mock):
checker = CheckNoRunningTasks(self.config)
self.assertRaisesRegexp(
errors.NailgunIsNotRunningError,
'Cannot connect to rest api service',
checker.check)
self.called_once(get_tasks_mock)
class TestCheckNoRunningOstf(BaseTestCase):
def setUp(self):
config = mock.MagicMock()
config.endpoints = {'ostf': {'host': '127.0.0.1', 'port': 1234}}
self.checker = CheckNoRunningOstf(config)
@mock.patch('fuel_upgrade.before_upgrade_checker.OSTFClient.get_tasks',
return_value=[{'status': 'running'}])
def test_check_raises_error(self, get_mock):
self.assertRaisesRegexp(
errors.CannotRunUpgrade,
'Cannot run upgrade since there are OSTF running tasks.',
self.checker.check)
self.called_once(get_mock)
@mock.patch('fuel_upgrade.before_upgrade_checker.OSTFClient.get_tasks',
return_value=[{'status': 'finished'}])
def test_check_upgrade_opportunity_does_not_raise_error(self, get_mock):
self.checker.check()
self.called_once(get_mock)
@mock.patch('fuel_upgrade.before_upgrade_checker.OSTFClient.get_tasks',
side_effect=requests.ConnectionError(''))
def test_check_raises_error_if_ostf_is_not_running(self, get_mock):
self.assertRaisesRegexp(
errors.OstfIsNotRunningError,
'Cannot connect to OSTF service.',
self.checker.check)
self.called_once(get_mock)
@mock.patch('fuel_upgrade.before_upgrade_checker.utils.find_mount_point',
side_effect=['/var', '/var', '/etc'])
class TestCheckFreeSpace(BaseTestCase):
def setUp(self):
context = mock.MagicMock()
context.required_free_spaces = [
{'/var/lib/docker': 10},
{'/etc/fuel': 10, '/vat/www': 10},
None]
self.context = context
@mock.patch('fuel_upgrade.before_upgrade_checker.utils.'
'calculate_free_space', return_value=100)
def test_check(self, calculate_free_space_mock, find_mount_point_mock):
checker = CheckFreeSpace(self.context)
checker.check()
self.called_times(find_mount_point_mock, 3)
self.called_times(calculate_free_space_mock, 2)
@mock.patch('fuel_upgrade.before_upgrade_checker.utils.'
'calculate_free_space', return_value=9)
def test_check_raises_errors(
self, calculate_free_space_mock, find_mount_point_mock):
checker = CheckFreeSpace(self.context)
err_msg = "Not enough free space on device: " +\
"device /etc (required 10MB, available 9MB, not enough 1MB), " +\
"device /var (required 20MB, available 9MB, not enough 11MB)"
with self.assertRaises(errors.NotEnoughFreeSpaceOnDeviceError) as exc:
checker.check()
self.assertEqual(str(exc.exception), err_msg)
self.called_times(find_mount_point_mock, 3)
self.called_times(calculate_free_space_mock, 2)
@mock.patch('fuel_upgrade.before_upgrade_checker.utils.'
'calculate_free_space')
def test_space_required_for_mount_points(
self, calculate_free_space_mock, find_mount_point_mock):
checker = CheckFreeSpace(self.context)
mount_points = checker.space_required_for_mount_points()
self.assertEqual(mount_points, {'/etc': 10, '/var': 20})
@mock.patch('fuel_upgrade.before_upgrade_checker.utils.'
'calculate_free_space', return_value=9)
def test_list_of_error_mount_points(
self, calculate_free_space_mock, find_mount_point_mock):
checker = CheckFreeSpace(self.context)
error_mount_points = checker.list_of_error_mount_points({
'/etc': 100, '/var': 2})
self.assertEqual(
error_mount_points,
[{'available': 9, 'path': '/etc', 'size': 100}])
class TestCheckUpgradeVersions(BaseTestCase):
def setUp(self):
context = mock.MagicMock(config=self.fake_config)
self.checker = CheckUpgradeVersions(context)
@mock.patch(
'fuel_upgrade.before_upgrade_checker.utils.compare_version',
return_value=1)
def test_check(self, compare_mock):
self.checker.check()
compare_mock.assert_called_once_with('0', '9999')
@mock.patch(
'fuel_upgrade.before_upgrade_checker.utils.compare_version',
return_value=0)
def test_check_same_version_error(self, compare_mock):
err_msg = 'Cannot upgrade to the same version of fuel 0 -> 9999'
self.assertRaisesRegexp(
errors.WrongVersionError,
err_msg,
self.checker.check)
compare_mock.assert_called_once_with('0', '9999')
@mock.patch(
'fuel_upgrade.before_upgrade_checker.utils.compare_version',
return_value=-1)
def test_check_higher_version_error(self, compare_mock):
err_msg = 'Cannot upgrade from higher version of ' +\
'fuel to lower 0 -> 9999'
self.assertRaisesRegexp(
errors.WrongVersionError,
err_msg,
self.checker.check)
compare_mock.assert_called_once_with('0', '9999')
class TestCheckRequiredVersions(BaseTestCase):
def get_checker(self, user_conf={}):
config = self.fake_config
for key, value in six.iteritems(user_conf):
setattr(config, key, value)
return CheckRequiredVersion(mock.Mock(config=config))
def test_check_support_version(self):
checker = self.get_checker({
'from_version': '5.1.1',
'can_upgrade_from': ['5.1.1']})
checker.check()
def test_check_unsupport_version(self):
checker = self.get_checker({
'from_version': '5.1',
'can_upgrade_from': ['5.1.1']})
self.assertRaises(errors.WrongVersionError, checker.check)

View File

@ -1,86 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from fuel_upgrade.checker_manager import CheckerManager
from fuel_upgrade.tests.base import BaseTestCase
class TestCheckerManager(BaseTestCase):
def setUp(self):
self.config = self.fake_config
class Upgrader1(mock.MagicMock):
pass
class Upgrader2(mock.MagicMock):
pass
class Upgrader3(mock.MagicMock):
pass
class Checker1(mock.MagicMock):
pass
class Checker2(mock.MagicMock):
pass
class Checker3(mock.MagicMock):
pass
self.checker_classes = [Checker1, Checker2, Checker3]
self.checker_mapping = {
Upgrader1: [self.checker_classes[0], self.checker_classes[1]],
Upgrader2: [self.checker_classes[0], self.checker_classes[2]],
Upgrader3: []}
self.upgraders = [Upgrader1(), Upgrader2(), Upgrader3()]
self.required_free_space_mocks = []
# Mock property
for upgrader in self.upgraders:
required_free_space_mock = mock.PropertyMock()
type(upgrader).required_free_space = required_free_space_mock
self.required_free_space_mocks.append(required_free_space_mock)
self.checker_manager = CheckerManager(self.upgraders, self.config)
def test_init(self):
self.checker_manager.check()
for required_free_space_mock in self.required_free_space_mocks:
self.called_once(required_free_space_mock)
def test_check(self):
checkers = [c() for c in self.checker_classes]
with mock.patch('fuel_upgrade.checker_manager.'
'CheckerManager._checkers',
return_value=checkers):
self.checker_manager.check()
for checker in checkers:
self.called_once(checker.check)
def test_checkers(self):
with mock.patch(
'fuel_upgrade.checker_manager.'
'CheckerManager.CHECKERS_MAPPING',
new_callable=mock.PropertyMock(
return_value=self.checker_mapping)):
checkers = self.checker_manager._checkers()
self.assertEqual(len(checkers), 3)

View File

@ -1,96 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from fuel_upgrade import errors
from fuel_upgrade import messages
from fuel_upgrade.cli import parse_args
from fuel_upgrade.cli import run_upgrade
from fuel_upgrade.tests.base import BaseTestCase
@mock.patch('fuel_upgrade.engines.host_system.SupervisorClient', mock.Mock())
@mock.patch('fuel_upgrade.cli.CheckerManager', mock.Mock())
@mock.patch('fuel_upgrade.cli.PreUpgradeHookManager', mock.Mock())
@mock.patch('fuel_upgrade.cli.UpgradeManager', mock.Mock())
@mock.patch('fuel_upgrade.cli.build_config')
class TestAdminPassword(BaseTestCase):
default_args = ['host-system', '--src', '/path']
def get_args(self, args):
return parse_args(args)
def test_use_password_arg(self, mbuild_config):
password = '12345678'
args = self.get_args(self.default_args + ['--password', password])
run_upgrade(args)
mbuild_config.assert_called_once_with(
mock.ANY, password
)
@mock.patch('fuel_upgrade.cli.getpass')
def test_ask_for_password(self, mgetpass, mbuild_config):
password = '987654321'
mgetpass.getpass.return_value = password
args = self.get_args(self.default_args)
run_upgrade(args)
mbuild_config.assert_called_once_with(
mock.ANY, password
)
@mock.patch('fuel_upgrade.cli.getpass')
def test_no_password_provided(self, mgetpass, mbuild_config):
password = ''
mgetpass.getpass.return_value = password
with self.assertRaisesRegexp(errors.CommandError,
messages.no_password_provided):
args = self.get_args(self.default_args)
run_upgrade(args)
class TestArgumentsParser(BaseTestCase):
default_args = ['--src', '/path']
def test_parse_list_of_systems(self):
systems = ['host-system', 'docker']
args = parse_args(systems + self.default_args)
self.assertEqual(systems, args.systems)
@mock.patch('argparse.ArgumentParser.error')
def test_error_if_systems_have_duplicates(self, error_mock):
parse_args(
['host-system', 'docker', 'openstack', 'openstack', 'docker'] +
self.default_args
)
self.assertEqual(1, error_mock.call_count)
self.assertEqual(1, len(error_mock.call_args[0]))
self.assertIn('"docker, openstack"', error_mock.call_args[0][0])
@mock.patch('argparse.ArgumentParser.error')
def test_error_if_systems_are_incompatible(self, error_mock):
parse_args(
['docker', 'docker-init'] + self.default_args
)
self.assertEqual(1, error_mock.call_count)
self.assertEqual(1, len(error_mock.call_args[0]))
self.assertIn('"docker-init, docker"', error_mock.call_args[0][0])

View File

@ -1,435 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from fuel_upgrade.engines.docker_engine import DockerUpgrader
from fuel_upgrade import errors
from fuel_upgrade.tests.base import BaseTestCase
class TestDockerUpgrader(BaseTestCase):
def setUp(self):
# NOTE (eli): mocking doesn't work correctly
# when we try to patch the docker client with
# a class decorator; that's why
# we have to do it explicitly
self.docker_patcher = mock.patch(
'fuel_upgrade.engines.docker_engine.docker.Client')
self.docker_mock_class = self.docker_patcher.start()
self.docker_mock = mock.MagicMock()
self.docker_mock_class.return_value = self.docker_mock
self.supervisor_patcher = mock.patch(
'fuel_upgrade.engines.docker_engine.SupervisorClient')
self.supervisor_class = self.supervisor_patcher.start()
self.supervisor_mock = mock.MagicMock()
self.supervisor_class.return_value = self.supervisor_mock
with mock.patch('fuel_upgrade.engines.docker_engine.utils'):
self.upgrader = DockerUpgrader(self.fake_config)
self.upgrader.upgrade_verifier = mock.MagicMock()
self.pg_dump_path = '/var/lib/fuel_upgrade/9999/pg_dump_all.sql'
def tearDown(self):
self.docker_patcher.stop()
self.supervisor_patcher.stop()
def mock_methods(self, obj, methods):
for method in methods:
setattr(obj, method, mock.MagicMock())
def test_upgrade(self):
mocked_methods = [
'stop_fuel_containers',
'save_db',
'save_cobbler_configs',
'upload_images',
'create_and_start_new_containers',
'generate_configs',
'switch_to_new_configs']
self.mock_methods(self.upgrader, mocked_methods)
self.upgrader.upgrade()
self.assertEqual(
self.upgrader.generate_configs.call_args_list,
[mock.call(autostart=True)])
self.called_once(self.upgrader.stop_fuel_containers)
self.assertEqual(self.supervisor_mock.restart_and_wait.call_count, 2)
self.called_once(self.upgrader.upgrade_verifier.verify)
def test_rollback(self):
self.upgrader.stop_fuel_containers = mock.MagicMock()
self.upgrader.rollback()
self.called_times(self.upgrader.stop_fuel_containers, 1)
self.called_once(self.supervisor_mock.switch_to_previous_configs)
self.called_once(self.supervisor_mock.stop_all_services)
self.called_once(self.supervisor_mock.restart_and_wait)
self.called_once(self.supervisor_mock.remove_new_configs)
def test_stop_fuel_containers(self):
non_fuel_images = [
'first_image_1.0', 'second_image_2.0', 'third_image_2.0']
fuel_images = [
'fuel/image_1.0', 'fuel/image_2.0']
all_images = [{'Image': v, 'Id': i}
for i, v in enumerate(non_fuel_images + fuel_images)]
ports = [1, 2, 3]
self.upgrader._get_docker_container_public_ports = mock.MagicMock(
return_value=ports)
self.docker_mock.containers.return_value = all_images
self.upgrader.stop_fuel_containers()
self.assertEqual(
self.docker_mock.stop.call_args_list,
[mock.call(3, 20), mock.call(4, 20)])
@mock.patch('fuel_upgrade.engines.docker_engine.utils.exec_cmd')
@mock.patch('fuel_upgrade.engines.docker_engine.os.path.exists',
return_value=True)
def test_upload_images(self, _, exec_mock):
self.upgrader.upload_images()
exec_mock.assert_called_with(
'docker load -i "/tmp/upgrade_path/images/fuel-images.tar"')
def test_create_containers(self):
fake_containers = [
{'id': 'id1',
'container_name': 'name1',
'image_name': 'i_name1',
'volumes_from': ['id2']},
{'id': 'id2',
'image_name': 'i_name2',
'container_name': 'name2',
'after_container_creation_command': 'cmd',
'supervisor_config': True}]
self.upgrader.new_release_containers = fake_containers
def mocked_create_container(*args, **kwargs):
"""Return name of the container"""
return kwargs['name']
self.upgrader.create_container = mock.MagicMock(
side_effect=mocked_create_container)
self.upgrader.start_container = mock.MagicMock()
self.upgrader.run_after_container_creation_command = mock.MagicMock()
self.upgrader.create_and_start_new_containers()
create_container_calls = [
mock.call('i_name2', detach=False, ports=None,
volumes=None, name='name2'),
mock.call('i_name1', detach=False, ports=None,
volumes=None, name='name1')]
start_container_calls = [
mock.call('name2', volumes_from=[],
binds=None, port_bindings=None,
privileged=False, links=[],
network_mode=None),
mock.call('name1', volumes_from=['name2'],
binds=None, port_bindings=None,
privileged=False, links=[],
network_mode=None)]
self.assertEqual(
self.upgrader.create_container.call_args_list,
create_container_calls)
self.assertEqual(
self.upgrader.start_container.call_args_list,
start_container_calls)
self.called_once(self.upgrader.run_after_container_creation_command)
def test_run_after_container_creation_command(self):
self.upgrader.exec_with_retries = mock.MagicMock()
self.upgrader.run_after_container_creation_command({
'after_container_creation_command': 'cmd',
'container_name': 'name'})
args, kwargs = self.upgrader.exec_with_retries.call_args
self.assertEqual(args[1], errors.ExecutedErrorNonZeroExitCode)
self.assertEqual(kwargs, {'retries': 30, 'interval': 4})
def test_create_container(self):
self.upgrader.create_container(
'image_name', param1=1, param2=2, ports=[1234])
self.docker_mock.create_container.assert_called_once_with(
'image_name', param2=2, param1=1, ports=[1234])
def test_start_container(self):
self.upgrader.start_container(
{'Id': 'container_id'}, param1=1, param2=2)
self.docker_mock.start.assert_called_once_with(
'container_id', param2=2, param1=1)
def test_build_dependencies_graph(self):
containers = [
{'id': '1', 'volumes_from': ['2'], 'links': [{'id': '3'}]},
{'id': '2', 'volumes_from': [], 'links': []},
{'id': '3', 'volumes_from': [], 'links': [{'id': '2'}]}]
actual_graph = self.upgrader.build_dependencies_graph(containers)
expected_graph = {
'1': ['2', '3'],
'2': [],
'3': ['2']}
self.assertEqual(actual_graph, expected_graph)
def test_get_container_links(self):
fake_containers = [
{'id': 'id1', 'container_name': 'container_name1',
'links': [{'id': 'id2', 'alias': 'alias2'}]},
{'id': 'id2', 'container_name': 'container_name2'}]
self.upgrader.new_release_containers = fake_containers
links = self.upgrader.get_container_links(fake_containers[0])
self.assertEqual(links, [('container_name2', 'alias2')])
def test_get_ports(self):
ports = self.upgrader.get_ports({'ports': [[53, 'udp'], 100]})
self.assertEqual([(53, 'udp'), 100], ports)
def test_generate_configs(self):
fake_containers = [
{'id': 'id1', 'container_name': 'container_name1',
'supervisor_config': False},
{'id': 'id2', 'container_name': 'container_name2',
'supervisor_config': True}]
self.upgrader.new_release_containers = fake_containers
self.upgrader.generate_configs()
self.supervisor_mock.generate_configs.assert_called_once_with(
[{'config_name': 'id2',
'service_name': 'docker-id2',
'command': 'docker start -a container_name2',
'autostart': True}])
def test_switch_to_new_configs(self):
self.upgrader.switch_to_new_configs()
self.supervisor_mock.switch_to_new_configs.assert_called_once_with()
@mock.patch('fuel_upgrade.engines.docker_engine.utils.exec_cmd')
def test_exec_cmd_in_container(self, exec_cmd_mock):
name = 'container_name'
cmd = 'some command'
self.upgrader.container_docker_id = mock.MagicMock(return_value=name)
self.upgrader.exec_cmd_in_container(name, cmd)
self.called_once(self.upgrader.container_docker_id)
exec_cmd_mock.assert_called_once_with(
"dockerctl shell {0} {1}".format(name, cmd))
@mock.patch('fuel_upgrade.engines.docker_engine.'
'utils.exec_cmd')
@mock.patch('fuel_upgrade.engines.docker_engine.'
'DockerUpgrader.verify_cobbler_configs')
def test_save_cobbler_configs(self, verify_mock, exec_cmd_mock):
self.upgrader.save_cobbler_configs()
exec_cmd_mock.assert_called_once_with(
'docker cp fuel-core-0-cobbler:/var/lib/cobbler/config '
'/var/lib/fuel_upgrade/9999/cobbler_configs')
self.called_once(verify_mock)
@mock.patch('fuel_upgrade.engines.docker_engine.utils.rmtree')
@mock.patch('fuel_upgrade.engines.docker_engine.utils.exec_cmd',
side_effect=errors.ExecutedErrorNonZeroExitCode())
def test_save_cobbler_configs_removes_dir_in_case_of_error(
self, exec_cmd_mock, rm_mock):
with self.assertRaises(errors.ExecutedErrorNonZeroExitCode):
self.upgrader.save_cobbler_configs()
cobbler_config_path = '/var/lib/fuel_upgrade/9999/cobbler_configs'
exec_cmd_mock.assert_called_once_with(
'docker cp fuel-core-0-cobbler:/var/lib/cobbler/config '
'{0}'.format(cobbler_config_path))
rm_mock.assert_called_once_with(cobbler_config_path)
@mock.patch('fuel_upgrade.engines.docker_engine.glob.glob',
return_value=['1.json'])
@mock.patch('fuel_upgrade.engines.docker_engine.utils.'
'check_file_is_valid_json')
def test_verify_cobbler_configs(self, json_checker_mock, glob_mock):
self.upgrader.verify_cobbler_configs()
glob_mock.assert_called_once_with(
'/var/lib/fuel_upgrade/9999/'
'cobbler_configs/config/systems.d/*.json')
json_checker_mock.assert_called_once_with('1.json')
@mock.patch('fuel_upgrade.engines.docker_engine.glob.glob',
return_value=[])
def test_verify_cobbler_configs_raises_error_if_not_enough_systems(
self, glob_mock):
with self.assertRaises(errors.WrongCobblerConfigsError):
self.upgrader.verify_cobbler_configs()
self.called_once(glob_mock)
@mock.patch('fuel_upgrade.engines.docker_engine.glob.glob',
return_value=['1.json'])
@mock.patch('fuel_upgrade.engines.docker_engine.utils.'
'check_file_is_valid_json', return_value=False)
def test_verify_cobbler_configs_raises_error_if_invalid_file(
self, json_checker_mock, glob_mock):
with self.assertRaises(errors.WrongCobblerConfigsError):
self.upgrader.verify_cobbler_configs()
self.called_once(glob_mock)
self.called_once(json_checker_mock)
def test_get_docker_container_public_ports(self):
docker_ports_mapping = [
{'Ports': [
{'PublicPort': 514},
{'PublicPort': 515}]},
{'Ports': [
{'PublicPort': 516},
{'PublicPort': 517}]}]
self.assertEquals(
[514, 515, 516, 517],
self.upgrader._get_docker_container_public_ports(
docker_ports_mapping))
@mock.patch('fuel_upgrade.engines.docker_engine.utils.files_size',
return_value=5)
def test_required_free_space(self, _):
self.assertEqual(
self.upgrader.required_free_space,
{'/var/lib/fuel_upgrade/9999': 150,
'/var/lib/docker': 5,
'/etc/fuel/': 10,
'/etc/supervisord.d/': 10})
@mock.patch('fuel_upgrade.engines.docker_engine.utils')
def test_save_db_succeed(self, mock_utils):
with mock.patch('fuel_upgrade.engines.docker_engine.'
'utils.VersionedFile') as version_mock:
version_mock.return_value.next_file_name.return_value = 'file3'
version_mock.return_value.sorted_files.return_value = [
'file3', 'file2', 'file1']
self.upgrader.save_db()
self.called_once(mock_utils.wait_for_true)
mock_utils.hardlink.assert_called_once_with(
'file3',
'/var/lib/fuel_upgrade/9999/pg_dump_all.sql',
overwrite=True)
@mock.patch('fuel_upgrade.engines.docker_engine.utils')
def test_save_db_error_first_dump_is_invalid(self, mock_utils):
with mock.patch('fuel_upgrade.engines.docker_engine.'
'utils.VersionedFile') as version_mock:
version_mock.return_value.filter_files.return_value = []
self.assertRaises(errors.DatabaseDumpError, self.upgrader.save_db)
self.method_was_not_called(mock_utils.hardlink)
@mock.patch('fuel_upgrade.engines.docker_engine.utils')
def test_save_db_removes_old_dump_files(self, mock_utils):
mock_utils.file_exists.return_value = True
with mock.patch('fuel_upgrade.engines.docker_engine.'
'utils.VersionedFile') as version_mock:
version_mock.return_value.sorted_files.return_value = [
'file1', 'file2', 'file3', 'file4', 'file5']
self.upgrader.save_db()
self.assertEqual(
mock_utils.remove_if_exists.call_args_list,
[mock.call('file4'), mock.call('file5')])
@mock.patch('fuel_upgrade.engines.docker_engine.'
'DockerUpgrader.exec_cmd_in_container')
@mock.patch('fuel_upgrade.engines.docker_engine.utils')
def test_make_pg_dump_succeed(self, mock_utils, exec_mock):
self.assertTrue(
self.upgrader.make_pg_dump('tmp_path', self.pg_dump_path))
self.method_was_not_called(mock_utils.file_exists)
self.method_was_not_called(mock_utils.remove_if_exists)
exec_mock.assert_called_once_with(
'fuel-core-0-postgres',
"su postgres -c 'pg_dumpall --clean' > tmp_path")
@mock.patch('fuel_upgrade.engines.docker_engine.'
'DockerUpgrader.exec_cmd_in_container',
side_effect=errors.ExecutedErrorNonZeroExitCode())
@mock.patch('fuel_upgrade.engines.docker_engine.utils')
def test_make_pg_dump_error_failed_to_execute_dump_command(
self, mock_utils, _):
mock_utils.file_exists.return_value = False
self.assertFalse(
self.upgrader.make_pg_dump('tmp_path', self.pg_dump_path))
mock_utils.file_exists.assert_called_once_with(self.pg_dump_path)
mock_utils.remove_if_exists.assert_called_once_with('tmp_path')
@mock.patch('fuel_upgrade.engines.docker_engine.'
'DockerUpgrader.exec_cmd_in_container',
side_effect=errors.CannotFindContainerError())
@mock.patch('fuel_upgrade.engines.docker_engine.utils')
def test_make_pg_dump_error_failed_because_of_stopped_container(
self, mock_utils, exec_cmd_mock):
mock_utils.file_exists.return_value = False
self.assertFalse(
self.upgrader.make_pg_dump('tmp_path', self.pg_dump_path))
mock_utils.file_exists.assert_called_once_with(self.pg_dump_path)
mock_utils.remove_if_exists.assert_called_once_with('tmp_path')
@mock.patch('fuel_upgrade.engines.docker_engine.'
'DockerUpgrader.exec_cmd_in_container',
side_effect=errors.ExecutedErrorNonZeroExitCode())
@mock.patch('fuel_upgrade.engines.docker_engine.utils')
def test_make_pg_dump_second_run_failed_to_execute_dump_command(
self, mock_utils, exec_cmd_mock):
mock_utils.file_exists.return_value = True
with mock.patch('fuel_upgrade.engines.docker_engine.'
'utils.VersionedFile') as version_mock:
version_mock.return_value.sorted_files.return_value = [
'file1', 'file2']
self.assertTrue(
self.upgrader.make_pg_dump('tmp_path', self.pg_dump_path))
mock_utils.file_exists.assert_called_once_with(self.pg_dump_path)
self.called_once(mock_utils.remove_if_exists)
@mock.patch('fuel_upgrade.engines.docker_engine.'
'DockerUpgrader.exec_cmd_in_container',
side_effect=errors.CannotFindContainerError())
@mock.patch('fuel_upgrade.engines.docker_engine.utils')
def test_make_pg_dump_second_run_failed_because_of_stopped_container(
self, mock_utils, _):
mock_utils.file_exists.return_value = True
with mock.patch('fuel_upgrade.engines.docker_engine.'
'utils.VersionedFile') as version_mock:
version_mock.return_value.sorted_files.return_value = ['file1']
self.assertTrue(
self.upgrader.make_pg_dump('tmp_path', self.pg_dump_path))

View File

@ -1,406 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import requests
import socket
from fuel_upgrade.health_checker import BaseChecker
from fuel_upgrade.health_checker import FuelUpgradeVerify
from fuel_upgrade.tests.base import BaseTestCase
from fuel_upgrade import health_checker
from fuel_upgrade import errors
class TestBaseChecker(BaseTestCase):
def setUp(self):
class BaseCheckerImplementation(BaseChecker):
@property
def checker_name(self):
return 'base_checker'
def check(self):
pass
self.base_checker = BaseCheckerImplementation(None)
def make_get_request(
self, url='http://some_url.html',
auth=('some_user', 'some_password')):
result = self.base_checker.safe_get(url, auth=auth)
return result
@mock.patch('fuel_upgrade.health_checker.requests.get')
def test_safe_get_request_succeed(self, requests_get):
json_resp = {'attr': 'value'}
result = mock.MagicMock()
result.json.return_value = json_resp
result.status_code = 200
requests_get.return_value = result
params = {'url': 'http://url', 'auth': ('user', 'password')}
resp = self.make_get_request(**params)
requests_get.assert_called_once_with(
params['url'],
auth=params['auth'],
timeout=0.5)
self.assertEquals(resp['body'], json_resp)
self.assertEquals(resp['code'], 200)
@mock.patch('fuel_upgrade.health_checker.requests.get')
def test_safe_get_exception_raised(self, requests_get):
exceptions = [
requests.exceptions.ConnectionError(),
requests.exceptions.Timeout(),
requests.exceptions.HTTPError(),
ValueError(),
socket.timeout()]
for exception in exceptions:
requests_get.side_effect = exception
resp = self.make_get_request()
self.assertEquals(resp, None)
@mock.patch('fuel_upgrade.health_checker.requests.get')
def test_safe_get_non_json_response(self, requests_get):
result_txt = 'Text result'
result = mock.MagicMock()
result.json.side_effect = ValueError()
result.status_code = 400
result.text = result_txt
requests_get.return_value = result
resp = self.make_get_request()
self.assertEquals(resp['body'], result_txt)
self.assertEquals(resp['code'], 400)
def mock_socket_obj(self, socket_mock):
socket_obj = mock.MagicMock()
socket_mock.return_value = socket_obj
return socket_obj
@mock.patch('fuel_upgrade.health_checker.socket.socket')
def test_check_if_port_open_success(self, socket_mock):
ip = '127.0.0.1'
port = 1234
socket_obj = self.mock_socket_obj(socket_mock)
socket_obj.connect_ex.return_value = 0
result = self.base_checker.check_if_port_open(ip, port)
socket_obj.settimeout.assert_called_once_with(0.5)
socket_mock.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM)
self.assertEquals(result, True)
@mock.patch('fuel_upgrade.health_checker.socket.socket')
def test_check_if_port_open_fail(self, socket_mock):
socket_obj = self.mock_socket_obj(socket_mock)
socket_obj.connect_ex.return_value = 1
result = self.base_checker.check_if_port_open('127.0.0.1', 90)
self.assertEquals(result, False)
@mock.patch('fuel_upgrade.health_checker.socket.socket')
def test_check_if_port_open_timeout_exception(self, socket_mock):
socket_obj = self.mock_socket_obj(socket_mock)
socket_obj.connect_ex.side_effect = socket.timeout()
result = self.base_checker.check_if_port_open('127.0.0.1', 90)
self.assertEquals(result, False)
@mock.patch('fuel_upgrade.health_checker.xmlrpclib.ServerProxy')
def test_get_xmlrpc(self, xmlrpc_mock):
server_mock = mock.MagicMock()
xmlrpc_mock.return_value = server_mock
url = 'http://127.0.0.1'
result = self.base_checker.get_xmlrpc(url)
xmlrpc_mock.assert_called_once_with(url)
self.assertEquals(result, server_mock)
@mock.patch('fuel_upgrade.health_checker.xmlrpclib.ServerProxy')
def test_get_xmlrpc_connection_error(self, xmlrpc_mock):
xmlrpc_mock.side_effect = socket.timeout()
url = 'http://127.0.0.1'
result = self.base_checker.get_xmlrpc(url)
xmlrpc_mock.assert_called_once_with(url)
self.assertEquals(result, None)
class TestFuelUpgradeVerify(BaseTestCase):
def setUp(self):
self.config = {'timeout': 1, 'interval': 2}
self.mock_config = mock.MagicMock()
self.mock_config.checker.return_value = self.config
self.checkers = [mock.MagicMock(checker_name=1),
mock.MagicMock(checker_name=2)]
self.verifier = FuelUpgradeVerify(
self.mock_config, checkers=self.checkers)
def checkers_returns(self, return_value):
for checker in self.checkers:
checker.check.return_value = return_value # flake8: noqa
@mock.patch('fuel_upgrade.health_checker.utils.wait_for_true')
def test_verify(self, wait_for_true_mock):
self.checkers[0].check.return_value = False
wait_for_true_mock.side_effect = errors.TimeoutError()
with self.assertRaisesRegexp(
errors.UpgradeVerificationError,
'Failed to run services \[1\]'):
self.verifier.verify()
def test_check_if_all_services_ready_returns_true(self):
self.checkers_returns(True)
result = self.verifier.check_if_all_services_ready()
self.assertEquals(result, True)
def test_check_if_all_services_ready_returns_false(self):
self.checkers_returns(False)
result = self.verifier.check_if_all_services_ready()
self.assertEquals(result, False)
class TestCheckers(BaseTestCase):
safe_get_success = {'body': {'a': 3}, 'code': 200}
def assert_checker_false(self, checker):
self.assertFalse(checker(self.fake_config.endpoints).check())
def assert_checker_true(self, checker):
self.assertTrue(checker(self.fake_config.endpoints).check())
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_nailgun_checker_returns_true(self, get_mock):
get_mock.return_value = self.safe_get_success
self.assert_checker_true(
health_checker.IntegrationCheckerNginxNailgunChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_nailgun_checker_returns_false(self, get_mock):
get_mock.return_value = None
self.assert_checker_false(
health_checker.IntegrationCheckerNginxNailgunChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_ostf_checker_returns_true(self, get_mock):
positive_results = [
{'body': None, 'code': 401},
{'body': None, 'code': 200}]
for result in positive_results:
get_mock.return_value = result
self.assert_checker_true(health_checker.OSTFChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_ostf_checker_returns_false(self, get_mock):
get_mock.return_value = {'body': None, 'code': 500}
self.assert_checker_false(health_checker.OSTFChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_rabbit_checker_returns_true(self, get_mock):
get_mock.return_value = self.safe_get_success
self.assert_checker_true(health_checker.RabbitChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_rabbit_checker_returns_false_wrong_code(self, get_mock):
negative_results = [
{'body': [1, 2], 'code': 500},
{'body': [], 'code': 200}]
for result in negative_results:
get_mock.return_value = result
self.assert_checker_false(health_checker.RabbitChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.get_xmlrpc')
def test_cobbler_checker_returns_true(self, xmlrpc_mock):
server_mock = mock.MagicMock()
xmlrpc_mock.return_value = server_mock
server_mock.get_profiles.return_value = [1, 2, 3]
self.assert_checker_true(health_checker.CobblerChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.get_xmlrpc')
def test_cobbler_checker_returns_false_profiles_error(self, xmlrpc_mock):
server_mock = mock.MagicMock()
xmlrpc_mock.return_value = server_mock
server_mock.get_profiles.return_value = [1, 2]
self.assert_checker_false(health_checker.CobblerChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.get_xmlrpc')
def test_cobbler_checker_returns_false_exception_error(self, xmlrpc_mock):
server_mock = mock.MagicMock()
xmlrpc_mock.return_value = server_mock
server_mock.get_profiles.side_effect = socket.error()
self.assert_checker_false(health_checker.CobblerChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.check_if_port_open')
def test_socket_checkers_return_true(self, port_checker_mock):
port_checker_mock.return_value = True
for socket_checker in [health_checker.PostgresChecker,
health_checker.RsyncChecker,
health_checker.RsyslogChecker]:
self.assert_checker_true(socket_checker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.check_if_port_open')
def test_socket_checkers_return_false(self, port_checker_mock):
port_checker_mock.return_value = False
for socket_checker in [health_checker.PostgresChecker,
health_checker.RsyncChecker,
health_checker.RsyslogChecker]:
self.assert_checker_false(socket_checker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_mcollective_checker_returns_true(self, get_mock):
result = [{'name': 'mcollective_broadcast'},
{'name': 'mcollective_directed'}]
get_mock.return_value = {'body': result, 'code': 200}
self.assert_checker_true(health_checker.MCollectiveChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_mcollective_checker_returns_false(self, get_mock):
wrong_results = [
None,
{'body': [{'name': 'mcollective_broadcast'},
{'name': 'mcollective_directed'}],
'code': 400},
{'body': [{'name': 'mcollective_broadcast'}],
'code': 200},
{'body': None,
'code': 200},
{'body': ['str', None],
'code': 200}]
for result in wrong_results:
get_mock.return_value = result
self.assert_checker_false(health_checker.MCollectiveChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_nginx_checker_returns_true(self, get_mock):
get_mock.return_value = {'body': None, 'code': 400}
self.assert_checker_true(health_checker.NginxChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_nginx_checker_returns_false(self, get_mock):
get_mock.return_value = None
self.assert_checker_false(health_checker.NginxChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_keystone_checker_returns_true(self, get_mock):
get_mock.return_value = {'body': {}, 'code': 200}
self.assert_checker_true(health_checker.KeystoneChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_keystone_checker_returns_false(self, get_mock):
negative_results = [
{'body': {}, 'code': 400},
{'body': None, 'code': None}]
for result in negative_results:
get_mock.return_value = result
self.assert_checker_false(health_checker.KeystoneChecker)
@mock.patch('fuel_upgrade.health_checker.NailgunClient')
def test_integration_postgres_nailgun_nginx_returns_true(self, nailgun):
nailgun.return_value.get_releases.return_value = [1, 2]
self.assert_checker_true(
health_checker.IntegrationCheckerPostgresqlNailgunNginx)
@mock.patch('fuel_upgrade.health_checker.NailgunClient')
def test_integration_postgres_nailgun_nginx_empty_list(self, nailgun):
nailgun.return_value.get_releases.return_value = []
self.assert_checker_false(
health_checker.IntegrationCheckerPostgresqlNailgunNginx)
@mock.patch('fuel_upgrade.health_checker.NailgunClient')
def test_integration_postgres_nailgun_nginx_raises_errors(self, nailgun):
side_effects = [requests.exceptions.ConnectionError(),
requests.exceptions.Timeout(),
requests.exceptions.HTTPError(),
ValueError()]
for side_effect in side_effects:
nailgun.return_value.get_releases.side_effect = side_effect
self.assert_checker_false(
health_checker.IntegrationCheckerPostgresqlNailgunNginx)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_integration_rabbitmq_astute_nailgun_returns_true(self, get_mock):
result = {'body': [{'name': 'naily_service'},
{'name': 'nailgun'}],
'code': 200}
get_mock.return_value = result
self.assert_checker_true(
health_checker.IntegrationCheckerRabbitMQAstuteNailgun)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.safe_get')
def test_integration_rabbitmq_astute_nailgun_returns_false(self, get_mock):
negative_results = [
None,
{'body': None,
'code': 200},
{'body': [{'name': 'naily_service'}],
'code': 200},
{'body': [{'name': 'nailgun'}],
'code': 200},
{'body': [{'name': 'nailgun'}, {'name': 'naily_service'}],
'code': 400},
{'body': [{}], 'code': 200}]
for result in negative_results:
get_mock.return_value = result
self.assert_checker_false(
health_checker.IntegrationCheckerRabbitMQAstuteNailgun)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.make_safe_request')
def test_ostf_keystone_checker_returns_true(self, make_request_mock):
make_request_mock.return_value = 200
self.assert_checker_true(
health_checker.IntegrationOSTFKeystoneChecker)
@mock.patch('fuel_upgrade.health_checker.BaseChecker.make_safe_request')
def test_ostf_keystone_checker_returns_false(self, make_request_mock):
make_request_mock.return_value = 401
self.assert_checker_false(
health_checker.IntegrationOSTFKeystoneChecker)

View File

@ -1,143 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from fuel_upgrade.engines.host_system import HostSystemUpgrader
from fuel_upgrade.tests.base import BaseTestCase
class TestHostSystemUpgrader(BaseTestCase):
def setUp(self):
with mock.patch('fuel_upgrade.engines.host_system.SupervisorClient'):
self.upgrader = HostSystemUpgrader(self.fake_config)
self.upgrader.supervisor = mock.Mock()
@mock.patch.multiple(
'fuel_upgrade.engines.host_system.HostSystemUpgrader',
install_repos=mock.DEFAULT,
update_repo=mock.DEFAULT,
run_puppet=mock.DEFAULT,
remove_repo_config=mock.DEFAULT
)
@mock.patch('fuel_upgrade.engines.host_system.utils')
def test_upgrade(self, mock_utils, install_repos, update_repo,
run_puppet, remove_repo_config):
self.upgrader.upgrade()
self.called_once(install_repos)
self.called_once(run_puppet)
self.called_once(update_repo)
self.called_once(remove_repo_config)
self.called_once(self.upgrader.supervisor.stop_all_services)
mock_utils.exec_cmd.assert_called_with(
'yum install -v -y fuel-9999.0.0')
@mock.patch('fuel_upgrade.engines.host_system.utils')
def test_update_repo(self, utils_mock):
self.upgrader.update_repo()
templates_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../templates'))
utils_mock.render_template_to_file.assert_called_once_with(
'{0}/nailgun.repo'.format(templates_path),
'/etc/yum.repos.d/9999_nailgun.repo',
{
'name': '9999_nailgun',
'baseurl': 'file:/var/www/nailgun/2014.1.1-5.1/centos/x86_64',
'gpgcheck': 0,
'skip_if_unavailable': 0,
})
@mock.patch('fuel_upgrade.engines.host_system.utils')
def test_run_puppet(self, utils_mock):
self.upgrader.run_puppet()
utils_mock.exec_cmd.assert_called_once_with(
'puppet apply -d -v '
'/etc/puppet/2014.1.1-5.1/modules/nailgun/examples'
'/host-upgrade.pp '
'--modulepath=/etc/puppet/2014.1.1-5.1/modules')
@mock.patch.multiple(
'fuel_upgrade.engines.host_system.HostSystemUpgrader',
remove_repos=mock.DEFAULT,
remove_repo_config=mock.DEFAULT,
)
def test_rollback(self, remove_repos, remove_repo_config):
self.upgrader.rollback()
self.called_once(remove_repos)
self.called_once(remove_repo_config)
self.called_once(self.upgrader.supervisor.start_all_services)
@mock.patch('fuel_upgrade.engines.host_system.utils.remove_if_exists')
def test_remove_repo_config(self, remove_mock):
self.upgrader.config.from_version = '6.0'
self.upgrader.remove_repo_config()
self.assertEqual(remove_mock.call_args_list, [
mock.call('/etc/yum.repos.d/9999_nailgun.repo'),
mock.call('/etc/yum.repos.d/auxiliary.repo'),
])
@mock.patch('fuel_upgrade.engines.host_system.utils.remove_if_exists')
def test_remove_repo_config_for_fuel_ge_61(self, remove_mock):
self.upgrader.config.from_version = '6.1'
self.upgrader.config.new_version = '7.0'
self.upgrader.remove_repo_config()
self.assertEqual(remove_mock.call_args_list, [
mock.call('/etc/yum.repos.d/9999_nailgun.repo'),
mock.call('/etc/yum.repos.d/7.0_auxiliary.repo'),
])
@mock.patch('fuel_upgrade.engines.host_system.utils.copy')
@mock.patch('fuel_upgrade.engines.host_system.glob.glob')
def test_install_repos(self, glob, copy):
glob.return_value = ['one', 'two']
self.upgrader.install_repos()
self.called_times(copy, 2)
copy.assert_has_calls([
mock.call('one', '/var/www/nailgun/one'),
mock.call('two', '/var/www/nailgun/two')])
glob.assert_called_with('/tmp/upgrade_path/repos/[0-9.-]*')
@mock.patch('fuel_upgrade.engines.host_system.utils.remove')
@mock.patch('fuel_upgrade.engines.host_system.glob.glob')
def test_remove_repos(self, glob, remove):
glob.return_value = ['one', 'two']
self.upgrader.remove_repos()
self.called_times(remove, 2)
remove.assert_has_calls([
mock.call('/var/www/nailgun/one'),
mock.call('/var/www/nailgun/two')])
glob.assert_called_with('/tmp/upgrade_path/repos/[0-9.-]*')
@mock.patch(
'fuel_upgrade.engines.openstack.utils.os.path.isdir',
return_value=True)
@mock.patch(
'fuel_upgrade.engines.openstack.utils.dir_size', return_value=42)
@mock.patch(
'fuel_upgrade.engines.openstack.glob.glob', return_value=['1', '2'])
def test_required_free_space(self, glob, __, ___):
self.assertEqual(
self.upgrader.required_free_space,
{'/etc/yum.repos.d/9999_nailgun.repo': 10,
'/var/www/nailgun': 84})
glob.assert_called_with('/tmp/upgrade_path/repos/[0-9.-]*')

View File

@ -1,64 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import requests
from fuel_upgrade.clients import KeystoneClient
from fuel_upgrade.tests import base
class TestKeystoneClient(base.BaseTestCase):
token = {'access': {'token': {'id': 'auth_token'}}}
def setUp(self):
self.credentials = {
'username': 'some_user',
'password': 'some_password',
'auth_url': 'http://127.0.0.1:5000/v2',
'tenant_name': 'some_tenant'}
self.keystone = KeystoneClient(**self.credentials)
@mock.patch('fuel_upgrade.clients.keystone_client.requests.post')
@mock.patch('fuel_upgrade.clients.keystone_client.requests.Session')
def test_makes_authenticated_requests(self, session, post_mock):
post_mock.return_value.json.return_value = self.token
self.keystone.request
session.return_value.headers.update.assert_called_once_with(
{'X-Auth-Token': 'auth_token'})
@mock.patch('fuel_upgrade.clients.keystone_client.requests.Session')
@mock.patch('fuel_upgrade.clients.keystone_client.requests.post',
side_effect=requests.exceptions.HTTPError(''))
def test_does_not_fail_without_keystone(self, _, __):
with mock.patch('fuel_upgrade.utils.time.time') as time:
# Unfortunately, in Python 2.6 itertools.count() doesn't support
# the step argument, so we have to roll our own counter.
def timestamp(start, step):
x = start
while True:
yield x
x += step
# We need such an infinite generator because time.time() is also used
# by our loggers, so we can't predict how often it will be called.
time.side_effect = timestamp(0, 15)
self.keystone.request
self.assertEqual(self.keystone.get_token(), None)

View File

@ -1,133 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import requests
from fuel_upgrade.clients import NailgunClient
from fuel_upgrade.tests import base
class TestNailgunClient(base.BaseTestCase):
def setUp(self):
mock_keystone = mock.MagicMock()
self.mock_request = mock_keystone.request
with mock.patch(
'fuel_upgrade.clients.nailgun_client.KeystoneClient',
return_value=mock_keystone):
self.nailgun = NailgunClient('127.0.0.1', 8000)
def test_create_release(self):
# test normal behavior
self.mock_request.post.return_value = self.mock_requests_response(
201, '{ "id": "42" }')
response = self.nailgun.create_release({
'name': 'Havana on Ubuntu 12.04'})
self.assertEqual(response, {'id': '42'})
# test failed result
self.mock_request.post.return_value.status_code = 409
self.assertRaises(
requests.exceptions.HTTPError,
self.nailgun.create_release,
{'name': 'Havana on Ubuntu 12.04'})
def test_delete_release(self):
# test normal behavior
for status in (200, 204):
self.mock_request.delete.return_value = \
self.mock_requests_response(status, 'No Content')
response = self.nailgun.remove_release(42)
self.assertEqual(response, 'No Content')
# test failed result
self.mock_request.delete.return_value = self.mock_requests_response(
409, 'Conflict')
self.assertRaises(
requests.exceptions.HTTPError,
self.nailgun.remove_release,
42)
def test_create_notification(self):
# test normal behavior
self.mock_request.post.return_value = self.mock_requests_response(
201,
'{ "id": "42" }')
response = self.nailgun.create_notification({
'topic': 'release',
'message': 'New release available!'})
self.assertEqual(response, {'id': '42'})
# test failed result
self.mock_request.post.return_value.status_code = 409
self.assertRaises(
requests.exceptions.HTTPError,
self.nailgun.create_notification,
{'topic': 'release',
'message': 'New release available!'})
def test_delete_notification(self):
# test normal behavior
for status in (200, 204):
self.mock_request.delete.return_value = \
self.mock_requests_response(status, 'No Content')
response = self.nailgun.remove_notification(42)
self.assertEqual(response, 'No Content')
# test failed result
self.mock_request.delete.return_value = self.mock_requests_response(
409, 'Conflict')
self.assertRaises(
requests.exceptions.HTTPError,
self.nailgun.remove_notification,
42)
def test_get_tasks(self):
# test positive cases
self.mock_request.get.return_value = self.mock_requests_response(
200, '[1,2,3]')
response = self.nailgun.get_tasks()
self.assertEqual(response, [1, 2, 3])
# test negative cases
self.mock_request.get.return_value = self.mock_requests_response(
502, 'Bad gateway')
self.assertRaises(
requests.exceptions.HTTPError, self.nailgun.get_tasks)
def test_put_deployment_tasks(self):
release = {'id': '1'}
tasks = []
self.mock_request.put.return_value = self.mock_requests_response(
200, '[]')
response = self.nailgun.put_deployment_tasks(release, tasks)
self.assertEqual(response, tasks)
self.mock_request.put.return_value = self.mock_requests_response(
502, 'Bad gateway')
self.assertRaises(
requests.exceptions.HTTPError,
self.nailgun.put_deployment_tasks,
release, tasks)

View File

@ -1,325 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import requests
from fuel_upgrade.engines.openstack import OpenStackUpgrader
from fuel_upgrade.tests.base import BaseTestCase
class TestOpenStackUpgrader(BaseTestCase):
releases_raw = '''
[{
"pk": 1,
"fields": {
"name": "releases name",
"version": "2014.1",
"operating_system": "CentOS",
}
}]
'''
metadata_raw = '''
diff_releases:
2012.2-6.0: 2014.1.1-5.1
'''
tasks = [{'id': 'first'}]
@mock.patch(
'fuel_upgrade.engines.openstack.glob.glob', return_value=['path'])
def setUp(self, _):
"""Create upgrader with mocked data."""
with mock.patch('fuel_upgrade.engines.openstack.io.open',
self.mock_open(self.releases_raw)):
self.upgrader = OpenStackUpgrader(self.fake_config)
def test_constructor_load_releases(self):
self.assertEqual(len(self.upgrader.releases), 1)
@mock.patch(
'fuel_upgrade.engines.openstack.OpenStackUpgrader.install_versions')
@mock.patch(
'fuel_upgrade.engines.openstack.OpenStackUpgrader.install_releases')
@mock.patch(
'fuel_upgrade.engines.openstack.OpenStackUpgrader.install_puppets')
def test_upgrade(self, pup, rel, ver):
self.upgrader.upgrade()
self.called_once(pup)
self.called_once(rel)
self.called_once(ver)
@mock.patch('fuel_upgrade.engines.openstack.glob.glob',
return_value=['/upgrade/file1.yaml', '/upgrade/file2.yaml'])
@mock.patch('fuel_upgrade.engines.openstack.utils')
def test_install_versions(self, mock_utils, mock_glob):
self.upgrader.install_versions()
release_versions_path = '/etc/fuel/release_versions'
mock_utils.create_dir_if_not_exists.assert_called_once_with(
release_versions_path)
self.assertEqual(
mock_utils.copy.call_args_list,
[mock.call(
'/upgrade/file1.yaml',
'{0}/file1.yaml'.format(release_versions_path)),
mock.call(
'/upgrade/file2.yaml',
'{0}/file2.yaml'.format(release_versions_path))])
@mock.patch('fuel_upgrade.engines.openstack.glob.glob',
return_value=['/upgrade/file1.yaml'])
@mock.patch('fuel_upgrade.engines.openstack.utils')
def test_remove_versions(self, mock_utils, mock_glob):
self.upgrader.remove_versions()
self.assertEqual(
mock_utils.remove.call_args_list,
[mock.call('/etc/fuel/release_versions/file1.yaml')])
@mock.patch(
'fuel_upgrade.engines.openstack.OpenStackUpgrader.install_releases')
@mock.patch(
'fuel_upgrade.engines.openstack.OpenStackUpgrader.install_puppets')
def test_upgrade_with_errors(self, pup, rel):
class MyException(Exception):
pass
pup.side_effect = MyException('Folder does not exist')
self.assertRaises(MyException, self.upgrader.upgrade)
self.called_once(pup)
self.method_was_not_called(rel)
@mock.patch(
'fuel_upgrade.engines.openstack.OpenStackUpgrader.remove_versions')
@mock.patch(
'fuel_upgrade.engines.openstack.OpenStackUpgrader.remove_puppets')
@mock.patch(
'fuel_upgrade.engines.openstack.OpenStackUpgrader.remove_releases')
def test_rollback(self, rel, pup, ver):
self.upgrader.rollback()
self.called_once(rel)
self.called_once(pup)
self.called_once(ver)
@mock.patch('fuel_upgrade.engines.openstack.utils.copy')
@mock.patch('fuel_upgrade.engines.openstack.glob.glob')
def test_install_puppets(self, glob, copy):
glob.return_value = ['one', 'two']
self.upgrader.install_puppets()
self.called_times(copy, 2)
copy.assert_has_calls([
mock.call('one', '/etc/puppet/one'),
mock.call('two', '/etc/puppet/two')])
@mock.patch('fuel_upgrade.engines.openstack.utils.remove')
@mock.patch('fuel_upgrade.engines.openstack.glob.glob')
def test_remove_puppets(self, glob, remove):
glob.return_value = ['one', 'two']
self.upgrader.remove_puppets()
self.called_times(remove, 2)
remove.assert_has_calls([
mock.call('/etc/puppet/one'),
mock.call('/etc/puppet/two')])
@mock.patch(
'fuel_upgrade.utils.iterfiles_filter',
return_value=['/fake/path/tasks.yaml'])
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.put_deployment_tasks')
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.create_notification')
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.create_release')
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.get_releases',
return_value=[])
def test_install_releases(self, _, mock_cr, mock_cn, mock_pd, mock_files):
# test one release
release_response = {'id': '1', 'version': '111'}
mock_cr.return_value = release_response
mock_cn.return_value = {'id': '100'}
with mock.patch('fuel_upgrade.engines.openstack.utils.read_from_yaml',
return_value=self.tasks):
self.upgrader.install_releases()
self.called_once(mock_files)
self.called_once(mock_cr)
self.called_once(mock_cn)
mock_pd.assert_called_with(release_response, self.tasks)
for type_ in ('release', 'notification'):
self.assertEqual(len(self.upgrader._rollback_ids[type_]), 1)
@mock.patch(
'fuel_upgrade.engines.openstack.glob.glob', return_value=['path'])
@mock.patch(
'fuel_upgrade.utils.iterfiles_filter',
return_value=['/fake/path/tasks.yaml'])
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.put_deployment_tasks')
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.create_notification')
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.create_release')
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.get_releases',
return_value=[])
def test_install_releases_is_not_deployable(self, _, mock_cr, mock_cn,
mock_pd, mock_files, gl):
# Use already-parsed text, because mock_open returns its input without
# any changes, while the code expects YAML-parsed JSON.
releases_raw = ''' [
{
"pk": 1,
"fields": {
"name": "releases name",
"version": "2014.1",
"operating_system": "CentOS",
}
}, {
"pk": 2,
"fields": {
"name": "Undeployable releases name",
"version": "2014.1",
"operating_system": "CentOS",
"state": "unavailable",
}
}
]
'''
with mock.patch('fuel_upgrade.engines.openstack.io.open',
self.mock_open(releases_raw)):
upgrader = OpenStackUpgrader(self.fake_config)
# test one release
release_response = [{'id': '1', 'version': '111'},
{'id': '2', 'version': '222'}]
mock_cr.side_effect = release_response
mock_cn.return_value = {'id': '100'}
with mock.patch('fuel_upgrade.engines.openstack.utils.read_from_yaml',
return_value=self.tasks):
upgrader.install_releases()
self.called_times(mock_files, 2)
self.called_times(mock_cr, 2)
# notification should be called only once
self.called_once(mock_cn)
msg = 'New release available: releases name (2014.1)'
mock_cn.assert_called_with({'topic': 'release', 'message': msg})
self.called_times(mock_pd, 2)
self.assertEqual(len(upgrader._rollback_ids['release']), 2)
# notification should be called only once
self.assertEqual(len(upgrader._rollback_ids['notification']), 1)
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.put_deployment_tasks')
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.create_notification')
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.create_release')
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.get_releases',
return_value=[])
def test_install_releases_with_errors(self, _, mock_cr, mock_cn, mock_pd):
mock_cr.return_value = {'id': '1', 'version': '111'}
mock_cn.side_effect = requests.exceptions.HTTPError('Something wrong')
self.assertRaises(
requests.exceptions.HTTPError, self.upgrader.install_releases)
self.called_once(mock_cr)
self.called_once(mock_cn)
self.assertEqual(len(self.upgrader._rollback_ids['release']), 1)
self.assertEqual(len(self.upgrader._rollback_ids['notification']), 0)
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.remove_notification')
@mock.patch(
'fuel_upgrade.engines.openstack.NailgunClient.remove_release')
def test_remove_releases(self, r_release, r_notification):
self.upgrader._rollback_ids['release'] = [1, 3]
self.upgrader._rollback_ids['notification'] = [2, 4]
self.upgrader.remove_releases()
r_release.assert_has_calls([
mock.call(3),
mock.call(1)])
r_notification.assert_has_calls([
mock.call(4),
mock.call(2)])
def test_get_unique_releases(self):
releases = [
{
'name': 'Ubuntu',
'version': 'A',
},
{
'name': 'Centos',
'version': 'A',
},
]
existing_releases = [
{
'name': 'Ubuntu',
'version': 'A',
},
{
'name': 'Centos',
'version': 'B',
},
]
expected_releases = [
{
'name': 'Centos',
'version': 'A',
},
]
self.assertEqual(
self.upgrader._get_unique_releases(releases, existing_releases),
expected_releases)
@mock.patch(
'fuel_upgrade.engines.openstack.utils.os.path.isdir',
return_value=True)
@mock.patch(
'fuel_upgrade.engines.openstack.utils.dir_size', return_value=42)
@mock.patch(
'fuel_upgrade.engines.openstack.glob.glob', return_value=['1', '2'])
def test_required_free_space(self, glob, _, __):
result = self.upgrader.required_free_space
self.assertEqual(result, {
'/etc/puppet': 84,
})

View File

@ -1,36 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from fuel_upgrade.clients import OSTFClient
from fuel_upgrade.tests import base
class TestOSTFClient(base.BaseTestCase):
def setUp(self):
mock_keystone = mock.MagicMock()
self.mock_request = mock_keystone.request
with mock.patch(
'fuel_upgrade.clients.ostf_client.KeystoneClient',
return_value=mock_keystone):
self.ostf = OSTFClient('127.0.0.1', 8777)
def test_get(self):
self.ostf.get('/some_path')
self.mock_request.get.assert_called_once_with(
'http://127.0.0.1:8777/some_path')

View File

@ -1,938 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os
import textwrap
import mock
import six
from fuel_upgrade.tests.base import BaseTestCase
from fuel_upgrade.tests.base import FakeFile
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
from fuel_upgrade.pre_upgrade_hooks.from_5_0_1_to_any_fix_host_system_repo \
import FixHostSystemRepoHook
from fuel_upgrade.pre_upgrade_hooks.from_5_0_to_any_add_credentials \
import AddCredentialsHook
from fuel_upgrade.pre_upgrade_hooks.from_5_0_to_any_fix_puppet_manifests \
import FixPuppetManifests
from fuel_upgrade.pre_upgrade_hooks.from_5_0_to_any_sync_dns \
import SyncDnsHook
from fuel_upgrade.pre_upgrade_hooks import PreUpgradeHookManager
from fuel_upgrade.pre_upgrade_hooks. \
from_5_0_x_to_any_copy_openstack_release_versions \
import CopyOpenstackReleaseVersions
from fuel_upgrade.pre_upgrade_hooks.from_5_1_to_any_add_keystone_credentials \
import AddKeystoneCredentialsHook
from fuel_upgrade.pre_upgrade_hooks.from_5_1_to_any_ln_fuelweb_x86_64 \
import AddFuelwebX8664LinkForUbuntu
from fuel_upgrade.pre_upgrade_hooks.from_6_0_to_any_add_dhcp_gateway \
import AddDhcpGateway
from fuel_upgrade.pre_upgrade_hooks.from_6_0_to_any_add_monitord_credentials \
import AddMonitordKeystoneCredentialsHook
from fuel_upgrade.pre_upgrade_hooks.from_6_0_to_any_copy_keys \
import MoveKeysHook
from fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_dhcrelay_conf \
import FixDhcrelayConf
from fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_dhcrelay_monitor \
import FixDhcrelayMonitor
from fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_fix_version_in_supervisor \
import SetFixedVersionInSupervisor
from fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_recreate_containers \
import RecreateNailgunInPriveleged
class TestPreUpgradeHooksBase(BaseTestCase):
HookClass = None
def setUp(self):
class Upgrader1(mock.MagicMock):
pass
class Upgrader2(mock.MagicMock):
pass
self.upgraders_cls = [Upgrader1, Upgrader2]
self.upgraders = [upgrade_cls() for upgrade_cls in self.upgraders_cls]
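# Helper: build a HookClass instance on top of the fake config, applying per-test overrides.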
def get_hook(self, conf={}):
config = self.fake_config
for key, value in six.iteritems(conf):
setattr(config, key, value)
return self.HookClass(self.upgraders, config)
class TestAddCredentialsHook(TestPreUpgradeHooksBase):
HookClass = AddCredentialsHook
def setUp(self):
super(TestAddCredentialsHook, self).setUp()
self.additional_keys = [
'astute',
'cobbler',
'mcollective',
'postgres',
'keystone',
'FUEL_ACCESS']
def test_is_required_returns_true(self):
hook = self.get_hook({'astute': {}})
self.assertTrue(hook.check_if_required())
def test_is_required_returns_false(self):
hook = self.get_hook({
'astute': {
'astute': {},
'cobbler': {},
'mcollective': {},
'postgres': {},
'keystone': {},
'FUEL_ACCESS': {}}})
self.assertFalse(hook.check_if_required())
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.read_yaml_config')
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.utils.copy_file')
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.utils.save_as_yaml')
def test_run(self,
utils_save_as_yaml_mock,
utils_copy_file_mock,
read_yaml_config_mock):
file_key = 'this_key_was_here_before_upgrade'
hook = self.get_hook({'astute': {file_key: file_key}})
read_yaml_config_mock.return_value = hook.config.astute
hook.run()
utils_copy_file_mock.assert_called_once_with(
'/etc/fuel/astute.yaml',
'/etc/fuel/astute.yaml_0',
overwrite=False)
args = utils_save_as_yaml_mock.call_args
self.assertEqual(args[0][0], '/etc/fuel/astute.yaml')
# Check that the key which was already in the file won't be overwritten
self.additional_keys.append(file_key)
# Check that all required keys are in method call
self.assertTrue(all(
key in self.additional_keys
for key in args[0][1].keys()))
class TestAddFuelwebX8664LinkForUbuntu(TestPreUpgradeHooksBase):
HookClass = AddFuelwebX8664LinkForUbuntu
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_5_1_to_any_ln_fuelweb_x86_64.'
'utils.file_exists', side_effect=[True, False])
def test_is_required_returns_true(self, file_exists_mock):
hook = self.get_hook({'new_version': '6.0'})
self.assertTrue(hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_5_1_to_any_ln_fuelweb_x86_64.'
'utils.file_exists', side_effect=[False, False])
def test_is_required_returns_false_1(self, file_exists_mock):
hook = self.get_hook({'new_version': '6.0'})
self.assertFalse(hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_5_1_to_any_ln_fuelweb_x86_64.'
'utils.file_exists', side_effect=[True, True])
def test_is_required_returns_false_2(self, file_exists_mock):
hook = self.get_hook({'new_version': '6.0'})
self.assertFalse(hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_5_1_to_any_ln_fuelweb_x86_64.'
'utils.symlink')
def test_run(self, symlink_mock):
hook = self.get_hook({'new_version': '6.0'})
hook.run()
self.called_once(symlink_mock)
class TestAddKeystoneCredentialsHook(TestPreUpgradeHooksBase):
HookClass = AddKeystoneCredentialsHook
def setUp(self):
super(TestAddKeystoneCredentialsHook, self).setUp()
self.keystone_keys = [
'nailgun_user',
'nailgun_password',
'ostf_user',
'ostf_password',
]
def test_is_required_returns_true(self):
hook = self.get_hook({})
self.assertTrue(hook.check_if_required())
def test_is_required_returns_false(self):
hook = self.get_hook({
'astute': {
'keystone': {
'nailgun_user': '',
'nailgun_password': '',
'ostf_user': '',
'ostf_password': '',
}
}
})
self.assertFalse(hook.check_if_required())
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.read_yaml_config')
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.utils.copy_file')
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.utils.save_as_yaml')
def test_run(self,
utils_save_as_yaml_mock,
utils_copy_file_mock,
read_yaml_config_mock):
file_key = 'this_key_was_here_before_upgrade'
hook = self.get_hook({
'astute': {
'keystone': {file_key: file_key}}
})
read_yaml_config_mock.return_value = hook.config.astute
hook.run()
utils_copy_file_mock.assert_called_once_with(
'/etc/fuel/astute.yaml',
'/etc/fuel/astute.yaml_0',
overwrite=False)
args = utils_save_as_yaml_mock.call_args
self.assertEqual(args[0][0], '/etc/fuel/astute.yaml')
# Check that the key which was already in the file won't be overwritten
self.keystone_keys.append(file_key)
# Check that all required keys are in method call
self.assertTrue(all(
key in self.keystone_keys
for key in args[0][1]['keystone'].keys()))
class TestSyncDnsHook(TestPreUpgradeHooksBase):
HookClass = SyncDnsHook
def setUp(self):
super(TestSyncDnsHook, self).setUp()
self.additional_keys = [
'DNS_DOMAIN',
'DNS_SEARCH']
def test_is_required_returns_true(self):
hook = self.get_hook({
'astute': {
'DNS_DOMAIN': 'veryunlikelydomain',
'DNS_SEARCH': 'veryunlikelydomain'}})
self.assertTrue(hook.check_if_required())
def test_is_required_returns_false(self):
hostname, sep, realdomain = os.uname()[1].partition('.')
hook = self.get_hook({
'astute': {
'DNS_DOMAIN': realdomain,
'DNS_SEARCH': realdomain}})
self.assertFalse(hook.check_if_required())
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.read_yaml_config')
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.utils.copy_file')
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.utils.save_as_yaml')
def test_run(self,
utils_save_as_yaml_mock,
utils_copy_file_mock,
read_yaml_config):
file_key = 'this_key_was_here_before_upgrade'
hook = self.get_hook({'astute': {file_key: file_key}})
read_yaml_config.return_value = hook.config.astute
hook.run()
utils_copy_file_mock.assert_called_once_with(
'/etc/fuel/astute.yaml',
'/etc/fuel/astute.yaml_0',
overwrite=False)
args = utils_save_as_yaml_mock.call_args
self.assertEqual(args[0][0], '/etc/fuel/astute.yaml')
# Check that the key which was already in the file won't be overwritten
self.additional_keys.append(file_key)
# Check that all required keys are in method call
self.assertTrue(all(
key in self.additional_keys
for key in args[0][1].keys()))
class TestFixPuppetManifestHook(TestPreUpgradeHooksBase):
iterfiles_returns = [
'/tmp/upgrade_path/config/5.0/modules/package/lib/puppet'
'/provider/package/yum.rb',
'/tmp/upgrade_path/config/5.0/manifests/centos-versions.yaml']
def setUp(self):
super(TestFixPuppetManifestHook, self).setUp()
conf = self.fake_config
conf.from_version = '5.0'
self.hook = FixPuppetManifests(self.upgraders, conf)
def test_is_required_returns_true(self):
self.hook.config.from_version = '5.0'
self.assertTrue(self.hook.check_if_required())
self.hook.config.from_version = '5.0.1'
self.assertTrue(self.hook.check_if_required())
def test_is_required_returns_false(self):
self.hook.config.from_version = '5.1'
self.assertFalse(self.hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_5_0_to_any_fix_puppet_manifests.'
'iterfiles', return_value=iterfiles_returns)
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_5_0_to_any_fix_puppet_manifests.'
'copy')
def test_run(self, copy, _):
self.hook.run()
copy.assert_has_calls([
mock.call(
'/tmp/upgrade_path/config/5.0/modules/package/lib'
'/puppet/provider/package/yum.rb',
'/etc/puppet/modules/package/lib/puppet/provider/package'
'/yum.rb'),
mock.call(
'/tmp/upgrade_path/config/5.0/manifests'
'/centos-versions.yaml',
'/etc/puppet/manifests/centos-versions.yaml')])
class TestFixHostSystemRepoHook(TestPreUpgradeHooksBase):
def setUp(self):
super(TestFixHostSystemRepoHook, self).setUp()
conf = self.fake_config
conf.from_version = '5.0.1'
self.hook = FixHostSystemRepoHook(self.upgraders, conf)
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.'
'from_5_0_1_to_any_fix_host_system_repo.'
'utils.file_exists', return_value=True)
def test_is_required_returns_true(self, exists_mock):
self.hook.config.from_version = '5.0.1'
self.assertTrue(self.hook.check_if_required())
self.assertEqual(
exists_mock.call_args_list,
[mock.call('/var/www/nailgun/5.0.1/centos/x86_64'),
mock.call('/etc/yum.repos.d/5.0.1_nailgun.repo')])
def test_is_required_returns_false(self):
self.hook.config.from_version = '5.0'
self.assertFalse(self.hook.check_if_required())
self.hook.config.from_version = '5.1'
self.assertFalse(self.hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.'
'from_5_0_1_to_any_fix_host_system_repo.'
'utils.file_exists', return_value=False)
def test_is_required_returns_false_if_repo_file_does_not_exist(self, _):
self.assertFalse(self.hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.'
'from_5_0_1_to_any_fix_host_system_repo.'
'utils.file_exists', side_effect=[True, False])
def test_is_required_returns_false_repo_does_not_exist(self, _):
self.assertFalse(self.hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.'
'from_5_0_1_to_any_fix_host_system_repo.utils')
def test_run(self, mock_utils):
self.hook.run()
args, _ = mock_utils.render_template_to_file.call_args_list[0]
# The first argument is a path to the template in the upgrade script
# directory; it can differ depending on where the code is installed.
self.assertTrue(args[0].endswith('templates/nailgun.repo'))
self.assertEqual(
args[1:],
('/etc/yum.repos.d/5.0.1_nailgun.repo',
{'repo_path': '/var/www/nailgun/5.0.1/centos/x86_64',
'version': '5.0.1'}))
class TestPreUpgradeHookBase(TestPreUpgradeHooksBase):
def get_hook(self, check_if_required=False, enable_for_engines=[]):
class PreUpgradeHook(PreUpgradeHookBase):
def check_if_required(self):
return check_if_required
@property
def enable_for_engines(self):
return enable_for_engines
def run(self):
pass
return PreUpgradeHook(self.upgraders, self.fake_config)
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.'
'PreUpgradeHookBase.is_enabled_for_engines',
return_value=False)
def test_is_required_returns_false(self, _):
self.assertFalse(self.get_hook().is_required)
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.'
'PreUpgradeHookBase.is_enabled_for_engines',
return_value=True)
def test_is_required_returns_true(self, _):
self.assertTrue(self.get_hook(check_if_required=True).is_required)
def test_is_enabled_for_engines_returns_true(self):
self.assertTrue(
self.get_hook(
check_if_required=True,
enable_for_engines=[self.upgraders_cls[0]]).is_required)
def test_is_enabled_for_engines_returns_false(self):
class SomeEngine(object):
pass
self.assertFalse(
self.get_hook(
check_if_required=True,
enable_for_engines=[SomeEngine]).is_required)
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.read_yaml_config')
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.utils.copy_file')
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.utils.save_as_yaml')
def test_update_astute_config(self,
utils_save_as_yaml_mock,
utils_copy_file_mock,
read_yaml_config_mock):
hook = self.get_hook()
read_yaml_config_mock.return_value = {
'a': 1,
'dict': {
'a': 1,
'b': 2,
}
}
defaults = {'b': 2, 'dict': {'a': 5, 'c': 6}}
hook.update_astute_config(defaults=defaults)
args = utils_save_as_yaml_mock.call_args
self.assertDictEqual(
args[0][1],
{'a': 1, 'b': 2, 'dict': {'a': 1, 'b': 2, 'c': 6}})
defaults = {'a': 2, 'dict': {'c': 5}}
hook.update_astute_config(defaults=defaults)
args = utils_save_as_yaml_mock.call_args
self.assertDictEqual(
args[0][1],
{'a': 1, 'dict': {'a': 1, 'b': 2, 'c': 5}})
overwrites = {'a': 2, 'dict': {'a': 5}}
hook.update_astute_config(overwrites=overwrites)
args = utils_save_as_yaml_mock.call_args
self.assertDictEqual(
args[0][1],
{'a': 2, 'dict': {'a': 5, 'b': 2}})
overwrites = {'b': 2, 'dict': {'c': 5}}
hook.update_astute_config(overwrites=overwrites)
args = utils_save_as_yaml_mock.call_args
self.assertDictEqual(
args[0][1],
{'a': 1, 'b': 2, 'dict': {'a': 1, 'b': 2, 'c': 5}})
class TestPreUpgradeHookManager(TestPreUpgradeHooksBase):
def setUp(self):
super(TestPreUpgradeHookManager, self).setUp()
self.required_hooks = [mock.MagicMock(), mock.MagicMock()]
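# is_required is a property, so it has to be mocked on the type, not on the instance.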
for hook in self.required_hooks:
type(hook).is_required = mock.PropertyMock(return_value=True)
self.not_required_hooks = [mock.MagicMock()]
for hook in self.not_required_hooks:
type(hook).is_required = mock.PropertyMock(return_value=False)
self.hooks = []
self.hooks.extend(self.required_hooks)
self.hooks.extend(self.not_required_hooks)
self.hook_manager = PreUpgradeHookManager(
self.upgraders, self.fake_config)
def test_run(self):
self.hook_manager.pre_upgrade_hooks = self.hooks
self.hook_manager.run()
for hook in self.required_hooks:
self.called_once(hook.run)
for hook in self.not_required_hooks:
self.method_was_not_called(hook.run)
class TestCopyOpenstackReleaseVersions(TestPreUpgradeHooksBase):
iterfiles_returns = [
'/tmp/upgrade_path/config/5.0/modules/package/lib/puppet'
'/provider/package/yum.rb',
'/tmp/upgrade_path/config/5.0/manifests/centos-versions.yaml']
def setUp(self):
super(TestCopyOpenstackReleaseVersions, self).setUp()
conf = self.fake_config
conf.from_version = '5.0.1'
self.hook = CopyOpenstackReleaseVersions(self.upgraders, conf)
def test_is_required_returns_true(self):
self.hook.config.from_version = '5.0'
self.assertTrue(self.hook.check_if_required())
self.hook.config.from_version = '5.0.1'
self.assertTrue(self.hook.check_if_required())
def test_is_required_returns_false(self):
self.hook.config.from_version = '5.1'
self.assertFalse(self.hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.'
'from_5_0_x_to_any_copy_openstack_release_versions.utils')
def test_run(self, mock_utils):
self.hook.run()
self.assertEqual(
mock_utils.create_dir_if_not_exists.call_args_list,
[mock.call(self.hook.release_dir)])
self.assertEqual(
mock_utils.copy_if_exists.call_args_list,
[mock.call(self.hook.version_path_5_0,
self.hook.dst_version_path_5_0),
mock.call(self.hook.version_path_5_0_1,
self.hook.dst_version_path_5_0_1)])
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.'
'from_5_0_x_to_any_copy_openstack_release_versions.utils')
def test_run_from_5_0(self, mock_utils):
self.hook.config.from_version = '5.0'
self.hook.run()
self.assertEqual(
mock_utils.copy_if_exists.call_args_list,
[mock.call(self.hook.version_path_5_0,
self.hook.dst_version_path_5_0)])
class TestAddMonitordKeystoneCredentialsHook(TestPreUpgradeHooksBase):
HookClass = AddMonitordKeystoneCredentialsHook
def setUp(self):
super(TestAddMonitordKeystoneCredentialsHook, self).setUp()
self.monitord_keys = [
'monitord_user',
'monitord_password',
]
def test_is_required_returns_true(self):
hook = self.get_hook({})
self.assertTrue(hook.check_if_required())
def test_is_required_returns_false(self):
hook = self.get_hook({
'astute': {
'keystone': {
'monitord_user': '',
'monitord_password': '',
}
}
})
self.assertFalse(hook.check_if_required())
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.read_yaml_config')
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.utils.copy_file')
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.utils.save_as_yaml')
def test_run(self, msave_as_yaml, mcopy_file, mread_yaml_config):
file_key = 'this_key_was_here_before_upgrade'
file_value = 'some value'
hook = self.get_hook({
'astute': {
'keystone': {file_key: file_value}}
})
mread_yaml_config.return_value = hook.config.astute
hook.run()
mcopy_file.assert_called_once_with(
'/etc/fuel/astute.yaml',
'/etc/fuel/astute.yaml_0',
overwrite=False)
args = msave_as_yaml.call_args
self.assertEqual(args[0][0], '/etc/fuel/astute.yaml')
# Check that all required keys are in method call
called_config = args[0][1]['keystone']
self.assertTrue(set(self.monitord_keys).issubset(called_config))
# Check that nothing else was changed
self.assertEqual(called_config[file_key], file_value)
class TestAddDhcpGatewayHook(TestPreUpgradeHooksBase):
HookClass = AddDhcpGateway
def test_is_required_returns_true(self):
hook = self.get_hook({})
self.assertTrue(hook.check_if_required())
def test_is_required_returns_false(self):
hook = self.get_hook({
'astute': {
'ADMIN_NETWORK': {
'dhcp_gateway': '10.20.0.2',
}
}})
self.assertFalse(hook.check_if_required())
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.read_yaml_config')
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.utils.copy_file')
@mock.patch('fuel_upgrade.pre_upgrade_hooks.base.utils.save_as_yaml')
def test_run(self, msave_as_yaml, mcopy_file, mread_yaml_config):
hook = self.get_hook({
'astute': {
'ADMIN_NETWORK': {
'a': 1,
'b': 2,
}
}})
mread_yaml_config.return_value = hook.config.astute
hook.run()
mcopy_file.assert_called_once_with(
'/etc/fuel/astute.yaml',
'/etc/fuel/astute.yaml_0',
overwrite=False)
args = msave_as_yaml.call_args
self.assertEqual(args[0][0], '/etc/fuel/astute.yaml')
# Check that all required keys are in method call
admin_network = args[0][1]['ADMIN_NETWORK']
self.assertEqual(admin_network, {
'a': 1,
'b': 2,
'dhcp_gateway': '0.0.0.0',
})
class TestMoveKeysHook(TestPreUpgradeHooksBase):
def setUp(self):
super(TestMoveKeysHook, self).setUp()
conf = self.fake_config
conf.from_version = '6.0'
self.hook = MoveKeysHook(self.upgraders, conf)
def test_is_required_returns_true(self):
self.hook.config.from_version = '6.0'
self.assertTrue(self.hook.check_if_required())
self.hook.config.from_version = '6.0.1'
self.assertTrue(self.hook.check_if_required())
def test_is_required_returns_false(self):
self.hook.config.from_version = '6.1'
self.assertFalse(self.hook.check_if_required())
self.hook.config.from_version = '6.2'
self.assertFalse(self.hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_6_0_to_any_copy_keys.utils.'
'file_exists', return_value=True)
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_6_0_to_any_copy_keys.utils.'
'exec_cmd')
def test_run(self, cmd_exec, f_exist):
self.hook.run()
f_exist.assert_called_once_with(self.hook.dst_path)
cmd_exec.assert_has_calls(
[mock.call('docker cp fuel-core-6.0-astute:/var/lib/astute/ '
'/var/lib/fuel/keys/'),
mock.call('mv /var/lib/fuel/keys/astute/* /var/lib/fuel/keys/'),
mock.call('rm -r /var/lib/fuel/keys/astute/')])
class TestRecreateNailgunInPriveleged(TestPreUpgradeHooksBase):
HookClass = RecreateNailgunInPriveleged
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_recreate_containers.'
'exec_cmd_iterator')
def test_is_required_returns_true(self, mock_exec):
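# Each case pairs `docker inspect` output with `docker --version` output,
# returned by consecutive exec_cmd_iterator() calls.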
testcases = [
(
['[{ "HostConfig": { "Privileged": false } }]'],
['Docker version 0.10.0, build dc9c28f/0.10.0'],
),
(
['[{ "HostConfig": { "Privileged": false } }]'],
['Docker version 0.8.0, build a768964'],
)]
hook = self.get_hook({'from_version': '6.0'})
for case in testcases:
mock_exec.side_effect = case
self.assertTrue(hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_recreate_containers.'
'exec_cmd_iterator')
def test_is_required_returns_false(self, mock_exec):
testcases = [
(
['[{ "HostConfig": { "Privileged": false } }]'],
['Docker version 0.11.0, build dc9c28f/0.11.0'],
),
(
['[{ "HostConfig": { "Privileged": false } }]'],
['Docker version 1.4.1, build d344625'],
),
(
['[{ "HostConfig": { "Privileged": true } }]'],
['Docker version 0.10.0, build dc9c28f/0.10.0'],
),
]
hook = self.get_hook({'from_version': '6.0'})
for case in testcases:
mock_exec.side_effect = case
self.assertFalse(hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_recreate_containers.'
'safe_exec_cmd')
def test_run(self, mock_safe_exec_cmd):
hook = self.get_hook()
hook.run()
mock_safe_exec_cmd.assert_has_calls([
mock.call('docker stop fuel-core-0-nailgun'),
mock.call('docker rm -f fuel-core-0-nailgun'),
mock.call(
'docker run -d -t --privileged '
'-p 0.0.0.0:8001:8001 '
'-p 127.0.0.1:8001:8001 '
'-v /etc/nailgun -v /var/log/docker-logs:/var/log '
'-v /var/www/nailgun:/var/www/nailgun:rw '
'-v /etc/yum.repos.d:/etc/yum.repos.d:rw '
'-v /etc/fuel:/etc/fuel:ro '
'-v /root/.ssh:/root/.ssh:ro '
'--name=fuel-core-0-nailgun '
'fuel/nailgun_0')])
class TestFixDhcrelayConf(TestPreUpgradeHooksBase):
HookClass = FixDhcrelayConf
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_dhcrelay_conf.'
'os.path.exists', side_effect=[True, False])
def test_is_required_returns_true(self, _):
hook = self.get_hook()
self.assertTrue(hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_dhcrelay_conf.'
'os.path.exists',)
def test_is_required_returns_false(self, mock_exists):
testcases = [
# save_from exists, save_as exists
(True, True),
(False, True),
(False, False),
]
hook = self.get_hook()
for case in testcases:
mock_exists.side_effect = case
self.assertFalse(hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_dhcrelay_conf.'
'safe_exec_cmd')
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_dhcrelay_conf.'
'remove')
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_dhcrelay_conf.'
'copy_file')
def test_run(self, mock_cp, mock_rm, mock_exec):
hook = self.get_hook()
hook.run()
mock_cp.assert_called_with(
'/etc/supervisord.d/dhcrelay.conf',
'/etc/supervisord.d/0/dhcrelay.conf')
mock_rm.assert_called_with(
'/etc/supervisord.d/dhcrelay.conf')
mock_exec.assert_called_with(
'supervisorctl stop dhcrelay_monitor')
class TestFixDhcrelayMonitor(TestPreUpgradeHooksBase):
HookClass = FixDhcrelayMonitor
def test_is_required_returns_true(self):
hook = self.get_hook({'from_version': '6.0'})
self.assertTrue(hook.check_if_required())
def test_is_required_returns_false(self):
hook = self.get_hook({'from_version': '6.1'})
self.assertFalse(hook.check_if_required())
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_dhcrelay_monitor.'
'os')
@mock.patch(
'fuel_upgrade.pre_upgrade_hooks.from_any_to_6_1_dhcrelay_monitor.'
'utils.copy')
def test_run(self, mock_copy, mock_os):
hook = self.get_hook()
hook.run()
self.assertIn(
'templates/dhcrelay_monitor', mock_copy.call_args[0][0])
self.assertEqual(
'/usr/local/bin/dhcrelay_monitor', mock_copy.call_args[0][1])
class TestSetFixedVersionInSupervisor(TestPreUpgradeHooksBase):
_module = 'fuel_upgrade.pre_upgrade_hooks.' \
'from_any_to_6_1_fix_version_in_supervisor'
_supervisor_conf = textwrap.dedent('''\
[program:docker-astute]
command=dockerctl start astute --attach
numprocs=1
numprocs_start=0
priority=30
autostart=true
autorestart=true
''')
_supervisor_conf_patched = textwrap.dedent('''\
[program:docker-astute]
command=docker start -a fuel-core-6.0.1-astute
numprocs=1
numprocs_start=0
priority=30
autostart=true
autorestart=true
''')
HookClass = SetFixedVersionInSupervisor
def test_is_required_returns_true(self):
hook = self.get_hook({'from_version': '6.0'})
self.assertTrue(hook.check_if_required())
def test_is_required_returns_false(self):
hook = self.get_hook({'from_version': '6.1'})
self.assertFalse(hook.check_if_required())
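# os.path.exists is patched to report the supervisor config as present only
# on the first call; safe_exec_cmd is mocked so no real supervisorctl runs.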
@mock.patch('{0}.utils.safe_exec_cmd'.format(_module))
@mock.patch('{0}.os.path.exists'.format(_module),
side_effect=itertools.chain([True], itertools.repeat(False)))
def test_run_patches(self, _, m_exec):
f_read = FakeFile(self._supervisor_conf)
f_write = FakeFile()
with mock.patch('{0}.open'.format(self._module)) as m_open:
hook = self.get_hook({'from_version': '6.0.1'})
m_open.side_effect = [f_read, f_write]
hook.run()
m_open.assert_has_calls([
mock.call('/etc/supervisord.d/6.0.1/astute.conf',
'rt', encoding='utf-8'),
mock.call('/etc/supervisord.d/6.0.1/astute.conf',
'wt', encoding='utf-8')])
self.assertEquals(f_write.getvalue(), self._supervisor_conf_patched)
m_exec.assert_called_once_with('supervisorctl update')
@mock.patch('{0}.utils.safe_exec_cmd'.format(_module))
@mock.patch('{0}.os.path.exists'.format(_module),
return_value=False)
def test_run_do_not_patch(self, _, m_exec):
with mock.patch('{0}.open'.format(self._module)) as m_open:
hook = self.get_hook({'from_version': '6.0.1'})
hook.run()
self.assertEqual(m_open.call_count, 0)
m_exec.assert_called_once_with('supervisorctl update')

View File

@ -1,31 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuel_upgrade.engines.raise_error import RaiseErrorUpgrader
from fuel_upgrade import errors
from fuel_upgrade.tests.base import BaseTestCase
class TestRaiseErrorUpgrader(BaseTestCase):
def setUp(self):
self.upgrader = RaiseErrorUpgrader(self.fake_config)
def test_upgrade_raise_error(self):
self.assertRaisesRegexp(
errors.FuelUpgradeException,
RaiseErrorUpgrader.error_message,
self.upgrader.upgrade)

View File

@ -1,120 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import xmlrpclib
from fuel_upgrade.clients import SupervisorClient
from fuel_upgrade.tests.base import BaseTestCase
@mock.patch('fuel_upgrade.clients.supervisor_client.os')
class TestSupervisorClient(BaseTestCase):
def setUp(self):
self.utils_patcher = mock.patch(
'fuel_upgrade.clients.supervisor_client.utils')
self.utils_mock = self.utils_patcher.start()
self.supervisor = SupervisorClient(self.fake_config, '0')
type(self.supervisor).supervisor = mock.PropertyMock()
self.new_version_supervisor_path = '/etc/supervisord.d/9999'
self.previous_version_supervisor_path = '/etc/supervisord.d/0'
def tearDown(self):
self.utils_patcher.stop()
def test_switch_to_new_configs(self, os_mock):
self.supervisor.switch_to_new_configs()
self.utils_mock.symlink.assert_called_once_with(
self.new_version_supervisor_path,
self.fake_config.supervisor['current_configs_prefix'])
self.supervisor.supervisor.reloadConfig.assert_called_once_with()
def test_switch_to_previous_configs(self, os_mock):
self.supervisor.switch_to_previous_configs()
self.utils_mock.symlink.assert_called_once_with(
self.previous_version_supervisor_path,
self.fake_config.supervisor['current_configs_prefix'])
self.supervisor.supervisor.reloadConfig.assert_called_once_with()
def test_stop_all_services(self, _):
self.supervisor.stop_all_services()
self.supervisor.supervisor.stopAllProcesses.assert_called_once_with()
@mock.patch('fuel_upgrade.clients.supervisor_client.SupervisorClient.'
'get_all_processes_safely')
def test_restart_and_wait(self, _, __):
self.supervisor.restart_and_wait()
self.supervisor.supervisor.restart.assert_called_once_with()
timeout = self.utils_mock.wait_for_true.call_args[1]['timeout']
self.assertEqual(timeout, 600)
# since wait_for_true is mocked in all tests, let's check that
# callback really calls get_all_processes_safely function
callback = self.utils_mock.wait_for_true.call_args[0][0]
callback()
self.supervisor.get_all_processes_safely.assert_called_once_with()
def test_get_all_processes_safely(self, _):
self.supervisor.get_all_processes_safely()
self.supervisor.supervisor.getAllProcessInfo.assert_called_once_with()
def test_get_all_processes_safely_does_not_raise_error(self, _):
for exc in (IOError(), xmlrpclib.Fault('', '')):
self.supervisor.supervisor.getAllProcessInfo.side_effect = exc
self.assertIsNone(self.supervisor.get_all_processes_safely())
def test_generate_configs(self, _):
services = [
{'config_name': 'config_name1',
'service_name': 'service_name1',
'command': 'cmd1',
'autostart': True},
{'config_name': 'config_name2',
'service_name': 'service_name2',
'command': 'cmd2',
'autostart': False}]
self.supervisor.generate_config = mock.MagicMock()
self.supervisor.generate_configs(services)
self.assertEqual(
self.supervisor.generate_config.call_args_list,
[mock.call('config_name1', 'service_name1',
'cmd1', autostart=True),
mock.call('config_name2', 'service_name2',
'cmd2', autostart=False)])
def test_generate_config(self, _):
config_path = '/config/path'
with mock.patch('fuel_upgrade.clients.supervisor_client.os.path.join',
return_value=config_path):
self.supervisor.generate_config(
'confing_name1', 'docker-service_name1', 'command1')
self.utils_mock.render_template_to_file.assert_called_once_with(
self.supervisor.supervisor_template_path,
config_path,
{'service_name': 'docker-service_name1',
'command': 'command1',
'log_path': '/var/log/docker-service_name1.log',
'autostart': 'true'})
def test_remove_new_configs(self, _):
self.supervisor.remove_new_configs()
self.utils_mock.remove.assert_called_with('/etc/supervisord.d/9999')

View File

@ -1,209 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from fuel_upgrade.engines.host_system import HostSystemUpgrader
from fuel_upgrade.tests.base import BaseTestCase
from fuel_upgrade.upgrade import UpgradeManager
class TestUpgradeManager(BaseTestCase):
def setUp(self):
super(TestUpgradeManager, self).setUp()
self.version_mock = mock.MagicMock()
self.version_patcher = mock.patch(
'fuel_upgrade.upgrade.VersionFile', return_value=self.version_mock)
self.version_patcher.start()
def tearDown(self):
self.version_patcher.stop()
super(TestUpgradeManager, self).tearDown()
def default_args(self, **kwargs):
default = {
'upgraders': [mock.Mock()],
'config': self.fake_config,
'no_rollback': False}
default.update(kwargs)
return default
def test_run_rollback_in_case_of_errors(self):
upgrader = UpgradeManager(**self.default_args())
engine_mock = upgrader._upgraders[0]
engine_mock.upgrade.side_effect = Exception('Upgrade failed')
self.assertRaisesRegexp(
Exception, 'Upgrade failed', upgrader.run)
self.called_once(self.version_mock.save_current)
self.called_once(self.version_mock.switch_to_new)
engine_mock.upgrade.assert_called_once_with()
engine_mock.rollback.assert_called_once_with()
self.called_once(self.version_mock.switch_to_previous)
def test_run_rollback_for_used_engines(self):
upgrader = UpgradeManager(**self.default_args(
upgraders=[mock.Mock(), mock.Mock(), mock.Mock()],
))
upgrader._upgraders[1].upgrade.side_effect = Exception('Failed')
self.assertRaisesRegexp(Exception, 'Failed', upgrader.run)
self.called_once(upgrader._upgraders[0].upgrade)
self.called_once(upgrader._upgraders[0].rollback)
self.called_once(upgrader._upgraders[1].upgrade)
self.called_once(upgrader._upgraders[1].rollback)
self.method_was_not_called(upgrader._upgraders[2].upgrade)
self.method_was_not_called(upgrader._upgraders[2].rollback)
def test_run_backup_for_all_engines(self):
upgrader = UpgradeManager(**self.default_args(
upgraders=[mock.Mock(), mock.Mock()],
))
upgrader.run()
self.called_once(upgrader._upgraders[0].backup)
self.called_once(upgrader._upgraders[1].backup)
def test_run_backup_fails(self):
upgrader = UpgradeManager(**self.default_args(
upgraders=[mock.Mock(), mock.Mock()],
))
upgrader._upgraders[1].backup.side_effect = Exception('Backup fails')
self.assertRaisesRegexp(
Exception, 'Backup fails', upgrader.run)
self.called_once(upgrader._upgraders[0].backup)
self.called_once(upgrader._upgraders[1].backup)
self.method_was_not_called(upgrader._upgraders[0].rollback)
self.method_was_not_called(upgrader._upgraders[1].rollback)
def test_run_upgrade_for_all_engines(self):
upgrader = UpgradeManager(**self.default_args(
upgraders=[mock.Mock(), mock.Mock()],
))
upgrader.run()
self.called_once(upgrader._upgraders[0].upgrade)
self.method_was_not_called(upgrader._upgraders[0].rollback)
self.called_once(upgrader._upgraders[1].upgrade)
self.method_was_not_called(upgrader._upgraders[1].rollback)
def test_does_not_run_rollback_if_disabled(self):
upgrader = UpgradeManager(**self.default_args(no_rollback=True))
engine_mock = upgrader._upgraders[0]
engine_mock.upgrade.side_effect = Exception('Upgrade failed')
self.assertRaisesRegexp(
Exception, 'Upgrade failed', upgrader.run)
engine_mock.upgrade.assert_called_once_with()
self.method_was_not_called(engine_mock.rollback)
def test_upgrade_succed(self):
upgrader = UpgradeManager(**self.default_args())
engine_mock = upgrader._upgraders[0]
upgrader.run()
engine_mock.upgrade.assert_called_once_with()
self.method_was_not_called(engine_mock.rollback)
self.called_once(self.version_mock.save_current)
self.called_once(self.version_mock.switch_to_new)
self.method_was_not_called(self.version_mock.switch_to_previous)
def test_upgrade_run_on_success_methods(self):
upgrader = UpgradeManager(**self.default_args())
upgrader._on_success = mock.Mock()
upgrader.run()
self.called_once(upgrader._on_success)
def test_upgrade_does_not_fail_if_on_success_method_raise_error(self):
upgrader = UpgradeManager(**self.default_args())
upgrader._on_success = mock.Mock()
upgrader._on_success.side_effect = Exception('error')
upgrader.run()
@mock.patch('fuel_upgrade.engines.host_system.SupervisorClient')
def test_hostsystem_rollback_is_first(self, _):
args = self.default_args()
hostsystem = HostSystemUpgrader(args['config'])
hostsystem.upgrade = mock.Mock()
hostsystem.rollback = mock.Mock()
def check_call():
hostsystem.rollback.assert_called_once_with()
# there's no way to check call order of different mocks, so
# let's use this trick - check that all mock calls were
# after hostsystem rollback call.
args['upgraders'] = [
hostsystem,
mock.Mock(rollback=mock.Mock(side_effect=check_call)),
mock.Mock(rollback=mock.Mock(side_effect=check_call))]
upgrader = UpgradeManager(**args)
upgrader._used_upgraders = args['upgraders']
upgrader.rollback()
@mock.patch('fuel_upgrade.upgrade.utils')
@mock.patch('fuel_upgrade.upgrade.glob.glob',
return_value=['file1', 'file2'])
def test_on_success(self, glob_mock, utils_mock):
upgrader = UpgradeManager(**self.default_args())
upgrader._on_success()
glob_mock.assert_called_once_with(self.fake_config.version_files_mask)
self.assertEqual(
utils_mock.remove.call_args_list,
[mock.call('file1'), mock.call('file2')])
templates_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../templates'))
utils_mock.render_template_to_file.assert_has_calls([
mock.call(
'{0}/nailgun.repo'.format(templates_path),
'/etc/yum.repos.d/mos9999-updates.repo',
{
'name': 'mos9999-updates',
'baseurl': 'http://mirror.fuel-infra.org/mos-repos/'
'centos/mos9999-centos6-fuel/updates/x86_64/',
'gpgcheck': 0,
'skip_if_unavailable': 1,
}),
mock.call(
'{0}/nailgun.repo'.format(templates_path),
'/etc/yum.repos.d/mos9999-security.repo',
{
'name': 'mos9999-security',
'baseurl': 'http://mirror.fuel-infra.org/mos-repos/'
'centos/mos9999-centos6-fuel/security/x86_64/',
'gpgcheck': 0,
'skip_if_unavailable': 1,
}),
])

View File

@ -1,788 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import os
import StringIO
import subprocess
import textwrap
import urllib2
import requests
import six
import yaml
import mock
from mock import patch
from fuel_upgrade import errors
from fuel_upgrade.tests.base import BaseTestCase
from fuel_upgrade import utils
from fuel_upgrade.utils import create_dir_if_not_exists
from fuel_upgrade.utils import exec_cmd
from fuel_upgrade.utils import exec_cmd_iterator
from fuel_upgrade.utils import get_request
from fuel_upgrade.utils import http_retry
from fuel_upgrade.utils import sanitize
from fuel_upgrade.utils import topological_sorting
from fuel_upgrade.utils import wait_for_true
class TestUtils(BaseTestCase):
def make_process_mock(self, return_code=0):
process_mock = mock.Mock()
process_mock.stdout = ['Stdout line 1', 'Stdout line 2']
process_mock.returncode = return_code
return process_mock
def test_exec_cmd_executes_sucessfuly(self):
cmd = 'some command'
process_mock = self.make_process_mock()
with patch.object(
subprocess, 'Popen', return_value=process_mock) as popen_mock:
exec_cmd(cmd)
popen_mock.assert_called_once_with(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
@mock.patch('fuel_upgrade.utils.exec_cmd',
side_effect=errors.ExecutedErrorNonZeroExitCode())
def test_safe_exec_cmd(self, exec_mock):
cmd = 'some command'
utils.safe_exec_cmd(cmd)
exec_mock.assert_called_once_with(cmd)
def test_exec_cmd_raises_error_in_case_of_non_zero_exit_code(self):
cmd = 'some command'
return_code = 1
process_mock = self.make_process_mock(return_code=return_code)
with patch.object(subprocess, 'Popen', return_value=process_mock):
self.assertRaisesRegexp(
errors.ExecutedErrorNonZeroExitCode,
'Shell command executed with "{0}" '
'exit code: {1} '.format(return_code, cmd),
exec_cmd, cmd)
def test_exec_cmd_iterator_executes_sucessfuly(self):
cmd = 'some command'
process_mock = self.make_process_mock()
with patch.object(
subprocess, 'Popen', return_value=process_mock) as popen_mock:
for line in exec_cmd_iterator(cmd):
self.assertTrue(line.startswith('Stdout line '))
popen_mock.assert_called_once_with(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
def test_exec_cmd_iterator_raises_error_in_case_of_non_zero_exit_code(
self):
cmd = 'some command'
return_code = 1
process_mock = self.make_process_mock(return_code=return_code)
with patch.object(subprocess, 'Popen', return_value=process_mock):
with self.assertRaisesRegexp(
errors.ExecutedErrorNonZeroExitCode,
'Shell command executed with "{0}" '
'exit code: {1} '.format(return_code, cmd)):
for line in exec_cmd_iterator(cmd):
self.assertTrue(line.startswith('Stdout line '))
def test_get_request(self):
url = 'http://some-url.com/path'
response = mock.MagicMock()
response.read.return_value = '{"key": "value"}'
response.getcode.return_value = 200
with patch.object(
urllib2, 'urlopen', return_value=response) as urlopen:
resp = get_request(url)
self.assertEqual(({'key': 'value'}, 200), resp)
urlopen.assert_called_once_with(url)
def test_topological_sorting(self):
graph = {
'D': ['C', 'G'],
'E': ['A', 'D'],
'A': [],
'B': ['A'],
'C': ['A'],
'G': []
}
order = topological_sorting(graph)
self.assertEqual(order, ['A', 'B', 'C', 'G', 'D', 'E'])
def test_topological_sorting_raises_cycle_dependencies_error(self):
graph = {
'A': ['C', 'D'],
'B': ['A'],
'C': ['B'],
'D': []
}
self.assertRaisesRegexp(
errors.CyclicDependenciesError,
"Cyclic dependencies error ",
topological_sorting,
graph)
@mock.patch('fuel_upgrade.utils.os.makedirs')
def test_create_dir_if_not_exists_does_not_create_dir(self, mock_makedirs):
path = 'some_path'
with mock.patch(
'fuel_upgrade.utils.os.path.isdir',
return_value=True) as mock_isdir:
create_dir_if_not_exists(path)
mock_isdir.assert_called_once_with(path)
self.method_was_not_called(mock_makedirs)
@mock.patch('fuel_upgrade.utils.os.makedirs')
def test_create_dir_if_not_exists(self, mock_makedirs):
path = 'some_path'
with mock.patch(
'fuel_upgrade.utils.os.path.isdir',
return_value=False) as mock_isdir:
create_dir_if_not_exists(path)
mock_isdir.assert_called_once_with(path)
mock_makedirs.assert_called_once_with(path)
def test_wait_for_true_does_not_raise_errors(self):
self.assertEqual(wait_for_true(lambda: True, timeout=0), True)
def test_wait_for_true_raises_timeout_error(self):
self.assertRaisesRegexp(
errors.TimeoutError,
'Failed to execute command with timeout 0',
wait_for_true,
lambda: False,
timeout=0)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=True)
@mock.patch('fuel_upgrade.utils.copy_dir')
def test_copy_with_dir(self, copy_mock, _):
from_path = '/from_path'
to_path = '/to_path'
utils.copy(from_path, to_path)
copy_mock.assert_called_once_with(from_path, to_path, True, True)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=False)
@mock.patch('fuel_upgrade.utils.copy_file')
def test_copy_with_file(self, copy_mock, _):
from_path = '/from_path'
to_path = '/to_path'
utils.copy(from_path, to_path)
copy_mock.assert_called_once_with(from_path, to_path, True)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=False)
@mock.patch('fuel_upgrade.utils.shutil.copy')
def test_copy_file(self, copy_mock, _):
from_path = '/from_path.txt'
to_path = '/to_path.txt'
utils.copy_file(from_path, to_path)
copy_mock.assert_called_once_with(from_path, to_path)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=True)
@mock.patch('fuel_upgrade.utils.shutil.copy')
def test_copy_file_to_dir(self, copy_mock, _):
from_path = '/from_path.txt'
to_path = '/to_path'
utils.copy_file(from_path, to_path)
copy_mock.assert_called_once_with(from_path, '/to_path/from_path.txt')
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=False)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.shutil.copy')
def test_copy_file_do_not_overwrite(self, copy_mock, _, __):
from_path = '/from_path.txt'
to_path = '/to_path.txt'
utils.copy_file(from_path, to_path, overwrite=False)
self.method_was_not_called(copy_mock)
@mock.patch('fuel_upgrade.utils.shutil.copytree')
def test_copy_dir(self, copy_mock):
from_path = '/from_path'
to_path = '/to_path'
utils.copy_dir(from_path, to_path)
copy_mock.assert_called_once_with(from_path, to_path, symlinks=True)
@mock.patch('fuel_upgrade.utils.os.path.lexists', return_value=True)
@mock.patch('fuel_upgrade.utils.shutil.copytree')
@mock.patch('fuel_upgrade.utils.remove')
def test_copy_dir_overwrite(self, rm_mock, copy_mock, _):
from_path = '/from_path'
to_path = '/to_path'
utils.copy_dir(from_path, to_path)
rm_mock.assert_called_once_with(to_path, ignore_errors=True)
copy_mock.assert_called_once_with(from_path, to_path, symlinks=True)
def test_file_contains_lines_returns_true(self):
with mock.patch(
'__builtin__.open',
self.mock_open("line 1\n line2\n line3")):
self.assertTrue(
utils.file_contains_lines('/some/path', ['line 1', 'line3']))
def test_file_contains_lines_returns_false(self):
with mock.patch(
'__builtin__.open',
self.mock_open("line 1\n line2\n line3")):
self.assertFalse(
utils.file_contains_lines('/some/path', ['line 4', 'line3']))
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.os.symlink')
@mock.patch('fuel_upgrade.utils.remove')
def test_symlink(self, remove_mock, symlink_mock, _):
from_path = '/tmp/from/path'
to_path = '/tmp/to/path'
utils.symlink(from_path, to_path)
symlink_mock.assert_called_once_with(from_path, to_path)
remove_mock.assert_called_once_with(to_path)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=False)
@mock.patch('fuel_upgrade.utils.os.symlink')
@mock.patch('fuel_upgrade.utils.remove')
def test_symlink_no_exist(self, remove_mock, symlink_mock, _):
from_path = '/tmp/from/path'
to_path = '/tmp/to/path'
utils.symlink(from_path, to_path)
symlink_mock.assert_called_once_with(from_path, to_path)
self.called_once(remove_mock)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.symlink')
def test_symlink_if_src_exists_ok(self, symlink_mock, _):
from_path = '/tmp/from/path'
to_path = '/tmp/to/path'
utils.symlink_if_src_exists(from_path, to_path)
symlink_mock.assert_called_once_with(from_path, to_path,
overwrite=True)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=False)
@mock.patch('fuel_upgrade.utils.symlink')
def test_symlink_if_src_exists_not_exists(self, symlink_mock, _):
from_path = '/tmp/from/path'
to_path = '/tmp/to/path'
utils.symlink_if_src_exists(from_path, to_path)
self.method_was_not_called(symlink_mock)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.os.remove')
def test_remove_if_exists(self, remove_mock, exists_mock):
path = '/tmp/some/path'
utils.remove_if_exists(path)
remove_mock.assert_called_once_with(path)
exists_mock.assert_called_once_with(path)
def test_load_fixture(self):
fixture = StringIO.StringIO('''
- &base
fields:
a: 1
b: 2
c: 3
- pk: 1
extend: *base
fields:
a: 13
- pk: 2
extend: *base
fields:
d: 42
''')
setattr(fixture, 'name', 'some.yaml')
result = utils.load_fixture(fixture)
self.assertEqual(len(result), 2)
self.assertEqual(result[0], {
'a': 13,
'b': 2,
'c': 3,
})
self.assertEqual(result[1], {
'a': 1,
'b': 2,
'c': 3,
'd': 42,
})
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.shutil.rmtree')
def test_rmtree(self, rm_mock, exists_mock):
path = '/some/file/path'
utils.rmtree(path)
rm_mock.assert_called_once_with(path, ignore_errors=True)
exists_mock.assert_called_once_with(path)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=False)
@mock.patch('fuel_upgrade.utils.shutil.rmtree')
def test_rmtree_no_errors_if_file_does_not_exist(
self, rm_mock, exists_mock):
path = '/some/file/path'
utils.rmtree(path)
self.method_was_not_called(rm_mock)
exists_mock.assert_called_once_with(path)
def test_check_file_is_valid_json(self):
path = '/path/to/file.json'
with mock.patch(
'__builtin__.open',
self.mock_open('{"valid": "json"}')):
self.assertTrue(utils.check_file_is_valid_json(path))
def test_check_file_is_valid_json_returns_false(self):
path = '/path/to/file.json'
with mock.patch(
'__builtin__.open',
self.mock_open('{"invalid: "json"}')):
self.assertFalse(utils.check_file_is_valid_json(path))
def test_check_file_is_valid_json_false_if_problems_with_access(self):
path = '/path/to/file.json'
with mock.patch('__builtin__.open', side_effect=IOError()):
self.assertFalse(utils.check_file_is_valid_json(path))
def test_byte_to_megabyte(self):
self.assertEqual(utils.byte_to_megabyte(0), 0)
self.assertEqual(utils.byte_to_megabyte(1048576), 1)
def test_calculate_free_space(self):
dev_info = mock.Mock()
dev_info.f_bsize = 1048576
dev_info.f_bavail = 2
with mock.patch('fuel_upgrade.utils.os.statvfs',
return_value=dev_info) as st_mock:
self.assertEqual(utils.calculate_free_space('/tmp/dir'), 2)
st_mock.assert_called_once_with('/tmp/dir/')
@mock.patch('fuel_upgrade.utils.os.path.ismount',
side_effect=[False, False, True])
def test_find_mount_point(self, mock_ismount):
path = '/dir1/dir2/dir3/dir4'
self.assertEqual(utils.find_mount_point(path), '/dir1/dir2')
self.called_times(mock_ismount, 3)
@mock.patch('fuel_upgrade.utils.os.path.getsize', return_value=1048576)
@mock.patch('fuel_upgrade.utils.os.walk',
return_value=[('', '', ['file1', 'file2'])])
@mock.patch('fuel_upgrade.utils.os.path.isfile',
return_value=True)
def test_dir_size(self, _, __, ___):
path = '/path/dir'
self.assertEqual(utils.dir_size(path), 2)
@mock.patch('fuel_upgrade.utils.os.path.getsize', return_value=1048576)
@mock.patch('fuel_upgrade.utils.os.path.isfile', return_value=True)
def test_files_size(self, _, __):
path = ['/path/file1', '/path/file2']
self.assertEqual(utils.files_size(path), 2)
def test_compare_version(self):
self.assertEqual(utils.compare_version('0.1', '0.2'), 1)
self.assertEqual(utils.compare_version('0.1', '0.1.5'), 1)
self.assertEqual(utils.compare_version('0.2', '0.1'), -1)
self.assertEqual(utils.compare_version('0.2', '0.2'), 0)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.copy')
def test_copy_if_does_not_exist_file_exists(self, copy_mock, exists_mock):
utils.copy_if_does_not_exist('from', 'to')
exists_mock.assert_called_once_with('to')
self.method_was_not_called(copy_mock)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=False)
@mock.patch('fuel_upgrade.utils.copy')
def test_copy_if_does_not_exist_file_does_not_exist(
self, copy_mock, exists_mock):
utils.copy_if_does_not_exist('from', 'to')
exists_mock.assert_called_once_with('to')
copy_mock.assert_called_once_with('from', 'to')
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=False)
@mock.patch('fuel_upgrade.utils.copy')
def test_copy_if_exists_file_does_not_exist(
self, copy_mock, exists_mock):
utils.copy_if_exists('from', 'to')
exists_mock.assert_called_once_with('from')
self.method_was_not_called(copy_mock)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.copy')
def test_copy_if_exists_file_exists(
self, copy_mock, exists_mock):
utils.copy_if_exists('from', 'to')
exists_mock.assert_called_once_with('from')
copy_mock.assert_called_once_with('from', 'to')
@mock.patch('fuel_upgrade.utils.os.rename')
def test_rename(self, rename_mock):
utils.rename('source', 'destination')
rename_mock.assert_called_once_with('source', 'destination')
@mock.patch('fuel_upgrade.utils.os.path.lexists', return_value=True)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=False)
@mock.patch('fuel_upgrade.utils.os.remove')
def test_remove_file(self, remove_mock, _, __):
utils.remove('path')
remove_mock.assert_called_once_with('path')
@mock.patch('fuel_upgrade.utils.os.path.lexists', return_value=True)
@mock.patch('fuel_upgrade.utils.os.path.islink', return_value=True)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=True)
@mock.patch('fuel_upgrade.utils.os.remove')
def test_remove_link_to_dir(self, remove_mock, _, __, ___):
utils.remove('path')
remove_mock.assert_called_once_with('path')
@mock.patch('fuel_upgrade.utils.os.path.lexists', return_value=False)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=False)
@mock.patch('fuel_upgrade.utils.os.remove')
def test_remove_file_does_not_exist(self, remove_mock, _, __):
utils.remove('path')
self.method_was_not_called(remove_mock)
@mock.patch('fuel_upgrade.utils.os.path.lexists', return_value=True)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=True)
@mock.patch('fuel_upgrade.utils.shutil.rmtree')
def test_remove_dir(self, remove_mock, _, __):
utils.remove('path')
remove_mock.assert_called_once_with('path', ignore_errors=True)
@mock.patch('fuel_upgrade.utils.yaml')
def test_save_as_yaml(self, yaml_mock):
path = '/tmp/path'
data = {'a': 'b'}
mock_open = self.mock_open('')
with mock.patch('__builtin__.open', mock_open):
utils.save_as_yaml(path, data)
yaml_mock.dump.assert_called_once_with(data, default_flow_style=False)
@mock.patch('fuel_upgrade.utils.yaml')
def test_read_from_yaml(self, yaml_mock):
path = '/tmp/path'
data = yaml.dump({'a': 'b'})
mock_open = self.mock_open(data)
with mock.patch('fuel_upgrade.utils.io.open', mock_open):
utils.read_from_yaml(path)
yaml_mock.load.assert_called_once_with(data)
def test_generate_uuid_string(self):
random_string = utils.generate_uuid_string()
self.assertEqual(len(random_string), 36)
self.assertTrue(isinstance(random_string, str))
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.file_contains_lines', return_value=True)
def test_verify_postgres_dump(self, file_contains_mock, exists_mock):
pg_dump_path = '/tmp/some/path'
utils.verify_postgres_dump(pg_dump_path)
patterns = [
'-- PostgreSQL database cluster dump',
'-- PostgreSQL database dump',
'-- PostgreSQL database dump complete',
'-- PostgreSQL database cluster dump complete']
exists_mock.assert_called_once_with(pg_dump_path)
file_contains_mock.assert_called_once_with(pg_dump_path, patterns)
def test_file_extension(self):
cases = [
('', ''),
('asdf', ''),
('asdf.', ''),
('asdf.txt', 'txt'),
('asdf.txt.trtr', 'trtr')]
for case in cases:
self.assertEqual(utils.file_extension(case[0]), case[1])
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
def test_file_exists_returns_true(self, exists_mock):
self.assertTrue(utils.file_exists('path'))
exists_mock.assert_called_once_with('path')
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=False)
def test_file_exists_returns_false(self, exists_mock):
self.assertFalse(utils.file_exists('path'))
exists_mock.assert_called_once_with('path')
@mock.patch('fuel_upgrade.utils.os.walk')
def test_iterfiles(self, walk):
for _ in utils.iterfiles('path/to/dir'):
pass
walk.assert_called_once_with('path/to/dir', topdown=True)
@mock.patch('fuel_upgrade.utils.os.walk')
def test_iterfiles_filter(self, walk):
expected_files = ['/fake/path/1', '/fake/path/2']
walk.return_value = [('/fake/path', '', '1'), ('/fake/path', '', '2')]
files = list(utils.iterfiles_filter('/fake/path', '*1'))
self.assertEqual(files, expected_files[:1])
def test_render_template_to_file(self):
template_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../templates/nailgun.repo'))
with open(template_path, 'r') as f:
template = f.read()
mopen = mock.mock_open(read_data=template)
with mock.patch('__builtin__.open', mopen, create=True):
utils.render_template_to_file('mocked', 'mocked', {
'name': 'mos7.0-updates',
'baseurl': 'http://mirror.fuel-infra.org/mos-repos/centos/'
'mos7.0-centos6-fuel/updates/x86_64/',
'gpgcheck': 0,
'skip_if_unavailable': 1,
})
mopen().write.assert_called_once_with(textwrap.dedent('''\
[mos7.0-updates]
name=mos7.0-updates
baseurl=http://mirror.fuel-infra.org/mos-repos\
/centos/mos7.0-centos6-fuel/updates/x86_64/
gpgcheck=0
skip_if_unavailable=1
'''))
class TestVersionedFile(BaseTestCase):
def setUp(self):
self.path = '/tmp/path.ext'
self.versioned_file = utils.VersionedFile(self.path)
@mock.patch('fuel_upgrade.utils.glob.glob', return_value=[])
def test_next_file_name_empty_dir(self, _):
self.assertEqual(
self.versioned_file.next_file_name(),
'{0}.1'.format(self.path))
@mock.patch('fuel_upgrade.utils.glob.glob',
return_value=['/tmp/path.ext',
'/tmp/path.ext.10',
'/tmp/path.ext.6'])
def test_next_file_name_with_files(self, _):
self.assertEqual(
self.versioned_file.next_file_name(),
'{0}.11'.format(self.path))
@mock.patch('fuel_upgrade.utils.glob.glob',
return_value=['/tmp/path.ext',
'/tmp/path.ext.10',
'/tmp/path.ext.6'])
def test_sorted_files(self, _):
self.assertEqual(
self.versioned_file.sorted_files(),
['/tmp/path.ext.10', '/tmp/path.ext.6'])
def test_normversion(self):
cases = [
# (input, output)
('6', '6.0.0'),
('6.0', '6.0.0'),
('6.1', '6.1.0'),
('6.1.0', '6.1.0'),
('6.1.1', '6.1.1'),
('6.1.1.1', '6.1.1.1'),
]
for input_, output in cases:
self.assertEqual(utils.normversion(input_), output)
class TestSanitizer(BaseTestCase):
original = {
'admin_password': 'r00tme',
'not_a_pass': 1,
'nested': {
'password_here': 'JuYReDm4',
'nested_again': [
'apassword!',
'jcqOyKEf',
{'login': 'root', 'a_password': '8xMflcaD', 'x': 55.2},
{
'and_again': {
'UPPERCASE_PASSWORD': 'VpE8gqKN',
'password_as_list': ['it', 'will', 'be', 'changed'],
'admin_token': 'Ab8ph9qO'
}
}
]
}
}
expected = {
'admin_password': '******',
'not_a_pass': 1,
'nested': {
'password_here': '******',
'nested_again': [
'apassword!',
'jcqOyKEf',
{'login': 'root', 'a_password': '******', 'x': 55.2},
{
'and_again': {
'UPPERCASE_PASSWORD': 'VpE8gqKN',
'password_as_list': '******',
'admin_token': '******'
}
}
]
}
}
expected_custom_mask = {
'admin_password': 'XXX',
'not_a_pass': 1,
'nested': {
'password_here': 'XXX',
'nested_again': [
'apassword!',
'jcqOyKEf',
{'login': 'root', 'a_password': 'XXX', 'x': 55.2},
{
'and_again': {
'UPPERCASE_PASSWORD': 'VpE8gqKN',
'password_as_list': 'XXX',
'admin_token': 'XXX'
}
}
]
}
}
def test_hide_data(self):
self.assertEqual(
sanitize(self.original, ['password', 'token']),
self.expected
)
def test_original_object_unchanged(self):
copy_conf = deepcopy(self.original)
sanitize(self.original, ['password', 'token'])
self.assertEqual(self.original, copy_conf)
def test_custom_mask(self):
self.assertEqual(
sanitize(self.original, ['password', 'token'], mask='XXX'),
self.expected_custom_mask
)
@mock.patch('fuel_upgrade.utils.time.sleep')
class TestHttpRetry(BaseTestCase):
def _get_http_error(self, error_code):
response = mock.Mock(status_code=error_code)
return requests.HTTPError(response=response)
def test_do_not_retry_on_not_interesting_errors(self, msleep):
method = mock.Mock(
side_effect=self._get_http_error(404),
__name__='fn')
wrapped_method = http_retry(status_codes=[500])(method)
self.assertRaises(requests.HTTPError, wrapped_method)
self.called_once(method)
self.method_was_not_called(msleep)
def test_do_retry_on_interesting_errors(self, msleep):
method = mock.Mock(
side_effect=self._get_http_error(500),
__name__='fn')
wrapped_method = http_retry(status_codes=[500], attempts=13)(method)
self.assertRaises(requests.HTTPError, wrapped_method)
self.called_times(method, 13)
self.called_times(msleep, 12)
def test_do_sleep_on_attempts(self, msleep):
method = mock.Mock(
side_effect=self._get_http_error(500),
__name__='fn')
wrapped_method = http_retry(
status_codes=[500], attempts=2, interval=42)(method)
self.assertRaises(requests.HTTPError, wrapped_method)
msleep.assert_called_once_with(42)
def test_decorated_method_use_arguments(self, _):
method = mock.Mock(__name__='fn')
wrapped_method = http_retry(status_codes=[500])(method)
wrapped_method(42, 'test')
method.assert_called_once_with(42, 'test')
def test_stop_retrying_if_success(self, msleep):
method = mock.Mock(
side_effect=[self._get_http_error(500), 'return value'],
__name__='fn')
wrapped_method = http_retry(status_codes=[500], attempts=13)(method)
result = wrapped_method()
self.assertEqual(result, 'return value')
self.called_times(method, 2)
self.called_once(msleep)
class TestGetNonUnique(BaseTestCase):
def test_get_duplicates(self):
self.assertItemsEqual([2, 3], utils.get_non_unique([2, 2, 2, 3, 3, 1]))
def test_empty_if_no_duplicates(self):
self.assertEqual([], list(utils.get_non_unique(six.moves.range(3))))
def test_empty_if_empty_input(self):
self.assertEqual([], list(utils.get_non_unique([])))

View File

@ -1,54 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from fuel_upgrade.tests.base import BaseTestCase
from fuel_upgrade.version_file import VersionFile
class TestVersionFile(BaseTestCase):
def setUp(self):
self.version_file = VersionFile(self.fake_config)
@mock.patch('fuel_upgrade.version_file.utils')
def test_save_current(self, mock_utils):
self.version_file.save_current()
mock_utils.copy_if_does_not_exist.assert_called_once_with(
'/etc/fuel/version.yaml',
'/var/lib/fuel_upgrade/9999/version.yaml')
@mock.patch('fuel_upgrade.version_file.utils')
def test_switch_to_new(self, mock_utils):
self.version_file.switch_to_new()
mock_utils.create_dir_if_not_exists.assert_called_once_with(
'/etc/fuel/9999')
mock_utils.copy.assert_called_once_with(
'/tmp/upgrade_path/config/version.yaml',
'/etc/fuel/9999/version.yaml')
mock_utils.symlink.assert_called_once_with(
'/etc/fuel/9999/version.yaml',
'/etc/fuel/version.yaml')
@mock.patch('fuel_upgrade.version_file.utils.symlink')
def test_switch_to_previous(self, symlink_mock):
self.version_file.switch_to_previous()
symlink_mock.assert_called_once_with(
'/etc/fuel/0/version.yaml',
'/etc/fuel/version.yaml')

View File

@ -1,170 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import logging
import os
import six
from fuel_upgrade import messages
from fuel_upgrade import utils
from fuel_upgrade.version_file import VersionFile
from fuel_upgrade.engines.host_system import HostSystemUpgrader
logger = logging.getLogger(__name__)
class UpgradeManager(object):
"""Upgrade manager is used to orchestrate upgrading process.
:param upgraders: a list with upgrader classes to use; each upgrader
must inherit the :class:`BaseUpgrader`
:param no_rollback: call :meth:`BaseUpgrader.rollback` method
in case of exception during execution
"""
def __init__(self, upgraders, config, no_rollback=True):
#: an object with configuration context
self._config = config
#: a list of upgraders to use
self._upgraders = upgraders
#: a list of used upgraders (needed by the rollback feature)
self._used_upgraders = []
#: should we make rollback in case of error?
self._rollback = not no_rollback
#: version.yaml manager
self._version_file = VersionFile(self._config)
self._version_file.save_current()
def run(self):
"""Runs consequentially all registered upgraders.
.. note:: in case of exception the `rollback` method will be called
"""
logger.info('*** START UPGRADING')
self._version_file.switch_to_new()
for upgrader in self._upgraders:
logger.debug('%s: backuping...', upgrader.__class__.__name__)
try:
upgrader.backup()
except Exception as exc:
logger.exception(
'%s: failed to backup: "%s"',
upgrader.__class__.__name__, exc)
logger.error('*** UPGRADE FAILED')
raise
for upgrader in self._upgraders:
logger.debug('%s: upgrading...', upgrader.__class__.__name__)
self._used_upgraders.append(upgrader)
try:
upgrader.upgrade()
except Exception as exc:
logger.exception(
'%s: failed to upgrade: "%s"',
upgrader.__class__.__name__, exc)
if self._rollback:
self.rollback()
logger.error('*** UPGRADE FAILED')
raise
try:
self._on_success()
except Exception as exc:
logger.exception(
'Could not complete on_success actions due to %s',
six.text_type(exc))
logger.info('*** UPGRADING MASTER NODE DONE SUCCESSFULLY')
logger.info('*** PLEASE REBOOT YOUR BOOTSTRAP NODES IN ORDER TO MAKE'
' SURE THAT THEY USE THE LATEST BOOTSTRAP IMAGE')
def _on_success(self):
"""Do some useful job if upgrade was done successfully
Remove saved version files for all upgrades
NOTE(eli): It solves several problems:
1. user runs upgrade 5.0 -> 5.1 which fails
upgrade system saves version which we upgrade
from in file working_dir/5.1/version.yaml.
Then user runs upgrade 5.0 -> 5.0.1 which
successfully upgraded. Then user runs again
upgrade 5.0.1 -> 5.1, but there is saved file
working_dir/5.1/version.yaml which contains
5.0 version, and upgrade system thinks that
it's upgrading from 5.0 version, as result
it tries to make database dump from wrong
version of container.
2. without this hack user can run upgrade
second time and loose his data, this hack
prevents this case because before upgrade
checker will use current version instead
of saved version to determine version which
we run upgrade from.
"""
for version_file in glob.glob(self._config.version_files_mask):
utils.remove(version_file)
self._setup_update_repos()
def _setup_update_repos(self):
"""Setup updates/security repos on master node."""
template = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'templates', 'nailgun.repo')
for repo in self._config.master_node_repos:
destination = '{0}.repo'.format(
os.path.join('/etc/yum.repos.d', repo['name']))
utils.render_template_to_file(template, destination, repo)
logger.warning(messages.update_your_master_node)
def rollback(self):
logger.debug('Run rollback')
# Because of issue #1452378 [1], we have to perform HostSystem's
# rollback before the others, so we move it to the end of the list.
#
# [1]: https://bugs.launchpad.net/fuel/+bug/1452378
hostsystem = next((
upgrader for upgrader in self._used_upgraders
if isinstance(upgrader, HostSystemUpgrader)),
None)
if hostsystem is not None:
self._used_upgraders.remove(hostsystem)
self._used_upgraders.append(hostsystem)
# do rollback in reverse order for all the upgraders
while self._used_upgraders:
upgrader = self._used_upgraders.pop()
logger.debug('%s: rollbacking...', upgrader.__class__.__name__)
upgrader.rollback()
self._version_file.switch_to_previous()
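# A minimal usage sketch (illustrative only; `config` stands for the parsed
# configuration object and the engine list is an assumption): the manager is
# constructed with already-instantiated upgrader engines, and run() drives
# the backup -> upgrade -> on_success sequence, rolling back on failure
# unless no_rollback is set.
#
#   upgraders = [HostSystemUpgrader(config)]
#   UpgradeManager(upgraders, config, no_rollback=False).run()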

View File

@ -1,880 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from fnmatch import fnmatch
import functools
import glob
import io
import json
import logging
import os
import re
import shutil
import subprocess
import time
import urllib2
import uuid
from copy import deepcopy
from distutils.version import StrictVersion
from mako.template import Template
import requests
import six
from six.moves import range
import yaml
from fuel_upgrade import errors
logger = logging.getLogger(__name__)
def exec_cmd(cmd):
"""Execute command with logging.
Output of stdout and stderr will be written
to the log.
:param cmd: shell command
"""
logger.debug('Execute command "%s"', cmd)
child = subprocess.Popen(
cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
logger.debug('Stdout and stderr of command "%s":', cmd)
for line in child.stdout:
logger.debug(line.rstrip())
_wait_and_check_exit_code(cmd, child)
def safe_exec_cmd(cmd):
"""Execute command with logging.
Output of stdout and stderr will be written
to the log. Doesn't raise an error in case
of a non-zero exit code.
:param cmd: shell command
"""
try:
exec_cmd(cmd)
except errors.ExecutedErrorNonZeroExitCode as exc:
logger.warn(exc)
def exec_cmd_iterator(cmd):
"""Execute command with logging.
:param cmd: shell command
:returns: generator where each item
is a line from stdout
"""
logger.debug('Execute command "%s"', cmd)
child = subprocess.Popen(
cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
logger.debug('Stdout and stderr of command "%s":', cmd)
for line in child.stdout:
logger.debug(line.rstrip())
yield line
_wait_and_check_exit_code(cmd, child)
def _wait_and_check_exit_code(cmd, child):
"""Wait for child and check it's exit code
:param cmd: command
:param child: object which returned by subprocess.Popen
:raises: ExecutedErrorNonZeroExitCode
"""
child.wait()
exit_code = child.returncode
if exit_code != 0:
raise errors.ExecutedErrorNonZeroExitCode(
'Shell command executed with "{0}" '
'exit code: {1} '.format(exit_code, cmd))
logger.debug('Command "%s" successfully executed', cmd)
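# A small usage sketch (the shell commands are assumptions): exec_cmd() logs
# stdout/stderr and raises errors.ExecutedErrorNonZeroExitCode on a non-zero
# exit code, while exec_cmd_iterator() additionally yields each stdout line.
#
#   exec_cmd('ls /etc/fuel')
#   for line in exec_cmd_iterator('cat /etc/fuel/version.yaml'):
#       logger.info(line.rstrip())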
def get_request(url):
"""Make http get request and deserializer json response
:param url: url
:returns list|dict: deserialized response
"""
logger.debug('GET request to %s', url)
response = urllib2.urlopen(url)
response_data = response.read()
response_code = response.getcode()
logger.debug('GET response from %s, code %d, data: %s',
url, response_code, response_data)
return json.loads(response_data), response_code
def topological_sorting(dep_graph):
"""Implementation of topological sorting algorithm
http://en.wikipedia.org/wiki/Topological_sorting
:param dep_graph: graph of dependencies, where key is
a node and value is a list of dependencies
:returns: list of nodes
:raises CyclicDependencies:
"""
sorted_nodes = []
graph = deepcopy(dep_graph)
while graph:
cyclic = True
for node, dependencies in sorted(graph.items(), key=lambda n: n[0]):
for dependency in dependencies:
if dependency in graph:
break
else:
cyclic = False
del graph[node]
sorted_nodes.append(node)
if cyclic:
raise errors.CyclicDependenciesError(
'Cyclic dependencies error {0}'.format(graph))
return sorted_nodes
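# For example (an illustrative graph, dependencies listed per node), every
# node appears after its dependencies and nodes are visited in sorted order:
#
#   >>> topological_sorting({'A': [], 'B': ['A'], 'C': ['B']})
#   ['A', 'B', 'C']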
def create_dir_if_not_exists(dir_path):
"""Creates directory if it doesn't exist
:param dir_path: directory path
"""
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
def render_template_to_file(src, dst, params):
"""Render mako template and write it to specified file
:param src: path to template
:param dst: path where rendered template will be saved
"""
logger.debug('Render template from %s to %s with params: %s',
src, dst, params)
with open(src, 'r') as f:
template_cfg = f.read()
with open(dst, 'w') as f:
rendered_cfg = Template(template_cfg).render(**params)
f.write(rendered_cfg)
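# A small usage sketch (the template and destination paths are assumptions;
# the parameter names come from the nailgun.repo template used elsewhere in
# this package):
#
#   render_template_to_file(
#       '/tmp/upgrade_path/templates/nailgun.repo',
#       '/etc/yum.repos.d/example.repo',
#       {'name': 'example', 'baseurl': 'http://example.com/x86_64/',
#        'gpgcheck': 0, 'skip_if_unavailable': 1})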
def wait_for_true(check, timeout=60, interval=0.5):
"""Execute command with retries
:param check: callable object
:param timeout: timeout
:returns: result of call method
:raises TimeoutError:
"""
start_time = time.time()
while True:
result = check()
if result:
return result
if time.time() - start_time > timeout:
raise errors.TimeoutError(
'Failed to execute '
'command with timeout {0}'.format(timeout))
time.sleep(interval)
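# A small usage sketch: the callable is polled until it returns a truthy
# value, and errors.TimeoutError is raised once the timeout expires.
#
#   wait_for_true(lambda: True, timeout=0)    # returns True
#   wait_for_true(lambda: False, timeout=0)   # raises errors.TimeoutError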
def symlink(source, destination, overwrite=True):
"""Creates a symbolic link to the resource.
:param source: symlink from
:param destination: symlink to
:param overwrite: overwrite a destination if True
"""
logger.debug(
'Symlinking "%s" -> "%s" [overwrite=%d]',
source, destination, overwrite)
if overwrite or not os.path.exists(destination):
remove(destination)
os.symlink(source, destination)
else:
logger.debug('Skip symlinking process')
def symlink_if_src_exists(source, destination, overwrite=True):
"""Creates a symbolic link to the resource but only if source exists.
:param source: symlink from
:param destination: symlink to
:param overwrite: overwrite a destination if True
"""
if not os.path.exists(source):
logger.debug(
'Skip creating symlink, because "%s" does not exists', source)
return
symlink(source, destination, overwrite=overwrite)
def hardlink(source, destination, overwrite=True):
"""Creates a hardlink link to the resource.
:param source: hardlink from
:param destination: hardlink to
:param overwrite: overwrite a destination if True
"""
logger.debug(
'Creating hardlink "%s" -> "%s" [overwrite=%d]',
source, destination, overwrite)
if overwrite or not os.path.exists(destination):
remove_if_exists(destination)
os.link(source, destination)
else:
logger.debug('Skip hardlink creation process')
def remove_if_exists(path):
"""Removes files if it exists
:param path: path to file for removal
"""
if os.path.exists(path):
logger.debug('Remove file "%s"', path)
os.remove(path)
def file_contains_lines(file_path, patterns):
"""Checks if file contains lines which described by patterns
:param file_path: path to file
:param patterns: list of strings
:returns: True if file matches all patterns
False if file doesn't match one or more patterns
"""
logger.debug(
'Check if file "%s" matches to pattern "%s"', file_path, patterns)
regexps = [re.compile(pattern) for pattern in patterns]
with open(file_path, 'r') as f:
for line in f:
for i, regexp in enumerate(regexps):
result = regexp.search(line)
if result:
del regexps[i]
if regexps:
logger.warn('Cannot find lines %s in file %s', regexps, file_path)
return False
return True
def copy_if_does_not_exist(from_path, to_path):
"""Copies destination does not exist
:param from_path: src path
:param to_path: dst path
"""
if os.path.exists(to_path):
logger.debug(
'Skip file copying, because file %s '
'already exists', to_path)
return
copy(from_path, to_path)
def copy_if_exists(from_path, to_path):
"""Copies destination if it exists
:param from_path: src path
:param to_path: dst path
"""
if not os.path.exists(from_path):
logger.debug(
'Skip file copying, because file %s '
'does not exist', from_path)
return
copy(from_path, to_path)
def copy(source, destination, overwrite=True, symlinks=True):
"""Copy a given file or directory from one place to another.
Both `source` and `destination` should be a path to either file or
directory. In case `source` is a path to file, the `destination` could
be a path to directory.
:param source: copy from
:param destination: copy to
:param overwrite: overwrite destination if True
:param symlinks: preserve symlinks if True
"""
if os.path.isdir(source):
copy_dir(source, destination, overwrite, symlinks)
else:
copy_file(source, destination, overwrite)
def copy_file(source, destination, overwrite=True):
"""Copy a given source file to a given destination.
:param source: copy from
:param destination: copy to
:param overwrite: overwrite destination if True
"""
logger.debug(
'Copying "%s" -> "%s" [overwrite=%d]',
source, destination, overwrite)
# transform destination to path/to/file, not path/to/dir
if os.path.isdir(destination):
basename = os.path.basename(source)
destination = os.path.join(destination, basename)
# copy only if overwrite is true or destination doesn't exist
if overwrite or not os.path.exists(destination):
shutil.copy(source, destination)
else:
logger.debug('Skip copying process')
def copy_dir(source, destination, overwrite=True, symlinks=True):
"""Copy a given directory to a given destination.
:param source: copy from
:param destination: copy to
:param overwrite: overwrite destination if True
:param symlinks: preserve symlinks if True
"""
logger.debug(
'Copying "%s" -> "%s" [overwrite=%d symlinks=%d]',
source, destination, overwrite, symlinks)
if overwrite or not os.path.lexists(destination):
if os.path.lexists(destination):
remove(destination, ignore_errors=True)
shutil.copytree(source, destination, symlinks=symlinks)
else:
logger.debug('Skip copying process')
def remove(path, ignore_errors=True):
"""Remove a given path, no matter what it is: file or directory.
:param path: a file or directory to remove
:param ignore_errors: ignore some errors and non-existence if True
"""
logger.debug('Removing "%s"', path)
if ignore_errors and not os.path.lexists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors)
else:
os.remove(path)
def rmtree(source, ignore_errors=True):
"""Remove directory
:param str source: path to directory
:param bool ignore_errors: ignores error if True
"""
logger.debug('Removing %s', source)
if os.path.exists(source):
shutil.rmtree(source, ignore_errors=ignore_errors)
def rename(source, destination, overwrite=True):
"""Rename some source into a given destination.
In Unix terms, it's a move operation.
:param str source: a source to be renamed
:param str destination: rename to
"""
logger.debug(
'Renaming "%s" -> "%s" [overwrite=%d]',
source, destination, overwrite)
if overwrite or not os.path.exists(destination):
os.rename(source, destination)
def dict_merge(a, b):
"""Recursively merges two given dictionaries.
:param a: a first dict
:param b: a second dict
:returns: a result dict (merge result of a and b)
"""
if not isinstance(b, dict):
return deepcopy(b)
result = deepcopy(a)
for k, v in b.iteritems():
if k in result and isinstance(result[k], dict):
result[k] = dict_merge(result[k], v)
else:
result[k] = deepcopy(v)
return result
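# For example (illustrative dicts), values from the second dict win on
# conflicts while nested dicts are merged key by key:
#
#   dict_merge({'a': {'x': 1}, 'b': 1}, {'a': {'y': 2}, 'b': 2})
#   # -> {'a': {'x': 1, 'y': 2}, 'b': 2}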
def load_fixture(fileobj, loader=None):
"""Loads a fixture from a given `fileobj`
Process the fixture with our extended markup
that provides an inherit feature.
:param fileobj: a file-like object with fixture
:param loader: a fixture loader; use the default one if None
"""
# a key that's used to mark some item as abstract
pk_key = 'pk'
# a key that's used to tell some item inherit data
# from an abstract one
inherit_key = 'extend'
# a list of supported loaders; the loader should be a func
# that receives a file-like object
supported_loaders = {
'.json': json.load,
'.yaml': yaml.load,
'.yml': yaml.load,
}
def extend(obj):
if inherit_key in obj:
obj[inherit_key] = extend(obj[inherit_key])
return dict_merge(obj.get(inherit_key, {}), obj)
# try to get loader from a given fixture if loader is None
if loader is None:
_, ext = os.path.splitext(fileobj.name)
loader = supported_loaders[ext]
fixture = loader(fileobj)
# render fixture
fixture = filter(lambda obj: obj.get(pk_key) is not None, fixture)
for i in range(0, len(fixture)):
fixture[i] = extend(fixture[i])
fixture[i].pop(inherit_key, None)
return [f['fields'] for f in fixture]
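# A sketch of the extended markup (the fixture below is illustrative): an
# item without 'pk' is treated as abstract and can be pulled in via
# 'extend', so loading this fixture yields [{'a': 2, 'b': 1}]:
#
#   - &base
#     fields: {a: 1, b: 1}
#   - pk: 1
#     extend: *base
#     fields: {a: 2}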
def check_file_is_valid_json(path):
"""Checks if file contains valid json
:param str path: path to json file
:returns: True if valid, False if invalid
"""
try:
json.load(open(path, 'r'))
except (ValueError, IOError):
return False
return True
def calculate_free_space(path):
"""Calculate free space
:param str path: path to directory for free space calculation
:returns: free space in megabytes
"""
# NOTE(eli): to calculate the size of a mount point
# we need to add a `/` symbol at the end of the path
directory = '{0}/'.format(path)
device_info = os.statvfs(directory)
return byte_to_megabyte(device_info.f_bsize * device_info.f_bavail)
def byte_to_megabyte(byte):
"""Convert bytes to megabytes
:param byte: quantity of bytes
:returns: megabytes
"""
return byte / 1024 ** 2
def find_mount_point(path):
"""Tries to find mount point of directory
:param str path: path to
:returns: path to mount point
"""
path = os.path.abspath(path)
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
def files_size(files_list):
"""Returns size of files
:param list files_list: list of files
:returns: sum of files sizes
"""
size = sum(
os.path.getsize(f) for f in files_list if os.path.isfile(f))
return byte_to_megabyte(size)
def dir_size(path):
"""Returns size of file or directory
:param str path: path to the directory
:returns: size of the directory
"""
total_size = 0
for dirpath, _, filenames in os.walk(path, followlinks=True):
for f in filenames:
fp = os.path.join(dirpath, f)
if os.path.isfile(fp):
total_size += os.path.getsize(fp)
return byte_to_megabyte(total_size)
def compare_version(v1, v2):
"""Compare two versions
:param str v1: version 1
:param str v2: version 2
:returns: 0 - versions are equal
-1 - version 1 is higher than version 2
1 - version 2 is higher than version 1
"""
version1 = StrictVersion(v1)
version2 = StrictVersion(v2)
if version1 == version2:
return 0
elif version1 > version2:
return -1
else:
return 1
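# A small worked example of the inverted return convention described above:
#
#   compare_version('0.1', '0.2')  ->  1   (the second version is higher)
#   compare_version('0.2', '0.1')  -> -1   (the first version is higher)
#   compare_version('0.2', '0.2')  ->  0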
def get_required_size_for_actions(actions, update_path):
"""Returns a size on disk that will be required for completing actions list
:param actions: a list of actions
:returns: a size
"""
rv = {}
for action in actions:
# copy / copy_from_update case
if action['name'] == 'copy':
src = action['from']
dst = action['to']
if not os.path.isdir(dst):
dst = os.path.dirname(dst)
if dst not in rv:
rv[dst] = 0
if os.path.isdir(src):
rv[dst] += dir_size(src)
else:
rv[dst] += files_size([src])
return rv
def save_as_yaml(path, data):
"""Saves data as yaml data structure in file
:param str path: path to file to save data
:param data: data to save as yaml
"""
logger.debug('Update file %s with data %s', path, data)
with open(path, 'w') as f:
astute_str = yaml.dump(data, default_flow_style=False)
f.write(astute_str)
def read_from_yaml(path):
"""Opens file, reads data from it and deserializes it from yaml
:param str path: path to file
"""
with io.open(path, 'r', encoding='utf-8') as f:
data = yaml.load(f.read())
logger.debug('Read data %s from file %s', data, path)
return data
def generate_uuid_string():
"""Generates uuid string
:returns: generated uuid
"""
return str(uuid.uuid4())
def verify_postgres_dump(pg_dump_path):
"""Checks that postgresql dump is correct
:param str pg_dump_path: path to postgresql dump
"""
if not os.path.exists(pg_dump_path):
return False
patterns = [
'-- PostgreSQL database cluster dump',
'-- PostgreSQL database dump',
'-- PostgreSQL database dump complete',
'-- PostgreSQL database cluster dump complete']
return file_contains_lines(pg_dump_path, patterns)
def file_extension(file_path):
"""Retrieves extension from file name
:param str file_path: path to the file or file name
:returns: file's extension
"""
_, ext = os.path.splitext(file_path)
return ext[1:]
def file_exists(file_path):
"""Checks if file exists
:param str file_path: path to the file
:returns: True if file exists
False if it doesn't
"""
return os.path.exists(file_path)
def iterfiles(path):
"""Iterate over all files in the ``path`` directory.
:param path: a path to find in
"""
for root, dirnames, filenames in os.walk(path, topdown=True):
for filename in filenames:
yield os.path.join(root, filename)
def extract_env_version(release_version):
"""Returns environment version based on release version.
A release version consists of 'OSt' and 'MOS' versions: '2014.1.1-5.0.2'
so we need to extract the 'MOS' version and return it as the result.
:param release_version: a string which represents a release version
:returns: an environment version
"""
separator = '-'
# unfortunately, Fuel 5.0 didn't have an env version in release_version
# so we need to handle that special case
if release_version == '2014.1':
return '5.0'
# we need to extract a second part since it's what we're looking for
return release_version.split(separator)[1]
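# For example (release strings follow the '<OSt>-<MOS>' pattern described
# above):
#
#   extract_env_version('2014.1.1-5.0.2')   # -> '5.0.2'
#   extract_env_version('2014.1')           # -> '5.0' (special-cased Fuel 5.0)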
def sanitize(obj, keywords, mask='******'):
"""Find and hide private data in obj using keywords.
:param obj: object to be sanitized
:param keywords: describe keywords to be found in obj
:param mask: a string for substitution of sanitized values
:return: sanitized copy of obj
"""
def _helper(_obj):
if isinstance(_obj, dict):
for option in _obj:
if any([key in option for key in keywords]):
_obj[option] = mask
else:
_helper(_obj[option])
elif isinstance(_obj, (list, set, tuple)):
for value in _obj:
_helper(value)
return _obj
# Making sure the original object remains untouched
obj_copy = deepcopy(obj)
return _helper(obj_copy)
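# Illustrative usage only, not part of the original module; the data is made up.
# Any key that contains one of the keywords is replaced with the mask, nested
# structures included, and the input object itself is left untouched:
#
#     data = {'astute': {'db_password': 'secret'}, 'HOSTNAME': 'fuel'}
#     sanitize(data, ['password'])
#     # -> {'astute': {'db_password': '******'}, 'HOSTNAME': 'fuel'}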
def iterfiles_filter(dir_path, file_pattern):
"""Returns generator of paths to files that satisfy file_patterns condtion
:param dir_path: path to directory, e.g /etc/puppet/
:param file_pattern: unix filepattern to match files
"""
for file_path in iterfiles(dir_path):
if fnmatch(file_path, file_pattern):
yield file_path
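# Illustrative usage only, not part of the original module; the path and
# pattern are made up:
#
#     for pp_file in iterfiles_filter('/etc/puppet/modules', '*.pp'):
#         print(pp_file)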
def normversion(version):
"""Normalize a given version to have exactly three components.
:param version: a version to be normalized
:returns: a normalized version
"""
components = version.split('.')
if len(components) < 3:
for _ in range(0, 3 - len(components)):
components.append('0')
return '.'.join(components)
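# Illustrative usage only, not part of the original module:
#
#     >>> normversion('6.1')
#     '6.1.0'
#     >>> normversion('6.1.0')
#     '6.1.0'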
class VersionedFile(object):
"""Set of methods for versioned files.
If `basename` is '/tmp/file.ext' it allows
getting and filtering the list of files named
'/tmp/file.ext.N', where N is an integer.
:param str basename: prefix for versioned files
"""
def __init__(self, basename):
#: prefix for all versioned files
self.basename = basename
self._pattern = '{0}.{{0}}'.format(self.basename)
def next_file_name(self):
"""Returns free file name
If directory has file '/tmp/file.ext.10'
method returns file name '/tmp/file.ext.11'.
If it does not have any files it returns
file name '/tmp/file.ext.0', where '/tmp/file.ext'
is example of `basename`
:returns: file name
"""
return self._pattern.format(self._get_last_number() + 1)
def sorted_files(self):
"""Files sorted by extension
:returns: list of files sorted by numeric extension, highest first
"""
return sorted(
self._files_with_numeric_extension(),
key=lambda f: int(file_extension(f)),
reverse=True)
def _get_last_number(self):
"""Retrieves last number from file name
:returns: the last file number, or 0 if there are no files
"""
files = self.sorted_files()
if not files:
return 0
return int(file_extension(files[0]))
def _files_with_numeric_extension(self):
"""Fiels with numeric extension
:returns: files which have digit extension
"""
return filter(
lambda f: file_extension(f).isdigit(),
glob.glob(self._pattern.format('*')))
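# Illustrative usage only, not part of the original module; the path is made up:
#
#     vf = VersionedFile('/var/lib/fuel_upgrade/astute.yaml')
#     vf.next_file_name()   # -> '/var/lib/fuel_upgrade/astute.yaml.1' when no
#                           #    versioned copies exist yet, then '.2', '.3', ...
#     vf.sorted_files()     # -> existing copies, highest number first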
class http_retry(object):
"""Retry entire method if it raises HTTP error with specific status code.
:param status_codes: retry if one of the status codes was raised
:param attempts: a number of attempts
:param interval: an interval between attempts in seconds
"""
def __init__(self, status_codes, attempts=3, interval=2):
self._status_codes = status_codes
self._attempts = attempts
self._interval = interval
def __call__(self, fn):
@functools.wraps(fn)
def _wrapped(*args, **kwargs):
for attempt in range(1, self._attempts + 1):
try:
return fn(*args, **kwargs)
except requests.HTTPError as exc:
logger.exception(
'HTTP request ends with %d (attempt %d/%d)',
exc.response.status_code, attempt, self._attempts)
# we should stop perform retries if
# - status_code is not interesting for us
# - it's the last attempt
if any([
exc.response.status_code not in self._status_codes,
attempt == self._attempts,
]):
raise
time.sleep(self._interval)
return _wrapped
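# Illustrative usage only, not part of the original module; the URL and status
# codes are made up:
#
#     @http_retry(status_codes=[502, 503], attempts=5, interval=1)
#     def get_tasks():
#         resp = requests.get('http://127.0.0.1:8000/api/v1/tasks/')
#         resp.raise_for_status()
#         return resp.json()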
def get_non_unique(iterable):
"""returns the non unique items without keeping the order."""
counter = collections.defaultdict(int)
for i in iterable:
counter[i] += 1
return [k for k, v in six.iteritems(counter) if v > 1]
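# Illustrative usage only, not part of the original module (sorted() is used
# here just to make the unordered result deterministic):
#
#     >>> sorted(get_non_unique(['a', 'b', 'a', 'c', 'c']))
#     ['a', 'c']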


@ -1,82 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from fuel_upgrade import utils
logger = logging.getLogger(__name__)
class VersionFile(object):
"""Fuel version file manager
:param config: :class:`Config` object
"""
def __init__(self, config):
#: src path to new version yaml file
self.src_new_version_file = config.new_upgrade_version_path
#: dst path to new version yaml file
self.dst_new_version_file = config.new_version_path
#: path to current version file
self.current_version_file = config.current_fuel_version_path
#: path to previous version file
self.previous_version_file = config.previous_version_path
#: path to saved version yaml
self.store_version_file = config.from_version_path
def save_current(self):
"""Save current version in working directory if it was not saved before
This action is important in case
the upgrade script was interrupted
after symlinking the version.yaml file.
"""
utils.create_dir_if_not_exists(os.path.dirname(
self.store_version_file))
utils.copy_if_does_not_exist(
self.current_version_file,
self.store_version_file)
def switch_to_new(self):
"""Switch version file to new version
* creates new version yaml file
* and creates symlink to /etc/fuel/version.yaml
"""
logger.info('Switch version file to new version')
utils.create_dir_if_not_exists(os.path.dirname(
self.dst_new_version_file))
utils.copy(
self.src_new_version_file,
self.dst_new_version_file)
utils.symlink(
self.dst_new_version_file,
self.current_version_file)
def switch_to_previous(self):
"""Switch version file symlink to previous version"""
logger.info('Switch current version file to previous version')
utils.symlink(self.previous_version_file, self.current_version_file)
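# Illustrative call order only, not part of the original module; `config` is
# assumed to be a fuel_upgrade Config object carrying the path attributes
# referenced above:
#
#     version_file = VersionFile(config)
#     version_file.save_current()       # keep a copy of the running version.yaml
#     version_file.switch_to_new()      # point /etc/fuel/version.yaml at the new file
#     ...
#     version_file.switch_to_previous() # rollback path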


@ -1,6 +0,0 @@
argparse==1.2.1
PyYAML==3.10
Mako==0.9.1
requests==2.2.1
six==1.5.2
docker-py==0.3.2


@ -1,60 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import find_packages
from setuptools import setup
def parse_requirements_txt():
"""Parses requirements.txt
Returns a list of packages
to be used as `install_requires`.
"""
root = os.path.dirname(os.path.abspath(__file__))
requirements = []
with open(os.path.join(root, 'requirements.txt'), 'r') as f:
for line in f.readlines():
line = line.rstrip()
if not line or line.startswith('#'):
continue
requirements.append(line)
return requirements
REQUIREMENTS = parse_requirements_txt()
setup(
name='fuel_upgrade',
version='0.1.0',
description='Upgrade system for Fuel-master node',
long_description="""Upgrade system for Fuel-master node""",
classifiers=[
"Programming Language :: Python",
"Topic :: System :: Software Distribution"],
author='Mirantis Inc.',
author_email='product@mirantis.com',
url='http://mirantis.com',
keywords='fuel upgrade mirantis',
packages=find_packages(),
zip_safe=False,
install_requires=REQUIREMENTS,
include_package_data=True,
entry_points={
'console_scripts': [
'fuel-upgrade = fuel_upgrade.cli:main']})


@ -1,6 +0,0 @@
-r requirements.txt
hacking==0.7
mock==1.0.1
nose==1.1.2
nose2==0.4.1
nose-timer==0.2.0


@ -1,38 +0,0 @@
[tox]
minversion = 1.6
skipsdist = True
envlist = py26,py27,pep8
[testenv]
usedevelop = True
install_command = pip install {packages}
setenv = VIRTUAL_ENV={envdir}
deps = -r{toxinidir}/test-requirements.txt
commands =
nosetests {posargs:fuel_upgrade}
[tox:jenkins]
downloadcache = ~/cache/pip
[testenv:pep8]
deps = hacking==0.10
usedevelop = False
commands =
flake8 {posargs:.}
[testenv:venv]
commands = {posargs:}
[testenv:devenv]
envdir = devenv
usedevelop = True
[flake8]
ignore = H234,H302,H802
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,tools,__init__.py,docs
show-pep8 = True
show-source = True
count = True
[hacking]
import_exceptions = testtools.matchers


@ -27,8 +27,6 @@ function usage {
echo " -p, --flake8 Run FLAKE8 and HACKING compliance check"
echo " -P, --no-flake8 Don't run static code checks"
echo " -t, --tests Run a given test files"
echo " -u, --upgrade Run tests for UPGRADE system"
echo " -U, --no-upgrade Don't run tests for UPGRADE system"
echo " -w, --webui Run all UI tests"
echo " -W, --no-webui Don't run all UI tests"
echo " -e, --extensions Run EXTENSIONS unit/integration tests"
@ -55,8 +53,6 @@ function process_options {
-n|--nailgun) nailgun_tests=1;;
-N|--no-nailgun) no_nailgun_tests=1;;
-x|--performance) performance_tests=1;;
-u|--upgrade) upgrade_system=1;;
-U|--no-upgrade) no_upgrade_system=1;;
-p|--flake8) flake8_checks=1;;
-P|--no-flake8) no_flake8_checks=1;;
-w|--webui) ui_lint_checks=1; ui_unit_tests=1; ui_func_tests=1;;
@ -95,7 +91,6 @@ testropts="--with-timer --timer-warning=10 --timer-ok=2 --timer-top-n=10"
# nosetest xunit options
NAILGUN_XUNIT=${NAILGUN_XUNIT:-"$ROOT/nailgun.xml"}
FUELUPGRADE_XUNIT=${FUELUPGRADE_XUNIT:-"$ROOT/fuelupgrade.xml"}
EXTENSIONS_XUNIT=${EXTENSIONS_XUNIT:-"$ROOT/extensions.xml"}
NAILGUN_PORT=${NAILGUN_PORT:-5544}
TEST_NAILGUN_DB=${TEST_NAILGUN_DB:-nailgun}
@ -111,8 +106,6 @@ mkdir -p $ARTIFACTS
nailgun_tests=0
no_nailgun_tests=0
performance_tests=0
upgrade_system=0
no_upgrade_system=0
flake8_checks=0
no_flake8_checks=0
ui_lint_checks=0
@ -154,7 +147,6 @@ function run_tests {
$ui_lint_checks -eq 0 && \
$ui_unit_tests -eq 0 && \
$ui_func_tests -eq 0 && \
$upgrade_system -eq 0 && \
$extensions_tests -eq 0 && \
$flake8_checks -eq 0 ]]; then
@ -162,7 +154,6 @@ function run_tests {
if [ $no_ui_lint_checks -ne 1 ]; then ui_lint_checks=1; fi
if [ $no_ui_unit_tests -ne 1 ]; then ui_unit_tests=1; fi
if [ $no_ui_func_tests -ne 1 ]; then ui_func_tests=1; fi
if [ $no_upgrade_system -ne 1 ]; then upgrade_system=1; fi
if [ $no_flake8_checks -ne 1 ]; then flake8_checks=1; fi
if [ $no_extensions_tests -ne 1 ]; then extensions_tests=1; fi
@ -194,11 +185,6 @@ function run_tests {
run_ui_func_tests || errors+=" ui_func_tests"
fi
if [ $upgrade_system -eq 1 ]; then
echo "Starting upgrade system tests..."
run_upgrade_system_tests || errors+=" upgrade_system_tests"
fi
if [ $extensions_tests -eq 1 ]; then
echo "Starting Extensions tests..."
run_extensions_tests || errors+=" extensions_tests"
@ -337,32 +323,6 @@ function run_ui_func_tests {
}
# Run tests for fuel upgrade system
#
# Arguments:
#
# $@ -- tests to be run; with no arguments all tests will be run
function run_upgrade_system_tests {
local UPGRADE_TESTS="$ROOT/fuel_upgrade_system/fuel_upgrade/fuel_upgrade/tests/"
local result=0
if [ $# -ne 0 ]; then
# run selected tests
TESTS="$@"
$TESTRTESTS -vv $testropts $TESTS || result=1
else
# run all tests
pushd $ROOT/fuel_upgrade_system/fuel_upgrade >> /dev/null
TOXENV=$TOXENV \
tox -- -vv $testropts $UPGRADE_TESTS --xunit-file $FUELUPGRADE_XUNIT || result=1
popd >> /dev/null
fi
return $result
}
function run_flake8_subproject {
local DIRECTORY=$1
local result=0
@ -399,7 +359,6 @@ function run_extensions_tests {
function run_flake8 {
local result=0
run_flake8_subproject nailgun && \
run_flake8_subproject fuel_upgrade_system/fuel_upgrade && \
run_flake8_subproject fuel_upgrade_system/fuel_package_updates && \
return $result
}
@ -582,8 +541,6 @@ function guess_test_run {
if [[ $1 == *functional* && $1 == *.js ]]; then
run_ui_func_tests $1
elif [[ $1 == *fuel_upgrade_system* ]]; then
run_upgrade_system_tests $1
else
run_nailgun_tests $1
fi