created s3 cinder backup driver

Co-Authored-By: Masahiro Okuri <okuri.masahiro@jp.fujitsu.com>
Change-Id: If24871ad6bf7558ae71e560141bf61ede61f81f5
Implements: blueprint support-s3-backup-driver
Jegor van Opdorp 2020-08-17 14:33:45 +02:00 committed by Hironori Shiina
parent 02809ef4ec
commit 337248138e
11 changed files with 1102 additions and 3 deletions

cinder/backup/drivers/s3.py Normal file

@@ -0,0 +1,376 @@
# Copyright (C) 2020 leafcloud b.v.
# Copyright (C) 2020 FUJITSU LIMITED
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a backup service that uses S3 as the backend
**Related Flags**
:backup_s3_endpoint_url: The url where the S3 server is listening.
(default: None)
:backup_s3_store_bucket: The S3 bucket to be used to store
the Cinder backup data. (default: volumebackups)
:backup_s3_store_access_key: The S3 query token access key. (default: None)
:backup_s3_store_secret_key: The S3 query token secret key. (default: None)
:backup_s3_sse_customer_key: The SSECustomerKey.
backup_s3_sse_customer_algorithm must be set at
the same time to enable SSE. (default: None)
:backup_s3_sse_customer_algorithm: The SSECustomerAlgorithm.
backup_s3_sse_customer_key must be set at
the same time to enable SSE. (default: None)
:backup_s3_object_size: The size in bytes of S3 backup objects.
(default: 52428800)
:backup_s3_block_size: The size in bytes that changes are tracked
for incremental backups. backup_s3_object_size
has to be a multiple of backup_s3_block_size.
(default: 32768).
:backup_s3_md5_validation: Enable or Disable MD5 validation in the S3 backend.
(default: True)
:backup_s3_http_proxy: Address or host for the http proxy server.
(default: '')
:backup_s3_https_proxy: Address or host for the https proxy server.
(default: '')
:backup_s3_timeout: The time in seconds until a timeout exception is thrown.
(default: 60)
:backup_s3_max_pool_connections: The maximum number of connections
to keep in a connection pool. (default: 10)
:backup_s3_retry_max_attempts: An integer representing the maximum number of
retry attempts that will be made on
a single request. (default: 4)
:backup_s3_retry_mode: A string representing the type of retry mode.
e.g. legacy, standard, adaptive. (default: legacy)
:backup_s3_verify_ssl: Enable or Disable SSL verification.
(default: True)
:backup_s3_ca_cert_file: A filename of the CA cert bundle to use.
(default: None)
:backup_s3_enable_progress_timer: Enable or Disable the timer to send the
periodic progress notifications to
Ceilometer when backing up the volume to the
S3 backend storage. (default: True)
:backup_compression_algorithm: Compression algorithm to use for volume
backups. Supported options are:
None (to disable), zlib, bz2
and zstd. (default: zlib)
"""
import base64
import functools
import io
import itertools as it
import socket
import boto3
from botocore.config import Config
from botocore import exceptions as boto_exc
from botocore.vendored.requests.packages.urllib3 import exceptions as \
urllib_exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils.secretutils import md5
from oslo_utils import timeutils
from cinder.backup import chunkeddriver
from cinder import exception
from cinder.i18n import _
from cinder import interface
LOG = logging.getLogger(__name__)
s3backup_service_opts = [
cfg.StrOpt('backup_s3_endpoint_url',
help=_('The url where the S3 server is listening.')),
cfg.StrOpt('backup_s3_store_access_key', secret=True,
help=_('The S3 query token access key.')),
cfg.StrOpt('backup_s3_store_secret_key', secret=True,
help=_('The S3 query token secret key.')),
cfg.StrOpt('backup_s3_store_bucket', default='volumebackups',
help=_('The S3 bucket to be used '
'to store the Cinder backup data.')),
cfg.IntOpt('backup_s3_object_size', default=52428800,
help='The size in bytes of S3 backup objects'),
cfg.IntOpt('backup_s3_block_size', default=32768,
help='The size in bytes that changes are tracked '
'for incremental backups. backup_s3_object_size '
'has to be a multiple of backup_s3_block_size.'),
cfg.BoolOpt('backup_s3_enable_progress_timer', default=True,
help='Enable or Disable the timer to send the periodic '
'progress notifications to Ceilometer when backing '
'up the volume to the S3 backend storage. The '
'default value is True to enable the timer.'),
cfg.StrOpt('backup_s3_http_proxy', default='',
help='Address or host for the http proxy server.'),
cfg.StrOpt('backup_s3_https_proxy', default='',
help='Address or host for the https proxy server.'),
cfg.FloatOpt('backup_s3_timeout', default=60,
help='The time in seconds until '
'a timeout exception is thrown.'),
cfg.IntOpt('backup_s3_max_pool_connections', default=10,
help='The maximum number of connections '
'to keep in a connection pool.'),
cfg.IntOpt('backup_s3_retry_max_attempts', default=4,
help='An integer representing the maximum number of '
'retry attempts that will be made on a single request.'),
cfg.StrOpt('backup_s3_retry_mode', default='legacy',
help='A string representing the type of retry mode. '
'e.g. legacy, standard, adaptive'),
cfg.BoolOpt('backup_s3_verify_ssl', default=True,
help='Enable or Disable SSL verification.'),
cfg.StrOpt('backup_s3_ca_cert_file', default=None,
help='path/to/cert/bundle.pem '
'- A filename of the CA cert bundle to use.'),
cfg.BoolOpt('backup_s3_md5_validation', default=True,
help='Enable or Disable MD5 validation in the S3 backend.'),
cfg.StrOpt('backup_s3_sse_customer_key', default=None, secret=True,
help='The SSECustomerKey. backup_s3_sse_customer_algorithm '
'must be set at the same time to enable SSE.'),
cfg.StrOpt('backup_s3_sse_customer_algorithm', default=None,
help='The SSECustomerAlgorithm. backup_s3_sse_customer_key '
'must be set at the same time to enable SSE.')
]
CONF = cfg.CONF
CONF.register_opts(s3backup_service_opts)
CONF.import_opt('backup_compression_algorithm', 'cinder.backup.chunkeddriver')
class S3ConnectionFailure(exception.BackupDriverException):
message = _("S3 connection failure: %(reason)s")
class S3ClientError(exception.BackupDriverException):
message = _("S3 client error: %(reason)s")
def _wrap_exception(func):
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except boto_exc.ClientError as err:
raise S3ClientError(reason=err)
except Exception as err:
raise S3ConnectionFailure(reason=err)
return func_wrapper
@interface.backupdriver
class S3BackupDriver(chunkeddriver.ChunkedBackupDriver):
"""Provides backup, restore and delete of backup objects within S3."""
def __init__(self, context, db=None):
chunk_size_bytes = CONF.backup_s3_object_size
sha_block_size_bytes = CONF.backup_s3_block_size
backup_bucket = CONF.backup_s3_store_bucket
enable_progress_timer = CONF.backup_s3_enable_progress_timer
super(S3BackupDriver, self).__init__(context, chunk_size_bytes,
sha_block_size_bytes,
backup_bucket,
enable_progress_timer,
db)
config_args = dict(
connect_timeout=CONF.backup_s3_timeout,
read_timeout=CONF.backup_s3_timeout,
max_pool_connections=CONF.backup_s3_max_pool_connections,
retries={
'max_attempts': CONF.backup_s3_retry_max_attempts,
'mode': CONF.backup_s3_retry_mode})
if CONF.backup_s3_http_proxy:
config_args['proxies'] = {'http': CONF.backup_s3_http_proxy}
if CONF.backup_s3_https_proxy:
config_args.setdefault('proxies', {}).update(
{'https': CONF.backup_s3_https_proxy})
conn_args = {
'aws_access_key_id': CONF.backup_s3_store_access_key,
'aws_secret_access_key': CONF.backup_s3_store_secret_key,
'endpoint_url': CONF.backup_s3_endpoint_url,
'config': Config(**config_args)}
if CONF.backup_s3_verify_ssl:
conn_args['verify'] = CONF.backup_s3_ca_cert_file
else:
conn_args['verify'] = False
self.conn = boto3.client('s3', **conn_args)
@staticmethod
def get_driver_options():
backup_opts = [CONF._opts['backup_compression_algorithm']['opt']]
return s3backup_service_opts + backup_opts
@_wrap_exception
def put_container(self, bucket):
"""Create the bucket if not exists."""
try:
self.conn.head_bucket(Bucket=bucket)
except boto_exc.ClientError as e:
# NOTE: If it was a 404 error, then the bucket does not exist.
error_code = e.response['Error']['Code']
if error_code != '404':
raise
self.conn.create_bucket(Bucket=bucket)
@_wrap_exception
def get_container_entries(self, bucket, prefix):
"""Get bucket entry names."""
paginator = self.conn.get_paginator('list_objects_v2')
page_iterator = paginator.paginate(Bucket=bucket,
Prefix=prefix)
result = [obj_dict.get('Key') for obj_dict in it.chain.from_iterable(
page.get('Contents') for page in page_iterator)]
return result
def get_object_writer(self, bucket, object_name, extra_metadata=None):
"""Return a writer object.
Returns a writer object that stores a chunk of volume data in an
S3 object store.
"""
return S3ObjectWriter(bucket, object_name, self.conn)
def get_object_reader(self, bucket, object_name, extra_metadata=None):
"""Return reader object.
Returns a reader object that retrieves a chunk of backed-up volume data
from a S3 object store.
"""
return S3ObjectReader(bucket, object_name, self.conn)
@_wrap_exception
def delete_object(self, bucket, object_name):
"""Deletes a backup object from a S3 object store."""
self.conn.delete_object(
Bucket=bucket,
Key=object_name)
def _generate_object_name_prefix(self, backup):
"""Generates a S3 backup object name prefix.
prefix = volume_volid/timestamp/az_saz_backup_bakid
volid is volume id.
timestamp is time in UTC with format of YearMonthDateHourMinuteSecond.
saz is storage_availability_zone.
bakid is backup id for volid.
"""
az = 'az_%s' % self.az
backup_name = '%s_backup_%s' % (az, backup.id)
volume = 'volume_%s' % (backup.volume_id)
timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S")
prefix = volume + '/' + timestamp + '/' + backup_name
LOG.debug('generate_object_name_prefix: %s', prefix)
return prefix
def update_container_name(self, backup, container):
"""Use the bucket name as provided - don't update."""
return
def get_extra_metadata(self, backup, volume):
"""S3 driver does not use any extra metadata."""
return
def check_for_setup_error(self):
required_options = ('backup_s3_endpoint_url',
'backup_s3_store_access_key',
'backup_s3_store_secret_key')
for opt in required_options:
val = getattr(CONF, opt, None)
if not val:
raise exception.InvalidConfigurationValue(option=opt,
value=val)
if ((not CONF.backup_s3_sse_customer_algorithm)
!= (not CONF.backup_s3_sse_customer_key)):
LOG.warning("Both the backup_s3_sse_customer_algorithm and "
"backup_s3_sse_customer_key options must be set "
"to enable SSE. SSE is disabled.")
try:
self.conn.list_buckets()
except Exception:
LOG.exception("Cannot list s3 buckets during backup "
"driver initialization.")
raise
class S3ObjectWriter(object):
def __init__(self, bucket, object_name, conn):
self.bucket = bucket
self.object_name = object_name
self.conn = conn
self.data = bytearray()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def write(self, data):
self.data += data
@_wrap_exception
def close(self):
reader = io.BytesIO(self.data)
contentmd5 = base64.b64encode(
md5(self.data, usedforsecurity=False).digest()).decode('utf-8')
put_args = {'Bucket': self.bucket,
'Body': reader,
'Key': self.object_name,
'ContentLength': len(self.data)}
if CONF.backup_s3_md5_validation:
put_args['ContentMD5'] = contentmd5
if (CONF.backup_s3_sse_customer_algorithm
and CONF.backup_s3_sse_customer_key):
put_args.update(
SSECustomerAlgorithm=CONF.backup_s3_sse_customer_algorithm,
SSECustomerKey=CONF.backup_s3_sse_customer_key)
self.conn.put_object(**put_args)
return contentmd5
class S3ObjectReader(object):
def __init__(self, bucket, object_name, conn):
self.bucket = bucket
self.object_name = object_name
self.conn = conn
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
@_wrap_exception
def read(self):
get_args = {'Bucket': self.bucket,
'Key': self.object_name}
if (CONF.backup_s3_sse_customer_algorithm
and CONF.backup_s3_sse_customer_key):
get_args.update(
SSECustomerAlgorithm=CONF.backup_s3_sse_customer_algorithm,
SSECustomerKey=CONF.backup_s3_sse_customer_key)
# NOTE: these retries account for errors that occur when streaming
# down the data from s3 (i.e. socket errors and read timeouts that
# occur after receiving an OK response from s3). Other retryable
# exceptions such as throttling errors and 5xx errors are already
# retried by botocore.
last_exception = None
for i in range(CONF.backup_s3_retry_max_attempts):
try:
resp = self.conn.get_object(**get_args)
return resp.get('Body').read()
except (socket.timeout, socket.error,
urllib_exc.ReadTimeoutError,
boto_exc.IncompleteReadError) as e:
last_exception = e
continue
raise S3ClientError(reason=last_exception)
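The writer and reader classes above are driven by the chunked backup driver through get_object_writer() and get_object_reader(). The following is a minimal, illustrative usage sketch; the endpoint, credentials, bucket name, and object key are hypothetical, and the client is built the same way S3BackupDriver.__init__ builds it from the backup_s3_* options.
import boto3

# Hypothetical endpoint and credentials; in the driver these come from
# backup_s3_endpoint_url, backup_s3_store_access_key and
# backup_s3_store_secret_key.
conn = boto3.client('s3',
                    endpoint_url='http://s3.example.com:9000',
                    aws_access_key_id='ACCESS',
                    aws_secret_access_key='SECRET')
conn.create_bucket(Bucket='volumebackups')

# Store one chunk of volume data; close() runs on __exit__ and issues the
# put_object call (with ContentMD5 when backup_s3_md5_validation is True).
key = 'volume_fake/20200817143345/az_nova_backup_fake-00001'
with S3ObjectWriter('volumebackups', key, conn) as writer:
    writer.write(b'chunk of volume data')

# Read the same chunk back; read() retries on socket and read-timeout errors.
data = S3ObjectReader('volumebackups', key, conn).read()
assert data == b'chunk of volume data'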


@@ -69,11 +69,12 @@ CONF.register_opts(backup_cmd_opts)
LOG = None
-# NOTE(mriedem): The default backup driver uses swift and performs read/write
+# NOTE: The default backup driver uses swift and performs read/write
# operations in a thread. swiftclient will log requests and responses at DEBUG
# level, which can cause a thread switch and break the backup operation. So we
-# set a default log level of WARN for swiftclient to try and avoid this issue.
-_EXTRA_DEFAULT_LOG_LEVELS = ['swiftclient=WARN']
+# set a default log level of WARN for swiftclient and boto to try and avoid
+# this issue.
+_EXTRA_DEFAULT_LOG_LEVELS = ['swiftclient=WARN', 'botocore=WARN']
def _launch_backup_process(launcher, num_process, _semaphore):


@@ -38,6 +38,7 @@ from cinder.backup.drivers import gcs as cinder_backup_drivers_gcs
from cinder.backup.drivers import glusterfs as cinder_backup_drivers_glusterfs
from cinder.backup.drivers import nfs as cinder_backup_drivers_nfs
from cinder.backup.drivers import posix as cinder_backup_drivers_posix
+from cinder.backup.drivers import s3 as cinder_backup_drivers_s3
from cinder.backup.drivers import swift as cinder_backup_drivers_swift
from cinder.backup import manager as cinder_backup_manager
from cinder.cmd import backup as cinder_cmd_backup
@@ -222,6 +223,7 @@ def list_opts():
cinder_backup_drivers_glusterfs.glusterfsbackup_service_opts,
cinder_backup_drivers_nfs.nfsbackup_service_opts,
cinder_backup_drivers_posix.posixbackup_service_opts,
+cinder_backup_drivers_s3.s3backup_service_opts,
cinder_backup_drivers_swift.swiftbackup_service_opts,
cinder_backup_manager.backup_manager_opts,
cinder_cmd_backup.backup_cmd_opts,


@@ -0,0 +1,621 @@
# Copyright (C) 2020 leafcloud b.v.
# Copyright (C) 2020 FUJITSU LIMITED
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Backup s3 code."""
import bz2
import filecmp
import hashlib
import os
import shutil
import tempfile
import threading
from unittest import mock
import zlib
from eventlet import tpool
from moto import mock_s3
from oslo_utils import units
from cinder.backup.drivers import s3 as s3_dr
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.tests.unit.backup import fake_s3_client
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
class FakeMD5(object):
def __init__(self, *args, **kwargs):
pass
@classmethod
def digest(cls):
return 's3cindermd5'.encode('utf-8')
@classmethod
def hexdigest(cls):
return 's3cindermd5'
def s3_client(func):
@mock.patch.object(s3_dr.boto3, 'client',
fake_s3_client.FakeS3Boto3.Client)
@mock.patch.object(hashlib, 'md5', FakeMD5)
def func_wrapper(self, *args, **kwargs):
return func(self, *args, **kwargs)
return func_wrapper
def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(reason=_('fake'))
def fake_delete(self, backup):
raise exception.BackupOperationError()
def _fake_delete_object(self, bucket_name, object_name):
raise AssertionError('delete_object method should not be called.')
class BackupS3TestCase(test.TestCase):
"""Test Case for s3."""
_DEFAULT_VOLUME_ID = 'c7eb81f4-bec6-4730-a60f-8888885874df'
def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID):
vol = {'id': volume_id,
'size': 1,
'status': 'available',
'volume_type_id': self.vt['id']}
return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self,
volume_id=_DEFAULT_VOLUME_ID,
container=s3_dr.CONF.backup_s3_store_bucket,
parent_id=None,
status=None,
service_metadata=None):
try:
db.volume_get(self.ctxt, volume_id)
except exception.NotFound:
self._create_volume_db_entry(volume_id=volume_id)
kwargs = {'size': 1,
'container': container,
'volume_id': volume_id,
'parent_id': parent_id,
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'status': status,
'service_metadata': service_metadata,
}
backup = objects.Backup(context=self.ctxt, **kwargs)
backup.create()
return backup
def _write_effective_compression_file(self, data_size):
"""Ensure file contents can be effectively compressed."""
self.volume_file.seek(0)
self.volume_file.write(bytes([65] * data_size))
self.volume_file.seek(0)
def setUp(self):
super(BackupS3TestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.volume_file = tempfile.NamedTemporaryFile()
self.temp_dir = tempfile.mkdtemp()
self.addCleanup(self.volume_file.close)
# Remove tempdir.
self.addCleanup(shutil.rmtree, self.temp_dir)
self.size_volume_file = 0
for _i in range(0, 64):
self.volume_file.write(os.urandom(units.Ki))
self.size_volume_file += 1024
notify_patcher = mock.patch(
'cinder.volume.volume_utils.notify_about_backup_usage')
notify_patcher.start()
self.addCleanup(notify_patcher.stop)
self.flags(backup_s3_endpoint_url=None)
self.flags(backup_s3_store_access_key='s3cinderaccesskey')
self.flags(backup_s3_store_secret_key='s3cindersecretkey')
self.flags(backup_s3_sse_customer_key='s3aeskey')
@mock_s3
def test_backup_correctly_configured(self):
self.service = s3_dr.S3BackupDriver(self.ctxt)
self.assertIsInstance(self.service, s3_dr.S3BackupDriver)
@mock_s3
def test_backup(self):
volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec2'
container_name = 'test-bucket'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
result = service.backup(backup, self.volume_file)
self.assertIsNone(result)
@mock_s3
def test_backup_uncompressed(self):
volume_id = '2b9f10a3-42b4-4fdf-b316-000000ceb039'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
@mock_s3
def test_backup_bz2(self):
volume_id = 'dc0fee35-b44e-4f13-80d6-000000e1b50c'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='bz2')
service = s3_dr.S3BackupDriver(self.ctxt)
self._write_effective_compression_file(self.size_volume_file)
service.backup(backup, self.volume_file)
@mock_s3
def test_backup_zlib(self):
volume_id = '5cea0535-b6fb-4531-9a38-000000bea094'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='zlib')
service = s3_dr.S3BackupDriver(self.ctxt)
self._write_effective_compression_file(self.size_volume_file)
service.backup(backup, self.volume_file)
@mock_s3
def test_backup_zstd(self):
volume_id = '471910a0-a197-4259-9c50-0fc3d6a07dbc'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='zstd')
service = s3_dr.S3BackupDriver(self.ctxt)
self._write_effective_compression_file(self.size_volume_file)
service.backup(backup, self.volume_file)
@mock_s3
def test_backup_default_container(self):
volume_id = '9552017f-c8b9-4e4e-a876-00000053349c'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=None)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertEqual('volumebackups', backup.container)
@mock_s3
def test_backup_custom_container(self):
volume_id = '1da9859e-77e5-4731-bd58-000000ca119e'
container_name = 'fake99'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertEqual(container_name, backup.container)
@mock_s3
def test_backup_shafile(self):
volume_id = '6465dad4-22af-48f7-8a1a-000000218907'
backup = self._create_backup_db_entry(volume_id=volume_id)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
# Verify sha contents
content1 = service._read_sha256file(backup)
self.assertEqual(64 * units.Ki / content1['chunk_size'],
len(content1['sha256s']))
@mock_s3
def test_backup_cmp_shafiles(self):
volume_id = '1a99ac67-c534-4fe3-b472-0000001785e2'
backup = self._create_backup_db_entry(volume_id=volume_id)
service1 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service1.backup(backup, self.volume_file)
# Create incremental backup with no change to contents
deltabackup = self._create_backup_db_entry(volume_id=volume_id,
container=None,
parent_id=backup.id)
service2 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service2.backup(deltabackup, self.volume_file)
# Compare shas from both files
content1 = service1._read_sha256file(backup)
content2 = service2._read_sha256file(deltabackup)
self.assertEqual(len(content1['sha256s']), len(content2['sha256s']))
self.assertEqual(set(content1['sha256s']), set(content2['sha256s']))
@mock_s3
def test_backup_delta_two_objects_change(self):
volume_id = '30dab288-265a-4583-9abe-000000d42c67'
self.flags(backup_s3_object_size=8 * units.Ki)
self.flags(backup_s3_block_size=units.Ki)
backup = self._create_backup_db_entry(volume_id=volume_id)
service1 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service1.backup(backup, self.volume_file)
# Create incremental backup after changing the contents of two objects
self.volume_file.seek(2 * 8 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
self.volume_file.seek(4 * 8 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
deltabackup = self._create_backup_db_entry(volume_id=volume_id,
container=None,
parent_id=backup.id)
service2 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service2.backup(deltabackup, self.volume_file)
content1 = service1._read_sha256file(backup)
content2 = service2._read_sha256file(deltabackup)
# Verify that two shas are changed at index 16 and 32
self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
self.assertNotEqual(content1['sha256s'][32], content2['sha256s'][32])
@mock_s3
def test_backup_delta_two_blocks_in_object_change(self):
volume_id = 'b943e84f-aa67-4331-9ab2-000000cf19ba'
self.flags(backup_s3_object_size=8 * units.Ki)
self.flags(backup_s3_block_size=units.Ki)
backup = self._create_backup_db_entry(volume_id=volume_id)
service1 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service1.backup(backup, self.volume_file)
# Create incremental backup after changing two blocks within one object
self.volume_file.seek(16 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
self.volume_file.seek(20 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
deltabackup = self._create_backup_db_entry(volume_id=volume_id,
container=None,
parent_id=backup.id)
service2 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service2.backup(deltabackup, self.volume_file)
# Verify that two shas are changed at index 16 and 20
content1 = service1._read_sha256file(backup)
content2 = service2._read_sha256file(deltabackup)
self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])
@mock_s3
@mock.patch('cinder.backup.drivers.s3.S3BackupDriver.'
'_send_progress_end')
@mock.patch('cinder.backup.drivers.s3.S3BackupDriver.'
'_send_progress_notification')
def test_backup_default_container_notify(self, _send_progress,
_send_progress_end):
volume_id = '87dd0eed-2598-4ebd-8ebb-000000ac578a'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=None)
# If the backup_object_number_per_notification is set to 1,
# the _send_progress method will be called for sure.
s3_dr.CONF.set_override("backup_object_number_per_notification", 1)
s3_dr.CONF.set_override("backup_s3_enable_progress_timer", False)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
# If the backup_object_number_per_notification is increased to
# another value, the _send_progress method will not be called.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
s3_dr.CONF.set_override("backup_object_number_per_notification",
10)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertFalse(_send_progress.called)
self.assertTrue(_send_progress_end.called)
# If the timer is enabled, the _send_progress will be called,
# since the timer can trigger the progress notification.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
s3_dr.CONF.set_override("backup_object_number_per_notification",
10)
s3_dr.CONF.set_override("backup_s3_enable_progress_timer", True)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
@mock_s3
@mock.patch.object(s3_dr.S3BackupDriver, '_backup_metadata',
fake_backup_metadata)
def test_backup_backup_metadata_fail(self):
"""Test of when an exception occurs in backup().
In backup(), after an exception occurs in
self._backup_metadata(), we want to check the process of an
exception handler.
"""
volume_id = '020d9142-339c-4876-a445-000000f1520c'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
# We expect the exception to be raised directly.
self.assertRaises(exception.BackupDriverException,
service.backup,
backup, self.volume_file)
@mock_s3
@mock.patch.object(s3_dr.S3BackupDriver, '_backup_metadata',
fake_backup_metadata)
@mock.patch.object(s3_dr.S3BackupDriver, 'delete_backup',
fake_delete)
def test_backup_backup_metadata_fail2(self):
"""Test of when an exception occurs in an exception handler.
In backup(), after an exception occurs in
self._backup_metadata(), we want to check the process when the
second exception occurs in self.delete_backup().
"""
volume_id = '2164421d-f181-4db7-b9bd-000000eeb628'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
# We expect the second exception to be raised.
self.assertRaises(exception.BackupOperationError,
service.backup,
backup, self.volume_file)
@mock_s3
def test_delete(self):
volume_id = '9ab256c8-3175-4ad8-baa1-0000007f9d31'
object_prefix = 'test_prefix'
backup = self._create_backup_db_entry(volume_id=volume_id,
service_metadata=object_prefix)
service = s3_dr.S3BackupDriver(self.ctxt)
service.delete_backup(backup)
@mock_s3
@mock.patch.object(s3_dr.S3BackupDriver, 'delete_object',
_fake_delete_object)
def test_delete_without_object_prefix(self):
volume_id = 'ee30d649-72a6-49a5-b78d-000000edb6b1'
backup = self._create_backup_db_entry(volume_id=volume_id)
service = s3_dr.S3BackupDriver(self.ctxt)
service.delete_backup(backup)
@mock_s3
def test_get_compressor(self):
service = s3_dr.S3BackupDriver(self.ctxt)
compressor = service._get_compressor('None')
self.assertIsNone(compressor)
compressor = service._get_compressor('zlib')
self.assertEqual(zlib, compressor)
self.assertIsInstance(compressor, tpool.Proxy)
compressor = service._get_compressor('bz2')
self.assertEqual(bz2, compressor)
self.assertIsInstance(compressor, tpool.Proxy)
self.assertRaises(ValueError, service._get_compressor, 'fake')
@mock_s3
def test_prepare_output_data_effective_compression(self):
"""Test compression works on a native thread."""
# Use dictionary to share data between threads
thread_dict = {}
original_compress = zlib.compress
def my_compress(data):
thread_dict['compress'] = threading.current_thread()
return original_compress(data)
self.mock_object(zlib, 'compress', side_effect=my_compress)
service = s3_dr.S3BackupDriver(self.ctxt)
# Set up buffer of 128 zeroed bytes
fake_data = b'\0' * 128
result = service._prepare_output_data(fake_data)
self.assertEqual('zlib', result[0])
self.assertGreater(len(fake_data), len(result[1]))
self.assertNotEqual(threading.current_thread(),
thread_dict['compress'])
@mock_s3
def test_prepare_output_data_no_compression(self):
self.flags(backup_compression_algorithm='none')
service = s3_dr.S3BackupDriver(self.ctxt)
# Set up buffer of 128 zeroed bytes
fake_data = b'\0' * 128
result = service._prepare_output_data(fake_data)
self.assertEqual('none', result[0])
self.assertEqual(fake_data, result[1])
@mock_s3
def test_prepare_output_data_ineffective_compression(self):
service = s3_dr.S3BackupDriver(self.ctxt)
# Set up buffer of 128 zeroed bytes
fake_data = b'\0' * 128
# Pre-compress so that compression in the driver will be ineffective.
already_compressed_data = service.compressor.compress(fake_data)
result = service._prepare_output_data(already_compressed_data)
self.assertEqual('none', result[0])
self.assertEqual(already_compressed_data, result[1])
@mock_s3
def test_no_config_option(self):
# With no connection config options set, the driver should raise an exception.
self.flags(backup_s3_endpoint_url=None)
self.flags(backup_s3_store_access_key=None)
self.flags(backup_s3_store_secret_key=None)
self.assertRaises(exception.InvalidConfigurationValue,
s3_dr.S3BackupDriver.check_for_setup_error,
self)
@s3_client
def test_create_backup_fail(self):
volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec3'
container_name = 's3_api_failure'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
self.assertRaises(s3_dr.S3ClientError,
service.backup,
backup, self.volume_file)
@s3_client
def test_create_backup_fail2(self):
volume_id = '2a59c20e-0b79-4f57-aa63-5be208df48f6'
container_name = 's3_connection_error'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
self.assertRaises(s3_dr.S3ConnectionFailure,
service.backup,
backup, self.volume_file)
@mock_s3
def test_restore(self):
volume_id = 'c2a81f09-f480-4325-8424-00000071685b'
backup = self._create_backup_db_entry(
volume_id=volume_id,
status=objects.fields.BackupStatus.RESTORING)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as volume_file:
service.restore(backup, volume_id, volume_file)
@mock_s3
def test_restore_delta(self):
volume_id = '04d83506-bcf7-4ff5-9c65-00000051bd2e'
self.flags(backup_s3_object_size=8 * units.Ki)
self.flags(backup_s3_block_size=units.Ki)
backup = self._create_backup_db_entry(volume_id=volume_id)
service1 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service1.backup(backup, self.volume_file)
# Create incremental backup after changing the volume contents
self.volume_file.seek(16 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
self.volume_file.seek(20 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
deltabackup = self._create_backup_db_entry(
volume_id=volume_id,
status=objects.fields.BackupStatus.RESTORING,
parent_id=backup.id)
self.volume_file.seek(0)
service2 = s3_dr.S3BackupDriver(self.ctxt)
service2.backup(deltabackup, self.volume_file, True)
with tempfile.NamedTemporaryFile() as restored_file:
service2.restore(deltabackup, volume_id,
restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
@s3_client
def test_restore_fail(self):
volume_id = '651496c7-0d8b-45f3-bfe8-9ef6ad30910f'
container_name = 's3_api_failure'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = s3_dr.S3BackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
self.assertRaises(s3_dr.S3ClientError,
service.restore,
backup, volume_id, volume_file)
@s3_client
def test_restore_fail2(self):
volume_id = '87f3f2c2-1a79-48c1-9d98-47c4cab7bf00'
container_name = 's3_connection_error'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = s3_dr.S3BackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
self.assertRaises(s3_dr.S3ConnectionFailure,
service.restore,
backup, volume_id, volume_file)
@mock_s3
def test_backup_md5_validation(self):
volume_id = 'c0a79eb2-ef56-4de2-b3b9-3861fcdf7fad'
self.flags(backup_s3_md5_validation=True)
backup = self._create_backup_db_entry(volume_id=volume_id)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
@mock_s3
def test_backup_sse(self):
volume_id = 'c0a79eb2-ef56-4de2-b3b9-3861fcdf7fad'
self.flags(backup_s3_sse_customer_algorithm='AES256')
self.flags(backup_s3_sse_customer_key='sse_key')
backup = self._create_backup_db_entry(volume_id=volume_id)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
@mock_s3
def test_restore_sse(self):
volume_id = 'c0a79eb2-ef56-4de2-b3b9-3861fcdf7fad'
self.flags(backup_s3_sse_customer_algorithm='AES256')
self.flags(backup_s3_sse_customer_key='sse_key')
backup = self._create_backup_db_entry(
volume_id=volume_id,
status=objects.fields.BackupStatus.RESTORING)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as volume_file:
service.restore(backup, volume_id, volume_file)


@@ -0,0 +1,67 @@
# Copyright (C) 2020 leafcloud b.v.
# Copyright (C) 2020 FUJITSU LIMITED
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from botocore.exceptions import ClientError
from botocore.exceptions import ConnectionError
class FakeS3Boto3(object):
"""Logs calls instead of executing."""
def __init__(self, *args, **kwargs):
pass
@classmethod
def Client(cls, *args, **kargs):
return FakeBoto3Client()
class FakeBoto3Client(object):
"""Logging calls instead of executing."""
def __init__(self, *args, **kwargs):
pass
def list_objects(self, *args, **kwargs):
return {u'Contents': [{u'Key': u'backup_001'},
{u'Key': u'backup_002'},
{u'Key': u'backup_003'}]}
def list_buckets(self, *args, **kwargs):
return {u'Buckets': [{u'Name': u's3cinderbucket'},
{u'Name': u's3bucket'}]}
def head_bucket(self, *args, **kwargs):
pass
def get_object(self, Bucket, *args, **kwargs):
if Bucket == 's3_api_failure':
raise ClientError(
error_response={
'Error': {'Code': 'MyCode', 'Message': 'MyMessage'}},
operation_name='myoperation')
if Bucket == 's3_connection_error':
raise ConnectionError(error='MyMessage')
def create_bucket(self, *args, **kwargs):
pass
def put_object(self, Bucket, *args, **kwargs):
if Bucket == 's3_api_failure':
raise ClientError(
error_response={
'Error': {'Code': 'MyCode', 'Message': 'MyMessage'}},
operation_name='myoperation')
if Bucket == 's3_connection_error':
raise ConnectionError(error='MyMessage')


@@ -13,6 +13,7 @@ Backup drivers
backup/posix-backup-driver.rst
backup/swift-backup-driver.rst
backup/gcs-backup-driver.rst
+backup/s3-backup-driver.rst
This section describes how to configure the cinder-backup service and
its drivers.


@@ -0,0 +1,20 @@
========================
S3 Storage backup driver
========================
The S3 backup driver backs up volumes to Amazon S3 and other
S3-compatible object storage.
To enable the S3 backup driver, include the following option
in the ``cinder.conf`` file:
.. code-block:: ini
backup_driver = cinder.backup.drivers.s3.S3BackupDriver
The following configuration options are available for the S3 backup driver.
.. config-table::
:config-target: S3 backup driver
cinder.backup.drivers.s3
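A minimal example configuration is sketched below; the endpoint and credentials are placeholders and must be replaced with values for your S3-compatible storage:

.. code-block:: ini

   [DEFAULT]
   backup_driver = cinder.backup.drivers.s3.S3BackupDriver
   backup_s3_endpoint_url = http://s3.example.com:9000
   backup_s3_store_bucket = volumebackups
   backup_s3_store_access_key = <access key>
   backup_s3_store_secret_key = <secret key>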


@@ -2,6 +2,7 @@ alembic==1.4.2
amqp==2.6.1
automaton==2.2.0
bcrypt==3.2.0
+boto3==1.16.51
cachetools==4.1.1
castellan==3.6.0
certifi==2020.6.20
@@ -44,6 +45,7 @@ linecache2==1.0.0
lxml==4.5.2
Mako==1.1.3
MarkupSafe==1.1.1
+moto==1.3.15
msgpack==1.0.0
mypy==0.782
netaddr==0.8.0


@@ -0,0 +1,7 @@
---
features:
- |
Added a new backup driver that enables backing up Cinder volumes to S3-compatible storage.
See the reference `S3 backup driver
<https://docs.openstack.org/cinder/latest/configuration/block-storage/backup/s3-backup-driver.html>`_
for more information.


@@ -64,3 +64,4 @@ castellan>=3.6.0 # Apache-2.0
cryptography>=3.1 # BSD/Apache-2.0
cursive>=0.2.2 # Apache-2.0
zstd>=1.4.5.1 # BSD
+boto3>=1.16.51 # Apache-2.0


@@ -25,3 +25,4 @@ reno>=3.2.0 # Apache-2.0
Pygments>=2.6.1 # BSD license
mypy>=0.782 # MIT
+moto>=1.3.15 # Apache-2.0