Merge "Support modern compression algorithms in cinder backup"
commit e19f4dc02c
@@ -53,7 +53,8 @@ backup_opts = [
                ignore_case=True,
                choices=['none', 'off', 'no',
                         'zlib', 'gzip',
-                        'bz2', 'bzip2'],
+                        'bz2', 'bzip2',
+                        'zstd'],
                help='Compression algorithm ("none" to disable)'),
 ]
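For context, the option this hunk extends reads as follows after the change; this is a sketch assembled from the visible context lines, with the option name taken from the tests further down and the default of 'zlib' an assumption, since both sit outside the hunk:

    from oslo_config import cfg

    backup_opts = [
        cfg.StrOpt('backup_compression_algorithm',
                   default='zlib',  # assumed; the default is outside this hunk
                   ignore_case=True,
                   choices=['none', 'off', 'no',
                            'zlib', 'gzip',
                            'bz2', 'bzip2',
                            'zstd'],
                   help='Compression algorithm ("none" to disable)'),
    ]

With choices set, oslo.config rejects any other value when the option is read, so a typo such as 'zsdt' fails fast instead of silently disabling compression.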
@@ -92,6 +93,9 @@ class ChunkedBackupDriver(driver.BackupDriver):
         elif algorithm.lower() in ('bz2', 'bzip2'):
             import bz2 as compressor
             result = compressor
+        elif algorithm.lower() == 'zstd':
+            import zstd as compressor
+            result = compressor
         else:
             result = None
         if result:
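For reference, a minimal standalone sketch of the lookup this hunk extends. The tpool.Proxy wrapping is inferred from the tests further down, which assert that the returned module is wrapped in a proxy (compression would otherwise starve eventlet greenthreads); the real driver method differs in detail:

    from eventlet import tpool

    def get_compressor(algorithm):
        # Resolve a compression module by name; None disables compression.
        algorithm = algorithm.lower()
        if algorithm in ('none', 'off', 'no'):
            return None
        elif algorithm in ('zlib', 'gzip'):
            import zlib as compressor
            result = compressor
        elif algorithm in ('bz2', 'bzip2'):
            import bz2 as compressor
            result = compressor
        elif algorithm == 'zstd':
            import zstd as compressor
            result = compressor
        else:
            raise ValueError('unsupported compression algorithm: %s' % algorithm)
        # Run compress/decompress calls in a native thread via the proxy.
        return tpool.Proxy(result)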
@@ -28,6 +28,7 @@ import zlib

 from eventlet import tpool
 from oslo_utils import units
+import zstd

 from cinder.backup.drivers import gcs as google_dr
 from cinder import context
@@ -220,6 +221,15 @@ class GoogleBackupDriverTestCase(test.TestCase):
         self._write_effective_compression_file(self.size_volume_file)
         service.backup(backup, self.volume_file)

+    @gcs_client
+    def test_backup_zstd(self):
+        volume_id = '471910a0-a197-4259-9c50-0fc3d6a07dbc'
+        backup = self._create_backup_db_entry(volume_id=volume_id)
+        self.flags(backup_compression_algorithm='zstd')
+        service = google_dr.GoogleBackupDriver(self.ctxt)
+        self._write_effective_compression_file(self.size_volume_file)
+        service.backup(backup, self.volume_file)
+
     @gcs_client
     def test_backup_default_container(self):
         volume_id = '9552017f-c8b9-4e4e-a876-00000053349c'
@@ -572,6 +582,9 @@ class GoogleBackupDriverTestCase(test.TestCase):
         compressor = service._get_compressor('bz2')
         self.assertEqual(bz2, compressor)
         self.assertIsInstance(compressor, tpool.Proxy)
+        compressor = service._get_compressor('zstd')
+        self.assertEqual(zstd, compressor)
+        self.assertIsInstance(compressor, tpool.Proxy)
         self.assertRaises(ValueError, service._get_compressor, 'fake')

     @gcs_client
@@ -31,6 +31,7 @@ from os_brick import exception as brick_exception
 from os_brick.remotefs import remotefs as remotefs_brick
 from oslo_config import cfg
 import six
+import zstd

 from cinder.backup.drivers import nfs
 from cinder import context
@@ -287,6 +288,15 @@ class BackupNFSTestCase(test.TestCase):
         backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
         service.backup(backup, self.volume_file)

+    def test_backup_zstd(self):
+        volume_id = fake.VOLUME_ID
+        self._create_backup_db_entry(volume_id=volume_id)
+        self.flags(backup_compression_algorithm='zstd')
+        service = nfs.NFSBackupDriver(self.ctxt)
+        self._write_effective_compression_file(self.size_volume_file)
+        backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
+        service.backup(backup, self.volume_file)
+
     def test_backup_default_container(self):
         volume_id = fake.VOLUME_ID
         self._create_backup_db_entry(volume_id=volume_id,
@@ -745,6 +755,32 @@ class BackupNFSTestCase(test.TestCase):
         self.assertNotEqual(threading.current_thread(),
                             self.thread_dict['thread'])

+    def test_restore_zstd(self):
+        self.thread_original_method = zstd.decompress
+        self.mock_object(zstd, 'decompress', side_effect=self._store_thread)
+        volume_id = fake.VOLUME_ID
+
+        self._create_backup_db_entry(volume_id=volume_id)
+        self.flags(backup_compression_algorithm='zstd')
+        file_size = 1024 * 3
+        self.flags(backup_file_size=file_size)
+        self.flags(backup_sha_block_size_bytes=1024)
+        service = nfs.NFSBackupDriver(self.ctxt)
+        self._write_effective_compression_file(file_size)
+        backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
+        backup.status = objects.fields.BackupStatus.RESTORING
+        backup.save()
+        service.backup(backup, self.volume_file)
+
+        with tempfile.NamedTemporaryFile() as restored_file:
+            backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
+            service.restore(backup, volume_id, restored_file)
+            self.assertTrue(filecmp.cmp(self.volume_file.name,
+                                        restored_file.name))
+
+        self.assertNotEqual(threading.current_thread(),
+                            self.thread_dict['thread'])
+
     def test_restore_abort_delta(self):
         volume_id = fake.VOLUME_ID
         count = set()
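The restore test above patches zstd.decompress with mock_object, which implies the module-level one-shot API of the zstd package added to the requirements files below. Assuming that package, a roundtrip is simply:

    import zstd

    data = b'volume-chunk' * 256
    blob = zstd.compress(data)            # one-shot compress
    assert zstd.decompress(blob) == data  # one-shot decompress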
@@ -877,6 +913,9 @@ class BackupNFSTestCase(test.TestCase):
         compressor = service._get_compressor('bz2')
         self.assertEqual(compressor, bz2)
         self.assertIsInstance(compressor, tpool.Proxy)
+        compressor = service._get_compressor('zstd')
+        self.assertEqual(zstd, compressor)
+        self.assertIsInstance(compressor, tpool.Proxy)
         self.assertRaises(ValueError, service._get_compressor, 'fake')

     def create_buffer(self, size):
@@ -28,6 +28,7 @@ import ddt
 from eventlet import tpool
 from oslo_config import cfg
 from swiftclient import client as swift
+import zstd

 from cinder.backup import chunkeddriver
 from cinder.backup.drivers import swift as swift_dr
@@ -334,6 +335,15 @@ class BackupSwiftTestCase(test.TestCase):
         backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
         service.backup(backup, self.volume_file)

+    def test_backup_zstd(self):
+        volume_id = '471910a0-a197-4259-9c50-0fc3d6a07dbc'
+        self._create_backup_db_entry(volume_id=volume_id)
+        self.flags(backup_compression_algorithm='zstd')
+        service = swift_dr.SwiftBackupDriver(self.ctxt)
+        self._write_effective_compression_file(self.size_volume_file)
+        backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
+        service.backup(backup, self.volume_file)
+
     @mock.patch.object(db, 'backup_update', wraps=db.backup_update)
     def test_backup_default_container(self, backup_update_mock):
         volume_id = '9552017f-c8b9-4e4e-a876-00000053349c'
@@ -839,6 +849,9 @@ class BackupSwiftTestCase(test.TestCase):
         compressor = service._get_compressor('bz2')
         self.assertEqual(bz2, compressor)
         self.assertIsInstance(compressor, tpool.Proxy)
+        compressor = service._get_compressor('zstd')
+        self.assertEqual(zstd, compressor)
+        self.assertIsInstance(compressor, tpool.Proxy)
         self.assertRaises(ValueError, service._get_compressor, 'fake')

     def test_prepare_output_data_effective_compression(self):
@@ -208,6 +208,9 @@ class ChunkedDriverTestCase(test.TestCase):
         for algo in ['bz2', 'bzip2']:
             self.assertTrue('bz' in str(self.driver._get_compressor(algo)))

+    def test_get_compressor_zstd(self):
+        self.assertTrue('zstd' in str(self.driver._get_compressor('zstd')))
+
     def test_get_compressor_invalid(self):
         self.assertRaises(ValueError, self.driver._get_compressor, 'winzip')
@@ -133,10 +133,10 @@ appropriate for your environment:
    backup_sha_block_size_bytes = 32768
    backup_file_size = 1999994880

-The option ``backup_compression_algorithm`` can be set to ``bz2`` or ``none``.
-The latter can be a useful setting when the server providing the share for the
-backup repository itself performs deduplication or compression on the backup
-data.
+The option ``backup_compression_algorithm`` can be set to ``zlib``, ``bz2``,
+``zstd`` or ``none``. The value ``none`` can be a useful setting when the
+server providing the share for the backup repository itself performs
+deduplication or compression on the backup data.

 The option ``backup_file_size`` must be a multiple of
 ``backup_sha_block_size_bytes``. It is effectively the maximum file size to be
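Putting the documented options together, an illustrative cinder.conf fragment selecting the new algorithm (values copied from the excerpt above; tune them for your environment):

    [DEFAULT]
    backup_compression_algorithm = zstd
    backup_sha_block_size_bytes = 32768
    backup_file_size = 1999994880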
@@ -162,3 +162,4 @@ rsd-lib==1.1.0
 storpool==4.0.0
 storpool.spopenstack==2.2.1
 dfs_sdk==1.2.25
+zstd==1.4.5.0
@@ -63,3 +63,4 @@ google-api-python-client>=1.4.2 # Apache-2.0
 castellan>=1.3.0 # Apache-2.0
 cryptography>=2.1.4 # BSD/Apache-2.0
 cursive>=0.2.1 # Apache-2.0
+zstd>=1.4.5.0 # BSD