Ramp up rbd resize to avoid excessive calls

Change the RBD store to resize the image by up to 8GiB
at a time so that we do not resize on every write.

After all the data has been written, trim the image in Ceph
down to its actual size in case the resize overshot.

Partial-Bug: #1792710
Related-to: spec-lite-Ceph-Store-Optimization

Change-Id: I7f0bffda222b663d4316c5d6c03fdbd0d3337035
(cherry picked from commit c43f19e845)
This commit is contained in:
Erno Kuvaja 2020-08-17 16:35:03 +01:00 committed by Abhishek Kekane
parent bbd97858d9
commit ca0c58b527
2 changed files with 88 additions and 16 deletions

View File

@ -1,4 +1,5 @@
# Copyright 2010-2011 Josh Durgin # Copyright 2010-2011 Josh Durgin
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved. # All Rights Reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -32,7 +33,7 @@ from glance_store import capabilities
from glance_store.common import utils from glance_store.common import utils
from glance_store import driver from glance_store import driver
from glance_store import exceptions from glance_store import exceptions
from glance_store.i18n import _, _LE, _LI from glance_store.i18n import _, _LE, _LI, _LW
from glance_store import location from glance_store import location
try: try:
@ -327,6 +328,8 @@ class Store(driver.Store):
reason=reason) reason=reason)
if self.backend_group: if self.backend_group:
self._set_url_prefix() self._set_url_prefix()
self.size = 0
self.resize_amount = self.WRITE_CHUNKSIZE
def _set_url_prefix(self): def _set_url_prefix(self):
fsid = None fsid = None
@ -470,6 +473,18 @@ class Store(driver.Store):
# Such exception is not dangerous for us so it will be just logged # Such exception is not dangerous for us so it will be just logged
LOG.debug("Snapshot %s is unprotected already" % snap_name) LOG.debug("Snapshot %s is unprotected already" % snap_name)
def _resize_on_write(self, image, image_size, bytes_written, chunk_length):
    """Grow the rbd image ahead of a chunk write when needed.

    When the caller did not declare the image size up front
    (``image_size == 0``) the rbd image starts empty and must be
    resized before data can land.  Rather than resizing once per
    chunk, the image is grown in steps that double on every resize,
    capped at 8 GiB per step.

    :param image: open rbd image being written to
    :param image_size: size declared by the caller; non-zero means the
        image was created at full size and no resize is ever needed
    :param bytes_written: number of bytes written to the image so far
    :param chunk_length: size of the chunk about to be written
    :returns: the (possibly unchanged) current size of the rbd image
    """
    # No work when the size was known up front, or when the next
    # chunk still fits inside the space allocated so far.
    if image_size != 0 or bytes_written + chunk_length <= self.size:
        return self.size

    new_size = self.size + self.resize_amount
    LOG.debug("resizing image to %s KiB" % (new_size / units.Ki))
    image.resize(new_size)
    # Double the growth step after each resize, up to 8 GiB, so big
    # streamed uploads only trigger a handful of resize calls.
    self.resize_amount = min(self.resize_amount * 2, 8 * units.Gi)
    return new_size
@driver.back_compat_add @driver.back_compat_add
@capabilities.check @capabilities.check
def add(self, image_id, image_file, image_size, hashing_algo, context=None, def add(self, image_id, image_file, image_size, hashing_algo, context=None,
@ -516,9 +531,9 @@ class Store(driver.Store):
LOG.debug('creating image %s with order %d and size %d', LOG.debug('creating image %s with order %d and size %d',
image_name, order, image_size) image_name, order, image_size)
if image_size == 0: if image_size == 0:
LOG.warning(_("since image size is zero we will be doing " LOG.warning(_LW("Since image size is zero we will be "
"resize-before-write for each chunk which " "doing resize-before-write which will be "
"will be considerably slower than normal")) "slower than normal"))
try: try:
loc = self._create_image(fsid, conn, ioctx, image_name, loc = self._create_image(fsid, conn, ioctx, image_name,
@ -534,24 +549,27 @@ class Store(driver.Store):
chunks = utils.chunkreadable(image_file, chunks = utils.chunkreadable(image_file,
self.WRITE_CHUNKSIZE) self.WRITE_CHUNKSIZE)
for chunk in chunks: for chunk in chunks:
# If the image size provided is zero we need to do # NOTE(jokke): If we don't know image size we need
# a resize for the amount we are writing. This will # to resize it on write. The resize amount will
# be slower so setting a higher chunk size may # ramp up to 8 gigs.
# speed things up a bit. chunk_length = len(chunk)
if image_size == 0: self.size = self._resize_on_write(image,
chunk_length = len(chunk) image_size,
length = offset + chunk_length bytes_written,
bytes_written += chunk_length chunk_length)
LOG.debug(_("resizing image to %s KiB") %
(length / units.Ki))
image.resize(length)
LOG.debug(_("writing chunk at offset %s") % LOG.debug(_("writing chunk at offset %s") %
(offset)) (offset))
offset += image.write(chunk, offset) offset += image.write(chunk, offset)
bytes_written += chunk_length
os_hash_value.update(chunk) os_hash_value.update(chunk)
checksum.update(chunk) checksum.update(chunk)
if verifier: if verifier:
verifier.update(chunk) verifier.update(chunk)
# Lets trim the image in case we overshoot with resize
if image_size == 0:
image.resize(bytes_written)
if loc.snapshot: if loc.snapshot:
image.create_snap(loc.snapshot) image.create_snap(loc.snapshot)
image.protect_snap(loc.snapshot) image.protect_snap(loc.snapshot)

View File

@ -124,7 +124,7 @@ class MockRBD(object):
raise NotImplementedError() raise NotImplementedError()
def resize(self, *args, **kwargs): def resize(self, *args, **kwargs):
raise NotImplementedError() pass
def discard(self, offset, length): def discard(self, offset, length):
raise NotImplementedError() raise NotImplementedError()
@ -167,6 +167,60 @@ class MockRBD(object):
RBD_FEATURE_LAYERING = 1 RBD_FEATURE_LAYERING = 1
class TestReSize(base.StoreBaseTest,
                 test_store_capabilities.TestStoreCapabilitiesChecking):
    """Exercise the ramped-up resize behaviour of the rbd store."""

    def setUp(self):
        """Establish a clean test environment."""
        super(TestReSize, self).setUp()
        # Swap the real ceph bindings for in-process mocks.
        rbd_store.rados = MockRados
        rbd_store.rbd = MockRBD
        self.store = rbd_store.Store(self.conf)
        self.store.configure()
        self.store_specs = {'pool': 'fake_pool',
                            'image': 'fake_image',
                            'snapshot': 'fake_snapshot'}
        self.location = rbd_store.StoreLocation(self.store_specs,
                                                self.conf)
        self.hash_algo = 'sha256'

    def test_add_w_image_size_zero_less_resizes(self):
        """Assert that correct size is returned even though 0 was provided."""
        # TODO(jokke): use the FakeData iterator once it exists.
        payload_len = 57 * units.Mi
        payload = six.BytesIO(b'*' * payload_len)
        with mock.patch.object(rbd_store.rbd.Image, 'resize') as resize:
            with mock.patch.object(rbd_store.rbd.Image, 'write') as write:
                ret = self.store.add(
                    'fake_image_id', payload, 0, self.hash_algo)

                # Reconstruct the doubling resize schedule: each resize
                # grows the image by a step that doubles every time.
                step = self.store.WRITE_CHUNKSIZE
                covered = 0
                expected_calls = []
                while covered < payload_len:
                    expected_calls.append(covered + step)
                    covered += step
                    step *= 2
                # One extra resize call for the final trim to the
                # actual size, hence the +1.
                self.assertEqual(len(expected_calls) + 1,
                                 resize.call_count)
                resize.assert_has_calls([mock.call(size) for size in
                                         expected_calls])

                # Every write should be a full chunk except the last.
                full_chunks = int(payload_len /
                                  self.store.WRITE_CHUNKSIZE)
                expected_writes = ([self.store.WRITE_CHUNKSIZE] *
                                   full_chunks +
                                   [payload_len %
                                    self.store.WRITE_CHUNKSIZE])
                actual_writes = [len(args[0]) for args, kwargs in
                                 write.call_args_list]
                self.assertEqual(expected_writes, actual_writes)

                # The last resize trims the image to the exact size.
                self.assertEqual(payload_len,
                                 resize.call_args_list[-1][0][0])
                self.assertEqual(payload_len, ret[1])
class TestStore(base.StoreBaseTest, class TestStore(base.StoreBaseTest,
test_store_capabilities.TestStoreCapabilitiesChecking): test_store_capabilities.TestStoreCapabilitiesChecking):