Ramp up rbd resize to avoid excessive calls

Change the RBD store to resize the image by up to 8 GiB at a
time so that it does not resize on every write.

Once all data has been written, trim the image in Ceph to the
actual size in case the resize overshot.
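
As a rough illustration of the new schedule (a sketch assuming the
default 8 MiB rbd_store_chunk_size, not any deployment-specific
value), the allocation now grows geometrically instead of once per
chunk:

    # Simulate the ramp-up for an image of unknown size.
    from oslo_utils import units

    size, step = 0, 8 * units.Mi       # step starts at WRITE_CHUNKSIZE
    schedule = []
    while size < units.Gi:             # e.g. streaming ~1 GiB of data
        size += step
        schedule.append(size // units.Mi)
        step = min(step * 2, 8 * units.Gi)
    # schedule == [8, 24, 56, 120, 248, 504, 1016, 2040] MiB, then one
    # final trim down to the true byte count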

Partial-Bug: #1792710
Related-to: spec-lite-Ceph-Store-Optimization

Change-Id: I7f0bffda222b663d4316c5d6c03fdbd0d3337035
Erno Kuvaja 2020-08-17 16:35:03 +01:00 committed by Dan Smith
parent 3b71ce2d47
commit c43f19e845
2 changed files with 88 additions and 16 deletions

glance_store/_drivers/rbd.py

@@ -1,4 +1,5 @@
# Copyright 2010-2011 Josh Durgin
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -30,7 +31,7 @@ from glance_store import capabilities
from glance_store.common import utils
from glance_store import driver
from glance_store import exceptions
from glance_store.i18n import _, _LE, _LI
from glance_store.i18n import _, _LE, _LI, _LW
from glance_store import location
try:
@@ -325,6 +326,8 @@ class Store(driver.Store):
reason=reason)
if self.backend_group:
self._set_url_prefix()
self.size = 0
self.resize_amount = self.WRITE_CHUNKSIZE
def _set_url_prefix(self):
fsid = None
@@ -468,6 +471,18 @@ class Store(driver.Store):
# Such exception is not dangerous for us so it will be just logged
LOG.debug("Snapshot %s is unprotected already" % snap_name)
def _resize_on_write(self, image, image_size, bytes_written, chunk_length):
"""Handle the rbd resize when needed."""
if image_size != 0 or self.size >= bytes_written + chunk_length:
return self.size
new_size = self.size + self.resize_amount
LOG.debug("resizing image to %s KiB" % (new_size / units.Ki))
image.resize(new_size)
# NOTE(jokke): We double how much we grow the image each time
# up to 8 GiB to avoid resizing for each write on bigger images
self.resize_amount = min(self.resize_amount * 2, 8 * units.Gi)
return new_size
@driver.back_compat_add
@capabilities.check
def add(self, image_id, image_file, image_size, hashing_algo, context=None,
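
The helper above is easy to sanity-check in isolation. A minimal
standalone simulation (an illustrative sketch, not the driver itself;
WRITE_CHUNKSIZE is assumed at its 8 MiB default) counts how many
resizes a 57 MiB upload of unknown size now triggers:

    from oslo_utils import units

    CHUNK = 8 * units.Mi                    # assumed WRITE_CHUNKSIZE
    size, step, resizes, written = 0, CHUNK, 0, 0
    for chunk_length in [CHUNK] * 7 + [units.Mi]:  # 57 MiB in total
        # image_size == 0 path: grow only when the next chunk won't fit
        if size < written + chunk_length:
            size += step
            step = min(step * 2, 8 * units.Gi)
            resizes += 1
        written += chunk_length
    print(resizes, size // units.Mi)        # 4 resizes, image at 120 MiB
    # The old code resized once per chunk: 8 calls for the same upload.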
@@ -514,9 +529,9 @@
LOG.debug('creating image %s with order %d and size %d',
image_name, order, image_size)
if image_size == 0:
LOG.warning(_("since image size is zero we will be doing "
"resize-before-write for each chunk which "
"will be considerably slower than normal"))
LOG.warning(_LW("Since image size is zero we will be "
"doing resize-before-write which will be "
"slower than normal"))
try:
loc = self._create_image(fsid, conn, ioctx, image_name,
@@ -532,24 +547,27 @@
chunks = utils.chunkreadable(image_file,
self.WRITE_CHUNKSIZE)
for chunk in chunks:
# If the image size provided is zero we need to do
# a resize for the amount we are writing. This will
# be slower so setting a higher chunk size may
# speed things up a bit.
if image_size == 0:
chunk_length = len(chunk)
length = offset + chunk_length
bytes_written += chunk_length
LOG.debug(_("resizing image to %s KiB") %
(length / units.Ki))
image.resize(length)
# NOTE(jokke): If we don't know image size we need
# to resize it on write. The resize amount will
# ramp up to 8 GiB.
chunk_length = len(chunk)
self.size = self._resize_on_write(image,
image_size,
bytes_written,
chunk_length)
LOG.debug(_("writing chunk at offset %s") %
(offset))
offset += image.write(chunk, offset)
bytes_written += chunk_length
os_hash_value.update(chunk)
checksum.update(chunk)
if verifier:
verifier.update(chunk)
# Let's trim the image in case we overshot with the resize
if image_size == 0:
image.resize(bytes_written)
if loc.snapshot:
image.create_snap(loc.snapshot)
image.protect_snap(loc.snapshot)
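
Because the increments double, the last grow can overshoot by almost a
full step: in the 57 MiB example above the image ends up allocated at
120 MiB, and the closing image.resize(bytes_written) trims the unused
63 MiB back off so the stored image matches the bytes written.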

glance_store/tests/unit/test_rbd_store.py

@@ -125,7 +125,7 @@ class MockRBD(object):
raise NotImplementedError()
def resize(self, *args, **kwargs):
raise NotImplementedError()
pass
def discard(self, offset, length):
raise NotImplementedError()
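
MockRBD.resize() becomes a no-op rather than raising because every
add() of an unknown-size image now calls it; tests that care about the
call sequence, like TestReSize below, still patch it with
mock.patch.object to assert the exact sizes requested.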
@@ -168,6 +168,60 @@ class MockRBD(object):
RBD_FEATURE_LAYERING = 1
class TestReSize(base.StoreBaseTest,
test_store_capabilities.TestStoreCapabilitiesChecking):
def setUp(self):
"""Establish a clean test environment."""
super(TestReSize, self).setUp()
rbd_store.rados = MockRados
rbd_store.rbd = MockRBD
self.store = rbd_store.Store(self.conf)
self.store.configure()
self.store_specs = {'pool': 'fake_pool',
'image': 'fake_image',
'snapshot': 'fake_snapshot'}
self.location = rbd_store.StoreLocation(self.store_specs,
self.conf)
self.hash_algo = 'sha256'
def test_add_w_image_size_zero_less_resizes(self):
"""Assert that correct size is returned even though 0 was provided."""
# TODO(jokke): use the FakeData iterator once it exists.
data_len = 57 * units.Mi
data_iter = six.BytesIO(b'*' * data_len)
with mock.patch.object(rbd_store.rbd.Image, 'resize') as resize:
with mock.patch.object(rbd_store.rbd.Image, 'write') as write:
ret = self.store.add(
'fake_image_id', data_iter, 0, self.hash_algo)
# We expect to trim at the end so +1
expected = 1
expected_calls = []
data_len_temp = data_len
resize_amount = self.store.WRITE_CHUNKSIZE
while data_len_temp > 0:
expected_calls.append(resize_amount + (data_len -
data_len_temp))
data_len_temp -= resize_amount
resize_amount *= 2
expected += 1
self.assertEqual(expected, resize.call_count)
resize.assert_has_calls([mock.call(call) for call in
expected_calls])
expected = ([self.store.WRITE_CHUNKSIZE for i in range(int(
data_len / self.store.WRITE_CHUNKSIZE))] +
[(data_len % self.store.WRITE_CHUNKSIZE)])
actual = ([len(args[0]) for args, kwargs in
write.call_args_list])
self.assertEqual(expected, actual)
self.assertEqual(data_len,
resize.call_args_list[-1][0][0])
self.assertEqual(data_len, ret[1])
class TestStore(base.StoreBaseTest,
test_store_capabilities.TestStoreCapabilitiesChecking):