Enforce a maximum PER_THREAD_CACHE_SIZE in PerThreadCache

When calling operations that create a lot of ProjectControls (e.g.
ListProjects), we observe OOMs in our Gerrit jobs from time to time.
This commit puts a maximum limit on the cache size to prevent this
from happening.

The approach implemented is intentionally not an LRU: we want to keep
PerThreadCache simple and avoid adding a dependency on e.g. Guava's LRU cache.

Change-Id: Ic2f6e11719b52ee2112f72698e887cbf8ca254d0
This commit is contained in:
Patrick Hiesel 2018-05-07 14:59:04 +02:00
parent 864fa45d83
commit e638d589fa
2 changed files with 30 additions and 2 deletions

View File

@ -43,9 +43,19 @@ import java.util.function.Supplier;
* <p>Lastly, this class offers a cache, that requires callers to also provide a {@code Supplier} in * <p>Lastly, this class offers a cache, that requires callers to also provide a {@code Supplier} in
* case the object is not present in the cache, while {@code CurrentUser} provides a storage where * case the object is not present in the cache, while {@code CurrentUser} provides a storage where
* just retrieving stored values is a valid operation. * just retrieving stored values is a valid operation.
*
* <p>To prevent OOM errors on requests that would cache a lot of objects, this class enforces an
* internal limit after which no new elements are cached. All {@code get} calls are served by
* invoking the {@code Supplier} after that.
*/ */
public class PerThreadCache implements AutoCloseable { public class PerThreadCache implements AutoCloseable {
private static final ThreadLocal<PerThreadCache> CACHE = new ThreadLocal<>(); private static final ThreadLocal<PerThreadCache> CACHE = new ThreadLocal<>();
/**
* Cache at maximum 50 values per thread. This value was chosen arbitrarily. Some endpoints (like
* ListProjects) break the assumption that the data cached in a request is limited. To prevent
* this class from accumulating an unbound number of objects, we enforce this limit.
*/
private static final int PER_THREAD_CACHE_SIZE = 50;
/** /**
* Unique key for key-value mappings stored in PerThreadCache. The key is based on the value's * Unique key for key-value mappings stored in PerThreadCache. The key is based on the value's
@ -109,7 +119,7 @@ public class PerThreadCache implements AutoCloseable {
return cache != null ? cache.get(key, loader) : loader.get(); return cache != null ? cache.get(key, loader) : loader.get();
} }
private final Map<Key<?>, Object> cache = Maps.newHashMapWithExpectedSize(10); private final Map<Key<?>, Object> cache = Maps.newHashMapWithExpectedSize(PER_THREAD_CACHE_SIZE);
private PerThreadCache() {} private PerThreadCache() {}
@ -122,7 +132,9 @@ public class PerThreadCache implements AutoCloseable {
T value = (T) cache.get(key); T value = (T) cache.get(key);
if (value == null) { if (value == null) {
value = loader.get(); value = loader.get();
cache.put(key, value); if (cache.size() < PER_THREAD_CACHE_SIZE) {
cache.put(key, value);
}
} }
return value; return value;
} }

View File

@ -83,4 +83,20 @@ public class PerThreadCacheTest {
PerThreadCache.create(); PerThreadCache.create();
} }
} }
@Test
public void enforceMaxSize() {
  try (PerThreadCache cache = PerThreadCache.create()) {
    // Fill the cache up to its maximum size (PER_THREAD_CACHE_SIZE = 50 entries).
    for (int i = 0; i < 50; i++) {
      PerThreadCache.Key<String> key = PerThreadCache.Key.create(String.class, i);
      cache.get(key, () -> "cached value");
    }
    // Entries stored before the limit was reached must still be served from the
    // cache; without this assertion the test would pass even if caching were
    // disabled entirely.
    PerThreadCache.Key<String> cachedKey = PerThreadCache.Key.create(String.class, 0);
    assertThat(cache.get(cachedKey, () -> "directly served")).isEqualTo("cached value");
    // Beyond the limit, values are not persisted: every get() call invokes the
    // supplier, so the second lookup returns the second supplier's value.
    PerThreadCache.Key<String> key = PerThreadCache.Key.create(String.class, 1000);
    cache.get(key, () -> "new value");
    String value = cache.get(key, () -> "directly served");
    assertThat(value).isEqualTo("directly served");
  }
}
} }