IndexConfig: Add maxPages option

With an index system like Lucene, which only supports "pagination" by
skipping results, a search with a start offset actually has to iterate
through the skipped results. This means that paginating through a long
list of results is a quadratic operation.

Allow administrators to limit the amount of work done by such
pagination requests by capping the number of pages of results.
Paginating through a larger result set may thus require a larger
per-request limit (possibly requiring extra permission).

Change-Id: Ie62f506e3b479992358f70a3c6d685749f43f0da
Dave Borowitz 2015-04-22 17:35:34 -07:00
parent b82fbcb584
commit f56d365d04
4 changed files with 63 additions and 10 deletions
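
As a quick illustration of the new knob, an administrator would set it in
the [index] section of gerrit.config; the value below is only an example
(per the documentation change that follows, leaving it unset or setting 0
means no limit):

  [index]
    maxPages = 10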


@@ -2213,6 +2213,16 @@ result lists). Set to 0 for no limit.
 +
 Defaults to no limit.
 
+[[index.maxPages]]index.maxPages::
++
+Maximum number of pages of search results to allow, as index
+implementations may have to scan through large numbers of skipped
+results when searching with an offset. Requesting results starting past
+this threshold times the requested limit will result in an error. Set to
+0 for no limit.
++
+Defaults to no limit.
+
 ==== Lucene configuration
 
 Open and closed changes are indexed in separate indexes named


@@ -30,21 +30,30 @@ import org.eclipse.jgit.lib.Config;
 @AutoValue
 public abstract class IndexConfig {
   public static IndexConfig createDefault() {
-    return create(0);
+    return create(0, 0);
   }
 
   public static IndexConfig fromConfig(Config cfg) {
-    return create(cfg.getInt("index", null, "maxLimit", 0));
+    return create(
+        cfg.getInt("index", null, "maxLimit", 0),
+        cfg.getInt("index", null, "maxPages", 0));
   }
 
-  public static IndexConfig create(int maxLimit) {
-    if (maxLimit == 0) {
-      maxLimit = Integer.MAX_VALUE;
-    } else {
-      checkArgument(maxLimit > 0, "maxLimit must be positive: %s", maxLimit);
-    }
-    return new AutoValue_IndexConfig(maxLimit);
+  public static IndexConfig create(int maxLimit, int maxPages) {
+    return new AutoValue_IndexConfig(
+        checkLimit(maxLimit, "maxLimit"),
+        checkLimit(maxPages, "maxPages"));
+  }
+
+  private static int checkLimit(int limit, String name) {
+    if (limit == 0) {
+      return Integer.MAX_VALUE;
+    }
+    checkArgument(limit > 0, "%s must be positive: %s", name, limit);
+    return limit;
   }
 
   public abstract int maxLimit();
+
+  public abstract int maxPages();
 }
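
A minimal usage sketch of the updated IndexConfig API. The wrapper class and
the concrete values are hypothetical; only create(), fromConfig(), maxLimit()
and maxPages() come from the diff above, and Config is JGit's
org.eclipse.jgit.lib.Config:

// Sketch only: the import for IndexConfig is omitted because its package is
// not shown in this diff.
import org.eclipse.jgit.lib.Config;

class IndexConfigUsageSketch {
  static void readLimits() {
    Config gerritConfig = new Config();
    gerritConfig.setInt("index", null, "maxLimit", 500); // example value
    gerritConfig.setInt("index", null, "maxPages", 10);  // example value

    IndexConfig indexConfig = IndexConfig.fromConfig(gerritConfig);
    int maxLimit = indexConfig.maxLimit(); // 500
    int maxPages = indexConfig.maxPages(); // 10
    // An unset or 0 value comes back as Integer.MAX_VALUE ("no limit") via
    // checkLimit(); a negative value throws IllegalArgumentException.
  }
}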


@@ -132,6 +132,13 @@ public class QueryProcessor {
     if (limit == getBackendSupportedLimit()) {
       limit--;
     }
+
+    int page = (start / limit) + 1;
+    if (page > indexConfig.maxPages()) {
+      throw new QueryParseException(
+          "Cannot go beyond page " + indexConfig.maxPages() + " of results");
+    }
+
     Predicate<ChangeData> s = queryRewriter.rewrite(q, start, limit + 1);
     if (!(s instanceof ChangeDataSource)) {
       q = Predicate.and(open(), q);
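
A worked example of the page arithmetic above, using the same numbers as the
test added below (limit 10, index.maxPages 10); the wrapper class is purely
illustrative:

class PageCheckSketch {
  public static void main(String[] args) {
    int maxPages = 10; // index.maxPages, as set in the test config below
    int limit = 10;    // requested page size

    for (int start : new int[] {0, 99, 100}) {
      int page = (start / limit) + 1; // same arithmetic as QueryProcessor above
      boolean rejected = page > maxPages;
      System.out.println("start=" + start + " -> page " + page
          + (rejected ? " (rejected)" : " (allowed)"));
    }
  }
}

A start offset of 99 still falls on page 10 and is served, while offset 100
computes page 11 and triggers the QueryParseException.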


@@ -84,9 +84,19 @@ import java.util.concurrent.atomic.AtomicLong;
 @Ignore
 @RunWith(ConfigSuite.class)
 public abstract class AbstractQueryChangesTest {
+  @ConfigSuite.Default
+  public static Config defaultConfig() {
+    return updateConfig(new Config());
+  }
+
   @ConfigSuite.Config
   public static Config noteDbEnabled() {
-    return NotesMigration.allEnabledConfig();
+    return updateConfig(NotesMigration.allEnabledConfig());
+  }
+
+  private static Config updateConfig(Config cfg) {
+    cfg.setInt("index", null, "maxPages", 10);
+    return cfg;
   }
 
   @ConfigSuite.Parameter public Config config;
@@ -556,6 +566,19 @@ public abstract class AbstractQueryChangesTest {
     assertQuery(newQuery("status:new limit:2").withStart(3));
   }
 
+  @Test
+  public void maxPages() throws Exception {
+    TestRepository<InMemoryRepository> repo = createProject("repo");
+    Change change = newChange(repo, null, null, null, null).insert();
+
+    QueryRequest query = newQuery("status:new").withLimit(10);
+    assertQuery(query, change);
+    assertQuery(query.withStart(1));
+    assertQuery(query.withStart(99));
+    assertBadQuery(query.withStart(100));
+    assertQuery(query.withLimit(100).withStart(100));
+  }
+
   @Test
   public void updateOrder() throws Exception {
     clockStepMs = MILLISECONDS.convert(2, MINUTES);
@@ -1073,8 +1096,12 @@ public abstract class AbstractQueryChangesTest {
   }
 
   protected void assertBadQuery(Object query) throws Exception {
+    assertBadQuery(newQuery(query));
+  }
+
+  protected void assertBadQuery(QueryRequest query) throws Exception {
     try {
-      newQuery(query).get();
+      query.get();
       fail("expected BadRequestException for query: " + query);
     } catch (BadRequestException e) {
       // Expected.