2018-04-21 12:04:24 -04:00
|
|
|
// Copyright (C) 2018 The Android Open Source Project
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
syntax = "proto3";
|
|
|
|
|
|
|
|
package gerrit.cache;
|
|
|
|
|
|
|
|
option java_package = "com.google.gerrit.server.cache.proto";
|
|
|
|
|
|
|
|
// Serialized form of com.google.gerrit.server.change.ChangeKindCacheImpl.Key.
|
|
|
|
// Next ID: 4
|
|
|
|
message ChangeKindKeyProto {
  // Presumably raw ObjectId bytes of the two compared commits; verify against
  // ChangeKindCacheImpl.Key.
  bytes prior = 1;
  bytes next = 2;

  string strategy_name = 3;
}
|
2018-05-01 09:27:46 -04:00
|
|
|
|
|
|
|
// Serialized form of
|
|
|
|
// com.google.gerrit.server.change.MergeabilityCacheImpl.EntryKey.
|
|
|
|
// Next ID: 5
|
|
|
|
message MergeabilityKeyProto {
  // Presumably raw ObjectId bytes; verify against
  // MergeabilityCacheImpl.EntryKey.
  bytes commit = 1;
  bytes into = 2;

  string submit_type = 3;

  string merge_strategy = 4;
}
|
2018-05-01 10:22:02 -04:00
|
|
|
|
|
|
|
// Serialized form of com.google.gerrit.extensions.auth.oauth.OAuthToken.
|
|
|
|
// Next ID: 6
|
|
|
|
message OAuthTokenProto {
  string token = 1;

  string secret = 2;

  string raw = 3;

  // Epoch millis.
  int64 expires_at_millis = 4;

  string provider_id = 5;
}
|
Optionally persist ChangeNotesCache
Loading ChangeNotes into the ChangeNotesCache should generally be pretty
fast when the underlying git repository storage is fast, but there are
some situations where that is not the case:
* The repo hasn't been GC'ed in a while, so may contain a lot of loose
objects.
* On googlesource.com using the JGit DFS backend, when GC has happened
recently and the DFS block cache is cold.
These problems are particularly noticeable on a cold server start.
As an optional optimization, allow persisting the ChangeNotesCache. For
installations where cache loading latency hasn't proven to be a problem,
it may not be worth the disk space, but we think it will make a
difference for googlesource.com.
Writing the necessary protos was a bit of work, but actually the
marginal cost of tweaking fields should be relatively low, and any
change should cause a small test to fail, so we should be able to detect
any changes as they arise. I explicitly chose to reuse existing
serialization mechanisms where possible (ProtobufCodecs, JSON), to limit
the size of this change. This is just cache data, so it's not like it
has to be particularly pretty or long-lasting.
This change is not intended to indicate we are giving up on optimizing
loading ChangeNotes from storage, but is more of a bandaid for fixing
performance problems in production today.
Change-Id: I1ffe15fe56b6822b7f9af55635b063793e66d6fd
2018-05-01 12:31:48 -04:00
|
|
|
|
|
|
|
|
|
|
|
// Serialized form of com.google.gerrit.server.notedb.ChangeNotesCache.Key.
|
|
|
|
// Next ID: 4
|
|
|
|
message ChangeNotesKeyProto {
  string project = 1;

  int32 change_id = 2;

  // Presumably raw ObjectId bytes of the meta ref SHA-1; verify against
  // ChangeNotesCache.Key.
  bytes id = 3;
}
|
|
|
|
|
|
|
|
// Serialized form of com.google.gerrit.server.notedb.ChangeNotesState.
|
|
|
|
//
|
|
|
|
// Note on embedded protos: this is just for storing in a cache, so some formats
|
|
|
|
// were chosen for ease of coding the initial implementation. In particular,
// where
|
|
|
|
// there already exists another serialization mechanism in Gerrit for
|
|
|
|
// serializing a particular field, we use that rather than defining a new proto
|
2018-12-14 16:06:41 +01:00
|
|
|
// type. This includes types that can be serialized to proto using
|
|
|
|
// ProtoConverters as well as NoteDb and indexed types that are serialized using
|
|
|
|
// JSON. We can always revisit this decision later; it just requires bumping the
|
|
|
|
// cache version.
|
Optionally persist ChangeNotesCache
Loading ChangeNotes into the ChangeNotesCache should generally be pretty
fast when the underlying git repository storage is fast, but there are
some situations where that is not the case:
* The repo hasn't been GC'ed in a while, so may contain a lot of loose
objects.
* On googlesource.com using the JGit DFS backend, when GC has happened
recently and the DFS block cache is cold.
These problems are particularly noticeable on a cold server start.
As an optional optimization, allow persisting the ChangeNotesCache. For
installations where cache loading latency hasn't proven to be a problem,
it may not be worth the disk space, but we think it will make a
difference for googlesource.com.
Writing the necessary protos was a bit of work, but actually the
marginal cost of tweaking fields should be relatively low, and any
change should cause a small test to fail, so we should be able to detect
any changes as they arise. I explicitly chose to reuse existing
serialization mechanisms where possible (ProtobufCodecs, JSON), to limit
the size of this change. This is just cache data, so it's not like it
has to be particularly pretty or long-lasting.
This change is not intended to indicate we are giving up on optimizing
loading ChangeNotes from storage, but is more of a bandaid for fixing
performance problems in production today.
Change-Id: I1ffe15fe56b6822b7f9af55635b063793e66d6fd
2018-05-01 12:31:48 -04:00
|
|
|
//
|
|
|
|
// Note on nullability: there are a lot of nullable fields in ChangeNotesState
|
|
|
|
// and its dependencies. It's likely we could make some of them non-nullable,
|
|
|
|
// but each one of those would be a potentially significant amount of cleanup,
|
|
|
|
// and there's no guarantee we'd be able to eliminate all of them. (For a less
|
|
|
|
// complex class, it's likely the cleanup would be more feasible.)
|
|
|
|
//
|
|
|
|
// Instead, we just take the tedious yet simple approach of having a "has_foo"
|
|
|
|
// field for each nullable field "foo", indicating whether or not foo is null.
|
|
|
|
//
|
2020-02-17 16:25:43 +01:00
|
|
|
// Next ID: 24
|
Optionally persist ChangeNotesCache
Loading ChangeNotes into the ChangeNotesCache should generally be pretty
fast when the underlying git repository storage is fast, but there are
some situations where that is not the case:
* The repo hasn't been GC'ed in a while, so may contain a lot of loose
objects.
* On googlesource.com using the JGit DFS backend, when GC has happened
recently and the DFS block cache is cold.
These problems are particularly noticeable on a cold server start.
As an optional optimization, allow persisting the ChangeNotesCache. For
installations where cache loading latency hasn't proven to be a problem,
it may not be worth the disk space, but we think it will make a
difference for googlesource.com.
Writing the necessary protos was a bit of work, but actually the
marginal cost of tweaking fields should be relatively low, and any
change should cause a small test to fail, so we should be able to detect
any changes as they arise. I explicitly chose to reuse existing
serialization mechanisms where possible (ProtobufCodecs, JSON), to limit
the size of this change. This is just cache data, so it's not like it
has to be particularly pretty or long-lasting.
This change is not intended to indicate we are giving up on optimizing
loading ChangeNotes from storage, but is more of a bandaid for fixing
performance problems in production today.
Change-Id: I1ffe15fe56b6822b7f9af55635b063793e66d6fd
2018-05-01 12:31:48 -04:00
|
|
|
message ChangeNotesStateProto {
  // Effectively required, even though the corresponding ChangeNotesState field
  // is optional, since the field is only absent when NoteDb is disabled, in
  // which case attempting to use the ChangeNotesCache is programmer error.
  bytes meta_id = 1;

  int32 change_id = 2;

  // Next ID: 26
  message ChangeColumnsProto {
    string change_key = 1;

    // Epoch millis.
    int64 created_on_millis = 2;

    // Epoch millis.
    int64 last_updated_on_millis = 3;

    int32 owner = 4;

    string branch = 5;

    int32 current_patch_set_id = 6;
    bool has_current_patch_set_id = 7;

    string subject = 8;

    string topic = 9;
    bool has_topic = 10;

    string original_subject = 11;
    bool has_original_subject = 12;

    string submission_id = 13;
    bool has_submission_id = 14;

    reserved 15;  // assignee
    reserved 16;  // has_assignee

    string status = 17;
    bool has_status = 18;

    bool is_private = 19;

    bool work_in_progress = 20;

    bool review_started = 21;

    int32 revert_of = 22;
    bool has_revert_of = 23;

    string cherry_pick_of = 24;
    bool has_cherry_pick_of = 25;
  }

  // Effectively required, even though the corresponding ChangeNotesState field
  // is optional, since the field is only absent when NoteDb is disabled, in
  // which case attempting to use the ChangeNotesCache is programmer error.
  ChangeColumnsProto columns = 3;

  reserved 4;  // past_assignee

  repeated string hashtag = 5;

  // Raw PatchSet proto as produced by PatchSetProtoConverter.
  repeated bytes patch_set = 6;

  // Raw PatchSetApproval proto as produced by PatchSetApprovalProtoConverter.
  repeated bytes approval = 7;

  // Next ID: 4
  message ReviewerSetEntryProto {
    string state = 1;

    int32 account_id = 2;

    // Epoch millis.
    int64 timestamp_millis = 3;
  }

  repeated ReviewerSetEntryProto reviewer = 8;

  // Next ID: 4
  message ReviewerByEmailSetEntryProto {
    string state = 1;

    string address = 2;

    // Epoch millis.
    int64 timestamp_millis = 3;
  }

  repeated ReviewerByEmailSetEntryProto reviewer_by_email = 9;

  repeated ReviewerSetEntryProto pending_reviewer = 10;

  repeated ReviewerByEmailSetEntryProto pending_reviewer_by_email = 11;

  repeated int32 past_reviewer = 12;

  // Next ID: 5
  message ReviewerStatusUpdateProto {
    // Epoch millis.
    int64 timestamp_millis = 1;

    int32 updated_by = 2;

    int32 reviewer = 3;

    string state = 4;
  }

  repeated ReviewerStatusUpdateProto reviewer_update = 13;

  // JSON produced from
  // com.google.gerrit.server.index.change.ChangeField.StoredSubmitRecord.
  repeated string submit_record = 14;

  // Raw ChangeMessage proto as produced by ChangeMessageProtoConverter.
  repeated bytes change_message = 15;

  // JSON produced from com.google.gerrit.entities.Comment.
  repeated string published_comment = 16;

  reserved 17;  // read_only_until
  reserved 18;  // has_read_only_until

  // Number of updates to the change's meta ref.
  int32 update_count = 19;

  string server_id = 20;
  bool has_server_id = 21;

  message AssigneeStatusUpdateProto {
    // Epoch millis.
    int64 timestamp_millis = 1;

    int32 updated_by = 2;

    int32 current_assignee = 3;
    bool has_current_assignee = 4;
  }

  repeated AssigneeStatusUpdateProto assignee_update = 22;

  // An update to the attention set of the change. See class AttentionSetUpdate
  // for context.
  message AttentionSetUpdateProto {
    // Epoch millis.
    int64 timestamp_millis = 1;

    int32 account = 2;

    // Maps to enum AttentionSetUpdate.Operation
    string operation = 3;

    string reason = 4;
  }

  repeated AttentionSetUpdateProto attention_set_update = 23;
}
|
2018-05-18 08:11:22 -04:00
|
|
|
|
|
|
|
// Serialized form of com.google.gerrit.server.query.change.ConflictKey
|
|
|
|
message ConflictKeyProto {
|
|
|
|
bytes commit = 1;
|
|
|
|
bytes other_commit = 2;
|
|
|
|
string submit_type = 3;
|
|
|
|
bool content_merge = 4;
|
|
|
|
}
|
Serialize TagSetHolder with protobuf
TagSetHolder and TagSet have some slightly exotic properties:
* A subclass of AtomicReference<ObjectId> with additional fields,
CachedRef.
* An ObjectIdOwnerMap, which requires a custom subclass of ObjectId
with additional fields.
* BitSets.
* Volatile field references.
However, it still boils down to collections of value types, so the
resulting TagSetProto structure is pretty straightforward.
The most annoying thing is that the AtomicReference and ObjectId
subclasses can't reasonably implement equals(), so the tests need to
have more detailed assertions which reach into what would otherwise be
private fields.
While we're in there, eliminate the intermediate EntryVal class, which
was serving no purpose other than to hold the readObject/writeObject
methods for Java serialization.
It is quite possible that this change will be slower to deserialize than
using Java serialization, since it was previously able to directly
deserialize the internal data structures, whereas we now have to build
these structures piece by piece. However, as with the rest of the
serialization code, we assume that proto is good enough until proven
otherwise.
Beyond that, we don't attempt to further rework the tag cache types or
the cache as a whole. In particular:
* Continue to use volatile types to handle incrementally updating
specific cache entries.
* Using composition instead of inheritance for CachedRef is out of
scope. However, note that using protobuf for serialization means
that we can make this change without flushing the cache.
* Using a less exotic type than ObjectIdOwnerMap would probably
require some benchmarking to prove that it's worth making the
change.
Change-Id: I08623b3f51ef1a0541559bbb2360c0d06a9de9d4
2018-07-23 15:28:22 -07:00
|
|
|
|
|
|
|
// Serialized form of com.google.gerrit.server.query.git.TagSetHolder.
|
|
|
|
// Next ID: 3
|
|
|
|
message TagSetHolderProto {
|
|
|
|
string project_name = 1;
|
|
|
|
|
|
|
|
// Next ID: 4
|
|
|
|
message TagSetProto {
|
|
|
|
string project_name = 1;
|
|
|
|
|
|
|
|
// Next ID: 3
|
|
|
|
message CachedRefProto {
|
|
|
|
bytes id = 1;
|
|
|
|
int32 flag = 2;
|
|
|
|
}
|
|
|
|
map<string, CachedRefProto> ref = 2;
|
|
|
|
|
|
|
|
// Next ID: 3
|
|
|
|
message TagProto {
|
|
|
|
bytes id = 1;
|
|
|
|
bytes flags = 2;
|
|
|
|
}
|
|
|
|
repeated TagProto tag = 3;
|
|
|
|
}
|
|
|
|
TagSetProto tags = 2;
|
|
|
|
}
|
Optionally persist ExternalIdCache
A significant source of latency on googlesource.com is reading in new
pack files from persistent storage to parse refs/meta/external-ids,
sometimes several seconds per load in the case of a virtual host with
thousands of users. This crops up when external IDs are updated as well
as on cold server starts. Both of these instances are pretty frequent:
external IDs are updated every time a new user registers, and servers
are typically updated daily. Serializing this cache is a relatively
cheap engineering cost to avoid these multi-second stalls, which are
quite annoying for users.
A server updating the external-ids ref bypasses a cache load, and
instead updates the state in-memory, then puts the result back into the
cache at the new ObjectId. When persisting the cache, this put also puts
the result into persistent storage. In a multi-master environment, there
is a race window between updating the ref value and putting the new
persistent object, which means other masters reading the ref value may
not find the persisted object. If multiple servers are receiving
concurrent requests, there's a good chance one or more masters will have
to re-load the data from NoteDb. This can be improved, but requires more
surgery to ExternalIdCache to replace the value in the cache prior to
committing the ref update.
Change-Id: I31d31d8ed490d01ce963a8162afba3daf9c1efff
2018-08-21 13:43:10 -07:00
|
|
|
|
|
|
|
// Serialized form of
|
|
|
|
// com.google.gerrit.server.account.externalids.AllExternalIds.
|
|
|
|
// Next ID: 2
|
|
|
|
message AllExternalIdsProto {
|
|
|
|
// Next ID: 6
|
|
|
|
message ExternalIdProto {
|
|
|
|
string key = 1;
|
|
|
|
int32 accountId = 2;
|
|
|
|
string email = 3;
|
|
|
|
string password = 4;
|
|
|
|
bytes blobId = 5;
|
|
|
|
}
|
|
|
|
repeated ExternalIdProto external_id = 1;
|
|
|
|
}
|
2019-02-19 09:09:22 +01:00
|
|
|
|
|
|
|
// Key for com.google.gerrit.server.git.PureRevertCache.
|
|
|
|
// Next ID: 4
|
|
|
|
message PureRevertKeyProto {
|
|
|
|
string project = 1;
|
|
|
|
bytes claimed_original = 2;
|
|
|
|
bytes claimed_revert = 3;
|
|
|
|
}
|
2020-03-05 15:23:29 +01:00
|
|
|
|
|
|
|
// Key for com.google.gerrit.server.account.ProjectWatches.ProjectWatcheKey.
|
|
|
|
// Next ID: 3
|
|
|
|
message ProjectWatchKeyProto {
|
|
|
|
string project = 1;
|
|
|
|
string filter = 2;
|
|
|
|
}
|
2020-03-05 16:24:07 +01:00
|
|
|
|
|
|
|
// Serialized form of
|
|
|
|
// com.google.gerrit.entities.Account.
|
|
|
|
// Next ID: 9
|
|
|
|
message AccountProto {
|
|
|
|
int32 id = 1;
|
|
|
|
int64 registered_on = 2;
|
|
|
|
string full_name = 3;
|
|
|
|
string display_name = 4;
|
|
|
|
string preferred_email = 5;
|
|
|
|
bool inactive = 6;
|
|
|
|
string status = 7;
|
|
|
|
string meta_id = 8;
|
|
|
|
}
|