Add team id to data move metadata and fetch team id in storage server. #12149

Open: wants to merge 3 commits into base: main.
fdbclient/ServerKnobs.cpp (3 additions, 0 deletions)
@@ -1118,6 +1118,9 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
init( FETCH_SHARD_BUFFER_BYTE_LIMIT, 20e6 ); if( randomize && BUGGIFY ) FETCH_SHARD_BUFFER_BYTE_LIMIT = 1;
init( FETCH_SHARD_UPDATES_BYTE_LIMIT, 2500000 ); if( randomize && BUGGIFY ) FETCH_SHARD_UPDATES_BYTE_LIMIT = 100;

// Storage Server with Physical Shard
init( SS_GET_DATA_MOVE_ID, false); if ( isSimulated ) SS_GET_DATA_MOVE_ID = SHARD_ENCODE_LOCATION_METADATA;

//Wait Failure
init( MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS, 250 ); if( randomize && BUGGIFY ) MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS = 2;
init( WAIT_FAILURE_DELAY_LIMIT, 1.0 ); if( randomize && BUGGIFY ) WAIT_FAILURE_DELAY_LIMIT = 5.0;
fdbclient/include/fdbclient/ServerKnobs.h (3 additions, 0 deletions)
@@ -1157,6 +1157,9 @@ class SWIFT_CXX_IMMORTAL_SINGLETON_TYPE ServerKnobs : public KnobsImpl<ServerKno
int FETCH_SHARD_BUFFER_BYTE_LIMIT;
int FETCH_SHARD_UPDATES_BYTE_LIMIT;

// Storage Server with Physical Shard
Review comment (Contributor): Would appreciate a more informative comment. Does this comment, as is, refer to SS_GET_DATA_MOVE_ID?

bool SS_GET_DATA_MOVE_ID;

// Wait Failure
int MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS;
double WAIT_FAILURE_DELAY_LIMIT;
fdbclient/include/fdbclient/StorageCheckpoint.h (4 additions, 1 deletion)
@@ -162,6 +162,7 @@ struct DataMoveMetaData {
Running = 2, // System keyspace has been modified, data move in action.
Completing = 3, // Data transfer has finished, finalizing system keyspace.
Deleting = 4, // Data move is cancelled.
Completed = 5, // Data move is finished, metadata pending deletion.
};

constexpr static FileIdentifier file_identifier = 13804362;
@@ -175,6 +176,7 @@ struct DataMoveMetaData {
int16_t phase; // DataMoveMetaData::Phase.
int8_t mode;
Optional<BulkLoadTaskState> bulkLoadTaskState; // set if the data move is a bulk load data move
Optional<std::unordered_map<std::string, std::string>> dcTeamIds; // map of dcId to teamId

DataMoveMetaData() = default;
DataMoveMetaData(UID id, Version version, KeyRange range) : id(id), version(version), priority(0), mode(0) {
@@ -202,7 +204,8 @@

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, id, version, ranges, priority, src, dest, checkpoints, phase, mode, bulkLoadTaskState);
serializer(
ar, id, version, ranges, priority, src, dest, checkpoints, phase, mode, bulkLoadTaskState, dcTeamIds);
}
};

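The new dcTeamIds field is an Optional map keyed by dcId, so readers have to handle the unset case (data moves created before this change). A minimal lookup sketch, assuming the field was populated by startMoveShards below; the helper name teamIdForDc is illustrative and not part of this PR:

#include <string>

#include "fdbclient/StorageCheckpoint.h"

// Returns the team id recorded for the given DC, or an empty string if the
// data move predates this change or the DC is not present in the map.
std::string teamIdForDc(const DataMoveMetaData& dataMove, const std::string& dcId) {
	if (!dataMove.dcTeamIds.present()) {
		return std::string();
	}
	const auto& teamIds = dataMove.dcTeamIds.get();
	auto it = teamIds.find(dcId);
	return it == teamIds.end() ? std::string() : it->second;
}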
fdbclient/include/fdbclient/StorageServerShard.h (16 additions, 6 deletions)
@@ -44,18 +44,27 @@ struct StorageServerShard {
const uint64_t id,
const uint64_t desiredId,
ShardState shardState,
Optional<UID> moveInShardId)
UID moveInShardId)
: range(range), version(version), id(id), desiredId(desiredId), shardState(shardState),
moveInShardId(moveInShardId) {}
StorageServerShard(KeyRange range,
Version version,
const uint64_t id,
const uint64_t desiredId,
ShardState shardState)
: range(range), version(version), id(id), desiredId(desiredId), shardState(shardState) {}
ShardState shardState,
std::string teamId)
: range(range), version(version), id(id), desiredId(desiredId), shardState(shardState), teamId(teamId) {
if (shardState != NotAssigned) {
ASSERT_ABORT(id != 0UL);
ASSERT_ABORT(desiredId != 0UL);
}
if (shardState == ReadWrite && version != 0) {
ASSERT_ABORT(!teamId.empty());
}
}

static StorageServerShard notAssigned(KeyRange range, Version version = 0) {
return StorageServerShard(range, version, 0, 0, NotAssigned);
return StorageServerShard(range, version, 0, 0, NotAssigned, (std::string) "");
}

ShardState getShardState() const { return static_cast<ShardState>(this->shardState); };
@@ -86,7 +95,7 @@ struct StorageServerShard {
std::string res = "StorageServerShard: [Range]: " + Traceable<KeyRangeRef>::toString(range) +
" [Shard ID]: " + format("%016llx", this->id) + " [Version]: " + std::to_string(version) +
" [State]: " + getShardStateString() +
" [Desired Shard ID]: " + format("%016llx", this->desiredId);
" [Desired Shard ID]: " + format("%016llx", this->desiredId) + " [ Team ID ]: " + teamId;
if (moveInShardId.present()) {
res += " [MoveInShard ID]: " + this->moveInShardId.get().toString();
}
@@ -95,7 +104,7 @@

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, range, version, id, desiredId, shardState, moveInShardId);
serializer(ar, range, version, id, desiredId, shardState, moveInShardId, teamId);
}

KeyRange range;
@@ -104,6 +113,7 @@
uint64_t desiredId; // The intended shard ID.
int8_t shardState;
Optional<UID> moveInShardId; // If present, it is the associated MoveInShardMetaData.
std::string teamId;
};

#endif
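The new teamId member comes with assertions in the constructor: any assigned shard must carry non-zero shard ids, and a ReadWrite shard at a non-zero version must carry a non-empty team id. A minimal usage sketch, assuming the FDB headers are on the include path; the key literals, ids, and team id string are made up for illustration:

#include "fdbclient/StorageServerShard.h"

void exampleShardWithTeamId() {
	// An assigned, read-write shard at a non-zero version must name its team,
	// otherwise the new ASSERT_ABORT checks in the constructor fire.
	StorageServerShard shard(KeyRangeRef("a"_sr, "m"_sr),
	                         /*version=*/100,
	                         /*id=*/0x1ULL,
	                         /*desiredId=*/0x1ULL,
	                         StorageServerShard::ReadWrite,
	                         /*teamId=*/"srv1,srv2,srv3");

	// Unassigned ranges go through the helper, which passes an empty team id.
	StorageServerShard unassigned = StorageServerShard::notAssigned(KeyRangeRef("m"_sr, "z"_sr));
}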
fdbserver/MoveKeys.actor.cpp (35 additions, 2 deletions)
@@ -1712,7 +1712,7 @@ ACTOR static Future<Void> startMoveShards(Database occ,
serverListEntries.push_back(tr.get(serverListKeyFor(servers[s])));
}
std::vector<Optional<Value>> serverListValues = wait(getAll(serverListEntries));

state std::unordered_map<std::string, std::vector<std::string>> dcServers;
for (int s = 0; s < serverListValues.size(); s++) {
if (!serverListValues[s].present()) {
// Attempt to move onto a server that isn't in serverList (removed or never added to the
Expand All @@ -1721,6 +1721,13 @@ ACTOR static Future<Void> startMoveShards(Database occ,
// TODO(psm): Mark the data move as 'deleting'.
throw move_to_removed_server();
}
auto si = decodeServerListValue(serverListValues[s].get());
ASSERT(si.id() == servers[s]);
auto it = dcServers.find(si.locality.describeDcId());
if (it == dcServers.end()) {
dcServers[si.locality.describeDcId()] = std::vector<std::string>();
}
dcServers[si.locality.describeDcId()].push_back(si.id().shortString());
}

currentKeys = KeyRangeRef(begin, keys.end);
@@ -1733,6 +1740,15 @@
state Key endKey = old.back().key;
currentKeys = KeyRangeRef(currentKeys.begin, endKey);

if (ranges.front() != currentKeys) {
TraceEvent("MoveShardsPartialRange")
.detail("ExpectedRange", ranges.front())
.detail("ActualRange", currentKeys)
.detail("DataMoveId", dataMoveId)
.detail("RowLimit", SERVER_KNOBS->MOVE_SHARD_KRM_ROW_LIMIT)
.detail("ByteLimit", SERVER_KNOBS->MOVE_SHARD_KRM_BYTE_LIMIT);
}

// Check that enough servers for each shard are in the correct state
state RangeResult UIDtoTagMap = wait(tr.getRange(serverTagKeys, CLIENT_KNOBS->TOO_MANY));
ASSERT(!UIDtoTagMap.more && UIDtoTagMap.size() < CLIENT_KNOBS->TOO_MANY);
@@ -1806,6 +1822,7 @@ ACTOR static Future<Void> startMoveShards(Database occ,
TraceEvent(
SevWarn, "StartMoveShardsCancelConflictingDataMove", relocationIntervalId)
.detail("Range", rangeIntersectKeys)
.detail("CurrentDataMoveRange", ranges[0])
.detail("DataMoveID", dataMoveId.toString())
.detail("ExistingDataMoveID", destId.toString());
wait(cleanUpDataMove(occ, destId, lock, startMoveKeysLock, keys, ddEnabledState));
@@ -1868,6 +1885,20 @@ ACTOR static Future<Void> startMoveShards(Database occ,
dataMove.ranges.clear();
dataMove.ranges.push_back(KeyRangeRef(keys.begin, currentKeys.end));
dataMove.dest.insert(servers.begin(), servers.end());
dataMove.dcTeamIds = std::unordered_map<std::string, std::string>();
for (auto& [dc, serverIds] : dcServers) {
std::sort(serverIds.begin(), serverIds.end());
std::string teamId;
for (const auto& serverId : serverIds) {
if (teamId.size() == 0) {
teamId = serverId;
} else {
teamId += "," + serverId;
}
}
// Use the concatenated server ids as the team id to avoid conflicts.
dataMove.dcTeamIds.get()[dc] = teamId;
}
}

if (currentKeys.end == keys.end) {
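The loop added above builds each DC's team id by sorting the destination servers' short ids and joining them with commas, so a given set of servers always maps to the same id. A standalone sketch of that derivation, using only the standard library (the function name deriveDcTeamIds and the std::map types are illustrative, not FDB code):

#include <algorithm>
#include <map>
#include <string>
#include <vector>

// Derive a deterministic per-DC team id from the destination servers in that DC:
// sort the short server ids and concatenate them with commas.
std::map<std::string, std::string> deriveDcTeamIds(std::map<std::string, std::vector<std::string>> dcServers) {
	std::map<std::string, std::string> dcTeamIds;
	for (auto& [dc, serverIds] : dcServers) {
		std::sort(serverIds.begin(), serverIds.end());
		std::string teamId;
		for (const auto& serverId : serverIds) {
			teamId += teamId.empty() ? serverId : "," + serverId;
		}
		dcTeamIds[dc] = teamId;
	}
	return dcTeamIds;
}

For example, servers { "bbbb", "aaaa" } in dc0 yield the team id "aaaa,bbbb" regardless of the order in which the server list was read.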
@@ -2355,7 +2386,9 @@ ACTOR static Future<Void> finishMoveShards(Database occ,
dataMove.bulkLoadTaskState = newBulkLoadTaskState;
}
wait(deleteCheckpoints(&tr, dataMove.checkpoints, dataMoveId));
tr.clear(dataMoveKeyFor(dataMoveId));
// tr.clear(dataMoveKeyFor(dataMoveId));
dataMove.phase = DataMoveMetaData::Completed;
tr.set(dataMoveKeyFor(dataMoveId), dataMoveValue(dataMove));
TraceEvent(sevDm, "FinishMoveShardsDeleteMetaData", relocationIntervalId)
.detail("DataMove", dataMove.toString());
} else if (!bulkLoadTaskState.present()) {