@@ -8,6 +8,7 @@

 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.ClosePointInTimeRequest;
+import org.elasticsearch.action.search.ClosePointInTimeResponse;
 import org.elasticsearch.action.search.OpenPointInTimeRequest;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.action.search.TransportClosePointInTimeAction;
@@ -109,39 +110,9 @@ public void testSearcherId() throws Exception {

     public void testRetryPointInTime() throws Exception {
         final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
-        assertAcked(
-            indicesAdmin().prepareCreate(indexName)
-                .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5)).build())
-                .setMapping("""
-                    {"properties":{"created_date":{"type": "date", "format": "yyyy-MM-dd"}}}""")
-        );
-        final List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
         final int docCount = between(0, 100);
-        for (int i = 0; i < docCount; i++) {
-            indexRequestBuilders.add(prepareIndex(indexName).setSource("created_date", "2011-02-02"));
-        }
-        indexRandom(true, false, indexRequestBuilders);
-        assertThat(
-            indicesAdmin().prepareForceMerge(indexName).setOnlyExpungeDeletes(true).setFlush(true).get().getFailedShards(),
-            equalTo(0)
-        );
-        refresh(indexName);
-        // force merge with expunge deletes is not merging down to one segment only
-        forceMerge(false);
-
-        final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
-        createRepository(repositoryName, "fs");
-
-        final SnapshotId snapshotOne = createSnapshot(repositoryName, "snapshot-1", List.of(indexName)).snapshotId();
-        assertAcked(indicesAdmin().prepareDelete(indexName));
-
-        final int numberOfReplicas = between(0, 2);
-        final Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas).build();
-        internalCluster().ensureAtLeastNumDataNodes(numberOfReplicas + 1);
-
-        mountSnapshot(repositoryName, snapshotOne.getName(), indexName, indexName, indexSettings);
-        ensureGreen(indexName);
-
+        int numShards = between(1, 5);
+        createTestIndex(indexName, docCount, numShards);
         final OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest(indexName).indicesOptions(
             IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED
         ).keepAlive(TimeValue.timeValueMinutes(2));
@@ -172,4 +143,81 @@ public void testRetryPointInTime() throws Exception {
             client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet();
         }
     }
+
+    /**
+     * Test that, for searchable snapshots, we can retry PIT searches even after the PIT has been closed (which also simulates an expired PIT).
+     */
+    public void testRetryRemovedPointInTime() throws Exception {
+        final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
+        final int docCount = between(0, 100);
+        int numShards = between(1, 5);
+        createTestIndex(indexName, docCount, numShards);
+
+        final OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest(indexName).indicesOptions(
+            IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED
+        ).keepAlive(TimeValue.timeValueMinutes(1));
+        final BytesReference pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId();
+
+        try {
+            assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
+                assertThat(resp.pointInTimeId(), equalTo(pitId));
+                assertHitCount(resp, docCount);
+            });
+
+            // Remove the PIT contexts by closing the PIT; this mimics expired PIT contexts eventually being reaped.
+            ClosePointInTimeResponse closePointInTimeResponse = client().execute(
+                TransportClosePointInTimeAction.TYPE,
+                new ClosePointInTimeRequest(pitId)
+            ).actionGet();
+            assertEquals(numShards, closePointInTimeResponse.getNumFreed());
+
+            assertNoFailuresAndResponse(
+                prepareSearch().setQuery(new RangeQueryBuilder("created_date").gte("2011-01-01").lte("2011-12-12"))
+                    .setSearchType(SearchType.QUERY_THEN_FETCH)
+                    .setPreFilterShardSize(between(1, 10))
+                    .setAllowPartialSearchResults(true)
+                    .setPointInTime(new PointInTimeBuilder(pitId)),
+                resp -> {
+                    assertThat(resp.pointInTimeId(), equalTo(pitId));
+                    assertHitCount(resp, docCount);
+                }
+            );
+        } finally {
+            client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet();
+        }
+    }
+
+    private void createTestIndex(String indexName, int docCount, int numShards) throws Exception {
+        assertAcked(
+            indicesAdmin().prepareCreate(indexName)
+                .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards).build())
+                .setMapping("""
+                    {"properties":{"created_date":{"type": "date", "format": "yyyy-MM-dd"}}}""")
+        );
+        final List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
+        for (int i = 0; i < docCount; i++) {
+            indexRequestBuilders.add(prepareIndex(indexName).setSource("created_date", "2011-02-02"));
+        }
+        indexRandom(true, false, indexRequestBuilders);
+        assertThat(
+            indicesAdmin().prepareForceMerge(indexName).setOnlyExpungeDeletes(true).setFlush(true).get().getFailedShards(),
+            equalTo(0)
+        );
+        refresh(indexName);
+        // a force merge with expunge deletes does not merge down to a single segment
+        forceMerge(false);
+
+        final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
+        createRepository(repositoryName, "fs");
+
+        final SnapshotId snapshotOne = createSnapshot(repositoryName, "snapshot-1", List.of(indexName)).snapshotId();
+        assertAcked(indicesAdmin().prepareDelete(indexName));
+
+        final int numberOfReplicas = between(0, 2);
+        final Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas).build();
+        internalCluster().ensureAtLeastNumDataNodes(numberOfReplicas + 1);
+
+        mountSnapshot(repositoryName, snapshotOne.getName(), indexName, indexName, indexSettings);
+        ensureGreen(indexName);
+    }
 }
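
For context, both tests exercise the same point-in-time lifecycle: open a PIT, search with its id, close it. The sketch below condenses that sequence, reusing the internal test helpers already visible in the diff (client(), prepareSearch(), assertNoFailuresAndResponse()); the index name "my-index" and the expectedDocs count are hypothetical placeholders, not part of this change.

// Minimal PIT lifecycle sketch, assuming the same ESIntegTestCase-style helpers used in the diff.
// "my-index" and expectedDocs are hypothetical placeholders.
OpenPointInTimeRequest open = new OpenPointInTimeRequest("my-index").indicesOptions(
    IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED
).keepAlive(TimeValue.timeValueMinutes(2));
BytesReference pitId = client().execute(TransportOpenPointInTimeAction.TYPE, open).actionGet().getPointInTimeId();
try {
    // Any search carrying the PIT id runs against a fixed view of the shards.
    assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> assertHitCount(resp, expectedDocs));
} finally {
    // Release the reader contexts. The new test asserts that, for searchable snapshot
    // indices, a search with the old PIT id still succeeds after this close, presumably
    // because the mounted shards are immutable and the context can be re-opened over the same data.
    client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet();
}

A search against a regular index with a closed or expired PIT id would normally fail with a missing search context; the read-only nature of mounted searchable snapshot shards is what makes the transparent retry shown in testRetryRemovedPointInTime possible.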